sqlfluff-3.4.2 source tree (commit 5f6d8626c8c13537866ec476e82bb9781afb32ac)

==> sqlfluff-3.4.2/.dockerignore <==

# Ignore IDE files
.vscode
.idea
/.sqlfluff
**/.DS_Store

# Ignore Python cache and prebuilt things
.cache
__pycache__
*.egg-info
*.pyc
build
_build
dist
.pytest_cache

# Ignore the Environment
env
.tox
venv
.venv
.python-version

# Ignore coverage reports
.coverage
.coverage.*
coverage.xml
htmlcov
*.cover

# Ignore test reports
.test-reports
test-reports

# Ignore root testing sql & python files
/test*.sql
/test*.py
/.hypothesis/

# Ignore dbt outputs from testing
/target

# Ignore conda environment.yml contributors might be using and direnv config
environment.yml
.envrc
**/*FIXED.sql

# Others
pip-log.txt
pip-delete-this-directory.txt
*.log
.git
.mypy_cache
.pytest_cache

==> sqlfluff-3.4.2/.editorconfig <==

# editorconfig.org
root = true

[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{html,md,js,css}]
indent_size = 2

[*.py]
indent_size = 4

# Don't correct indentation for sql and yaml files as sometimes want them wrong for tests
[*.{yml,yaml,sql}]
indent_style = unset

# Some specific tests with trailing newlines
# If adding any exceptions here, make sure to add them to .pre-commit-config.yaml as well
[test/fixtures/templater/jinja_l_metas/0{01,03,04,05,07,08,11}.sql]
indent_style = unset
insert_final_newline = unset

[test/fixtures/linter/sqlfluffignore/*/*.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[test/fixtures/config/inheritance_b/{,nested/}example.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[trailing_newlines.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/multiple_trailing_newline.sql]
indent_style = unset
insert_final_newline = unset

[plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/{,dbt_utils_0.8.0/}last_day.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[test/fixtures/linter/indentation_errors.sql,test/fixtures/templater/jinja_d_roundtrip/test.sql]
trim_trailing_whitespace = false

[*.rst]
indent_size = 3

==> sqlfluff-3.4.2/.gitattributes <==

# We'll let Git's auto-detection algorithm infer if a file is text. If it is,
# enforce LF line endings regardless of OS or git configurations.
* text=auto eol=lf

# Allow Batch files to be CRLF:
*.bat text eol=crlf

# Isolate binary files in case the auto-detection algorithm fails and
# marks them as text files (which could brick them).
*.{png,jpg,jpeg,gif,webp,woff,woff2} binary

# Linguist was excluding our test suite from the repo language statistics
# and as a result the repo indicated that no SQL is present in the repo.
# Information on overrides can be found here:
# https://github.com/github/linguist/blob/master/docs/overrides.md
test/** linguist-vendored=false
*.sql linguist-language=SQL linguist-detectable

==> sqlfluff-3.4.2/.github/FUNDING.yml <==

# sqlfluff is free to use to improve sql in whatever context you
# wish to use it.

# The BEST way to support sqlfluff if you can is to contribute TIME.
# sqlfluff is a community project which needs the community to contribute
# to if it's going to achieve its goals.
# See CONTRIBUTING.md for more details.

# If you'd like to contribute something, but your circumstances don't
# allow you to commit time, then financial support is always welcome.
# Anything you contribute will go toward supporting the project, either
# as a donation toward infrastructure and hosting costs, or to enable
# maintainers to spend more time on the project.

# For more details on how this money is used, see the GitHub sponsor
# page for sqlfluff at https://github.com/sponsors/sqlfluff.

github: sqlfluff

==> sqlfluff-3.4.2/.github/ISSUE_TEMPLATE/bug-report.yml <==

name: Bug report
description: Report a bug to help improve SQLFluff
labels: [ "bug" ]
body:
  - type: markdown
    attributes:
      value: |
        Use this bug report template to report issues with SQLFluff functionality,
        including missing syntax support for any of our currently supported dialects.
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: What Happened
      description: Describe what happened.
      placeholder: >
        Please provide the context in which the problem occurred and explain
        what happened.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behaviour
      description: What is your expected behaviour?
      placeholder: Please explain what behaviour you expected.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Observed Behaviour
      description: What is your observed behaviour?
      placeholder: >
        Please explain what you observed, and why you think the behaviour is
        erroneous. It is extremely helpful if you include SQL output and
        logging output with exact error messages, stack traces, etc.
    validations:
      required: true
  - type: textarea
    attributes:
      label: How to reproduce
      description: >
        What should we do to reproduce the problem?
      placeholder: >
        Please make sure you provide a reproducible step-by-step case of how
        to reproduce the problem, including the exact command(s) you ran as
        well as the `.sql` file, as minimally and precisely as possible. Keep
        in mind we do not have access to your deployment. If the issue
        requires more than two files (i.e. `.sql` file and `.sqlfluff` files)
        to reproduce, please consider providing a GitHub repo instead.
        Unfortunately non-reproducible issues will have to be closed.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Dialect
      description: What SQLFluff dialect did you use?
    validations:
      required: true
  - type: textarea
    attributes:
      label: Version
      description: Which SQLFluff version did you use?
      placeholder: >
        * Include the output of `sqlfluff --version` along with your Python version.

        * If you are using dbt, then please additionally include the
          sqlfluff-templater-dbt and dbt package versions.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Configuration
      description: Include your SQLFluff configuration (e.g. `.sqlfluff`, `.sqlfluffignore`) here
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We require
        that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

==> sqlfluff-3.4.2/.github/ISSUE_TEMPLATE/config.yml <==

blank_issues_enabled: false

==> sqlfluff-3.4.2/.github/ISSUE_TEMPLATE/documentation.yml <==

name: Documentation Issue
description: Report an issue or a suggestion with our documentation to help improve SQLFluff
labels: [ "documentation" ]
body:
  - type: markdown
    attributes:
      value: |
        Use this documentation template to report issues with SQLFluff
        documentation, on [our website](https://docs.sqlfluff.com/en/stable/),
        GitHub, or within our command line.
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: Links or command line
      description: What do you want to happen?
      placeholder: >
        * If you found it on our website, the link may start with `https://docs.sqlfluff.com/en/stable/`.

        * If you found it in CLI components, your command may start with `sqlfluff `.
  - type: textarea
    attributes:
      label: Issue/Suggested Improvement
      description: The issue you found or the improvement you suggest.
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We require
        that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

==> sqlfluff-3.4.2/.github/ISSUE_TEMPLATE/enhancement.yml <==

name: Enhancement
description: Suggest an enhancement to help improve SQLFluff
labels: [ "enhancement" ]
body:
  - type: markdown
    attributes:
      value: >
        Use this enhancement template to suggest new features or functionality
        for SQLFluff. Please note that missing syntax support for any of our
        currently supported dialects should instead be filed as a
        [Bug Report](https://github.com/sqlfluff/sqlfluff/issues/new?assignees=&labels=bug&template=bug-report.yml).
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: Description
      description: A short description of the feature
    validations:
      required: true
  - type: textarea
    attributes:
      label: Use case
      description: What do you want to happen?
      placeholder: >
        Rather than telling us how you might implement this feature, try to
        take a step back and describe what you are trying to achieve.
  - type: textarea
    attributes:
      label: Dialect
      description: If the enhancement relates to a particular dialect, which one?
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We require
        that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

==> sqlfluff-3.4.2/.github/PULL_REQUEST_TEMPLATE.md <==

### Brief summary of the change made

### Are there any other side effects of this change that we should be aware of?

### Pull Request checklist

- [ ] Please confirm you have completed any of the necessary steps below.

- Included test cases to demonstrate any code changes, which may be one or more of the following:
  - `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
  - `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `tox -e generate-fixture-yml`).
  - Full autofix test cases in `test/fixtures/linter/autofix`.
  - Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.
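For contributors working through the checklist above, the parser fixture workflow looks roughly like this. A minimal sketch: the fixture directory and the tox environment are taken from the template above, but the dialect (`ansi`) and the file name are illustrative placeholders.

```sh
# Add a new SQL parser test case for a dialect (placeholder names).
echo "SELECT 1 FROM my_table" > test/fixtures/dialects/ansi/my_new_case.sql

# Regenerate the parse-tree YAML fixtures, as referenced in the checklist.
tox -e generate-fixture-yml
# Equivalently, the script the CI "ymlchecks" job runs directly:
python test/generate_parse_fixture_yml.py
```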
==> sqlfluff-3.4.2/.github/labeler.yml <==

ansi:
  - "/(ansi)/i"
athena:
  - "/(athena)/i"
bigquery:
  - "/(bigquery)/i"
clickhouse:
  - "/(clickhouse)/i"
databricks:
  - "/(databricks)/i"
db2:
  - "/(db2)/i"
doris:
  - "/(doris)/i"
duckdb:
  - "/(duckdb)/i"
exasol:
  - "/(exasol)/i"
flink:
  - "/(flink)/i"
greenplum:
  - "/(greenplum)/i"
hive:
  - "/(hive)/i"
impala:
  - "/(impala)/i"
mariadb:
  - "/(mariadb)/i"
materialize:
  - "/(materialize)/i"
mysql:
  - "/(mysql)/i"
oracle:
  - "/(oracle)/i"
postgres:
  - "/(postgres)/i"
redshift:
  - "/(redshift)/i"
snowflake:
  - "/(snowflake)/i"
soql:
  - "/(soql)/i"
sparksql:
  - "/(sparksql)/i"
sqlite:
  - "/(sqlite)/i"
starrocks:
  - "/(starrocks)/i"
t-sql:
  - "/(t-sql|tsql)/i"
teradata:
  - "/(teradata)/i"
trino:
  - "/(trino)/i"
vertica:
  - "/(vertica)/i"

==> sqlfluff-3.4.2/.github/release-drafter.yml <==

template: |
  ## Highlights

  > Maintainers: Copy and paste the commentary from the changelog here.
  > Check that the name and tag are correct before releasing.
  > Publishing a GitHub release will trigger the deploy to pypi and dockerhub.

  ## What’s Changed

  $CHANGES
exclude-labels:
  - 'skip-changelog'
categories:
  - title: '🚀 Enhancements'
    label: 'enhancement'
  - title: '🐛 Bug Fixes'
    label: 'bug'

==> sqlfluff-3.4.2/.github/workflows/add-issue-labels.yaml <==

name: "Add Issue Labels"

on:
  issues:
    types: [opened]

jobs:
  triage:
    runs-on: ubuntu-latest
    steps:
      # Update .github/labeler.yml for new dialects
      - uses: github/issue-labeler@v3.2
        with:
          configuration-path: .github/labeler.yml
          include-title: 1
          include-body: 0
          not-before: 2023-07-06T02:54:32Z
          enable-versioned-regex: 0
          repo-token: ${{ github.token }}

==> sqlfluff-3.4.2/.github/workflows/add-to-release-notes.yml <==

#
# This updates the current draft release notes when a PR is merged
#
name: Add to Release Notes

on:
  push:
    branches:
      - main

jobs:
  draft-release:
    runs-on: ubuntu-latest
    if: github.repository == 'sqlfluff/sqlfluff'
    steps:
      - name: Update release notes
        uses: release-drafter/release-drafter@v6
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

==> sqlfluff-3.4.2/.github/workflows/ci-pr-comments.yml <==

# This Workflow runs in a more secure context and comments
# on pull requests.
# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
name: Comment on the pull request

# Run on completion of the CI job.
# This workflow has access to write comments on PRs even when
# that PR is triggered by a forked repo.
on:
  workflow_run:
    workflows:
      - CI
    types:
      - completed

jobs:
  comment-on-pr:
    runs-on: ubuntu-latest
    if: >
      github.event.workflow_run.event == 'pull_request'
    steps:
      - name: 'Download txt artifact'
        uses: actions/github-script@v6
        with:
          script: |
            const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{github.event.workflow_run.id }},
            });
            const matchArtifact = artifacts.data.artifacts.filter((artifact) => {
              return artifact.name == "txt-report"
            })[0];
            const download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: matchArtifact.id,
              archive_format: 'zip',
            });
            var fs = require('fs');
            fs.writeFileSync('${{github.workspace}}/cov-report.zip', Buffer.from(download.data));
      - name: Unzip Downloaded Artifact
        run: unzip cov-report.zip
      - name: Update PR comment with coverage report.
        uses: actions/github-script@v6
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            // First list the existing comments
            const trigger_str = 'Coverage Results';
            console.log("Getting existing comments...");
            const { promises: fs } = require('fs');
            const issue_number = await fs.readFile('pr-number.txt', 'utf8');
            console.log("Issue number: " + issue_number);
            const comments = await github.paginate(
              github.rest.issues.listComments,
              {
                owner: 'sqlfluff',
                repo: 'sqlfluff',
                issue_number: Number(issue_number)
              }
            );
            let comment_id = null;
            console.log("Got %d comments", comments.length);
            comments.forEach(comment => {
              if (comment.body.indexOf(trigger_str) >= 0) {
                console.log("Found target comment ID: %d", comment.id);
                comment_id = comment.id;
              } else {
                console.log("Comment ID %d not valid with body:\n%s.", comment.id, comment.body);
              }
            });
            const previous_outcome = await fs.readFile('outcome.txt', 'utf8');
            console.log("Previous coverage step outcome: %s", previous_outcome);
            if (previous_outcome == "success\n") {
              status_emoji = "✅";
            } else {
              status_emoji = "⚠️";
            }
            const content = await fs.readFile('coverage-report.txt', 'utf8');
            body = "# " + trigger_str + " " + status_emoji + "\n```\n" + content + "\n```\n";
            if (comment_id > 0) {
              console.log("Updating comment id: %d", comment_id);
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: comment_id,
                body: body
              });
            } else {
              console.log("No existing comment matched, creating a new one...");
              await github.rest.issues.createComment({
                issue_number: Number(issue_number),
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: body
              });
            }

==> sqlfluff-3.4.2/.github/workflows/ci-test-dbt.yml <==

#############################
## GitHub Actions CI Tests ##
#############################
#
# This is a reusable workflow to make CI tests more modular.
# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
#
# Called by ci-tests.yml
# This one does the dbt tests
#
name: Modular SQLFluff dbt test workflow

on:
  workflow_call:
    inputs:
      python-version:
        required: true
        type: string
      dbt-version:
        required: true
        type: string
      coverage:
        required: false
        type: boolean
        default: false
    secrets:
      gh_token:
        required: true

jobs:
  modular-python-test:
    name: py${{ inputs.python-version }}-${{ inputs.dbt-version }}
    runs-on: ubuntu-latest
    env:
      FORCE_COLOR: 1
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: password
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
          cache: 'pip'
          cache-dependency-path: |
            setup.cfg
            requirements_dev.txt
      - name: Install dependencies
        run: pip install tox
      - name: Run the tests (with coverage)
        if: ${{ inputs.coverage }}
        run: tox -e ${{ inputs.dbt-version }} -- --cov=sqlfluff_templater_dbt plugins/sqlfluff-templater-dbt
      - name: Run the tests (without coverage)
        if: ${{ !inputs.coverage }}
        run: tox -e ${{ inputs.dbt-version }} -- plugins/sqlfluff-templater-dbt
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v4
        if: ${{ inputs.coverage }}
        with:
          name: coverage-data-py${{ inputs.python-version }}-${{ inputs.dbt-version }}
          path: ".coverage.*"
          if-no-files-found: ignore
          include-hidden-files: true

==> sqlfluff-3.4.2/.github/workflows/ci-test-python.yml <==

#############################
## GitHub Actions CI Tests ##
#############################
#
# This is a reusable workflow to make CI tests more modular.
# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
#
# Called by ci-tests.yml
# This one does the python tests
#
name: Modular SQLFluff python test workflow

on:
  workflow_call:
    inputs:
      python-version:
        required: true
        type: string
      marks:
        required: false
        type: string
        default: "not integration"
      coverage:
        required: false
        type: boolean
        default: false
    secrets:
      gh_token:
        required: true

jobs:
  modular-python-test:
    runs-on: ubuntu-latest
    name: py${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
          cache: 'pip'
          cache-dependency-path: |
            setup.cfg
            requirements_dev.txt
      - name: Install dependencies
        run: pip install tox
      - name: Parse Python Version
        id: py_version
        run: |
          PYVERSION=$(echo "${{ inputs.python-version }}" | sed -e 's/\.//g')
          echo "PYVERSION=$PYVERSION" >> $GITHUB_OUTPUT
      # Run test process (with or without coverage).
      # Arguments after the "--" are passed through to pytest:
      #   --cov=...       The library to include in coverage reporting.
      #   -n 2            Runs with two parallel processes.
      #   test            The path to detect tests within.
      #   -m ...          The pytest marks to filter tests.
      #   --durations=16  Displays the 16 slowest runs to help with performance debugging.
      - name: Run the tests (with coverage)
        # NOTE: We have a separate job for coverage reporting because
        # it impacts performance and slows the test suite significantly.
        if: ${{ inputs.coverage }}
        run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- --cov=sqlfluff -n 2 test -m "${{ inputs.marks }}" --durations=16 --verbosity=0
      - name: Run the tests (without coverage)
        if: ${{ !inputs.coverage }}
        run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- -n 2 test -m "${{ inputs.marks }}" --durations=16 --verbosity=0
      - name: Rename coverage files with suffix
        # NOTE: We do this because we're using the same tox environment for multiple
        # test jobs and we need to make sure that their coverage files don't collide.
        id: cov_suffix
        if: ${{ inputs.coverage }}
        run: |
          COVSUFFIX=$(echo "${{ inputs.marks }}" | sed -e 's/ /-/g')
          echo "COVSUFFIX=$COVSUFFIX" >> $GITHUB_OUTPUT
          for file in .coverage.*; do mv "$file" "$file.$COVSUFFIX"; done;
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v4
        if: ${{ inputs.coverage }}
        with:
          name: coverage-data-py${{ inputs.python-version }}-${{ inputs.marks }}
          path: ".coverage.*"
          if-no-files-found: ignore
          include-hidden-files: true
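The same tox entrypoints used by this workflow can be run locally. A minimal sketch, assuming Python 3.13 is installed (so the environment name resolves to `py313`, mirroring the `Parse Python Version` step above):

```sh
pip install tox
# Mirror the CI invocation: arguments after "--" pass through to pytest.
tox -e py313 -- -n 2 test -m "not integration" --durations=16
```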
==> sqlfluff-3.4.2/.github/workflows/ci-tests.yml <==

#############################
## GitHub Actions CI Tests ##
#############################
#
# This can be kicked off manually in the Actions tab of GitHub
# It will also run nightly at 2am
# It will run on any pull request, except non-code changes
# (images, markdown files, etc.)
#
name: CI

on:
  workflow_dispatch:
  schedule:
    # 2am each night
    - cron: '00 2 * * *'
  # Don't use pull_request_target here. See:
  # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
  pull_request:
  push:
    branches:
      - main
  merge_group:
    # Merge Queue checks requested. This feature is still in beta
    # from Github and so may need updating later.
    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#merge_group
    types: [checks_requested]

jobs:
  linting:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        job:
          [
            "linting",
            "doclinting",
            "docbuild",
            "yamllint",
            "mypy",
            "mypyc",
            "doctests",
          ]
        include:
          # Default to most recent python version
          - python-version: "3.13"
          # As at 2024-10-10, docbuild fails on 3.13, so fall back to 3.12
          - job: docbuild
            python-version: "3.12"
    name: ${{ matrix.job }} tests
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: pip install tox
      - name: Run the tests
        run: tox -e ${{ matrix.job }}

  # Test with coverage tracking on most recent python (py313).
  python-version-tests:
    name: Python Tests
    strategy:
      matrix:
        python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]
        include:
          # Default to test without coverage tracking on older python versions.
          # This saves time, as testing without coverage tracking is faster.
          - coverage: false
          # Override coverage to be true for most recent python version.
          - python-version: "3.13"
            coverage: true
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-python.yml
    with:
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.coverage }}
    secrets:
      gh_token: ${{ secrets.github_token }}

  dbt-tests:
    name: dbt Plugin Tests
    strategy:
      fail-fast: false
      matrix:
        dbt-version:
          - dbt140
          - dbt150
          - dbt160
          - dbt170
          - dbt180
          - dbt190
        include:
          # Default to python 3.12 for dbt tests.
          # * Python 3.13 not supported yet.
          # * Looks like it's due to psycopg2 support as of 2024-10-10
          - python-version: "3.12"
          # For dbt 1.4 - 1.6 override to python 3.11
          - dbt-version: dbt140
            python-version: "3.11"
          - dbt-version: dbt150
            python-version: "3.11"
          - dbt-version: dbt160
            python-version: "3.11"
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-dbt.yml
    with:
      python-version: ${{ matrix.python-version }}
      dbt-version: ${{ matrix.dbt-version }}
      coverage: true
    secrets:
      gh_token: ${{ secrets.github_token }}

  dialect-tests:
    name: Dialect ${{ matrix.marks }}
    strategy:
      matrix:
        include:
          # This runs the bulk of the dialect _parsing_ tests.
          #
          # It's run as a separate job as it takes longer than the CI jobs and
          # allows them to be rerun separately if GitHub Actions or Coverage
          # is experiencing issues.
          - marks: "parse_suite"
            # We test coverage here for some parsing routines.
            coverage: true
          # This lints all our dialect fixtures to check rules can handle a variety
          # of SQL and don't error out badly.
          #
          # It's run as a separate job as it takes longer than the CI jobs and
          # allows them to be rerun separately if GitHub Actions or Coverage
          # is experiencing issues.
          - marks: "fix_suite"
            coverage: false
          # This lints all our rules fixtures to check rules.
          #
          # It's run as a separate job as it takes longer than the CI jobs and
          # allows them to be rerun separately if GitHub Actions or Coverage
          # is experiencing issues.
          - marks: "rules_suite"
            coverage: true
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-python.yml
    with:
      python-version: "3.13"
      marks: ${{ matrix.marks }}
      coverage: ${{ matrix.coverage }}
    secrets:
      gh_token: ${{ secrets.github_token }}

  ymlchecks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'
      - name: Install dependencies
        run: |
          pip install -r requirements_dev.txt
          pip install -e .
      - name: Generate the YAML files
        run: |
          python test/generate_parse_fixture_yml.py
      - name: Test the generated YAML files
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            git diff
            echo "Generated YAML files do not match branch."
            echo "Please run the following command to generate these:"
            echo "    python test/generate_parse_fixture_yml.py"
            exit 1
          fi

  examples:
    runs-on: ubuntu-latest
    name: example tests
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'
      - name: Install dependencies
        run: |
          pip install -e .
          pip install tqdm
      - name: Test the example files
        run: |
          for file in examples/*
          do
            echo "Running $file"
            python "$file"
          done

  python-windows-tests:
    runs-on: windows-latest
    name: Python 3.13 Windows tests
    steps:
      - name: Set git to use LF
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - name: List Env
        shell: bash
        run: |
          env | sort
      - name: Install dependencies
        shell: bash
        run: pip install tox
      - name: Run the tests
        shell: bash
        # Set python temp dir in working dir as on GitHub Actions the Windows
        # machine often has the system temp dir (which tox uses) on the C drive
        # and the working dir on the D drive, which causes problems.
        run: |
          mkdir temp_pytest
          python -m tox -e winpy -- --cov=sqlfluff -n 2 test -m "not integration"
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v4
        with:
          name: coverage-data-winpy3.13
          path: ".coverage.*"
          if-no-files-found: ignore
          include-hidden-files: true

  python-windows-dbt-tests:
    runs-on: windows-latest
    name: dbt Plugin Python 3.12 Windows tests
    steps:
      - name: Start PostgreSQL on Windows
        run: |
          $pgService = Get-Service -Name postgresql*
          Set-Service -InputObject $pgService -Status running -StartupType automatic
          Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
      - name: Set postgres user password
        run: |
          & $env:PGBIN\psql --command="ALTER USER postgres PASSWORD 'password';" --command="\du"
      - name: Set git to use LF
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # NOTE: As of 2024-10-10, dbt does not yet support python 3.13.
          python-version: "3.12"
      - name: Install dependencies
        shell: bash
        run: pip install tox
      - name: Run the tests
        shell: bash
        # Do not explicitly set a temp dir for dbt as it causes problems.
        # None of these tests need a temp dir set.
        run: |
          python -m tox -e dbt180-winpy -- plugins/sqlfluff-templater-dbt

  pip-test-pull-request:
    # Test that using pip install works as we've missed
    # some dependencies in the past - see #1842
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    name: pip install tests
    steps:
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: |
          pip install .
      - name: Run the version test
        run: |
          sqlfluff --version
      - name: Run a simple select parse test via stdin
        run: |
          echo "select 1" | sqlfluff parse --dialect=ansi -
      - name: Run a simple select lint test via stdin
        run: |
          echo "select 1" | sqlfluff lint --dialect=ansi -
      - name: Run a simple select parse test via file
        run: |
          sqlfluff parse --dialect=ansi <(echo "select 1")
      - name: Run a simple select lint test via file
        run: |
          sqlfluff lint --dialect=ansi <(echo "select 1")

  coverage_check:
    name: Combine & check 100% coverage.
    runs-on: ubuntu-latest
    needs: [python-version-tests, dbt-tests, python-windows-tests, dialect-tests]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - run: python -m pip install --upgrade coverage[toml]
      - name: Download coverage data.
        uses: actions/download-artifact@v4
        with:
          pattern: coverage-data-*
          merge-multiple: true
      - name: Combine coverage & fail if it's <100%.
        id: report_coverage
        # NOTE: Setting the pipefail option here means that even when
        # piping the output to `tee`, we still get the exit code of the
        # `coverage report` command.
        run: |
          set -o pipefail
          python -m coverage combine
          python -m coverage html --skip-covered --skip-empty
          python -m coverage report --fail-under=100 --skip-covered --skip-empty -m | tee coverage-report.txt
      - name: Upload HTML report if check failed.
        uses: actions/upload-artifact@v4
        with:
          name: html-report
          path: htmlcov
        if: failure() && github.event_name == 'pull_request'
      - name: Stash PR Number.
        if: always() && github.event_name == 'pull_request'
        # NOTE: We do this so we know what PR to comment on when we pick up the report.
        run: |
          echo ${{ github.event.number }} > ./pr-number.txt
          echo ${{ steps.report_coverage.outcome }} > ./outcome.txt
      - name: Upload TXT report always (to add as comment to PR).
        # NOTE: We don't actually comment on the PR from here, we'll do that
        # by triggering a separate, more secure workflow.
        # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
        uses: actions/upload-artifact@v4
        with:
          name: txt-report
          path: |
            coverage-report.txt
            pr-number.txt
            outcome.txt
        if: always() && github.event_name == 'pull_request'

==> sqlfluff-3.4.2/.github/workflows/create-release-pull-request.yaml <==

name: Create release pull request

on:
  workflow_dispatch:
    inputs:
      newVersionNumber:
        description: 'New version number'
        required: true

jobs:
  run:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Abort if branch already exists
        run: |
          _check_branch=$(git ls-remote --heads origin prep-${{ github.event.inputs.newVersionNumber }})
          if [[ -z ${_check_branch} ]]; then
            echo "Release branch doesn't exist yet, continuing"
          else
            echo "Release branch already exists, aborting. Run the Python release script locally."
            exit 1
          fi
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          pip install requests click pyyaml ghapi
      - name: Prepare release
        run: |
          python util.py release ${{ github.event.inputs.newVersionNumber }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ secrets.GITHUB_REPOSITORY_OWNER }}
      - name: Create pull request
        uses: peter-evans/create-pull-request@v7
        with:
          delete-branch: true
          branch: prep-${{ github.event.inputs.newVersionNumber }}
          commit-message: "Bump to version ${{ github.event.inputs.newVersionNumber }}"
          title: Prep version ${{ github.event.inputs.newVersionNumber }}
          body: |
            Prepare version ${{ github.event.inputs.newVersionNumber }}

            Please add all merged changes from [Release Draft][1] to the
            CHANGELOG.md file, along with a short commentary for users.

            * Summarise the main changes made.
            * Call out any specific changes that users should know about.
            * Celebrate new contributors to the project.

            The summary in this PR should also be added to the drafted GitHub
            release after merging this PR. Merging this PR does not trigger
            the release to pypi, that happens when the GitHub release is
            published.

            - Auto-generated by [create-pull-request][2] GitHub Action

            [1]: https://github.com/sqlfluff/sqlfluff/releases
            [2]: https://github.com/peter-evans/create-pull-request
          labels: |
            release
            skip-changelog
      - name: Update release title and tag
        uses: release-drafter/release-drafter@v6
        with:
          # NOTE: We should eventually actually populate the date here, but that
          # will most likely change before the new pull request actually gets
          # merged, so we just add "YYYY-MM-DD" for now as a placeholder.
name: "[${{ github.event.inputs.newVersionNumber }}] - YYYY-MM-DD" tag: ${{ github.event.inputs.newVersionNumber }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} sqlfluff-3.4.2/.github/workflows/pre-commit.yml000066400000000000000000000031061503426445100215420ustar00rootroot00000000000000--- name: pre-commit on: pull_request: push: jobs: pre-commit: runs-on: ubuntu-latest env: RAW_LOG: pre-commit.log CS_XML: pre-commit.xml SKIP: no-commit-to-branch steps: - run: sudo apt-get update && sudo apt-get install cppcheck if: false - uses: actions/checkout@v4 - uses: actions/setup-python@v5 if: false with: cache: pip python-version: 3.12.1 - run: python -m pip install pre-commit - uses: actions/cache/restore@v4 with: path: ~/.cache/pre-commit/ key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }} - name: Run pre-commit hooks run: | set -o pipefail pre-commit gc pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG} - name: Convert Raw Log to Checkstyle format (launch action) uses: mdeweerd/logToCheckStyle@v2024.3.5 if: ${{ failure() }} with: in: ${{ env.RAW_LOG }} out: ${{ env.CS_XML }} - uses: actions/cache/save@v4 if: ${{ ! cancelled() }} with: path: ~/.cache/pre-commit/ key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }} - name: Provide log as artifact uses: actions/upload-artifact@v4 if: ${{ ! cancelled() }} with: name: precommit-logs path: | ${{ env.RAW_LOG }} ${{ env.CS_XML }} retention-days: 2 sqlfluff-3.4.2/.github/workflows/publish-dbt-templater-release-to-pypi.yaml000066400000000000000000000016731503426445100270630ustar00rootroot00000000000000name: Publish dbt templater PyPI Version on: release: types: - published workflow_dispatch: jobs: run: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install Dependencies run: | pip install --upgrade pip tox - name: Build Distribution (dbt plugin) # tox commands run relative to the repo root. run: tox -e build-dist -- plugins/sqlfluff-templater-dbt - name: Copy builds to main dist folder # We move them here so that the github action can still access them run: cp -r plugins/sqlfluff-templater-dbt/dist/. dist/ - name: Publish Python distribution to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_DBT_TEMPLATER_TOKEN }} skip-existing: true sqlfluff-3.4.2/.github/workflows/publish-sqlfluff-docker-image-to-dockerhub.yaml000066400000000000000000000062401503426445100300300ustar00rootroot00000000000000# Create and push Docker image of latest release to DockerHub. name: Publish SQLFluff DockerHub Version on: release: types: - published workflow_dispatch: # Create tag for integration test. env: TEST_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:test jobs: docker: runs-on: ubuntu-latest permissions: contents: read packages: write attestations: write id-token: write steps: # Get the version of latest release in # order to tag published Docker image. - name: Get latest release name id: latest_release uses: pozetroninc/github-action-get-latest-release@master with: repository: ${{ github.repository }} # Setup QEMU and Buildx to allow for multi-platform builds. - name: Set up QEMU id: docker_qemu uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx id: docker_buildx uses: docker/setup-buildx-action@v3 # Authenticate with DockerHub. 
      - name: Login to DockerHub
        id: docker_login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      # Authenticate with Container registry
      - name: Login to Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # Build amd64 image to use in the integration test.
      - name: Build and export to Docker
        id: docker_build
        uses: docker/build-push-action@v5
        with:
          load: true
          tags: ${{ env.TEST_TAG }}
          cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
          cache-to: type=inline
      # Integration test to validate newly created image is working.
      - name: Test Docker image
        id: docker_test
        run: |
          echo "SELECT 1" > test.sql
          docker run --rm -i -v $PWD:/sql ${{ env.TEST_TAG }} lint --dialect ansi /sql/test.sql
      # Build arm64 image (amd64 is cached from docker_build step) and export to DockerHub and GHCR.
      # N.B. We tag this image as both latest and with its version number.
      - name: Build and push
        id: docker_build_push
        uses: docker/build-push-action@v5
        with:
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
            ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:${{ steps.latest_release.outputs.release }}
            ghcr.io/${{ github.repository }}:latest
            ghcr.io/${{ github.repository }}:${{ steps.latest_release.outputs.release }}
          cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
          cache-to: type=inline
      # Add artifact attestation for GHCR
      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v2
        with:
          subject-name: ghcr.io/${{ github.repository }}
          subject-digest: ${{ steps.docker_build_push.outputs.digest }}
          push-to-registry: true

==> sqlfluff-3.4.2/.github/workflows/publish-sqlfluff-release-to-pypi.yaml <==

name: Publish SQLFluff PyPI Version

on:
  release:
    types:
      - published
  workflow_dispatch:

jobs:
  run:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install Dependencies
        run: |
          pip install --upgrade pip tox
      - name: Build Distribution (Core)
        run: tox -e build-dist
      - name: Publish Python distribution to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
          skip-existing: true
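The distribution builds driven by the two publishing workflows above can be reproduced locally. A sketch, assuming a checkout of the repo root (the commands are the same tox entrypoints the workflows call):

```sh
pip install --upgrade pip tox
# Core package (same step as "Build Distribution (Core)" above).
tox -e build-dist
# dbt templater plugin (built relative to the repo root).
tox -e build-dist -- plugins/sqlfluff-templater-dbt
ls dist/ plugins/sqlfluff-templater-dbt/dist/
```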
==> sqlfluff-3.4.2/.gitignore <==

# Ignore IDE files
.vscode
.idea
/.sqlfluff
**/.DS_Store

# Ignore Python cache and prebuilt things
.cache
__pycache__
*.egg-info
*.pyc
build
_build
dist
.pytest_cache
/sqlfluff-*

# Ignore the Environment
env
.tox
venv
.venv
.python-version
uv.lock

# Ignore coverage reports
.coverage
.coverage.*
coverage.xml
htmlcov

# Ignore test reports
.test-reports
test-reports

# Ignore root testing sql & python files
/test*.sql
/test*.py
/test*.txt
/.hypothesis/

# Ignore dbt outputs from testing
/target

# Ignore any timing outputs
/*.csv

# Ignore conda environment.yml contributors might be using and direnv config
environment.yml
.envrc
**/*FIXED.sql
*.prof

# Ignore temp packages.yml generated during testing.
plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml

# VSCode
.vscode
*.code-workspace

# Emacs
*~

# Mypyc outputs
*.pyd
*.so

==> sqlfluff-3.4.2/.pre-commit-config.yaml <==

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: no-commit-to-branch
        args: [--branch, main]
      # If adding any exceptions here, make sure to add them to .editorconfig as well
      - id: end-of-file-fixer
        exclude: |
          (?x)^
          (
              test/fixtures/templater/jinja_l_metas/0(0[134578]|11).sql|
              test/fixtures/linter/sqlfluffignore/[^/]*/[^/]*.sql|
              test/fixtures/config/inheritance_b/(nested/)?example.sql|
              (.*)/trailing_newlines.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/dbt_project/models/my_new_project/multiple_trailing_newline.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/templated_output/macro_in_macro.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/templated_output/(dbt_utils_0.8.0/)?last_day.sql|
              test/fixtures/linter/indentation_errors.sql|
              test/fixtures/templater/jinja_d_roundtrip/test.sql
          )$
      - id: trailing-whitespace
        exclude: |
          (?x)^(
              test/fixtures/linter/indentation_errors.sql|
              test/fixtures/templater/jinja_d_roundtrip/test.sql|
              test/fixtures/config/inheritance_b/example.sql|
              test/fixtures/config/inheritance_b/nested/example.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/templated_output/macro_in_macro.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/templated_output/last_day.sql|
              plugins/sqlfluff-templater-dbt/test/fixtures/dbt.*/templated_output/dbt_utils_0.8.0/last_day.sql|
              test/fixtures/linter/sqlfluffignore/
          )$
  - repo: https://github.com/psf/black
    rev: 25.1.0
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.14.1
    hooks:
      - id: mypy
        additional_dependencies:
          # NOTE: These dependencies should be the same as the `types-*` dependencies in
          # `requirements_dev.txt`. If you update these, make sure to update those too.
          [
            types-toml,
            types-chardet,
            types-colorama,
            types-pyyaml,
            types-regex,
            types-tqdm,
            # Type stubs are obvious to import, but some dependencies also define their own
            # types directly (e.g. jinja). pre-commit doesn't actually install the python
            # package, and so doesn't automatically install the dependencies from
            # `pyproject.toml` either. We include them here to make sure mypy can function
            # properly.
            jinja2,
            pathspec,
            pytest,
            # and by extension... pluggy
            click,
            platformdirs
          ]
        files: ^src/sqlfluff/.*
        # The mypy pre-commit hook by default sets a few arguments that we don't normally
        # use. To undo that we reset the `args` to be empty here. This is important to
        # ensure we don't get conflicting results from the pre-commit hook and from the
        # CI job.
        args: []
  - repo: https://github.com/pycqa/flake8
    rev: 7.1.1
    hooks:
      - id: flake8
        additional_dependencies: [flake8-black>=0.3.6]
  - repo: https://github.com/pycqa/doc8
    rev: v1.1.2
    hooks:
      - id: doc8
        args: [--file-encoding, utf8]
        files: docs/source/.*\.rst$
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [-c=.yamllint]
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # Ruff version.
    rev: "v0.9.3"
    hooks:
      - id: ruff
  - repo: https://github.com/codespell-project/codespell
    rev: v2.4.1
    hooks:
      - id: codespell
        exclude: (?x)^(test/fixtures/.*|pyproject.toml)$
        additional_dependencies: [tomli]

==> sqlfluff-3.4.2/.pre-commit-hooks.yaml <==

- id: sqlfluff-lint
  name: sqlfluff-lint
  # Set `--processes 0` to use maximum parallelism.
  # - `--disable-progress-bar`: pre-commit suppresses logging already,
  #   so the progress bar can cause an unneeded slow down.
  entry: sqlfluff lint --processes 0 --disable-progress-bar
  language: python
  description: "Lints sql files with `SQLFluff`"
  types: [sql]
  require_serial: true
  additional_dependencies: []
- id: sqlfluff-fix
  name: sqlfluff-fix
  # Set a couple of default flags:
  # - `--show-lint-violations` shows issues to not require running `sqlfluff lint`
  # - `--processes 0` to use maximum parallelism
  # - `--disable-progress-bar`: pre-commit suppresses logging already,
  #   so the progress bar can cause an unneeded slow down.
  # By default, this hook applies all rules.
  entry: sqlfluff fix --show-lint-violations --processes 0 --disable-progress-bar
  language: python
  description: "Fixes sql lint errors with `SQLFluff`"
  types: [sql]
  require_serial: true
  additional_dependencies: []
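For context, a downstream project would consume the two hooks defined above from its own `.pre-commit-config.yaml`. A minimal sketch: the hook ids and the `args`-appending behaviour come from pre-commit itself, but the `rev` (this archive's version tag) and the extra `args` are illustrative assumptions.

```yaml
repos:
  - repo: https://github.com/sqlfluff/sqlfluff
    rev: 3.4.2  # illustrative: pin to the release tag you actually use
    hooks:
      - id: sqlfluff-lint
      - id: sqlfluff-fix
        # Extra args are appended to the hook's `entry` above, e.g. to pin
        # a dialect for the whole project (illustrative).
        args: [--dialect, ansi]
```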
rev: "v0.9.3" hooks: - id: ruff - repo: https://github.com/codespell-project/codespell rev: v2.4.1 hooks: - id: codespell exclude: (?x)^(test/fixtures/.*|pyproject.toml)$ additional_dependencies: [tomli] sqlfluff-3.4.2/.pre-commit-hooks.yaml000066400000000000000000000017521503426445100175120ustar00rootroot00000000000000- id: sqlfluff-lint name: sqlfluff-lint # Set `--processes 0` to use maximum parallelism # - `--disable-progress-bar` pre-commit suppresses logging already # this can cause an unneeded slow down. entry: sqlfluff lint --processes 0 --disable-progress-bar language: python description: "Lints sql files with `SQLFluff`" types: [sql] require_serial: true additional_dependencies: [] - id: sqlfluff-fix name: sqlfluff-fix # Set a couple of default flags: # - `--show-lint-violations` shows issues to not require running `sqlfluff lint` # - `--processes 0` to use maximum parallelism # - `--disable-progress-bar` pre-commit suppresses logging already # this can cause an unneeded slow down. # By default, this hook applies all rules. entry: sqlfluff fix --show-lint-violations --processes 0 --disable-progress-bar language: python description: "Fixes sql lint errors with `SQLFluff`" types: [sql] require_serial: true additional_dependencies: [] sqlfluff-3.4.2/.readthedocs.yml000066400000000000000000000013441503426445100164360ustar00rootroot00000000000000# .readthedocs.yml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Build documentation in the docs/source directory with Sphinx sphinx: configuration: docs/source/conf.py # Don't build any additional formats formats: [] # Optionally set the version of Python and requirements required to build your docs. # In our case we need both the docs requirements and the package itself. python: install: - requirements: docs/requirements.txt - method: pip path: . build: os: ubuntu-22.04 tools: python: "3.11" jobs: # Before building, generate the rule & dialect docs pre_build: - python docs/generate-auto-docs.py sqlfluff-3.4.2/.yamllint000066400000000000000000000004461503426445100152040ustar00rootroot00000000000000--- extends: default ignore: | .tox/ .venv/ dbt_modules/ dbt_packages/ rules: brackets: disable document-start: disable indentation: indent-sequences: whatever line-length: disable truthy: check-keys: false # .github workflow uses "on:" (but not as a truthy value) sqlfluff-3.4.2/CHANGELOG.md000066400000000000000000022663351503426445100152000ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [3.4.2] - 2025-07-11 ## Highlights * This release adds support for two new dialects: _Apache Doris_ and _FlinkSQL_. * It also has several CLI improvements, Dialect Improvements, and bugfixes This release has *25* contributions in it! Also, welcome to the **five** new contributors! Thanks for your contributions! 
## What’s Changed

* Postgres: Fix VARIADIC function call parsing [#7002](https://github.com/sqlfluff/sqlfluff/pull/7002) [@franloza](https://github.com/franloza)
* Postgres: Add IS JSON syntax support [#7001](https://github.com/sqlfluff/sqlfluff/pull/7001) [@franloza](https://github.com/franloza)
* feat(databricks): add support for single-line magics [#6999](https://github.com/sqlfluff/sqlfluff/pull/6999) [@phant0mw0lf](https://github.com/phant0mw0lf)
* Trino: support ALTER TABLE [#6996](https://github.com/sqlfluff/sqlfluff/pull/6996) [@ykicisk](https://github.com/ykicisk)
* Snowflake: Support UNION (ALL) BY NAME [#6994](https://github.com/sqlfluff/sqlfluff/pull/6994) [@WittierDinosaur](https://github.com/WittierDinosaur)
* [#6522] FlinkSQL Dialect Implementation [#6985](https://github.com/sqlfluff/sqlfluff/pull/6985) [@kalanyuz](https://github.com/kalanyuz)
* Tsql align alias when using equal operator [#6786](https://github.com/sqlfluff/sqlfluff/pull/6786) [@IngerMathilde](https://github.com/IngerMathilde)
* TSQL: Rework the execute statement block to latest specs [#6953](https://github.com/sqlfluff/sqlfluff/pull/6953) [@peterbud](https://github.com/peterbud)
* Add Apache Doris SQL dialect support to SQLFluff [#6979](https://github.com/sqlfluff/sqlfluff/pull/6979) [@morningman](https://github.com/morningman)
* ST05: Support dialects that use WITH, INSERT, SELECT order [#6982](https://github.com/sqlfluff/sqlfluff/pull/6982) [@keraion](https://github.com/keraion)
* CV12 and AL05: Prevent AL05 from deleting rewritten CV12 references [#6981](https://github.com/sqlfluff/sqlfluff/pull/6981) [@keraion](https://github.com/keraion)
* DuckDB: add support for additional star expressions [#6977](https://github.com/sqlfluff/sqlfluff/pull/6977) [@keraion](https://github.com/keraion)
* CLI: Add support for the `NO_COLOR` environment variable [#6976](https://github.com/sqlfluff/sqlfluff/pull/6976) [@keraion](https://github.com/keraion)
* SQLite: Make `ACTION` a non-reserved keyword [#6970](https://github.com/sqlfluff/sqlfluff/pull/6970) [@keraion](https://github.com/keraion)
* Snowflake: fix collate grammar with aliases [#6967](https://github.com/sqlfluff/sqlfluff/pull/6967) [@keraion](https://github.com/keraion)
* Adds support for `json_object` syntax in Oracle.
  [#6973](https://github.com/sqlfluff/sqlfluff/pull/6973) [@joaostorrer](https://github.com/joaostorrer)
* Upgrade OverlapsClauseSegment [#6964](https://github.com/sqlfluff/sqlfluff/pull/6964) [@kkozhakin](https://github.com/kkozhakin)
* ANSI: Support CTEs in Merge [#6972](https://github.com/sqlfluff/sqlfluff/pull/6972) [@NormallyGaussian](https://github.com/NormallyGaussian)
* Fix ST09 rule not detecting violations in Jinja templated SQL [#6958](https://github.com/sqlfluff/sqlfluff/pull/6958) [@kang8](https://github.com/kang8)
* MariaDB: Allow CTEs in `INSERT` statements [#6955](https://github.com/sqlfluff/sqlfluff/pull/6955) [@keraion](https://github.com/keraion)
* AliasInfo: handle quotes and special aliases in rules [#6954](https://github.com/sqlfluff/sqlfluff/pull/6954) [@keraion](https://github.com/keraion)
* API: prioritize configuration file's dialect [#6951](https://github.com/sqlfluff/sqlfluff/pull/6951) [@keraion](https://github.com/keraion)
* Tsql add alter constraint if exists [#6957](https://github.com/sqlfluff/sqlfluff/pull/6957) [@IngerMathilde](https://github.com/IngerMathilde)
* TSQL: add support for extended Unicode characters in identifiers [#6952](https://github.com/sqlfluff/sqlfluff/pull/6952) [@peterbud](https://github.com/peterbud)
* TSQL: Add support for CREATE TABLE GRAPH statements [#6948](https://github.com/sqlfluff/sqlfluff/pull/6948) [@peterbud](https://github.com/peterbud)

## New Contributors

* [@peterbud](https://github.com/peterbud) made their first contribution in [#6948](https://github.com/sqlfluff/sqlfluff/pull/6948)
* [@IngerMathilde](https://github.com/IngerMathilde) made their first contribution in [#6957](https://github.com/sqlfluff/sqlfluff/pull/6957)
* [@morningman](https://github.com/morningman) made their first contribution in [#6979](https://github.com/sqlfluff/sqlfluff/pull/6979)
* [@kalanyuz](https://github.com/kalanyuz) made their first contribution in [#6985](https://github.com/sqlfluff/sqlfluff/pull/6985)
* [@phant0mw0lf](https://github.com/phant0mw0lf) made their first contribution in [#6999](https://github.com/sqlfluff/sqlfluff/pull/6999)
* [@franloza](https://github.com/franloza) made their first contribution in [#7001](https://github.com/sqlfluff/sqlfluff/pull/7001)

## [3.4.1] - 2025-06-13

## Highlights

* This is primarily a bugfix, rule enhancement, and dialect enhancement release.
* We have also done some light refactoring of the internal codebase.

This release has *80* contributions in it! Also, welcome to the **fourteen** new
contributors! Thanks for your contributions!

## What’s Changed

* Adds support for `RETURNING INTO` clause in `INSERT`, `UPDATE`, `DELETE` and `MERGE` statements in Oracle.
  [#6941](https://github.com/sqlfluff/sqlfluff/pull/6941) [@joaostorrer](https://github.com/joaostorrer)
* converted large_file_skip_byte_limit value to integer before comparison, fixes Issue #6847 [#6848](https://github.com/sqlfluff/sqlfluff/pull/6848) [@M-van-alten-BW](https://github.com/M-van-alten-BW)
* Feat: Support explode in sparksql lateral clauses [#6875](https://github.com/sqlfluff/sqlfluff/pull/6875) [@ShaneMazur](https://github.com/ShaneMazur)
* Trino: Support for INSERT, COMMIT, ROLLBACK and SET SESSION statements [#6928](https://github.com/sqlfluff/sqlfluff/pull/6928) [@ykicisk](https://github.com/ykicisk)
* feat(redshift): add support for ALTER TABLE SET LOCATION syntax [#6936](https://github.com/sqlfluff/sqlfluff/pull/6936) [@studansp](https://github.com/studansp)
* TSQL: fix `USER` (bare function) unparsable [#6935](https://github.com/sqlfluff/sqlfluff/pull/6935) [@keen85](https://github.com/keen85)
* TSQL: add support for `SECURITY POLICY` (`CREATE, ALTER, DROP`) [#6937](https://github.com/sqlfluff/sqlfluff/pull/6937) [@keen85](https://github.com/keen85)
* CI: update coverage paths for C:\ directory [#6938](https://github.com/sqlfluff/sqlfluff/pull/6938) [@keraion](https://github.com/keraion)
* Adds support for column list after table name in COLLECT STATISTICS [#6896](https://github.com/sqlfluff/sqlfluff/pull/6896) [@thomascjohnson](https://github.com/thomascjohnson)
* Adds support for trigger predicates in `IF` statements and `:NEW.` variables in assignment statements in Oracle. [#6930](https://github.com/sqlfluff/sqlfluff/pull/6930) [@joaostorrer](https://github.com/joaostorrer)
* Adds support for `<=>` operator in MySQL. [#6929](https://github.com/sqlfluff/sqlfluff/pull/6929) [@joaostorrer](https://github.com/joaostorrer)
* CLI: Read inline configurations when using `--stdin-filename` [#6900](https://github.com/sqlfluff/sqlfluff/pull/6900) [@keraion](https://github.com/keraion)
* Refactor Bracketed [#6827](https://github.com/sqlfluff/sqlfluff/pull/6827) [@WittierDinosaur](https://github.com/WittierDinosaur)
* cli: Enable `--disregard-sqlfluffignores` for `fix` and `format` [#6885](https://github.com/sqlfluff/sqlfluff/pull/6885) [@keraion](https://github.com/keraion)
* Improves support for `SET` statements in MySQL.
  [#6922](https://github.com/sqlfluff/sqlfluff/pull/6922) [@joaostorrer](https://github.com/joaostorrer)
* RF01: Fix quoted object references with dots [#6923](https://github.com/sqlfluff/sqlfluff/pull/6923) [@keraion](https://github.com/keraion)
* LT14: Add exclusion configuration option [#6909](https://github.com/sqlfluff/sqlfluff/pull/6909) [@keraion](https://github.com/keraion)
* Duckdb: Support double equals comparison [#6924](https://github.com/sqlfluff/sqlfluff/pull/6924) [@keraion](https://github.com/keraion)
* Fix mypy 1.16 issues [#6920](https://github.com/sqlfluff/sqlfluff/pull/6920) [@keraion](https://github.com/keraion)
* Databricks: Prevent parsing error when reading from a streaming file [#6910](https://github.com/sqlfluff/sqlfluff/pull/6910) [@cpwithers](https://github.com/cpwithers)
* dbup style variables into placeholder templater [#6852](https://github.com/sqlfluff/sqlfluff/pull/6852) [@dreik](https://github.com/dreik)
* Postgres: Add support for session authorization [#6903](https://github.com/sqlfluff/sqlfluff/pull/6903) [@pierrand](https://github.com/pierrand)
* Redshift: Fix documentation link [#6901](https://github.com/sqlfluff/sqlfluff/pull/6901) [@joaonunesk](https://github.com/joaonunesk)
* ST05: Ignore table_expressions that aren't bracketed [#6883](https://github.com/sqlfluff/sqlfluff/pull/6883) [@keraion](https://github.com/keraion)
* Pin to Click < 8.3.0 [#6892](https://github.com/sqlfluff/sqlfluff/pull/6892) [@wyardley](https://github.com/wyardley)
* Fix RawFileSlice idx and tags in lexer_test [#6890](https://github.com/sqlfluff/sqlfluff/pull/6890) [@keraion](https://github.com/keraion)
* CV12: fix assertion on bracketed join [#6898](https://github.com/sqlfluff/sqlfluff/pull/6898) [@keraion](https://github.com/keraion)
* Redshift: Add support for Kafka external schema [#6899](https://github.com/sqlfluff/sqlfluff/pull/6899) [@pierrand](https://github.com/pierrand)
* Adds support for variadic declarations in Postgres.
* Adds support for Teradata CREATE TABLE with MAP option [#6895](https://github.com/sqlfluff/sqlfluff/pull/6895) [@thomascjohnson](https://github.com/thomascjohnson)
* MySQL: Add collate expressions [#6886](https://github.com/sqlfluff/sqlfluff/pull/6886) [@keraion](https://github.com/keraion)
* Add support for PRIVATE MATERIALIZED VIEW syntax to databricks dialect [#6882](https://github.com/sqlfluff/sqlfluff/pull/6882) [@TheCleric](https://github.com/TheCleric)
* CI: Ignore mypy call-arg for CliRunner mix_stderr [#6884](https://github.com/sqlfluff/sqlfluff/pull/6884) [@keraion](https://github.com/keraion)
* Postgres / Redshift: Add support for prepared statement [#6874](https://github.com/sqlfluff/sqlfluff/pull/6874) [@pierrand](https://github.com/pierrand)
* Add support for Snowflake `LOAD_UNCERTAIN_FILES` [#6879](https://github.com/sqlfluff/sqlfluff/pull/6879) [@mxr](https://github.com/mxr)
* Update the Dockerfile to use Python 3.12 [#6876](https://github.com/sqlfluff/sqlfluff/pull/6876) [@keraion](https://github.com/keraion)
* TSQL: Support named windows [#6869](https://github.com/sqlfluff/sqlfluff/pull/6869) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Support Security Label [#6868](https://github.com/sqlfluff/sqlfluff/pull/6868) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Bigquery: Support Pipe statements [#6870](https://github.com/sqlfluff/sqlfluff/pull/6870) [@WittierDinosaur](https://github.com/WittierDinosaur)
* LT01: Brackets should touch datatypes [#6867](https://github.com/sqlfluff/sqlfluff/pull/6867) [@keraion](https://github.com/keraion)
* chore: Clean up OptionallyDelimited workarounds in Snowflake [#6866](https://github.com/sqlfluff/sqlfluff/pull/6866) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Add pgvector operators [#6863](https://github.com/sqlfluff/sqlfluff/pull/6863) [@reteps](https://github.com/reteps)
* Add support for OFFSET to ANSI and POSTGRES dialect [#6862](https://github.com/sqlfluff/sqlfluff/pull/6862) [@reteps](https://github.com/reteps)
* Terminate SELECT with ON CONFLICT [#6864](https://github.com/sqlfluff/sqlfluff/pull/6864) [@reteps](https://github.com/reteps)
* feat(snowflake): add support for 'DROP ICEBERG TABLE' statement [#6860](https://github.com/sqlfluff/sqlfluff/pull/6860) [@VishalSinha1103](https://github.com/VishalSinha1103)
* SQLite: allow trailing unsigned [#6856](https://github.com/sqlfluff/sqlfluff/pull/6856) [@covracer](https://github.com/covracer)
* Fixes Oracle parsing tree with `/` operator. [#6853](https://github.com/sqlfluff/sqlfluff/pull/6853) [@joaostorrer](https://github.com/joaostorrer)
* OptionallyDelimited Grammar [#6837](https://github.com/sqlfluff/sqlfluff/pull/6837) [@WittierDinosaur](https://github.com/WittierDinosaur)
* TSQL: Add support for JSON_ARRAY and JSON_OBJECT [#6845](https://github.com/sqlfluff/sqlfluff/pull/6845) [@nathanthorell](https://github.com/nathanthorell)
* Optimise CI test performance [#6829](https://github.com/sqlfluff/sqlfluff/pull/6829) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: add EXTERNAL VOLUME and EXECUTE MANAGED TASK parameters to GRANT clause [#6839](https://github.com/sqlfluff/sqlfluff/pull/6839) [@moreaupascal56](https://github.com/moreaupascal56)
* Adds support for `REMOVE DUPLICATES` clause in merge statements in Redshift [#6841](https://github.com/sqlfluff/sqlfluff/pull/6841) [@joaostorrer](https://github.com/joaostorrer)
* Redshift: Add support for `select exclude` syntax [#6838](https://github.com/sqlfluff/sqlfluff/pull/6838) [@pierrand](https://github.com/pierrand)
* Snowflake: Create Network Policy Network Rule List [#6835](https://github.com/sqlfluff/sqlfluff/pull/6835) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Full support of resource monitors [#6834](https://github.com/sqlfluff/sqlfluff/pull/6834) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove AliasedObjectReferenceSegment [#6833](https://github.com/sqlfluff/sqlfluff/pull/6833) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Fix RETURNING Select terminator [#6832](https://github.com/sqlfluff/sqlfluff/pull/6832) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Trino: Better support for CreateTable [#6831](https://github.com/sqlfluff/sqlfluff/pull/6831) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Support use_privatelink_endpoint [#6830](https://github.com/sqlfluff/sqlfluff/pull/6830) [@WittierDinosaur](https://github.com/WittierDinosaur)
* CV12 rule to exclude APPLY clause from the rule [#6828](https://github.com/sqlfluff/sqlfluff/pull/6828) [@paysni](https://github.com/paysni)
* Set up Docker development environment [#6826](https://github.com/sqlfluff/sqlfluff/pull/6826) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Feat: Snowflake, Add catalog integration & create iceberg table [#6793](https://github.com/sqlfluff/sqlfluff/pull/6793) [@moreaupascal56](https://github.com/moreaupascal56)
* Snowflake: Add create API INTEGRATION support [#6815](https://github.com/sqlfluff/sqlfluff/pull/6815) [@moreaupascal56](https://github.com/moreaupascal56)

## New Contributors

* [@pierrand](https://github.com/pierrand) made their first contribution in [#6838](https://github.com/sqlfluff/sqlfluff/pull/6838)
* [@covracer](https://github.com/covracer) made their first contribution in [#6856](https://github.com/sqlfluff/sqlfluff/pull/6856)
* [@VishalSinha1103](https://github.com/VishalSinha1103) made their first contribution in [#6860](https://github.com/sqlfluff/sqlfluff/pull/6860)
* [@reteps](https://github.com/reteps) made their first contribution in [#6864](https://github.com/sqlfluff/sqlfluff/pull/6864)
* [@mxr](https://github.com/mxr) made their first contribution in [#6879](https://github.com/sqlfluff/sqlfluff/pull/6879)
* [@thomascjohnson](https://github.com/thomascjohnson) made their first contribution in [#6895](https://github.com/sqlfluff/sqlfluff/pull/6895)
* [@wyardley](https://github.com/wyardley) made their first contribution in [#6892](https://github.com/sqlfluff/sqlfluff/pull/6892)
* [@joaonunesk](https://github.com/joaonunesk) made their first contribution in [#6901](https://github.com/sqlfluff/sqlfluff/pull/6901)
* [@dreik](https://github.com/dreik) made their first contribution in [#6852](https://github.com/sqlfluff/sqlfluff/pull/6852)
* [@cpwithers](https://github.com/cpwithers) made their first contribution in [#6910](https://github.com/sqlfluff/sqlfluff/pull/6910)
* [@studansp](https://github.com/studansp) made their first contribution in [#6936](https://github.com/sqlfluff/sqlfluff/pull/6936)
* [@ykicisk](https://github.com/ykicisk) made their first contribution in [#6928](https://github.com/sqlfluff/sqlfluff/pull/6928)
* [@ShaneMazur](https://github.com/ShaneMazur) made their first contribution in [#6875](https://github.com/sqlfluff/sqlfluff/pull/6875)
* [@M-van-alten-BW](https://github.com/M-van-alten-BW) made their first contribution in [#6848](https://github.com/sqlfluff/sqlfluff/pull/6848)

## [3.4.0] - 2025-04-17

## Highlights

This release brings a couple of breaking changes:

* We have dropped support for Python 3.8.
* We have a new rule: `LT15` (`layout.newlines`). This rule allows you to control the number of consecutive blank lines both inside and outside of queries (see the sketch below).

We also have a host of dialect improvements, bugfixes, and general code clean-up.

This release has *79* contributions in it! Also, welcome to the **thirteen** new contributors! Thanks for your contributions!
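As a rough sketch of what `LT15` is for (the table and column names below are invented, and the number of blank lines permitted in each position is configurable rather than fixed):

```sql
-- LT15 can limit runs of blank lines *inside* a statement...
SELECT
    col_a,


    col_b
FROM my_table;



-- ...and also *between* statements, with separate limits for each.
SELECT col_c
FROM my_other_table;
```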
## What’s Changed

* Clickhouse: support more complex types and functions in table definition [#6804](https://github.com/sqlfluff/sqlfluff/pull/6804) [@LSturtew](https://github.com/LSturtew)
* feat: Add possibility to set dbt project_dir as environment variable [#6737](https://github.com/sqlfluff/sqlfluff/pull/6737) [@svdimchenko](https://github.com/svdimchenko)
* Add support for `ZEROFILL` columns in MySQL. [#6735](https://github.com/sqlfluff/sqlfluff/pull/6735) [@joaostorrer](https://github.com/joaostorrer)
* RF02: Add config for ignoring external references found in subqueries [#6791](https://github.com/sqlfluff/sqlfluff/pull/6791) [@keraion](https://github.com/keraion)
* Fix `CREATE TABLE` statements in Redshift. [#6808](https://github.com/sqlfluff/sqlfluff/pull/6808) [@joaostorrer](https://github.com/joaostorrer)
* Improve support for `CREATE USER` statements in Oracle. [#6809](https://github.com/sqlfluff/sqlfluff/pull/6809) [@joaostorrer](https://github.com/joaostorrer)
* Add support for the WITH clause for an OPENROWSET in T-SQL [#6810](https://github.com/sqlfluff/sqlfluff/pull/6810) [@rayz90](https://github.com/rayz90)
* Add hybrid tables to Snowflake dialect [#6812](https://github.com/sqlfluff/sqlfluff/pull/6812) [@moreaupascal56](https://github.com/moreaupascal56)
* Trino Dialect - Parse UNNEST (...) WITH ORDINALITY [#6813](https://github.com/sqlfluff/sqlfluff/pull/6813) [@rileymcdowell](https://github.com/rileymcdowell)
* Adds support for `varchar(max)` in Redshift. [#6806](https://github.com/sqlfluff/sqlfluff/pull/6806) [@joaostorrer](https://github.com/joaostorrer)
* BigQuery: Make select replace `AS` not optional [#6780](https://github.com/sqlfluff/sqlfluff/pull/6780) [@keraion](https://github.com/keraion)
* CI: Update add-to-release-notes to use ubuntu-latest [#6798](https://github.com/sqlfluff/sqlfluff/pull/6798) [@keraion](https://github.com/keraion)
* CI: mirror image deployment to ghcr.io [#6790](https://github.com/sqlfluff/sqlfluff/pull/6790) [@keraion](https://github.com/keraion)
* add ERROR_INTEGRATION to AlterPipeSegment [#6796](https://github.com/sqlfluff/sqlfluff/pull/6796) [@moreaupascal56](https://github.com/moreaupascal56)
* Housekeeping: snowflake use CommentEqualsClauseSegment everywhere [#6795](https://github.com/sqlfluff/sqlfluff/pull/6795) [@moreaupascal56](https://github.com/moreaupascal56)
* Allow quoted charset identifiers for MySQL/MariaDB [#6781](https://github.com/sqlfluff/sqlfluff/pull/6781) [@pprkut](https://github.com/pprkut)
* Fix reserved keywords in Postgres. [#6782](https://github.com/sqlfluff/sqlfluff/pull/6782) [@joaostorrer](https://github.com/joaostorrer)
* Adds support for user defined datatypes in Redshift. [#6783](https://github.com/sqlfluff/sqlfluff/pull/6783) [@joaostorrer](https://github.com/joaostorrer)
* improve alter table statement handling for clickhouse dialect [#6787](https://github.com/sqlfluff/sqlfluff/pull/6787) [@LSturtew](https://github.com/LSturtew)
* Athena: Support some Trino-based v3 functions [#6788](https://github.com/sqlfluff/sqlfluff/pull/6788) [@keraion](https://github.com/keraion)
* SparkSQL/Databricks: Support dot sign operator [#6789](https://github.com/sqlfluff/sqlfluff/pull/6789) [@keraion](https://github.com/keraion)
* RF02: do not trigger on snowflake lambda anonymous parameters [#6689](https://github.com/sqlfluff/sqlfluff/pull/6689) [@shervinmathieu](https://github.com/shervinmathieu)
* Adding additional object to grant statement in snowflake dialect [#6779](https://github.com/sqlfluff/sqlfluff/pull/6779) [@SteveFrensch](https://github.com/SteveFrensch)
* Adds support for `+=`, `-=`, `*=`, `/=` and `%=` operators in tsql. [#6756](https://github.com/sqlfluff/sqlfluff/pull/6756) [@joaostorrer](https://github.com/joaostorrer)
* ST11: Fix quoted table name comparisons [#6768](https://github.com/sqlfluff/sqlfluff/pull/6768) [@keraion](https://github.com/keraion)
* Formatter Stub Types [#6761](https://github.com/sqlfluff/sqlfluff/pull/6761) [@alanmcruickshank](https://github.com/alanmcruickshank)
* CP02, LT01: Fix duplicated elements generated from a `FixPatch` ordering conflict [#6769](https://github.com/sqlfluff/sqlfluff/pull/6769) [@keraion](https://github.com/keraion)
* Added support for geospatial data for MySQL [#6765](https://github.com/sqlfluff/sqlfluff/pull/6765) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Adds support for identity clause in `CREATE TABLE` and `ALTER TABLE` statements in Oracle. [#6767](https://github.com/sqlfluff/sqlfluff/pull/6767) [@joaostorrer](https://github.com/joaostorrer)
* Fix `PivotSegment` to accept alias in `FunctionSegment` in Oracle. [#6766](https://github.com/sqlfluff/sqlfluff/pull/6766) [@joaostorrer](https://github.com/joaostorrer)
* New Rule: LT15 [#6641](https://github.com/sqlfluff/sqlfluff/pull/6641) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Additional cleanup for python 3.9 [#6758](https://github.com/sqlfluff/sqlfluff/pull/6758) [@keraion](https://github.com/keraion)
* feat(snowflake): allow reference variables in create task [#6759](https://github.com/sqlfluff/sqlfluff/pull/6759) [@mrlannigan](https://github.com/mrlannigan)
* Always use utf-8 encoding for toml files [#6760](https://github.com/sqlfluff/sqlfluff/pull/6760) [@keraion](https://github.com/keraion)
* Fix rule failures for MySQL/MariaDB with backticked identifiers [#6702](https://github.com/sqlfluff/sqlfluff/pull/6702) [@pprkut](https://github.com/pprkut)
* `ON CONFLICT` indentation for Postgres [#6660](https://github.com/sqlfluff/sqlfluff/pull/6660) [@WillMatthews](https://github.com/WillMatthews)
* Update release instructions [#6751](https://github.com/sqlfluff/sqlfluff/pull/6751) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for Oracle PL/SQL (Procedures, Functions, Packages, Triggers, Types, Cursors, Loops) [#6635](https://github.com/sqlfluff/sqlfluff/pull/6635) [@joaostorrer](https://github.com/joaostorrer)
* Tighter typing of Rule configs [#6750](https://github.com/sqlfluff/sqlfluff/pull/6750) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix: column aliases should find the topmost alias [#6755](https://github.com/sqlfluff/sqlfluff/pull/6755) [@keraion](https://github.com/keraion)
* AM08: Handle any-cased `UNNEST` [#6757](https://github.com/sqlfluff/sqlfluff/pull/6757) [@keraion](https://github.com/keraion)
* Python 3.9+ Type simplifications [#6745](https://github.com/sqlfluff/sqlfluff/pull/6745) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake dynamic table fixes [#6748](https://github.com/sqlfluff/sqlfluff/pull/6748) [@ulixius9](https://github.com/ulixius9)
* Update TSQL Execute Statement for Expressions [#6744](https://github.com/sqlfluff/sqlfluff/pull/6744) [@nathanthorell](https://github.com/nathanthorell)
* Duckdb: Adding InsertStatementSegment for Duckdb specifics [#6740](https://github.com/sqlfluff/sqlfluff/pull/6740) [@nathanthorell](https://github.com/nathanthorell)
* Drop support for Python 3.8 [#6743](https://github.com/sqlfluff/sqlfluff/pull/6743) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix tox config so that it supports spaces in the path name (#6726) [#6727](https://github.com/sqlfluff/sqlfluff/pull/6727) [@maxgrenderjones](https://github.com/maxgrenderjones)
* BigQuery: support UNION ALL BY NAME and other new set operator modifiers [#6722](https://github.com/sqlfluff/sqlfluff/pull/6722) [@Robin-C](https://github.com/Robin-C)
* Fill in gap in parsing `create user rsa_public_key=...` / snowflake [#6731](https://github.com/sqlfluff/sqlfluff/pull/6731) [@juli4nb4dillo](https://github.com/juli4nb4dillo)
* Adds support for partition options in `ALTER TABLE` statements in MySQL. [#6742](https://github.com/sqlfluff/sqlfluff/pull/6742) [@joaostorrer](https://github.com/joaostorrer)
* Update oracle reserved keywords. [#6730](https://github.com/sqlfluff/sqlfluff/pull/6730) [@joaostorrer](https://github.com/joaostorrer)
* Add support in TSQL for Open Symmetric Key [#6736](https://github.com/sqlfluff/sqlfluff/pull/6736) [@nathanthorell](https://github.com/nathanthorell)
* Cleaning: Remove Todo set tag from Snowflake ALTER TABLE statement [#6738](https://github.com/sqlfluff/sqlfluff/pull/6738) [@moreaupascal56](https://github.com/moreaupascal56)
* SparkSQL: Make colon optional in `STRUCT` datatype [#6739](https://github.com/sqlfluff/sqlfluff/pull/6739) [@NormallyGaussian](https://github.com/NormallyGaussian)
* Redshift: Support GROUP BY ALL [#6714](https://github.com/sqlfluff/sqlfluff/pull/6714) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix tests with Click 8.2 [#6706](https://github.com/sqlfluff/sqlfluff/pull/6706) [@cjwatson](https://github.com/cjwatson)
* Remove `USER` from reserved words in Postgres. [#6707](https://github.com/sqlfluff/sqlfluff/pull/6707) [@joaostorrer](https://github.com/joaostorrer)
* Add support for `key: value` syntax in functions in Postgres. [#6708](https://github.com/sqlfluff/sqlfluff/pull/6708) [@joaostorrer](https://github.com/joaostorrer)
* Fill in Gap in Snowflake `DROP DYNAMIC TABLE` [#6673](https://github.com/sqlfluff/sqlfluff/pull/6673) [@LoganPrice344](https://github.com/LoganPrice344)
* Fill in Gap in Snowflake `ALTER TABLE` Parsing [#6654](https://github.com/sqlfluff/sqlfluff/pull/6654) [@LoganPrice344](https://github.com/LoganPrice344)
* Fill in Gap in Snowflake `CREATE AUTHENTICATION POLICY` Parsing [#6685](https://github.com/sqlfluff/sqlfluff/pull/6685) [@LoganPrice344](https://github.com/LoganPrice344)
* Adds support for `JoinClauseSegment` in `DeleteStatementSegment` in Postgres [#6692](https://github.com/sqlfluff/sqlfluff/pull/6692) [@joaostorrer](https://github.com/joaostorrer)
* Fix check constraints in `ALTER TABLE` statements in Oracle. [#6661](https://github.com/sqlfluff/sqlfluff/pull/6661) [@joaostorrer](https://github.com/joaostorrer)
* Add support for Obevo annotations. [#6662](https://github.com/sqlfluff/sqlfluff/pull/6662) [@joaostorrer](https://github.com/joaostorrer)
* Add support for `DEFAULT` keyword as function parameter in tsql. [#6663](https://github.com/sqlfluff/sqlfluff/pull/6663) [@joaostorrer](https://github.com/joaostorrer)
* Fill in gap in `DECLARE` Syntax [#6665](https://github.com/sqlfluff/sqlfluff/pull/6665) [@LoganPrice344](https://github.com/LoganPrice344)
* AL03: Lint for missing aliases on subqueries [#6666](https://github.com/sqlfluff/sqlfluff/pull/6666) [@keraion](https://github.com/keraion)
* IN TABLE/CTE and implicit Array, Tuple support for ClickHouse [#6667](https://github.com/sqlfluff/sqlfluff/pull/6667) [@pheepa](https://github.com/pheepa)
* ST11: Fix rule name in documentation for `noqa` [#6668](https://github.com/sqlfluff/sqlfluff/pull/6668) [@keraion](https://github.com/keraion)
* Fill in Gap in Snowflake `ALTER TABLE` Parsing [#6671](https://github.com/sqlfluff/sqlfluff/pull/6671) [@LoganPrice344](https://github.com/LoganPrice344)
* Allows Snowflake `CREATE ROW ACCESS POLICY` to Parse Quoted Policy Names [#6648](https://github.com/sqlfluff/sqlfluff/pull/6648) [@LoganPrice344](https://github.com/LoganPrice344)
* Add support for `EVENT` statements in MySQL. [#6646](https://github.com/sqlfluff/sqlfluff/pull/6646) [@joaostorrer](https://github.com/joaostorrer)
* Fill in Gap in Snowflake `Exception` Parsing [#6645](https://github.com/sqlfluff/sqlfluff/pull/6645) [@LoganPrice344](https://github.com/LoganPrice344)
* Snowflake: Add TITLE parameter support in CREATE STREAMLIT statement [#6642](https://github.com/sqlfluff/sqlfluff/pull/6642) [@kawashiro](https://github.com/kawashiro)
* TSQL: Add support for OPENQUERY [#6640](https://github.com/sqlfluff/sqlfluff/pull/6640) [@nathanthorell](https://github.com/nathanthorell)
* Snowflake: Add support for CORTEX SEARCH SERVICE [#6639](https://github.com/sqlfluff/sqlfluff/pull/6639) [@sfc-gh-amauser](https://github.com/sfc-gh-amauser)
* Add support for `WithCompoundStatementSegment` in procedures in Postgres [#6632](https://github.com/sqlfluff/sqlfluff/pull/6632) [@joaostorrer](https://github.com/joaostorrer)
* Add support for `SET NEW.` statements in MySQL. [#6634](https://github.com/sqlfluff/sqlfluff/pull/6634) [@joaostorrer](https://github.com/joaostorrer)
* feat: Add missing parquet file format keywords to SF dialect [#6638](https://github.com/sqlfluff/sqlfluff/pull/6638) [@jjlkant](https://github.com/jjlkant)

## New Contributors

* [@jjlkant](https://github.com/jjlkant) made their first contribution in [#6638](https://github.com/sqlfluff/sqlfluff/pull/6638)
* [@sfc-gh-amauser](https://github.com/sfc-gh-amauser) made their first contribution in [#6639](https://github.com/sqlfluff/sqlfluff/pull/6639)
* [@cjwatson](https://github.com/cjwatson) made their first contribution in [#6706](https://github.com/sqlfluff/sqlfluff/pull/6706)
* [@NormallyGaussian](https://github.com/NormallyGaussian) made their first contribution in [#6739](https://github.com/sqlfluff/sqlfluff/pull/6739)
* [@juli4nb4dillo](https://github.com/juli4nb4dillo) made their first contribution in [#6731](https://github.com/sqlfluff/sqlfluff/pull/6731)
* [@Robin-C](https://github.com/Robin-C) made their first contribution in [#6722](https://github.com/sqlfluff/sqlfluff/pull/6722)
* [@maxgrenderjones](https://github.com/maxgrenderjones) made their first contribution in [#6727](https://github.com/sqlfluff/sqlfluff/pull/6727)
* [@WillMatthews](https://github.com/WillMatthews) made their first contribution in [#6660](https://github.com/sqlfluff/sqlfluff/pull/6660)
* [@mrlannigan](https://github.com/mrlannigan) made their first contribution in [#6759](https://github.com/sqlfluff/sqlfluff/pull/6759)
* [@SteveFrensch](https://github.com/SteveFrensch) made their first contribution in [#6779](https://github.com/sqlfluff/sqlfluff/pull/6779)
* [@shervinmathieu](https://github.com/shervinmathieu) made their first contribution in [#6689](https://github.com/sqlfluff/sqlfluff/pull/6689)
* [@LSturtew](https://github.com/LSturtew) made their first contribution in [#6787](https://github.com/sqlfluff/sqlfluff/pull/6787)
* [@svdimchenko](https://github.com/svdimchenko) made their first contribution in [#6737](https://github.com/sqlfluff/sqlfluff/pull/6737)

## [3.3.1] - 2025-02-05

## Highlights

This is a bugfix release resolving several issues from 3.3.0 and earlier:

* Dialect improvements for Snowflake, Oracle, SQLite, BigQuery, TSQL, Databricks, SparkSQL, Hive, Trino & Postgres.
* Rule improvements for AL04, AM08, RF01, RF02, ST03, ST05, ST09, ST10, ST11.

This release also includes the first contributions from **thirteen** new contributors! Welcome to the project and thank you for your contributions.
🎉🎉🏆🏆🎉🎉

## What’s Changed

* Update acronym ETL/ELT in README.md [#6500](https://github.com/sqlfluff/sqlfluff/pull/6500) [@galenseilis](https://github.com/galenseilis)
* Fill in Gap for Snowflake `CREATE` Syntax Parsing [#6514](https://github.com/sqlfluff/sqlfluff/pull/6514) [@LoganPrice344](https://github.com/LoganPrice344)
* Add support for PostGIS `geometry(point)` datatype. [#6543](https://github.com/sqlfluff/sqlfluff/pull/6543) [@joaostorrer](https://github.com/joaostorrer)
* Allow double equals in hive dialect [#6623](https://github.com/sqlfluff/sqlfluff/pull/6623) [@TheCleric](https://github.com/TheCleric)
* Bump Python version in the docs and metadata [#6626](https://github.com/sqlfluff/sqlfluff/pull/6626) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Added support for Variable Substitution for `Snowflake` `Target_lag` [#6567](https://github.com/sqlfluff/sqlfluff/pull/6567) [@LoganPrice344](https://github.com/LoganPrice344)
* Sqlite: Added `set_clause_list` to `UPDATE` statement [#6605](https://github.com/sqlfluff/sqlfluff/pull/6605) [@keraion](https://github.com/keraion)
* Oracle: Support for `NOMAXVALUE` and `NOMINVALUE` [#6606](https://github.com/sqlfluff/sqlfluff/pull/6606) [@keraion](https://github.com/keraion)
* RF02: Handle additional edge cases with subqueries [#6607](https://github.com/sqlfluff/sqlfluff/pull/6607) [@keraion](https://github.com/keraion)
* TSQL Add Set Context_Info and Replicate function [#6613](https://github.com/sqlfluff/sqlfluff/pull/6613) [@nathanthorell](https://github.com/nathanthorell)
* Add Support for Snowflake Variables [#6618](https://github.com/sqlfluff/sqlfluff/pull/6618) [@LoganPrice344](https://github.com/LoganPrice344)
* Add support for BigQuery `LOAD DATA` statement [#6612](https://github.com/sqlfluff/sqlfluff/pull/6612) [@VMois](https://github.com/VMois)
* CI: Update to black 25.1 and pre-commit hooks [#6615](https://github.com/sqlfluff/sqlfluff/pull/6615) [@keraion](https://github.com/keraion)
* Add support for psql `\gset` and `\gexec` meta commands. [#6555](https://github.com/sqlfluff/sqlfluff/pull/6555) [@joaostorrer](https://github.com/joaostorrer)
* TSQL improve support for CREATE USER [#6608](https://github.com/sqlfluff/sqlfluff/pull/6608) [@nathanthorell](https://github.com/nathanthorell)
* Add support for Postgres `ALTER FOREIGN TABLE` statement. [#6597](https://github.com/sqlfluff/sqlfluff/pull/6597) [@joaostorrer](https://github.com/joaostorrer)
* Add zip and zip_strict dbt jinja2 functions [#6564](https://github.com/sqlfluff/sqlfluff/pull/6564) [@boushphong](https://github.com/boushphong)
* TSQL: fix for hex default constraint [#6586](https://github.com/sqlfluff/sqlfluff/pull/6586) [@nathanthorell](https://github.com/nathanthorell)
* Add support for Oracle SQL*Plus substitution variable `&`. [#6573](https://github.com/sqlfluff/sqlfluff/pull/6573) [@joaostorrer](https://github.com/joaostorrer)
* ST11: Table and wildcard reference fixes [#6572](https://github.com/sqlfluff/sqlfluff/pull/6572) [@keraion](https://github.com/keraion)
* Adding support in TSQL for Create Login [#6570](https://github.com/sqlfluff/sqlfluff/pull/6570) [@nathanthorell](https://github.com/nathanthorell)
* ST05: Ordering and rule conflict fixes [#6578](https://github.com/sqlfluff/sqlfluff/pull/6578) [@keraion](https://github.com/keraion)
* TSQL: Add `WITH ROLLUP` option for `GROUP BY` [#6579](https://github.com/sqlfluff/sqlfluff/pull/6579) [@keraion](https://github.com/keraion)
* ST03: Handle quotes in CTE names [#6591](https://github.com/sqlfluff/sqlfluff/pull/6591) [@keraion](https://github.com/keraion)
* ST09: Handle `lt_eq_gt` operator [#6592](https://github.com/sqlfluff/sqlfluff/pull/6592) [@keraion](https://github.com/keraion)
* AL04: Check for duplicates in subquery aliases and table references [#6593](https://github.com/sqlfluff/sqlfluff/pull/6593) [@keraion](https://github.com/keraion)
* TSQL: adds drop and add period to alter table [#6590](https://github.com/sqlfluff/sqlfluff/pull/6590) [@nathanthorell](https://github.com/nathanthorell)
* [Starrocks] Support for generated columns [#6581](https://github.com/sqlfluff/sqlfluff/pull/6581) [@maver1ck](https://github.com/maver1ck)
* Fix TSQL OPENROWSET for ADLS use (#5114) [#6584](https://github.com/sqlfluff/sqlfluff/pull/6584) [@rayz90](https://github.com/rayz90)
* Add support for Postgres `CREATE OPERATOR` syntax. [#6540](https://github.com/sqlfluff/sqlfluff/pull/6540) [@joaostorrer](https://github.com/joaostorrer)
* Update postgres bare functions. [#6545](https://github.com/sqlfluff/sqlfluff/pull/6545) [@joaostorrer](https://github.com/joaostorrer)
* Add Postgres `DROP FOREIGN TABLE` statement. [#6548](https://github.com/sqlfluff/sqlfluff/pull/6548) [@joaostorrer](https://github.com/joaostorrer)
* Add support for Postgres full text search operator `!!`. [#6541](https://github.com/sqlfluff/sqlfluff/pull/6541) [@joaostorrer](https://github.com/joaostorrer)
* Fix Snowflake Dynamic Table Parsing [#6566](https://github.com/sqlfluff/sqlfluff/pull/6566) [@ulixius9](https://github.com/ulixius9)
* add variable use in tsql TemporalQuerySegment [#6565](https://github.com/sqlfluff/sqlfluff/pull/6565) [@nathanthorell](https://github.com/nathanthorell)
* Add support for Postgres functions with column referenced type and argmode between parameter name and datatype. [#6542](https://github.com/sqlfluff/sqlfluff/pull/6542) [@joaostorrer](https://github.com/joaostorrer)
* add MariaDB to README.md [#6559](https://github.com/sqlfluff/sqlfluff/pull/6559) [@robertsilen](https://github.com/robertsilen)
* mypyc: workaround for ABC inheritance [#6552](https://github.com/sqlfluff/sqlfluff/pull/6552) [@keraion](https://github.com/keraion)
* Add DuckDB to RF01 filter for dialects with dot-access [#6554](https://github.com/sqlfluff/sqlfluff/pull/6554) [@brunobeltran](https://github.com/brunobeltran)
* Fill in Gap in Snowflake `ALTER TAG` Syntax [#6531](https://github.com/sqlfluff/sqlfluff/pull/6531) [@LoganPrice344](https://github.com/LoganPrice344)
* Fill in Gap in `CREATE ROW ACCESS POLICY` Syntax Parsing [#6550](https://github.com/sqlfluff/sqlfluff/pull/6550) [@LoganPrice344](https://github.com/LoganPrice344)
* [docs] Update CONTRIBUTING.md virtual env setup instructions [#6538](https://github.com/sqlfluff/sqlfluff/pull/6538) [@sarahmccuan](https://github.com/sarahmccuan)
* Added logic to parse named row fields using reference operator (.) for trino dialect [#6536](https://github.com/sqlfluff/sqlfluff/pull/6536) [@prabh-me](https://github.com/prabh-me)
* SparkSQL dialect: Allow datatypes for column definition in create view statements [#6518](https://github.com/sqlfluff/sqlfluff/pull/6518) [@KikeSenpai](https://github.com/KikeSenpai)
* Fill in Gap for `ALTER_TABLE_COLUMN` Syntax Parsing [#6526](https://github.com/sqlfluff/sqlfluff/pull/6526) [@LoganPrice344](https://github.com/LoganPrice344)
* fix: databricks describe volume recognition [#6529](https://github.com/sqlfluff/sqlfluff/pull/6529) [@benfdking](https://github.com/benfdking)
* Fill in Gap in Snowflake `ALTER ROW ACCESS POLICY` Syntax Parsing [#6534](https://github.com/sqlfluff/sqlfluff/pull/6534) [@LoganPrice344](https://github.com/LoganPrice344)
* Fill in Gap for Snowflake `ALTER warehouse` Syntax [#6516](https://github.com/sqlfluff/sqlfluff/pull/6516) [@LoganPrice344](https://github.com/LoganPrice344)
* [docs] Update missing convention whitespace for CV10 [#6528](https://github.com/sqlfluff/sqlfluff/pull/6528) [@sarahmccuan](https://github.com/sarahmccuan)
* Fill in gaps for `GRANT` syntax [#6520](https://github.com/sqlfluff/sqlfluff/pull/6520) [@LoganPrice344](https://github.com/LoganPrice344)
* Snowflake: allow multiple predecessor tasks in CREATE/ALTER TASK [#6510](https://github.com/sqlfluff/sqlfluff/pull/6510) [@mrebaker](https://github.com/mrebaker)
* AM08: fix edge cases [#6506](https://github.com/sqlfluff/sqlfluff/pull/6506) [@rogalski](https://github.com/rogalski)
* ST10: Fix edge cases [#6505](https://github.com/sqlfluff/sqlfluff/pull/6505) [@rogalski](https://github.com/rogalski)
* Snowflake - Fill in gaps in `GRANT` command parsing [#6503](https://github.com/sqlfluff/sqlfluff/pull/6503) [@korverdev](https://github.com/korverdev)
* Add support for Snowflake's `CREATE OR ALTER` syntax [#6497](https://github.com/sqlfluff/sqlfluff/pull/6497) [@korverdev](https://github.com/korverdev)
* TSQL dialect - Allow for multiple columns in ALTER TABLE DROP COLUMN statements [#6501](https://github.com/sqlfluff/sqlfluff/pull/6501) [@korverdev](https://github.com/korverdev)

## New Contributors

* [@korverdev](https://github.com/korverdev) made their first contribution in [#6501](https://github.com/sqlfluff/sqlfluff/pull/6501)
* [@LoganPrice344](https://github.com/LoganPrice344) made their first contribution in [#6520](https://github.com/sqlfluff/sqlfluff/pull/6520)
* [@sarahmccuan](https://github.com/sarahmccuan) made their first contribution in [#6528](https://github.com/sqlfluff/sqlfluff/pull/6528)
* [@benfdking](https://github.com/benfdking) made their first contribution in [#6529](https://github.com/sqlfluff/sqlfluff/pull/6529)
* [@KikeSenpai](https://github.com/KikeSenpai) made their first contribution in [#6518](https://github.com/sqlfluff/sqlfluff/pull/6518)
* [@prabh-me](https://github.com/prabh-me) made their first contribution in [#6536](https://github.com/sqlfluff/sqlfluff/pull/6536)
* [@brunobeltran](https://github.com/brunobeltran) made their first contribution in [#6554](https://github.com/sqlfluff/sqlfluff/pull/6554)
* [@robertsilen](https://github.com/robertsilen) made their first contribution in [#6559](https://github.com/sqlfluff/sqlfluff/pull/6559)
* [@nathanthorell](https://github.com/nathanthorell) made their first contribution in [#6565](https://github.com/sqlfluff/sqlfluff/pull/6565)
* [@rayz90](https://github.com/rayz90) made their first contribution in [#6584](https://github.com/sqlfluff/sqlfluff/pull/6584)
* [@boushphong](https://github.com/boushphong) made their first contribution in [#6564](https://github.com/sqlfluff/sqlfluff/pull/6564)
* [@VMois](https://github.com/VMois) made their first contribution in [#6612](https://github.com/sqlfluff/sqlfluff/pull/6612)
* [@galenseilis](https://github.com/galenseilis) made their first contribution in [#6500](https://github.com/sqlfluff/sqlfluff/pull/6500)

## [3.3.0] - 2024-12-10

## Highlights

This release brings a few more significant changes. Especially given the introduction of several new rules, we highly recommend testing this release on your project before upgrading, to make sure they are configured appropriately for your project style guide. As always, we have tried to make sure that the defaults for all new rules are both widely applicable and fairly light touch. While all have been tested on some existing larger codebases which the maintainers have access to, do still report any bugs you might find on GitHub in the usual manner.

* We've dropped the `appdirs` package as a dependency (as an abandoned project) and added `platformdirs` instead. Users should not notice any functionality changes beyond the different dependency.
* *TWO* new dialects: _Impala_ and _StarRocks_.
* *FIVE* new rules:
    * `AM08` (`ambiguous.join_condition`), which detects `JOIN` clauses without conditions (i.e. without an `ON` or `USING` clause). These are often typos and can result in significant row count increases if unintended.
    * `CV12` (`convention.join_condition`), which is related to `AM08` and detects cases where users have used a `WHERE` clause instead of a `JOIN ... ON ...` clause to express their join conditions. The join condition is a form of metadata and should communicate to the end user how the table should be joined. Mixing this information into the `WHERE` clause makes the SQL harder to understand (see the example after this list).
    * `LT14` (`layout.keyword_newline`), which allows certain keywords to trigger line breaks in queries. Primarily this forces the main `SELECT` statement clauses like `WHERE`, `GROUP BY` etc. onto new lines. This rule has been designed to be highly configurable, but with sensible light-touch defaults. Check out the docs to adapt it to the conventions of your project.
    * `ST10` (`structure.constant_expression`): some SQL users include redundant expressions in their code (e.g. `WHERE tbl.col = tbl.col`). These conditions always evaluate to a constant outcome (i.e. always evaluate as `TRUE` or `FALSE`) and so add no functionality or meaning to the query. This rule catches them.
    * `ST11` (`structure.unused_join`), which detects unused joins in SQL statements, and is designed to catch tables that were once used, but where the column references have since been removed and now the table is unnecessary.
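As a minimal sketch of the join patterns that `AM08` and `CV12` target (the table and column names are invented for illustration):

```sql
-- Flagged by CV12: the join condition is hidden in the WHERE clause.
-- (Without the WHERE clause at all, AM08 would flag the bare JOIN.)
SELECT o.order_id, c.customer_name
FROM orders AS o
JOIN customers AS c
WHERE o.customer_id = c.customer_id;

-- Preferred: the join condition is stated explicitly with ON.
SELECT o.order_id, c.customer_name
FROM orders AS o
JOIN customers AS c
    ON o.customer_id = c.customer_id;
```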
Beyond these changes, we've seen a whole host of dialect improvements to almost *all* of the supported dialects and several bugfixes which are combined into this release.

We also welcome **TWELVE** new contributors to the project in this release. Thanks to all of them for their hard work 🚀🏆🚀.

## What’s Changed

* New Rule LT14: Keyword line positioning [#6213](https://github.com/sqlfluff/sqlfluff/pull/6213) [@keraion](https://github.com/keraion)
* New Rule ST11: Detect unused tables in join [#5266](https://github.com/sqlfluff/sqlfluff/pull/5266) [@danparizher](https://github.com/danparizher)
* Snowflake Create Table allow inline foreign key with on delete … [#6486](https://github.com/sqlfluff/sqlfluff/pull/6486) [@WobblyRobbly](https://github.com/WobblyRobbly)
* Fix minor linting error in CI [#6483](https://github.com/sqlfluff/sqlfluff/pull/6483) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: alter table on delete and update support [#6473](https://github.com/sqlfluff/sqlfluff/pull/6473) [@WobblyRobbly](https://github.com/WobblyRobbly)
* New Rules AM08 + CV12: Detect implicit cross joins [#6239](https://github.com/sqlfluff/sqlfluff/pull/6239) [@rogalski](https://github.com/rogalski)
* New Rule ST10: const expression checker [#6392](https://github.com/sqlfluff/sqlfluff/pull/6392) [@rogalski](https://github.com/rogalski)
* DuckDB: Support MAP data type [#6478](https://github.com/sqlfluff/sqlfluff/pull/6478) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Hive: Add 'ALTER VIEW' query grammar [#6479](https://github.com/sqlfluff/sqlfluff/pull/6479) [@mrebaker](https://github.com/mrebaker)
* Standardise json operator spacing between dialects [#6447](https://github.com/sqlfluff/sqlfluff/pull/6447) [@WittierDinosaur](https://github.com/WittierDinosaur)
* fixes #6463: Set Variable Parsing for SparkSQL and Databricks [#6464](https://github.com/sqlfluff/sqlfluff/pull/6464) [@fstg1992](https://github.com/fstg1992)
* Teradata: support REPLACE VIEW and LOCKING ... FOR ... syntax [#6467](https://github.com/sqlfluff/sqlfluff/pull/6467) [@V-D-L-P](https://github.com/V-D-L-P)
* Rule names in warnings logic [#6459](https://github.com/sqlfluff/sqlfluff/pull/6459) [@LuigiCerone](https://github.com/LuigiCerone)
* Bigquery: Support column level key definitions [#6465](https://github.com/sqlfluff/sqlfluff/pull/6465) [@keraion](https://github.com/keraion)
* Add Implicit Indents to Qualify [#6438](https://github.com/sqlfluff/sqlfluff/pull/6438) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Fix Select statement ordering [#6446](https://github.com/sqlfluff/sqlfluff/pull/6446) [@WittierDinosaur](https://github.com/WittierDinosaur)
* fixes #6457: databricks dialect alter table foo drop column bar [#6461](https://github.com/sqlfluff/sqlfluff/pull/6461) [@fstg1992](https://github.com/fstg1992)
* Switch from `appdirs` to `platformdirs` [#6399](https://github.com/sqlfluff/sqlfluff/pull/6399) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Impala: support CREATE TABLE AS SELECT [#6458](https://github.com/sqlfluff/sqlfluff/pull/6458) [@mrebaker](https://github.com/mrebaker)
* Databricks Dialect: Backticked function identifiers now parsable [#6453](https://github.com/sqlfluff/sqlfluff/pull/6453) [@fstg1992](https://github.com/fstg1992)
* Issue #6417: Leading -- MAGIC Cells don't break parsing of notebooks [#6454](https://github.com/sqlfluff/sqlfluff/pull/6454) [@fstg1992](https://github.com/fstg1992)
* Add "target_path" configuration to the dbt templater [#6423](https://github.com/sqlfluff/sqlfluff/pull/6423) [@wircho](https://github.com/wircho)
* Sparksql: Fix ordering of create table options [#6441](https://github.com/sqlfluff/sqlfluff/pull/6441) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Dialect: Impala [#6445](https://github.com/sqlfluff/sqlfluff/pull/6445) [@mrebaker](https://github.com/mrebaker)
* RF02: Allows for lambda functions in Databricks [#6444](https://github.com/sqlfluff/sqlfluff/pull/6444) [@keraion](https://github.com/keraion)
* SQLite: Support any order of VARYING/NATIVE in CHAR types [#6443](https://github.com/sqlfluff/sqlfluff/pull/6443) [@keraion](https://github.com/keraion)
* Snowflake: Allow literals in match_by_column_name [#6442](https://github.com/sqlfluff/sqlfluff/pull/6442) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Trino: Remove TemporaryTransientGrammar [#6440](https://github.com/sqlfluff/sqlfluff/pull/6440) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Mysql: Fix parsing of system variables [#6439](https://github.com/sqlfluff/sqlfluff/pull/6439) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Sparksql: Fix hint function for proper spacing [#6437](https://github.com/sqlfluff/sqlfluff/pull/6437) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Support ORDER BY boolean [#6435](https://github.com/sqlfluff/sqlfluff/pull/6435) [@WittierDinosaur](https://github.com/WittierDinosaur)
* TSQL: allow `NEXT VALUE FOR` use as expression [#6431](https://github.com/sqlfluff/sqlfluff/pull/6431) [@timz-st](https://github.com/timz-st)
* Prework for introducing mypyc [#6433](https://github.com/sqlfluff/sqlfluff/pull/6433) [@rogalski](https://github.com/rogalski)
* Fix pre-commit on main branch [#6432](https://github.com/sqlfluff/sqlfluff/pull/6432) [@rogalski](https://github.com/rogalski)
* Initial support for Starrocks dialect [#6415](https://github.com/sqlfluff/sqlfluff/pull/6415) [@maver1ck](https://github.com/maver1ck)
* Databricks: Parse Table Valued Functions [#6417](https://github.com/sqlfluff/sqlfluff/pull/6417) [@fstg1992](https://github.com/fstg1992)
* Snowflake: Support `PARTITION_TYPE` for `CREATE EXTERNAL TABLE` [#6422](https://github.com/sqlfluff/sqlfluff/pull/6422) [@ninazacharia-toast](https://github.com/ninazacharia-toast)
* Fix docs for CP04 config and add test cases [#6416](https://github.com/sqlfluff/sqlfluff/pull/6416) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix: Parse violations not being shown when run `fix` command with `--show-lint-violations` [#6382](https://github.com/sqlfluff/sqlfluff/pull/6382) [@joaopamaral](https://github.com/joaopamaral)
* RF01: refine support for dialects with dot access syntax [#6400](https://github.com/sqlfluff/sqlfluff/pull/6400) [@rogalski](https://github.com/rogalski)
* Add new `TYPE` property to Snowflake users [#6411](https://github.com/sqlfluff/sqlfluff/pull/6411) [@mroy-seedbox](https://github.com/mroy-seedbox)
* Document the config API [#6384](https://github.com/sqlfluff/sqlfluff/pull/6384) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Error handling for trying to render callable builtins #5463 [#6388](https://github.com/sqlfluff/sqlfluff/pull/6388) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SQLite : Add `CREATE VIRTUAL TABLE` Statement [#6406](https://github.com/sqlfluff/sqlfluff/pull/6406) [@R3gardless](https://github.com/R3gardless)
* Updated README with Table Of Contents [#6407](https://github.com/sqlfluff/sqlfluff/pull/6407) [@27Jashshah](https://github.com/27Jashshah)

## New Contributors

* [@27Jashshah](https://github.com/27Jashshah) made their first contribution in [#6407](https://github.com/sqlfluff/sqlfluff/pull/6407)
* [@joaopamaral](https://github.com/joaopamaral) made their first contribution in [#6382](https://github.com/sqlfluff/sqlfluff/pull/6382)
* [@ninazacharia-toast](https://github.com/ninazacharia-toast) made their first contribution in [#6422](https://github.com/sqlfluff/sqlfluff/pull/6422)
* [@fstg1992](https://github.com/fstg1992) made their first contribution in [#6417](https://github.com/sqlfluff/sqlfluff/pull/6417)
* [@maver1ck](https://github.com/maver1ck) made their first contribution in [#6415](https://github.com/sqlfluff/sqlfluff/pull/6415)
* [@timz-st](https://github.com/timz-st) made their first contribution in [#6431](https://github.com/sqlfluff/sqlfluff/pull/6431)
* [@mrebaker](https://github.com/mrebaker) made their first contribution in [#6445](https://github.com/sqlfluff/sqlfluff/pull/6445)
* [@wircho](https://github.com/wircho) made their first contribution in [#6423](https://github.com/sqlfluff/sqlfluff/pull/6423)
* [@LuigiCerone](https://github.com/LuigiCerone) made their first contribution in [#6459](https://github.com/sqlfluff/sqlfluff/pull/6459)
* [@V-D-L-P](https://github.com/V-D-L-P) made their first contribution in [#6467](https://github.com/sqlfluff/sqlfluff/pull/6467)
* [@WobblyRobbly](https://github.com/WobblyRobbly) made their first contribution in [#6473](https://github.com/sqlfluff/sqlfluff/pull/6473)
* [@danparizher](https://github.com/danparizher) made their first contribution in [#5266](https://github.com/sqlfluff/sqlfluff/pull/5266)

## [3.2.5] - 2024-10-25

## Highlights

This release is mostly bugfixes and dialect improvements. Notably:

* Whitespace handling improvements to `LT01` & `LT02`.
* Better error messages around trying to iterate on missing jinja variables.
* Better case sensitivity for `AL09` (see the sketch after this list).
* Improved handling of jinja context in inline config directives.
* Enabling `AM02` for Trino and Snowflake.
* Handling potential collisions between `ST02` & `LT01`.
* Preventing false positives in `AL05` with arrays.

There's also a bunch of documentation improvements in this release, including guides on how to troubleshoot SQLFluff and how to write custom rules. Check out https://docs.sqlfluff.com for more details.

We also saw **five** new contributors to the project this month. Welcome to the project, and thanks for taking the time to contribute! 🎉🏆🎉
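For context, `AL09` concerns self-aliasing (see its revision in [#6333](https://github.com/sqlfluff/sqlfluff/pull/6333) below): columns aliased back to their own name. A rough sketch of why case sensitivity matters here (the names are invented, and the exact verdict depends on the dialect's case folding and the rule's configuration):

```sql
SELECT
    col_a AS col_a,    -- a plain self-alias
    col_b AS COL_B,    -- same name, different case: whether this counts
                       -- as a self-alias depends on how the dialect
                       -- folds unquoted identifiers
    col_c AS "col_c"   -- quoting can change the answer again in
                       -- case-sensitive dialects
FROM my_table;
```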
## What’s Changed

* Guides for custom rules and for troubleshooting [#6379](https://github.com/sqlfluff/sqlfluff/pull/6379) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Documentation and small overhaul of parametrized rule test cases [#6380](https://github.com/sqlfluff/sqlfluff/pull/6380) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: add missing unreserved keyword NULLS (#5212) [#6390](https://github.com/sqlfluff/sqlfluff/pull/6390) [@simonhoerdumbonde](https://github.com/simonhoerdumbonde)
* Introducing SQLFluff Guru on Gurubase.io [#6373](https://github.com/sqlfluff/sqlfluff/pull/6373) [@kursataktas](https://github.com/kursataktas)
* Improve heuristics for inline config [#6391](https://github.com/sqlfluff/sqlfluff/pull/6391) [@rogalski](https://github.com/rogalski)
* Postgres: Handle expressions that occur in `IN` functions [#6393](https://github.com/sqlfluff/sqlfluff/pull/6393) [@keraion](https://github.com/keraion)
* Snowflake: Support bracketed lambda functions without datatypes [#6394](https://github.com/sqlfluff/sqlfluff/pull/6394) [@keraion](https://github.com/keraion)
* LT01: Add default config for `match_condition` to touch [#6395](https://github.com/sqlfluff/sqlfluff/pull/6395) [@keraion](https://github.com/keraion)
* Snowflake: Allow for additional `CONNECT BY` expressions that may use `PRIOR` [#6396](https://github.com/sqlfluff/sqlfluff/pull/6396) [@keraion](https://github.com/keraion)
* Details on debugging and setup for diff-quality [#6381](https://github.com/sqlfluff/sqlfluff/pull/6381) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix edge case in Jinja reindents [#6383](https://github.com/sqlfluff/sqlfluff/pull/6383) [@rogalski](https://github.com/rogalski)
* Support identifier clause for Databricks [#6377](https://github.com/sqlfluff/sqlfluff/pull/6377) [@PaulBurridge](https://github.com/PaulBurridge)
* Enable AM02 for snowflake and trino by default. [#6369](https://github.com/sqlfluff/sqlfluff/pull/6369) [@mchen-codaio](https://github.com/mchen-codaio)
* Postgres: Support identifiers in `ALTER DATABASE SET` [#6376](https://github.com/sqlfluff/sqlfluff/pull/6376) [@keraion](https://github.com/keraion)
* SparkSQL: Improved lexing and parsing of file literals [#6375](https://github.com/sqlfluff/sqlfluff/pull/6375) [@keraion](https://github.com/keraion)
* Fix Snowflake alter share [#6372](https://github.com/sqlfluff/sqlfluff/pull/6372) [@greg-finley](https://github.com/greg-finley)
* Resolve collision between ST02 and LT01 [#6366](https://github.com/sqlfluff/sqlfluff/pull/6366) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Prevent false positives with AL05 and array functions [#6365](https://github.com/sqlfluff/sqlfluff/pull/6365) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle iteration and getting undefined jinja variables [#6364](https://github.com/sqlfluff/sqlfluff/pull/6364) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update documentation with new dialects [#6337](https://github.com/sqlfluff/sqlfluff/pull/6337) [@mchen-codaio](https://github.com/mchen-codaio)
* enable default values when creating databricks tables [#6362](https://github.com/sqlfluff/sqlfluff/pull/6362) [@VictorAtIfInsurance](https://github.com/VictorAtIfInsurance)
* Postgres : Add `ALTER AGGREGATE` Statement [#6353](https://github.com/sqlfluff/sqlfluff/pull/6353) [@R3gardless](https://github.com/R3gardless)
* Revise AL09 (self aliasing) - stricter case sensitivity [#6333](https://github.com/sqlfluff/sqlfluff/pull/6333) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make dbt `RelationEmulator` safely callable [#6358](https://github.com/sqlfluff/sqlfluff/pull/6358) [@mroy-seedbox](https://github.com/mroy-seedbox)

## New Contributors

* [@VictorAtIfInsurance](https://github.com/VictorAtIfInsurance) made their first contribution in [#6362](https://github.com/sqlfluff/sqlfluff/pull/6362)
* [@mchen-codaio](https://github.com/mchen-codaio) made their first contribution in [#6337](https://github.com/sqlfluff/sqlfluff/pull/6337)
* [@PaulBurridge](https://github.com/PaulBurridge) made their first contribution in [#6377](https://github.com/sqlfluff/sqlfluff/pull/6377)
* [@kursataktas](https://github.com/kursataktas) made their first contribution in [#6373](https://github.com/sqlfluff/sqlfluff/pull/6373)
* [@simonhoerdumbonde](https://github.com/simonhoerdumbonde) made their first contribution in [#6390](https://github.com/sqlfluff/sqlfluff/pull/6390)

## [3.2.4] - 2024-10-14

## Highlights

This release is almost all dialect fixes and bugfixes. Notably, it also brings official Python 3.13 support (although most users should not notice any differences).

We also saw **two** new contributors to the project. Welcome [@R3gardless](https://github.com/R3gardless) & [@brandonschabell](https://github.com/brandonschabell)!
🎉🎉🎉

## What’s Changed

* Utilize a deepcopy of the config object when parsing files [#6344](https://github.com/sqlfluff/sqlfluff/pull/6344) [@brandonschabell](https://github.com/brandonschabell)
* Snowflake supports other literals in system functions [#6355](https://github.com/sqlfluff/sqlfluff/pull/6355) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Un-reserve CURRENT_USER [#6354](https://github.com/sqlfluff/sqlfluff/pull/6354) [@alanmcruickshank](https://github.com/alanmcruickshank)
* tsql: handle additional primary/foreign key options in constraints [#6347](https://github.com/sqlfluff/sqlfluff/pull/6347) [@keraion](https://github.com/keraion)
* Add `DROP COLUMN` support for multiple dialects [#6348](https://github.com/sqlfluff/sqlfluff/pull/6348) [@keraion](https://github.com/keraion)
* TSQL: allow `UPDATE` to be a function name [#6349](https://github.com/sqlfluff/sqlfluff/pull/6349) [@keraion](https://github.com/keraion)
* tsql: allow both on delete and on update in a `reference_constraint` [#6346](https://github.com/sqlfluff/sqlfluff/pull/6346) [@keraion](https://github.com/keraion)
* Postgres : Allow Extensions with Special Characters in Name [#6345](https://github.com/sqlfluff/sqlfluff/pull/6345) [@R3gardless](https://github.com/R3gardless)
* Fix `tox` command in test/fixtures/dialects/README.md [#6342](https://github.com/sqlfluff/sqlfluff/pull/6342) [@R3gardless](https://github.com/R3gardless)
* Revise dbt warnings when a file fails to compile [#6338](https://github.com/sqlfluff/sqlfluff/pull/6338) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Add `CREATE FOREIGN DATA WRAPPER` statement [#6335](https://github.com/sqlfluff/sqlfluff/pull/6335) [@keraion](https://github.com/keraion)
* Trino: Add some support to `json_query` functions [#6336](https://github.com/sqlfluff/sqlfluff/pull/6336) [@keraion](https://github.com/keraion)
* Handle deprecation warning of "fork" [#6332](https://github.com/sqlfluff/sqlfluff/pull/6332) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Python 3.13 support and make it default for test coverage [#6269](https://github.com/sqlfluff/sqlfluff/pull/6269) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ST06 - Fix union of CTE/Subquery [#6298](https://github.com/sqlfluff/sqlfluff/pull/6298) [@rogalski](https://github.com/rogalski)
* Refactor timestamp grammar [#6331](https://github.com/sqlfluff/sqlfluff/pull/6331) [@greg-finley](https://github.com/greg-finley)

## New Contributors

* [@R3gardless](https://github.com/R3gardless) made their first contribution in [#6342](https://github.com/sqlfluff/sqlfluff/pull/6342)
* [@brandonschabell](https://github.com/brandonschabell) made their first contribution in [#6344](https://github.com/sqlfluff/sqlfluff/pull/6344)

## [3.2.3] - 2024-10-10

## Highlights

This is another release of dialect improvements and rule bugfixes. Notably:

* More robust algorithms for the indentation of Jinja template tags in `LT02` (see the sketch below).
* The `github-annotation-native` format option now has _groups_ for each filename.

There's also a refactor of where we keep guides and howtos in the docs. Keep an eye on that section going forward for more information about best practice and troubleshooting for SQLFluff.

Even in this small release, we've seen **two** new contributors. Welcome [@nspcc-cm](https://github.com/nspcc-cm) & [@rogalski](https://github.com/rogalski) to the project!
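As a sketch of the kind of templated SQL where the `LT02` improvements matter (the names are invented), the rule has to reason about the indentation of the jinja block tags themselves as well as the SQL they wrap:

```sql
SELECT
    col_a,
    {% if include_extra_cols %}
    col_b,
    col_c,
    {% endif %}
    col_d
FROM my_table
```

Exactly how tags and their contents are laid out is governed by your layout configuration; the change here makes the algorithm more robust rather than changing the intended layout.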
## What’s Changed

* BigQuery: Support Tuple syntax in other locations [#6328](https://github.com/sqlfluff/sqlfluff/pull/6328) [@keraion](https://github.com/keraion)
* Trino: Fix rule interactions with lambda functions [#6327](https://github.com/sqlfluff/sqlfluff/pull/6327) [@keraion](https://github.com/keraion)
* Resolve some more edge cases in LT02 [#6324](https://github.com/sqlfluff/sqlfluff/pull/6324) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF05 - fine tuning for snowflake dialect [#6297](https://github.com/sqlfluff/sqlfluff/pull/6297) [@rogalski](https://github.com/rogalski)
* Indentation: `UPDATE` and `RETURNING` clauses [#6314](https://github.com/sqlfluff/sqlfluff/pull/6314) [@keraion](https://github.com/keraion)
* Postgres: Fix lexing some JSON operators [#6323](https://github.com/sqlfluff/sqlfluff/pull/6323) [@keraion](https://github.com/keraion)
* Add support for `grant monitor on user ...` in Snowflake dialect [#6322](https://github.com/sqlfluff/sqlfluff/pull/6322) [@mroy-seedbox](https://github.com/mroy-seedbox)
* Exclude templated casts from CV11 [#6320](https://github.com/sqlfluff/sqlfluff/pull/6320) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake allow double-quoted comments [#6318](https://github.com/sqlfluff/sqlfluff/pull/6318) [@greg-finley](https://github.com/greg-finley)
* Databricks materialized view [#6319](https://github.com/sqlfluff/sqlfluff/pull/6319) [@greg-finley](https://github.com/greg-finley)
* Allow double quotes to be escaped by writing twice [#6316](https://github.com/sqlfluff/sqlfluff/pull/6316) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve an oscillation bug with LT02 [#6306](https://github.com/sqlfluff/sqlfluff/pull/6306) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Create a "Guides and Howtos" section of the docs. [#6301](https://github.com/sqlfluff/sqlfluff/pull/6301) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add groups to the `github-annotation-native` format option. [#6312](https://github.com/sqlfluff/sqlfluff/pull/6312) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Support CTEs and multiple orders of CopyOptions in COPY INTO [#6313](https://github.com/sqlfluff/sqlfluff/pull/6313) [@keraion](https://github.com/keraion)
* Allow expressions in ORDER BY for clickhouse [#6311](https://github.com/sqlfluff/sqlfluff/pull/6311) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: support temp UDFs [#6309](https://github.com/sqlfluff/sqlfluff/pull/6309) [@rogalski](https://github.com/rogalski)
* fix: tsql create function syntax corrections [#6289](https://github.com/sqlfluff/sqlfluff/pull/6289) [@nspcc-cm](https://github.com/nspcc-cm)

## New Contributors

* [@nspcc-cm](https://github.com/nspcc-cm) made their first contribution in [#6289](https://github.com/sqlfluff/sqlfluff/pull/6289)
* [@rogalski](https://github.com/rogalski) made their first contribution in [#6309](https://github.com/sqlfluff/sqlfluff/pull/6309)

## [3.2.2] - 2024-10-07

## Highlights

This is a hotfix release to resolve an issue with the JJ01 rule when running in parallel mode.

## What’s Changed

* Hotfix for JJ01 [#6304](https://github.com/sqlfluff/sqlfluff/pull/6304) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add note on 3.0.x to main docs page. [#6302](https://github.com/sqlfluff/sqlfluff/pull/6302) [@alanmcruickshank](https://github.com/alanmcruickshank)
## [3.2.1] - 2024-10-06

## Highlights

This release is primarily housekeeping, bugfixes and dialect improvements. More specifically:

* Resolving regressions in `JJ01`, filename extension handling and the treatment of unfixable/unparsable files, which had been noticed with recent releases.
* Resolving bugs in `LT07` & `LT12` which relate to jinja whitespace control.
* More robust support for arbitrary methods on the `ref` and `source` macros for the dbt templater (see the sketch below).

There are also dialect improvements for BigQuery, TSQL, MySQL, MariaDB, Snowflake, DuckDB, Databricks, Postgres, Teradata, Exasol & Vertica.

We also saw **six** new contributors merge their first pull request as part of this release. Welcome to the project! 🎉🏆🎉
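For context, dbt's `ref()` and `source()` macros return Relation objects, and projects sometimes call methods or attributes on the result. A sketch of the kind of template this covers (the model name is invented, and `include()` is shown as one example of a Relation method, standing in for whatever arbitrary calls a project might make):

```sql
-- The dbt templater emulates ref()/source() during linting, and now
-- tolerates method calls on the returned relation more robustly.
SELECT *
FROM {{ ref('my_model').include(database=false) }}
```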
## What’s Changed

* Postgres: Support walrus operator named arguments [#6299](https://github.com/sqlfluff/sqlfluff/pull/6299) [@keraion](https://github.com/keraion)
* TSQL: handle nested joins, RF01 better aliasing [#6300](https://github.com/sqlfluff/sqlfluff/pull/6300) [@keraion](https://github.com/keraion)
* Exclude Macros - Allow multiple paths. [#6221](https://github.com/sqlfluff/sqlfluff/pull/6221) [@culpgrant](https://github.com/culpgrant)
* Dededuplicate rule ignore docs [#6296](https://github.com/sqlfluff/sqlfluff/pull/6296) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bugfix for LT07 with consumed newlines. [#6294](https://github.com/sqlfluff/sqlfluff/pull/6294) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bugfix for LT12 with jinja whitespace consumption [#6292](https://github.com/sqlfluff/sqlfluff/pull/6292) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF02: Ignore `DECLARE` variables in BigQuery [#6295](https://github.com/sqlfluff/sqlfluff/pull/6295) [@keraion](https://github.com/keraion)
* Bugfix for JJ01 in parallel mode [#6293](https://github.com/sqlfluff/sqlfluff/pull/6293) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow arbitrary attributes & methods for `ThisEmulator` [#6254](https://github.com/sqlfluff/sqlfluff/pull/6254) [@mroy-seedbox](https://github.com/mroy-seedbox)
* RF05: Add `table_aliases` option [#6273](https://github.com/sqlfluff/sqlfluff/pull/6273) [@keraion](https://github.com/keraion)
* BigQuery: Add support for concatenating in `EXECUTE IMMEDIATE` [#6287](https://github.com/sqlfluff/sqlfluff/pull/6287) [@keraion](https://github.com/keraion)
* BigQuery: Add support for `SET` with system variables [#6288](https://github.com/sqlfluff/sqlfluff/pull/6288) [@keraion](https://github.com/keraion)
* Plugins: Migrate example plugin to `pyproject.toml` [#6286](https://github.com/sqlfluff/sqlfluff/pull/6286) [@keraion](https://github.com/keraion)
* TSQL: Add DATETRUC to date_part_function_name list [#6283](https://github.com/sqlfluff/sqlfluff/pull/6283) [@paysni](https://github.com/paysni)
* MySQL Alter table convert to character set [#6277](https://github.com/sqlfluff/sqlfluff/pull/6277) [@greg-finley](https://github.com/greg-finley)
* Remove dependency on coveralls. [#6284](https://github.com/sqlfluff/sqlfluff/pull/6284) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Test dbt Templater Plugin with dbt 1.9.0 [#6280](https://github.com/sqlfluff/sqlfluff/pull/6280) [@edgarrmondragon](https://github.com/edgarrmondragon)
* AM06: Ignore array expressions in BigQuery [#6276](https://github.com/sqlfluff/sqlfluff/pull/6276) [@keraion](https://github.com/keraion)
* Add mariadb to issue labeler [#6278](https://github.com/sqlfluff/sqlfluff/pull/6278) [@greg-finley](https://github.com/greg-finley)
* BigQuery: Add `GROUPING SETS` clause [#6275](https://github.com/sqlfluff/sqlfluff/pull/6275) [@keraion](https://github.com/keraion)
* Snowflake: Support `ARRAY` types [#6272](https://github.com/sqlfluff/sqlfluff/pull/6272) [@keraion](https://github.com/keraion)
* Move most of the config validation settings out into rule bundles. [#6262](https://github.com/sqlfluff/sqlfluff/pull/6262) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Revise warnings with fixing unfixable files. [#6257](https://github.com/sqlfluff/sqlfluff/pull/6257) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Strict mypy on `sqlfluff.core` [#6246](https://github.com/sqlfluff/sqlfluff/pull/6246) [@alanmcruickshank](https://github.com/alanmcruickshank)
* DuckDB: Add `DROP MACRO` [#6270](https://github.com/sqlfluff/sqlfluff/pull/6270) [@keraion](https://github.com/keraion)
* Added Support for Databricks SQL Notebook Cells [#6267](https://github.com/sqlfluff/sqlfluff/pull/6267) [@gabepesco](https://github.com/gabepesco)
* dbt templater `pyproject.toml` nits [#6268](https://github.com/sqlfluff/sqlfluff/pull/6268) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add UTF8 support for identifiers in Vertica dialect [#6183](https://github.com/sqlfluff/sqlfluff/pull/6183) [@troshnev](https://github.com/troshnev)
* Almost all of `util` up to strict typing [#6263](https://github.com/sqlfluff/sqlfluff/pull/6263) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update link to diff-cover docs [#6256](https://github.com/sqlfluff/sqlfluff/pull/6256) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: AL09 handle double quoted identifiers [#6249](https://github.com/sqlfluff/sqlfluff/pull/6249) [@keraion](https://github.com/keraion)
* fix: complex file extensions discovery [#6228](https://github.com/sqlfluff/sqlfluff/pull/6228) [@Clepech](https://github.com/Clepech)
* fix RF06 issue in postgres naked identifier regex [#6247](https://github.com/sqlfluff/sqlfluff/pull/6247) [@fvankrieken](https://github.com/fvankrieken)
* Strict typing for `sqlfluff.core.linter` [#6240](https://github.com/sqlfluff/sqlfluff/pull/6240) [@alanmcruickshank](https://github.com/alanmcruickshank)
* DuckDB: Fixed `DatatypeSegment` references [#6244](https://github.com/sqlfluff/sqlfluff/pull/6244) [@keraion](https://github.com/keraion)
* Postgres: Support `SET` with double quoted identifiers [#6243](https://github.com/sqlfluff/sqlfluff/pull/6243) [@keraion](https://github.com/keraion)
* Consolidate Teradata tests [#6241](https://github.com/sqlfluff/sqlfluff/pull/6241) [@greg-finley](https://github.com/greg-finley)
* RF02: Ignore alias references that are self-inner [#6242](https://github.com/sqlfluff/sqlfluff/pull/6242) [@keraion](https://github.com/keraion)
* Add additional CREATE TABLE support for Databricks [#6216](https://github.com/sqlfluff/sqlfluff/pull/6216) [@pahunter90](https://github.com/pahunter90)
* Complete the support for PIVOT in Snowflake dialect [#6217](https://github.com/sqlfluff/sqlfluff/pull/6217) [@fpsebastiam](https://github.com/fpsebastiam)
* Exasol - allow function calls in values clause [#6226](https://github.com/sqlfluff/sqlfluff/pull/6226) [@stephnan](https://github.com/stephnan)
* Snowflake: Support defining virtual columns [#6237](https://github.com/sqlfluff/sqlfluff/pull/6237) [@babak-l1](https://github.com/babak-l1)
* Teradata order of VOLATILE and MULTISET [#6233](https://github.com/sqlfluff/sqlfluff/pull/6233) [@greg-finley](https://github.com/greg-finley)
* Remove duplicate timing columns from the timing records [#6229](https://github.com/sqlfluff/sqlfluff/pull/6229) [@Tenzer](https://github.com/Tenzer)
* Fix time travel clauses in Snowflake dialect [#6230](https://github.com/sqlfluff/sqlfluff/pull/6230) [@fpsebastiam](https://github.com/fpsebastiam)

## New Contributors

* [@fpsebastiam](https://github.com/fpsebastiam) made their first contribution in [#6230](https://github.com/sqlfluff/sqlfluff/pull/6230)
* [@Tenzer](https://github.com/Tenzer) made their first contribution in [#6229](https://github.com/sqlfluff/sqlfluff/pull/6229)
* [@Clepech](https://github.com/Clepech) made their first contribution in [#6228](https://github.com/sqlfluff/sqlfluff/pull/6228)
* [@troshnev](https://github.com/troshnev) made their first contribution in [#6183](https://github.com/sqlfluff/sqlfluff/pull/6183)
* [@gabepesco](https://github.com/gabepesco) made their first contribution in [#6267](https://github.com/sqlfluff/sqlfluff/pull/6267)
* [@mroy-seedbox](https://github.com/mroy-seedbox) made their first contribution in [#6254](https://github.com/sqlfluff/sqlfluff/pull/6254)

## [3.2.0] - 2024-09-18

## Highlights

This release brings a few minor breaking changes, both for the core project and for the dbt templater.

For the main project:

* Resolving an issue with the spacing of functions (LT01), which involved a change to how functions are parsed. If your project relies on the specific parsing of functions, note that the bracketed arguments are now wrapped in a `function_contents` object (see the parse sketch after this list). We recommend that you examine the new parsing structure using this new release in testing first.
* `RF06` (`references.quoting`) is now case sensitive when removing quotes which are detected as unnecessary (see the example after this list). This rule has also been re-enabled by default for Snowflake and Postgres, where it had previously been disabled (for the reason that in the past it hadn't been appropriately case sensitive). Treatment for totally case-insensitive dialects like DuckDB and SparkSQL has also been included. Please check the new documentation for this rule (which is much more explicit now) for details related to your dialect.
* Patterns equivalent to those from `.sqlfluffignore` can now be included in `.sqlfluff` and `pyproject.toml` files.
* Using the `python` templater, users now have an option to include variables which include a dot in the path, like `{{ foo.bar }}`, using a special `sqlfluff` context variable.
* Significant changes under the hood to the handling of configuration files. Most of these should not be visible to end users, but anyone integrating SQLFluff into a larger project and relying on native file loading may need to refactor their project for this release. Most notably here, for maintainers of plugins, the `ConfigLoader` class has been deprecated, and plugins should instead call the config loading functions directly. See the example plugin for details.
* Documentation, especially for dialects, has been significantly improved. Documentation for `CP02` (`capitalisation.identifiers`) has also been clarified to make its implications for references and aliases more clear.
* During testing, to isolate the effect of specific rules, there's a new CLI option `--disable-noqa-except` which allows all `noqa` options to be ignored _except_ the ones provided in this option (see the usage sketch after this list).

For the dbt templater:

* Support for dbt 1.1-1.3 has been removed. All have been in End of Life (EOL) support by dbt Labs for almost two years. They are also poorly supported by other projects and tools.
* The dbt templater has been migrated to use `pyproject.toml`.
* Handling of errors and exceptions raised within dbt has had an overhaul. Users may see a slightly different presentation of errors, but the overall stability should be more robust.

In addition to those changes, there have been too many dialect contributions and bugfixes to mention specifically. We've also seen **six** people make their first contributions to the project as part of preparing for this release! 🎉🏆🎉
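For those who rely on the parse structure of functions, the `function_contents` change can be inspected with `sqlfluff parse`. A rough sketch of the new shape for a call like `count(col_a)` follows; node names other than `function_contents` are abridged assumptions, so treat this as indicative rather than exact:

```yaml
function:
  function_name:
    function_name_identifier: count
  function_contents:        # new in 3.2.0: wraps the bracketed arguments
    bracketed:
      start_bracket: (
      column_reference:
        naked_identifier: col_a
      end_bracket: )
```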
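On the `RF06` change: case sensitivity matters because quotes are only truly redundant if removing them leaves the reference resolving to the same object. A short Postgres-flavoured illustration (Postgres folds unquoted identifiers to lowercase):

```sql
-- "foo" and foo resolve to the same object in Postgres,
-- so the quotes are redundant and can safely be removed:
SELECT "foo" FROM my_table;

-- "FOO" is NOT equivalent to unquoted FOO (which folds to foo),
-- so these quotes must be kept:
SELECT "FOO" FROM my_table;
```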
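And a usage sketch for the new `--disable-noqa-except` option, which is useful when testing a single rule against files that already carry `noqa` comments for other rules (the argument format here is an assumption, mirroring how rule codes are passed to `--rules`):

```console
$ sqlfluff lint models/ --disable-noqa-except RF06
```

In this sketch, `noqa` comments referencing `RF06` are still honoured, while `noqa` comments for all other rules are ignored.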
## What’s Changed

* Handle multi-processing dbt exceptions much better. [#6138](https://github.com/sqlfluff/sqlfluff/pull/6138) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support variables with dot in python templater [#5872](https://github.com/sqlfluff/sqlfluff/pull/5872) [@timchurch](https://github.com/timchurch)
* Add postgres normalization operator support [#6211](https://github.com/sqlfluff/sqlfluff/pull/6211) [@fnimick](https://github.com/fnimick)
* Fix patch will anchor on first buffer insertion point [#6212](https://github.com/sqlfluff/sqlfluff/pull/6212) [@keraion](https://github.com/keraion)
* Allow ignore patterns in other config files. [#6130](https://github.com/sqlfluff/sqlfluff/pull/6130) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Strict typing in `config` and `helpers`. [#6206](https://github.com/sqlfluff/sqlfluff/pull/6206) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Support multiple options in `SET` statement [#6205](https://github.com/sqlfluff/sqlfluff/pull/6205) [@keraion](https://github.com/keraion)
* DuckDB: Support `CREATE TYPE` statement [#6204](https://github.com/sqlfluff/sqlfluff/pull/6204) [@keraion](https://github.com/keraion)
* Update Slack link [#6203](https://github.com/sqlfluff/sqlfluff/pull/6203) [@greg-finley](https://github.com/greg-finley)
* Add quoted literal checking for Snowflake TARGET_LAG in dynamic tables. [#6201](https://github.com/sqlfluff/sqlfluff/pull/6201) [@mvastarelli](https://github.com/mvastarelli)
* Databricks: Support `COMMENT ON` statement [#6196](https://github.com/sqlfluff/sqlfluff/pull/6196) [@keraion](https://github.com/keraion)
* DuckDB: Support `STRUCT` datatype [#6198](https://github.com/sqlfluff/sqlfluff/pull/6198) [@keraion](https://github.com/keraion)
* Deprecate the `ConfigLoader` [#6177](https://github.com/sqlfluff/sqlfluff/pull/6177) [@alanmcruickshank](https://github.com/alanmcruickshank)
* DuckDB: Support `CREATE MACRO`/`CREATE FUNCTION` [#6194](https://github.com/sqlfluff/sqlfluff/pull/6194) [@keraion](https://github.com/keraion)
* DuckDB: Support functions with walrus operators [#6193](https://github.com/sqlfluff/sqlfluff/pull/6193) [@keraion](https://github.com/keraion)
* Add volume syntax support for Databricks [#6179](https://github.com/sqlfluff/sqlfluff/pull/6179) [@TheCleric](https://github.com/TheCleric)
* Handle errors better in AL09 [#6186](https://github.com/sqlfluff/sqlfluff/pull/6186) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for managed locations to databricks dialect schemas [#6182](https://github.com/sqlfluff/sqlfluff/pull/6182) [@TheCleric](https://github.com/TheCleric)
* MYSQL: Create Table Optional AS [#6109](https://github.com/sqlfluff/sqlfluff/pull/6109) [@WittierDinosaur](https://github.com/WittierDinosaur)
* More dialect documentation [#6165](https://github.com/sqlfluff/sqlfluff/pull/6165) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Better documentation on how to cross reference rules and fix a few. [#6162](https://github.com/sqlfluff/sqlfluff/pull/6162) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF06: Case Sensitivity [#6173](https://github.com/sqlfluff/sqlfluff/pull/6173) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL/Databricks: Support for `VARIANT` type [#6167](https://github.com/sqlfluff/sqlfluff/pull/6167) [@keraion](https://github.com/keraion)
* sparksql: Allow `INSERT OVERWRITE` after a CTE [#6172](https://github.com/sqlfluff/sqlfluff/pull/6172) [@keraion](https://github.com/keraion)
* postgres: Add `SET CONSTRAINTS` statement [#6171](https://github.com/sqlfluff/sqlfluff/pull/6171) [@keraion](https://github.com/keraion)
* TSQL: Fix `MERGE` without a target alias [#6170](https://github.com/sqlfluff/sqlfluff/pull/6170) [@keraion](https://github.com/keraion)
* TSQL: add `OFFSET` and `FETCH` [#6169](https://github.com/sqlfluff/sqlfluff/pull/6169) [@keraion](https://github.com/keraion)
* postgres: Add support for `SUBSCRIPTION` statements [#6168](https://github.com/sqlfluff/sqlfluff/pull/6168) [@keraion](https://github.com/keraion)
* Duckdb: Add support for list comprehensions [#6166](https://github.com/sqlfluff/sqlfluff/pull/6166) [@keraion](https://github.com/keraion)
* Update Docs and tests for CP02 [#6163](https://github.com/sqlfluff/sqlfluff/pull/6163) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Cached property in RF06 rather than DIY [#6164](https://github.com/sqlfluff/sqlfluff/pull/6164) [@alanmcruickshank](https://github.com/alanmcruickshank)
* CI: Update `util.py` for dbt templater `pyproject.toml` [#6160](https://github.com/sqlfluff/sqlfluff/pull/6160) [@keraion](https://github.com/keraion)
* Auto generate dialect docs [#6153](https://github.com/sqlfluff/sqlfluff/pull/6153) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Deprecate support for dbt 1.1-1.3 [#6159](https://github.com/sqlfluff/sqlfluff/pull/6159) [@WittierDinosaur](https://github.com/WittierDinosaur)
* ST08: Ignore `DISTINCT`s with subqueries [#6146](https://github.com/sqlfluff/sqlfluff/pull/6146) [@keraion](https://github.com/keraion)
* Duckdb: Fix Create View coverage [#6158](https://github.com/sqlfluff/sqlfluff/pull/6158) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Support Password Policies [#6154](https://github.com/sqlfluff/sqlfluff/pull/6154) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Allow negative integers in sequences [#6111](https://github.com/sqlfluff/sqlfluff/pull/6111) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Add SHOW Statement [#6110](https://github.com/sqlfluff/sqlfluff/pull/6110) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Migrate dbt templater to pyproject.toml [#6155](https://github.com/sqlfluff/sqlfluff/pull/6155) [@keraion](https://github.com/keraion)
* Snowflake: Add DEFAULT option for function parameters [#6145](https://github.com/sqlfluff/sqlfluff/pull/6145) [@keraion](https://github.com/keraion)
* Snowflake: fixes parsing for INCLUDE_METADATA in COPY INTO statement [#6150](https://github.com/sqlfluff/sqlfluff/pull/6150) [@jcrobak](https://github.com/jcrobak)
* [SNOWFLAKE] Adding support for extended constraint definitions [#6151](https://github.com/sqlfluff/sqlfluff/pull/6151) [@babak-l1](https://github.com/babak-l1)
* Snowflake: fixes parsing for PARSE_HEADER in FILE FORMAT statement [#6149](https://github.com/sqlfluff/sqlfluff/pull/6149) [@jcrobak](https://github.com/jcrobak)
* fix: avoid strip_newlines when encounter comments in inline segments [#6140](https://github.com/sqlfluff/sqlfluff/pull/6140) [@Cynthia-Cheng](https://github.com/Cynthia-Cheng)
* More robust exception handling for dbt. [#6144](https://github.com/sqlfluff/sqlfluff/pull/6144) [@alanmcruickshank](https://github.com/alanmcruickshank)
* postgres: Add `ENCRYPTED PASSWORD` option in `CREATE USER` [#6143](https://github.com/sqlfluff/sqlfluff/pull/6143) [@keraion](https://github.com/keraion)
* Fix support of INTERVAL in ClickHouse [#6112](https://github.com/sqlfluff/sqlfluff/pull/6112) [@Pavel-Strybuk](https://github.com/Pavel-Strybuk)
* Add support for Snowflake Higher-Order Functions [#6136](https://github.com/sqlfluff/sqlfluff/pull/6136) [@amardatar](https://github.com/amardatar)
* Method extraction and more robust typing in config. [#6135](https://github.com/sqlfluff/sqlfluff/pull/6135) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add missing databricks and sparksql ALTER statements [#6102](https://github.com/sqlfluff/sqlfluff/pull/6102) [@pahunter90](https://github.com/pahunter90)
* fix: program_counter move in JinjaTracer (#6121) [#6123](https://github.com/sqlfluff/sqlfluff/pull/6123) [@Cynthia-Cheng](https://github.com/Cynthia-Cheng)
* CI: allow hidden file upload for coverage files [#6139](https://github.com/sqlfluff/sqlfluff/pull/6139) [@keraion](https://github.com/keraion)
* Fix: added DOWNSTREAM keyword for TARGET_LAG on dynamic tables in Snowflake. [#6131](https://github.com/sqlfluff/sqlfluff/pull/6131) [@mvastarelli](https://github.com/mvastarelli)
* Trino Dialect: update ARRAY type handling [#6127](https://github.com/sqlfluff/sqlfluff/pull/6127) [@kirkhansen](https://github.com/kirkhansen)
* Split apart config module [#6128](https://github.com/sqlfluff/sqlfluff/pull/6128) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add option for allowing only a subset of rules for noqa [#6115](https://github.com/sqlfluff/sqlfluff/pull/6115) [@keraion](https://github.com/keraion)
* TSQL: Allow for empty catch block in try-catch [#6116](https://github.com/sqlfluff/sqlfluff/pull/6116) [@keraion](https://github.com/keraion)
* Change evaluation order of literals before column refs in bracketed, delimited expressions [#6117](https://github.com/sqlfluff/sqlfluff/pull/6117) [@keraion](https://github.com/keraion)
* Fix spacing rules for functions [#5809](https://github.com/sqlfluff/sqlfluff/pull/5809) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Add conflict_clause to unique table constraint [#6106](https://github.com/sqlfluff/sqlfluff/pull/6106) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support Raise Function [#6108](https://github.com/sqlfluff/sqlfluff/pull/6108) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Create Trigger WHEN optionally bracketed [#6107](https://github.com/sqlfluff/sqlfluff/pull/6107) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Added `INTERVAL`s to Frame Clause [#6105](https://github.com/sqlfluff/sqlfluff/pull/6105) [@keraion](https://github.com/keraion)
* Postgres: Add `IS UNKNOWN` [#6094](https://github.com/sqlfluff/sqlfluff/pull/6094) [@keraion](https://github.com/keraion)
* RF02: Handle subquery column qualification [#6091](https://github.com/sqlfluff/sqlfluff/pull/6091) [@keraion](https://github.com/keraion)
* tsql: Allow leading dots in table references [#6093](https://github.com/sqlfluff/sqlfluff/pull/6093) [@keraion](https://github.com/keraion)

## New Contributors

* [@mvastarelli](https://github.com/mvastarelli) made their first contribution in [#6131](https://github.com/sqlfluff/sqlfluff/pull/6131)
* [@Cynthia-Cheng](https://github.com/Cynthia-Cheng) made their first contribution in [#6123](https://github.com/sqlfluff/sqlfluff/pull/6123)
* [@pahunter90](https://github.com/pahunter90) made their first contribution in [#6102](https://github.com/sqlfluff/sqlfluff/pull/6102)
* [@amardatar](https://github.com/amardatar) made their first contribution in [#6136](https://github.com/sqlfluff/sqlfluff/pull/6136)
* [@jcrobak](https://github.com/jcrobak) made their first contribution in [#6149](https://github.com/sqlfluff/sqlfluff/pull/6149)
* [@babak-l1](https://github.com/babak-l1) made their first contribution in [#6151](https://github.com/sqlfluff/sqlfluff/pull/6151)

## [3.1.1] - 2024-08-20

## Highlights

This release brings a bumper lot of bugfixes, dialect improvements and other minor improvements across the board. Most notably:

* A rework of the structure of the docs. **NOTE**: This does change the url of some docs pages, but to prevent future moves, we've also provided permalinks to most important pages and rules. See the `conf.py` file in the `docs` folder for a full list of permalinks.
* Solving rule conflicts between LT04 & LT02.
* Bugfixes to AM07, CV11, ST03, ST05 & RF03.
* Removing some redundant dependencies for the dbt templater (which haven't been required for some time, but have been included in the install dependencies). Specifically: `markupsafe`, `ruamel.yaml`, `pydantic` & `rich`.
* And too many dialect improvements to summarise!

We've also seen **eleven** new contributors to the project! Thanks to all of them for taking the time to contribute. 🎉🎉🏆🎉🎉

## What’s Changed

* dbt Templater: Increase `dbt deps` test fixture timeout [#6088](https://github.com/sqlfluff/sqlfluff/pull/6088) [@keraion](https://github.com/keraion)
* SparkSQL + Databricks: Add support for raw string literals [#6089](https://github.com/sqlfluff/sqlfluff/pull/6089) [@D-to-the-K](https://github.com/D-to-the-K)
* fixes #4855 - Add DECLARE statement in snowflake dialect [#6059](https://github.com/sqlfluff/sqlfluff/pull/6059) [@YungChunLu](https://github.com/YungChunLu)
* Adding CTE to mysql views [#6077](https://github.com/sqlfluff/sqlfluff/pull/6077) [@gone](https://github.com/gone)
* Rationalise config discovery routines. [#6080](https://github.com/sqlfluff/sqlfluff/pull/6080) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix(dialect-trino): Trino ROW datatype definition in queries [#6085](https://github.com/sqlfluff/sqlfluff/pull/6085) [@bonisb](https://github.com/bonisb)
* Databricks: Add support for GROUP BY ALL [#6082](https://github.com/sqlfluff/sqlfluff/pull/6082) [@D-to-the-K](https://github.com/D-to-the-K)
* fix(clickhouse): add support for tuple() and ENGINE MergeTree [#6079](https://github.com/sqlfluff/sqlfluff/pull/6079) [@ogirardot](https://github.com/ogirardot)
* Add perma-links for rules [#6066](https://github.com/sqlfluff/sqlfluff/pull/6066) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix(clickhouse): add support for rename statement [#6073](https://github.com/sqlfluff/sqlfluff/pull/6073) [@ogirardot](https://github.com/ogirardot)
* fix(clickhouse): add support for INTO OUTFILE and supported FORMATs [#6065](https://github.com/sqlfluff/sqlfluff/pull/6065) [@ogirardot](https://github.com/ogirardot)
* LT04: Fix indentation conflict with LT02 [#6068](https://github.com/sqlfluff/sqlfluff/pull/6068) [@keraion](https://github.com/keraion)
* pre-commit: Disable progress bar [#6069](https://github.com/sqlfluff/sqlfluff/pull/6069) [@keraion](https://github.com/keraion)
* feat(clickhouse): add support for decimal(x,y), decimal32(x) and match [#6063](https://github.com/sqlfluff/sqlfluff/pull/6063) [@ogirardot](https://github.com/ogirardot)
* Big docs refactor. [#6052](https://github.com/sqlfluff/sqlfluff/pull/6052) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ST05: Handle set statement's subsequent queries [#6062](https://github.com/sqlfluff/sqlfluff/pull/6062) [@keraion](https://github.com/keraion)
* fix(clickhouse): add support for limit by and bracketed format [#6061](https://github.com/sqlfluff/sqlfluff/pull/6061) [@ogirardot](https://github.com/ogirardot)
* fix(clickhouse): add support for DateTime64(precision, tz) and Tuples() [#6060](https://github.com/sqlfluff/sqlfluff/pull/6060) [@ogirardot](https://github.com/ogirardot)
* Copy statement postgres v9 compatibility support [#5181](https://github.com/sqlfluff/sqlfluff/pull/5181) [@Fullcure3](https://github.com/Fullcure3)
* Run dbt tests in py312 by default [#5861](https://github.com/sqlfluff/sqlfluff/pull/5861) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Extract path discovery routines from config and linter. [#6057](https://github.com/sqlfluff/sqlfluff/pull/6057) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Added SET TRANSACTION parsing [#5781](https://github.com/sqlfluff/sqlfluff/pull/5781) [@Xemptuous](https://github.com/Xemptuous)
* Support declare or replace variable statement for Databricks dialect [#6054](https://github.com/sqlfluff/sqlfluff/pull/6054) [@urosstan-db](https://github.com/urosstan-db)
* Exclude Macros from a path provided [#6031](https://github.com/sqlfluff/sqlfluff/pull/6031) [@culpgrant](https://github.com/culpgrant)
* ST03: Detect CTE usage in nested `WITH` clauses [#6017](https://github.com/sqlfluff/sqlfluff/pull/6017) [@keraion](https://github.com/keraion)
* TRINO: support FILTER after WITHIN GROUP agg expression [#6056](https://github.com/sqlfluff/sqlfluff/pull/6056) [@rileymcdowell](https://github.com/rileymcdowell)
* Fix/snowflake double quotes [#5727](https://github.com/sqlfluff/sqlfluff/pull/5727) [@Starstruckk](https://github.com/Starstruckk)
* bq_table_function : Added functionality to parse table functionsin BigQuery [#5480](https://github.com/sqlfluff/sqlfluff/pull/5480) [@moh-lch](https://github.com/moh-lch)
* Fix Athena Partitioned By format for iceberg tables [#5399](https://github.com/sqlfluff/sqlfluff/pull/5399) [@jverhoeks](https://github.com/jverhoeks)
* fix: redshift dialect, EXTENSION added [#6025](https://github.com/sqlfluff/sqlfluff/pull/6025) [@rafalbog](https://github.com/rafalbog)
* Fix ignored inline rule overrides (#5697) [#6010](https://github.com/sqlfluff/sqlfluff/pull/6010) [@alesbukovsky](https://github.com/alesbukovsky)
* Update the docs on RF03 [#6051](https://github.com/sqlfluff/sqlfluff/pull/6051) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF03: Fixed some subquery reference scenarios [#6046](https://github.com/sqlfluff/sqlfluff/pull/6046) [@keraion](https://github.com/keraion)
* CV11: Remove rogue print statement [#6047](https://github.com/sqlfluff/sqlfluff/pull/6047) [@keraion](https://github.com/keraion)
* Snowflake: fixes parsing for AGGREGATE in CREATE FUNCTION statement [#6049](https://github.com/sqlfluff/sqlfluff/pull/6049) [@hawle](https://github.com/hawle)
* Snowflake:adds optional IF NOT EXISTS to ADD COLUMN [#6050](https://github.com/sqlfluff/sqlfluff/pull/6050) [@hawle](https://github.com/hawle)
* Replace types-pkg-resources with types-setuptools [#6039](https://github.com/sqlfluff/sqlfluff/pull/6039) [@keraion](https://github.com/keraion)
* Remove old deps for dbt templater [#6028](https://github.com/sqlfluff/sqlfluff/pull/6028) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Added GENERATED AS IDENTITY support to DataBricks dialect [#6004](https://github.com/sqlfluff/sqlfluff/pull/6004) [@nicolb2305](https://github.com/nicolb2305)
* Add support for Clickhouse ORDER BY WITH FILL [#6018](https://github.com/sqlfluff/sqlfluff/pull/6018) [@snikch](https://github.com/snikch)
* Parse API example [#6021](https://github.com/sqlfluff/sqlfluff/pull/6021) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add additional dateparts from DATEPART() [#6012](https://github.com/sqlfluff/sqlfluff/pull/6012) [@yorickbouma](https://github.com/yorickbouma)
* MariaDB: Add support for GROUP BY ... ASC/DESC and GROUP BY ... WITH ROLLUP [#6009](https://github.com/sqlfluff/sqlfluff/pull/6009) [@pprkut](https://github.com/pprkut)
* AM07: Handle set expressions with bracketed selects [#6005](https://github.com/sqlfluff/sqlfluff/pull/6005) [@keraion](https://github.com/keraion)
* MariaDB: Add support for DELETE/INSERT/REPLACE ... RETURNING [#6008](https://github.com/sqlfluff/sqlfluff/pull/6008) [@pprkut](https://github.com/pprkut)
* MariaDB: Add mariadb specific syntax for generated columns [#6007](https://github.com/sqlfluff/sqlfluff/pull/6007) [@pprkut](https://github.com/pprkut)
* Snowflake: fixes drop column if exists parsing rules [#5999](https://github.com/sqlfluff/sqlfluff/pull/5999) [@hawle](https://github.com/hawle)
* Fix TSQL Post Table Expr intrepreted as function [#6001](https://github.com/sqlfluff/sqlfluff/pull/6001) [@ulixius9](https://github.com/ulixius9)

## New Contributors

* [@yorickbouma](https://github.com/yorickbouma) made their first contribution in [#6012](https://github.com/sqlfluff/sqlfluff/pull/6012)
* [@snikch](https://github.com/snikch) made their first contribution in [#6018](https://github.com/sqlfluff/sqlfluff/pull/6018)
* [@nicolb2305](https://github.com/nicolb2305) made their first contribution in [#6004](https://github.com/sqlfluff/sqlfluff/pull/6004)
* [@alesbukovsky](https://github.com/alesbukovsky) made their first contribution in [#6010](https://github.com/sqlfluff/sqlfluff/pull/6010)
* [@rafalbog](https://github.com/rafalbog) made their first contribution in [#6025](https://github.com/sqlfluff/sqlfluff/pull/6025)
* [@jverhoeks](https://github.com/jverhoeks) made their first contribution in [#5399](https://github.com/sqlfluff/sqlfluff/pull/5399)
* [@moh-lch](https://github.com/moh-lch) made their first contribution in [#5480](https://github.com/sqlfluff/sqlfluff/pull/5480)
* [@Starstruckk](https://github.com/Starstruckk) made their first contribution in [#5727](https://github.com/sqlfluff/sqlfluff/pull/5727)
* [@culpgrant](https://github.com/culpgrant) made their first contribution in [#6031](https://github.com/sqlfluff/sqlfluff/pull/6031)
* [@urosstan-db](https://github.com/urosstan-db) made their first contribution in [#6054](https://github.com/sqlfluff/sqlfluff/pull/6054)
* [@ogirardot](https://github.com/ogirardot) made their first contribution in [#6060](https://github.com/sqlfluff/sqlfluff/pull/6060)
* [@D-to-the-K](https://github.com/D-to-the-K) made their first contribution in [#6082](https://github.com/sqlfluff/sqlfluff/pull/6082)
* [@bonisb](https://github.com/bonisb) made their first contribution in [#6085](https://github.com/sqlfluff/sqlfluff/pull/6085)
* [@gone](https://github.com/gone) made their first contribution in [#6077](https://github.com/sqlfluff/sqlfluff/pull/6077)
* [@YungChunLu](https://github.com/YungChunLu) made their first contribution in [#6059](https://github.com/sqlfluff/sqlfluff/pull/6059)

## [3.1.0] - 2024-07-03

## Highlights

This minor release has two breaking changes:

- The addition of camelCase in the extended capitalisation policy. This change removes the ability to autodetect PascalCase; from now on, PascalCase and camelCase must be explicitly set in the config if desired (see the config sketch after this list).
- The detection method for sqlfluff config has changed. It should now be more consistent, regardless of how deep in the directory structure you run the command from.

This release also brings in support for the MariaDB dialect. As well as this, there are many bugfixes and dialect improvements.

Thanks also to the **twelve** new contributors whose work was included in this release! 🎉🎉🏆🎉🎉
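As a sketch of the first change, explicitly opting in to camelCase for identifiers in a `.sqlfluff` config file might look like the following. The rule section shown (`capitalisation.identifiers`) is one example; other capitalisation rules using the extended policy should accept the same value:

```ini
[sqlfluff:rules:capitalisation.identifiers]
extended_capitalisation_policy = camelCase
```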
Thanks also to the **twelve** new contributors whose work was included in this release! 🎉🎉🏆🎉🎉 ## What’s Changed * Snowflake: alter procedure & function updates [#5997](https://github.com/sqlfluff/sqlfluff/pull/5997) [@hawle](https://github.com/hawle) * Snowflake: fix connect by prior selects [#5996](https://github.com/sqlfluff/sqlfluff/pull/5996) [@hawle](https://github.com/hawle) * Snowflake: adds EVENT TABLE support [#5995](https://github.com/sqlfluff/sqlfluff/pull/5995) [@hawle](https://github.com/hawle) * Feature/MariaDB dialect [#5856](https://github.com/sqlfluff/sqlfluff/pull/5856) [@Xemptuous](https://github.com/Xemptuous) * Postgres: Fix multiline concat for special literals [#5965](https://github.com/sqlfluff/sqlfluff/pull/5965) [@keraion](https://github.com/keraion) * ST05: Evaluate nested queries as a whole [#5990](https://github.com/sqlfluff/sqlfluff/pull/5990) [@keraion](https://github.com/keraion) * Naïve multi-variant jinja linting [#5822](https://github.com/sqlfluff/sqlfluff/pull/5822) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update Snowflake Unpivot Dialect to INCLUDE/EXCLUDE NULLs [#5961](https://github.com/sqlfluff/sqlfluff/pull/5961) [@danatmercury](https://github.com/danatmercury) * Snowflake: Adds parsing fixes for external access integrations in create procedure and function [#5986](https://github.com/sqlfluff/sqlfluff/pull/5986) [@hawle](https://github.com/hawle) * Select Analysis: Don't recursively crawl merge subselects [#5981](https://github.com/sqlfluff/sqlfluff/pull/5981) [@keraion](https://github.com/keraion) * Parent dir config search [#5958](https://github.com/sqlfluff/sqlfluff/pull/5958) [@j-svensmark](https://github.com/j-svensmark) * Enable AM02 for bigquery, clickhouse, databricks, db2 [#5979](https://github.com/sqlfluff/sqlfluff/pull/5979) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add COMMENT ON support to Trino dialect [#5984](https://github.com/sqlfluff/sqlfluff/pull/5984) [@tunetheweb](https://github.com/tunetheweb) * Snowflake: fix scriptless stored procedure parsing [#5982](https://github.com/sqlfluff/sqlfluff/pull/5982) [@hawle](https://github.com/hawle) * Add support for custom JinjaTracer implementations [#5937](https://github.com/sqlfluff/sqlfluff/pull/5937) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * MySQL: Fix variable handlings inside expressions [#5967](https://github.com/sqlfluff/sqlfluff/pull/5967) [@kzosabe](https://github.com/kzosabe) * Allow anonymous PIVOTs in Databricks [#5968](https://github.com/sqlfluff/sqlfluff/pull/5968) [@TheCleric](https://github.com/TheCleric) * Rebreak: Fix meta dedent segment order [#5972](https://github.com/sqlfluff/sqlfluff/pull/5972) [@keraion](https://github.com/keraion) * Update athena dialect for CTAS [#5974](https://github.com/sqlfluff/sqlfluff/pull/5974) [@KulykDmytro](https://github.com/KulykDmytro) * fix(dialect-trino): Support Grouping Sets [#5970](https://github.com/sqlfluff/sqlfluff/pull/5970) [@eskabetxe](https://github.com/eskabetxe) * BigQuery: Support various DROP statements [#5966](https://github.com/sqlfluff/sqlfluff/pull/5966) [@kzosabe](https://github.com/kzosabe) * AL07: Fix self-referencing table aliases [#5963](https://github.com/sqlfluff/sqlfluff/pull/5963) [@keraion](https://github.com/keraion) * Clickhouse 'create view' support [#5910](https://github.com/sqlfluff/sqlfluff/pull/5910) [@DimaSamodurov](https://github.com/DimaSamodurov) * Capitalisation: Add camelCase [#5777](https://github.com/sqlfluff/sqlfluff/pull/5777) 
[@WittierDinosaur](https://github.com/WittierDinosaur) * Exasol: Use ANSI value_clause to handle insert_stmts correctly [#5959](https://github.com/sqlfluff/sqlfluff/pull/5959) [@stephnan](https://github.com/stephnan) * Support quoted psql parameters with `placeholder` templater [#5880](https://github.com/sqlfluff/sqlfluff/pull/5880) [@fvankrieken](https://github.com/fvankrieken) * Don't indent invisible template slices [#5938](https://github.com/sqlfluff/sqlfluff/pull/5938) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * 5944: Add support for databricks named parameters [#5946](https://github.com/sqlfluff/sqlfluff/pull/5946) [@TheCleric](https://github.com/TheCleric) * Add support for Databricks TRACK HISTORY [#5948](https://github.com/sqlfluff/sqlfluff/pull/5948) [@TheCleric](https://github.com/TheCleric) * BigQuery: Support various ALTER statements [#5943](https://github.com/sqlfluff/sqlfluff/pull/5943) [@kzosabe](https://github.com/kzosabe) * ClickHouse query-level SETTINGS support [#5941](https://github.com/sqlfluff/sqlfluff/pull/5941) [@pheepa](https://github.com/pheepa) * MySQL: Add support for generated columns [#5939](https://github.com/sqlfluff/sqlfluff/pull/5939) [@pprkut](https://github.com/pprkut) * Exasol: add REGEXP_LIKE [#5936](https://github.com/sqlfluff/sqlfluff/pull/5936) [@stephnan](https://github.com/stephnan) * SQLite: Over clause support for window functions [#5935](https://github.com/sqlfluff/sqlfluff/pull/5935) [@atishay](https://github.com/atishay) * T-SQL: Parameter assignment in SELECT vs alias [#5934](https://github.com/sqlfluff/sqlfluff/pull/5934) [@drjwelch](https://github.com/drjwelch) * SQLite: Add named parameters support [#5914](https://github.com/sqlfluff/sqlfluff/pull/5914) [@atishay](https://github.com/atishay) * SQLite: Support with key as a column name (as needed by json_each) [#5918](https://github.com/sqlfluff/sqlfluff/pull/5918) [@atishay](https://github.com/atishay) * Add loader_search_path setting to Jinja templater [#5930](https://github.com/sqlfluff/sqlfluff/pull/5930) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * SQLite: Add support for JSON functions. Fixes #5896 [#5917](https://github.com/sqlfluff/sqlfluff/pull/5917) [@atishay](https://github.com/atishay) * dbt Templater: Suppress dbt 1.8 log messages [#5907](https://github.com/sqlfluff/sqlfluff/pull/5907) [@keraion](https://github.com/keraion) * Clarify docs around subdir handling when loading macros [#5924](https://github.com/sqlfluff/sqlfluff/pull/5924) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * fix: add missing comma in Snowflake file types list [#5923](https://github.com/sqlfluff/sqlfluff/pull/5923) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * SQLite: Specialize create view with support for temporary views. 
* SQLite: Specialize create view with support for temporary views. [#5919](https://github.com/sqlfluff/sqlfluff/pull/5919) [@atishay](https://github.com/atishay)
* BigQuery: Fix array type parsing [#5912](https://github.com/sqlfluff/sqlfluff/pull/5912) [@kzosabe](https://github.com/kzosabe)
* BigQuery: Support unimplemented alter table and view statements [#5911](https://github.com/sqlfluff/sqlfluff/pull/5911) [@kzosabe](https://github.com/kzosabe)

## New Contributors

* [@atishay](https://github.com/atishay) made their first contribution in [#5919](https://github.com/sqlfluff/sqlfluff/pull/5919)
* [@drjwelch](https://github.com/drjwelch) made their first contribution in [#5934](https://github.com/sqlfluff/sqlfluff/pull/5934)
* [@stephnan](https://github.com/stephnan) made their first contribution in [#5936](https://github.com/sqlfluff/sqlfluff/pull/5936)
* [@pprkut](https://github.com/pprkut) made their first contribution in [#5939](https://github.com/sqlfluff/sqlfluff/pull/5939)
* [@pheepa](https://github.com/pheepa) made their first contribution in [#5941](https://github.com/sqlfluff/sqlfluff/pull/5941)
* [@TheCleric](https://github.com/TheCleric) made their first contribution in [#5948](https://github.com/sqlfluff/sqlfluff/pull/5948)
* [@fvankrieken](https://github.com/fvankrieken) made their first contribution in [#5880](https://github.com/sqlfluff/sqlfluff/pull/5880)
* [@DimaSamodurov](https://github.com/DimaSamodurov) made their first contribution in [#5910](https://github.com/sqlfluff/sqlfluff/pull/5910)
* [@eskabetxe](https://github.com/eskabetxe) made their first contribution in [#5970](https://github.com/sqlfluff/sqlfluff/pull/5970)
* [@hawle](https://github.com/hawle) made their first contribution in [#5982](https://github.com/sqlfluff/sqlfluff/pull/5982)
* [@danatmercury](https://github.com/danatmercury) made their first contribution in [#5961](https://github.com/sqlfluff/sqlfluff/pull/5961)
* [@Xemptuous](https://github.com/Xemptuous) made their first contribution in [#5856](https://github.com/sqlfluff/sqlfluff/pull/5856)

## [3.0.7] - 2024-05-23

## Highlights

This is primarily a fix for compatibility with dbt 1.8+. Beyond that, it also brings several dialect improvements to SQLite, BigQuery, MySQL, Oracle & Clickhouse.

Thanks also to the **five** new contributors whose work was included in this release! 🎉🎉🏆🎉🎉

## What’s Changed

* Add more minor features and fixes to sqlite dialect [#5894](https://github.com/sqlfluff/sqlfluff/pull/5894) [@Enduriel](https://github.com/Enduriel)
* Fix Clickhouse identifiers format [#5890](https://github.com/sqlfluff/sqlfluff/pull/5890) [@Pavel-Strybuk](https://github.com/Pavel-Strybuk)
* Add full support for on conflict clause in SQLite [#5888](https://github.com/sqlfluff/sqlfluff/pull/5888) [@Enduriel](https://github.com/Enduriel)
* dbt Templater Plugin: dbt 1.8 support [#5892](https://github.com/sqlfluff/sqlfluff/pull/5892) [@keraion](https://github.com/keraion)
* Added support for oracle materialized view [#5883](https://github.com/sqlfluff/sqlfluff/pull/5883) [@harshsoni2024](https://github.com/harshsoni2024)
* BigQuery: Support ALTER TABLE ADD KEY statements [#5881](https://github.com/sqlfluff/sqlfluff/pull/5881) [@kzosabe](https://github.com/kzosabe)
* MySQL: Support DIV and MOD operators [#5879](https://github.com/sqlfluff/sqlfluff/pull/5879) [@kzosabe](https://github.com/kzosabe)
* Update documentation to include all templaters [#5873](https://github.com/sqlfluff/sqlfluff/pull/5873) [@timchurch](https://github.com/timchurch)
* MySQL: Define date part function names [#5874](https://github.com/sqlfluff/sqlfluff/pull/5874) [@kzosabe](https://github.com/kzosabe)
* Remove typing_extensions requirement [#5860](https://github.com/sqlfluff/sqlfluff/pull/5860) [@qarkai](https://github.com/qarkai)
* BigQuery: Fix EXPORT DATA statement [#5859](https://github.com/sqlfluff/sqlfluff/pull/5859) [@kzosabe](https://github.com/kzosabe)
* BigQuery: Support CREATE INDEX statements [#5858](https://github.com/sqlfluff/sqlfluff/pull/5858) [@kzosabe](https://github.com/kzosabe)

## New Contributors

* [@qarkai](https://github.com/qarkai) made their first contribution in [#5860](https://github.com/sqlfluff/sqlfluff/pull/5860)
* [@timchurch](https://github.com/timchurch) made their first contribution in [#5873](https://github.com/sqlfluff/sqlfluff/pull/5873)
* [@harshsoni2024](https://github.com/harshsoni2024) made their first contribution in [#5883](https://github.com/sqlfluff/sqlfluff/pull/5883)
* [@Enduriel](https://github.com/Enduriel) made their first contribution in [#5888](https://github.com/sqlfluff/sqlfluff/pull/5888)
* [@Pavel-Strybuk](https://github.com/Pavel-Strybuk) made their first contribution in [#5890](https://github.com/sqlfluff/sqlfluff/pull/5890)

## [3.0.6] - 2024-05-06

## Highlights

This release primarily fixes an issue introduced by the recent dbt 1.7.14 release, and brings better support for dbt 1.7+. It also includes a range of dialect improvements and CLI refinements.

This release also includes the groundwork for linting the unrendered sections of Jinja templates. More documentation on this will be released in due course when it's ready for beta testing.

Thanks also to [@padraic00](https://github.com/padraic00) & [@burhanyasar](https://github.com/burhanyasar) who made their first contributions in this release. 🎉🎉🏆🎉🎉
## What’s Changed

* [fix_clickhouse] Temporary Table Create AS SELECT [#5843](https://github.com/sqlfluff/sqlfluff/pull/5843) [@konnectr](https://github.com/konnectr)
* Bugfix: ST02 - Compare entire condition expression [#5850](https://github.com/sqlfluff/sqlfluff/pull/5850) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Clichouse prewhere [#5849](https://github.com/sqlfluff/sqlfluff/pull/5849) [@konnectr](https://github.com/konnectr)
* BigQuery: Support missing DROP statements [#5848](https://github.com/sqlfluff/sqlfluff/pull/5848) [@kzosabe](https://github.com/kzosabe)
* BigQuery: various CREATE statements [#5846](https://github.com/sqlfluff/sqlfluff/pull/5846) [@greg-finley](https://github.com/greg-finley)
* BigQuery Alter Schema [#5835](https://github.com/sqlfluff/sqlfluff/pull/5835) [@greg-finley](https://github.com/greg-finley)
* Snowflake execute immediate from [#5836](https://github.com/sqlfluff/sqlfluff/pull/5836) [@greg-finley](https://github.com/greg-finley)
* Support dbt 1.7 [#5842](https://github.com/sqlfluff/sqlfluff/pull/5842) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Create extension cascade [#5834](https://github.com/sqlfluff/sqlfluff/pull/5834) [@greg-finley](https://github.com/greg-finley)
* Postgres: Add Support for PostGIS operators [#5830](https://github.com/sqlfluff/sqlfluff/pull/5830) [@burhanyasar](https://github.com/burhanyasar)
* Db2: Support additional CREATE INDEX options [#5827](https://github.com/sqlfluff/sqlfluff/pull/5827) [@keraion](https://github.com/keraion)
* Allow to align all siblings when respacing [#5826](https://github.com/sqlfluff/sqlfluff/pull/5826) [@borchero](https://github.com/borchero)
* BigQuery: Support EXECUTE IMMEDIATE [#5820](https://github.com/sqlfluff/sqlfluff/pull/5820) [@keraion](https://github.com/keraion)
* BigQuery: Support CREATE ROW ACCESS POLICY statement [#5821](https://github.com/sqlfluff/sqlfluff/pull/5821) [@kzosabe](https://github.com/kzosabe)
* Fix Jinja variant location correction [#5814](https://github.com/sqlfluff/sqlfluff/pull/5814) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Test cases for linter fails. [#5815](https://github.com/sqlfluff/sqlfluff/pull/5815) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: Support nested BEGIN, Fix CREATE PROCEDURE OPTIONS [#5816](https://github.com/sqlfluff/sqlfluff/pull/5816) [@keraion](https://github.com/keraion)
* Bring multiple jinja variants through to the parser. [#5794](https://github.com/sqlfluff/sqlfluff/pull/5794) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix placeholder labelling [#5813](https://github.com/sqlfluff/sqlfluff/pull/5813) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tighten up the return from .process() [#5810](https://github.com/sqlfluff/sqlfluff/pull/5810) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: Support CREATE MATERIALIZED VIEW AS REPLICA OF [#5811](https://github.com/sqlfluff/sqlfluff/pull/5811) [@kzosabe](https://github.com/kzosabe)
* BigQuery: Support OPTIONS in CREATE FUNCTION statement [#5812](https://github.com/sqlfluff/sqlfluff/pull/5812) [@kzosabe](https://github.com/kzosabe)
* TSQL: fix `ALTER TABLE ... SWITCH PARTITION` [#5807](https://github.com/sqlfluff/sqlfluff/pull/5807) [@keen85](https://github.com/keen85)
* SparkSQL: Add functions that use UNIT keywords [#5806](https://github.com/sqlfluff/sqlfluff/pull/5806) [@keraion](https://github.com/keraion)
* CLI: Add `--stdin-filename` option [#5805](https://github.com/sqlfluff/sqlfluff/pull/5805) [@keraion](https://github.com/keraion) (see the usage sketch after this list)
* TSQL: parse `CREATE/ALTER/DROP MASTER KEY` [#5802](https://github.com/sqlfluff/sqlfluff/pull/5802) [@keen85](https://github.com/keen85)
* Jinja Variant Configuration [#5785](https://github.com/sqlfluff/sqlfluff/pull/5785) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Small refactor in jinja templater [#5786](https://github.com/sqlfluff/sqlfluff/pull/5786) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: Support FOR SYSTEM_TIME AS OF in CREATE TABLE CLONE statement [#5798](https://github.com/sqlfluff/sqlfluff/pull/5798) [@kzosabe](https://github.com/kzosabe)
* TSQL: support for `CREATE/ALTER PARTITION FUNCTION/SCHEME` [#5793](https://github.com/sqlfluff/sqlfluff/pull/5793) [@keen85](https://github.com/keen85)
* BigQuery: Support DEFAULT COLLATE segment [#5790](https://github.com/sqlfluff/sqlfluff/pull/5790) [@kzosabe](https://github.com/kzosabe)
* TSQL: support computed columns [#5792](https://github.com/sqlfluff/sqlfluff/pull/5792) [@keen85](https://github.com/keen85)
* Simplify one of the lexer methods [#5788](https://github.com/sqlfluff/sqlfluff/pull/5788) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Improve light colour highlight [#5784](https://github.com/sqlfluff/sqlfluff/pull/5784) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Support TIMESTAMP_LTZ and TIMESTAMP_NTZ types [#5783](https://github.com/sqlfluff/sqlfluff/pull/5783) [@padraic00](https://github.com/padraic00)
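A usage sketch for the new `--stdin-filename` option, which lets SQLFluff treat piped input as though it came from the named file, so that path-based configuration lookup applies. The `-` argument for reading from stdin follows SQLFluff's existing convention:

```console
$ cat my_query.sql | sqlfluff lint - --stdin-filename my_query.sql
```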
## New Contributors

* [@padraic00](https://github.com/padraic00) made their first contribution in [#5783](https://github.com/sqlfluff/sqlfluff/pull/5783)
* [@burhanyasar](https://github.com/burhanyasar) made their first contribution in [#5830](https://github.com/sqlfluff/sqlfluff/pull/5830)

## [3.0.5] - 2024-04-19

## Highlights

This release contains one larger change: a big upgrade to case sensitivity in the alias use rules, also allowing customisation of how SQLFluff uses case sensitivity in rules like AL05. Beyond that, this also includes a handful of dialect improvements.

Thanks especially to [@olshak](https://github.com/olshak), [@MarkPaulin](https://github.com/MarkPaulin), [@mhoogendoorn](https://github.com/mhoogendoorn) & [@kawashiro](https://github.com/kawashiro) who made their first contributions in this release! 🚀

## What’s Changed

* BigQuery: Support CREATE SNAPSHOT TABLE statement [#5779](https://github.com/sqlfluff/sqlfluff/pull/5779) [@kzosabe](https://github.com/kzosabe)
* Upgrades to release actions. [#5774](https://github.com/sqlfluff/sqlfluff/pull/5774) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Improve Snowflake syntax support [#5770](https://github.com/sqlfluff/sqlfluff/pull/5770) [@kawashiro](https://github.com/kawashiro)
* TSQL: allow 'OR ALTER' on 'CREATE TRIGGER' [#5772](https://github.com/sqlfluff/sqlfluff/pull/5772) [@mhoogendoorn](https://github.com/mhoogendoorn)
* Enhancement: Improved Identifiers - casefolding, quoted values, and basic escaping [#5726](https://github.com/sqlfluff/sqlfluff/pull/5726) [@keraion](https://github.com/keraion)
* TSQL: Fix bare functions in default constraints [#5771](https://github.com/sqlfluff/sqlfluff/pull/5771) [@MarkPaulin](https://github.com/MarkPaulin)
* MySQL: Fix parsing 'ALTER TABLE ts ADD COLUMN modified_at TIMESTAMP NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP;' (#5766) [#5767](https://github.com/sqlfluff/sqlfluff/pull/5767) [@olshak](https://github.com/olshak)

## New Contributors

* [@olshak](https://github.com/olshak) made their first contribution in [#5767](https://github.com/sqlfluff/sqlfluff/pull/5767)
* [@MarkPaulin](https://github.com/MarkPaulin) made their first contribution in [#5771](https://github.com/sqlfluff/sqlfluff/pull/5771)
* [@mhoogendoorn](https://github.com/mhoogendoorn) made their first contribution in [#5772](https://github.com/sqlfluff/sqlfluff/pull/5772)
* [@kawashiro](https://github.com/kawashiro) made their first contribution in [#5770](https://github.com/sqlfluff/sqlfluff/pull/5770)

## [3.0.4] - 2024-04-07

## Highlights

This is a standard bugfix release bringing a bunch of dialect improvements and bugfixes. Almost every dialect sees some improvements, and it also includes quality of life improvements to the CLI, pre-commit hooks, docs and several rules.

Thanks also to the **eight** new contributors whose first contributions are included in this release. 🎉🎉🏆🎉🎉

## What’s Changed

* TSQL: Move PROPERTY to unreserved [#5759](https://github.com/sqlfluff/sqlfluff/pull/5759) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Mysql: Add Character Set Literals [#5755](https://github.com/sqlfluff/sqlfluff/pull/5755) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Support ASOF Joins [#5756](https://github.com/sqlfluff/sqlfluff/pull/5756) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Mysql: Support scoped function calls [#5757](https://github.com/sqlfluff/sqlfluff/pull/5757) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Support pgvector vector type [#5758](https://github.com/sqlfluff/sqlfluff/pull/5758) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support RETURNING Clause [#5760](https://github.com/sqlfluff/sqlfluff/pull/5760) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Allow return control structures in atomic functions [#5761](https://github.com/sqlfluff/sqlfluff/pull/5761) [@WittierDinosaur](https://github.com/WittierDinosaur)
* ST04: Retain comments when flattening `CASE` [#5753](https://github.com/sqlfluff/sqlfluff/pull/5753) [@keraion](https://github.com/keraion)
* dbt templater: Raise UserError when using stdin [#5752](https://github.com/sqlfluff/sqlfluff/pull/5752) [@keraion](https://github.com/keraion)
* SQLite: Add `GLOB`, `MATCH`. Improved `REGEXP` [#5745](https://github.com/sqlfluff/sqlfluff/pull/5745) [@keraion](https://github.com/keraion)
* Databricks: Fix Aliases for Join-like objects [#5748](https://github.com/sqlfluff/sqlfluff/pull/5748) [@keraion](https://github.com/keraion)
* Add missing README ref, and issues labels [#5741](https://github.com/sqlfluff/sqlfluff/pull/5741) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Qual: Add pre-commit to CI [#5730](https://github.com/sqlfluff/sqlfluff/pull/5730) [@mdeweerd](https://github.com/mdeweerd)
* Added support for 'greater/less than or equal' on ANSI CASE statement [#5728](https://github.com/sqlfluff/sqlfluff/pull/5728) [@IliyanKostov9](https://github.com/IliyanKostov9)
* Remove `--force` flag from pre-commit hook definition [#5739](https://github.com/sqlfluff/sqlfluff/pull/5739) [@borchero](https://github.com/borchero)
* adding snake_case to CP01 extended_capitalisation_policy [#5736](https://github.com/sqlfluff/sqlfluff/pull/5736) [@alecsgonz](https://github.com/alecsgonz)
* ST04: Ignore simplifying `CASE`s with different expressions [#5735](https://github.com/sqlfluff/sqlfluff/pull/5735) [@keraion](https://github.com/keraion)
* Fix #5724 mysql: Allow Line comments without space after -- [#5731](https://github.com/sqlfluff/sqlfluff/pull/5731) [@mdeweerd](https://github.com/mdeweerd)
* Fix spelling [#5729](https://github.com/sqlfluff/sqlfluff/pull/5729) [@mdeweerd](https://github.com/mdeweerd)
* Fix implementation for view_column_name_list in BigQuery's CREATE VIEW [#5738](https://github.com/sqlfluff/sqlfluff/pull/5738) [@kzosabe](https://github.com/kzosabe)
* CLI: Suppress tracebacks on render/fix/format [#5734](https://github.com/sqlfluff/sqlfluff/pull/5734) [@keraion](https://github.com/keraion)
* Clickhouse: add parsing for select except clause [#5725](https://github.com/sqlfluff/sqlfluff/pull/5725) [@tojahech](https://github.com/tojahech)
* Add array type support to Trino dialect [#5722](https://github.com/sqlfluff/sqlfluff/pull/5722) [@kirkhansen](https://github.com/kirkhansen)
* Fix/snowflake unparsable tag in create stmt [#5720](https://github.com/sqlfluff/sqlfluff/pull/5720) [@mariq41](https://github.com/mariq41)
* Fix/snowflake ext storage [#5714](https://github.com/sqlfluff/sqlfluff/pull/5714) [@mariq41](https://github.com/mariq41)
* Clickhouse: add parsing for "distinct on" syntax [#5716](https://github.com/sqlfluff/sqlfluff/pull/5716) [@tojahech](https://github.com/tojahech)
* added refresh mode init on create table statement [#5715](https://github.com/sqlfluff/sqlfluff/pull/5715) [@IliyanKostov9](https://github.com/IliyanKostov9)
* added `ifNotExistsGrammar` to Snowflake procedure [#5709](https://github.com/sqlfluff/sqlfluff/pull/5709) [@IliyanKostov9](https://github.com/IliyanKostov9)
* Trino: 'TIMESTAMP(p)' no longer triggers LT01 [#5711](https://github.com/sqlfluff/sqlfluff/pull/5711) [@rileymcdowell](https://github.com/rileymcdowell)
* Snowflake: add support for streamlit [#5692](https://github.com/sqlfluff/sqlfluff/pull/5692) [@vgw-chriskruger](https://github.com/vgw-chriskruger)

## New Contributors

* [@vgw-chriskruger](https://github.com/vgw-chriskruger) made their first contribution in [#5692](https://github.com/sqlfluff/sqlfluff/pull/5692)
* [@IliyanKostov9](https://github.com/IliyanKostov9) made their first contribution in [#5709](https://github.com/sqlfluff/sqlfluff/pull/5709)
* [@tojahech](https://github.com/tojahech) made their first contribution in [#5716](https://github.com/sqlfluff/sqlfluff/pull/5716)
* [@mariq41](https://github.com/mariq41) made their first contribution in [#5714](https://github.com/sqlfluff/sqlfluff/pull/5714)
* [@kirkhansen](https://github.com/kirkhansen) made their first contribution in [#5722](https://github.com/sqlfluff/sqlfluff/pull/5722)
* [@kzosabe](https://github.com/kzosabe) made their first contribution in [#5738](https://github.com/sqlfluff/sqlfluff/pull/5738)
* [@mdeweerd](https://github.com/mdeweerd) made their first contribution in [#5729](https://github.com/sqlfluff/sqlfluff/pull/5729)
* [@alecsgonz](https://github.com/alecsgonz) made their first contribution in [#5736](https://github.com/sqlfluff/sqlfluff/pull/5736)

## [3.0.3] - 2024-03-22

## Highlights

This is a standard minor release fixing a set of dialect issues with Trino, BigQuery, Vertica and Snowflake.

Thanks to [@maegan-canva](https://github.com/maegan-canva), [@rileymcdowell](https://github.com/rileymcdowell) & [@paysni](https://github.com/paysni) who made their first contributions in this release.

## What’s Changed

* [TSQL] Create columnstore indexes [#5708](https://github.com/sqlfluff/sqlfluff/pull/5708) [@paysni](https://github.com/paysni)
* [Vertica] fix gaps for some datatypes, complex alias support, fix group by for DDL [#5691](https://github.com/sqlfluff/sqlfluff/pull/5691) [@PolitePp](https://github.com/PolitePp)
* BigQuery: Unreserve KEY keyword [#5703](https://github.com/sqlfluff/sqlfluff/pull/5703) [@greg-finley](https://github.com/greg-finley)
* Trino: Add INTEGER synonym of INT [#5702](https://github.com/sqlfluff/sqlfluff/pull/5702) [@rileymcdowell](https://github.com/rileymcdowell)
* Snowflake shouldn't reserve DO as a keyword. [#5699](https://github.com/sqlfluff/sqlfluff/pull/5699) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow use of STREAM in snowflake CHANGES clause [#5698](https://github.com/sqlfluff/sqlfluff/pull/5698) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Trino: Parse regexp_replace with lambda func [#5683](https://github.com/sqlfluff/sqlfluff/pull/5683) [@rileymcdowell](https://github.com/rileymcdowell)
* Documentation update: Remove reference to alias' default config being "consistent" [#5689](https://github.com/sqlfluff/sqlfluff/pull/5689) [@maegan-canva](https://github.com/maegan-canva)

## New Contributors

* [@maegan-canva](https://github.com/maegan-canva) made their first contribution in [#5689](https://github.com/sqlfluff/sqlfluff/pull/5689)
* [@rileymcdowell](https://github.com/rileymcdowell) made their first contribution in [#5683](https://github.com/sqlfluff/sqlfluff/pull/5683)
* [@paysni](https://github.com/paysni) made their first contribution in [#5708](https://github.com/sqlfluff/sqlfluff/pull/5708)

## [3.0.2] - 2024-03-17

## Highlights

This is primarily another hotfix release for 3.0.0, specifically making sure the deprecation warnings for `-f/--force` go to `stderr` rather than `stdout`. It also includes two dialect improvements, one for Snowflake and one for T-SQL.
## What’s Changed * Snowflake: Support External Volumes [#5684](https://github.com/sqlfluff/sqlfluff/pull/5684) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL: Support Reconfigure [#5685](https://github.com/sqlfluff/sqlfluff/pull/5685) [@WittierDinosaur](https://github.com/WittierDinosaur) * CLI: Make `--force` deprecation print on stderr [#5681](https://github.com/sqlfluff/sqlfluff/pull/5681) [@keraion](https://github.com/keraion) ## [3.0.1] - 2024-03-13 ## Highlights This minor release is a hotfix to resolve a bug affecting CLI exit codes that was introduced in the 3.0.0 release. ## What’s Changed * Fix 5673 [#5676](https://github.com/sqlfluff/sqlfluff/pull/5676) [@alanmcruickshank](https://github.com/alanmcruickshank) ## [3.0.0] - 2024-03-12 ## Highlights This release brings several breaking changes to previous releases. Most notably: * It drops support for python 3.7, which reached end of life in June 2023. * It migrates to `pyproject.toml` rather than `setup.cfg` as the python packaging configuration file (although keeping `setuptools` as the default backend). * The serialised output for `sqlfluff lint` (and the corresponding API methods) now contains more information about the span of linting issues, initial proposed fixes and several statistics which were previously only accessible via csv export. Besides the *new* fields, the original fields of `line_pos` and `line_no` have been renamed to `start_line_pos` and `start_line_no`, to distinguish them from the new fields starting `end_*`. * The default `annotation_level` set by the `--annotation-level` option on the `sqlfluff lint` command has been changed from `notice` to `warning`, to better distinguish linting errors from warnings, which now always have the level of `notice`. This is only relevant when using the `github-annotation` or `github-annotation-native` formats. * A change in the default behaviour for `convention.not_equals`. The new default is to be `consistent`, which is slightly more relaxed than the original behaviour. * The `--force` option has been deprecated on `sqlfluff fix` as that option is now the default behaviour. This is to enable significant reductions in memory overhead when linting large projects. * The long since deprecated `--disable_progress_bar` option has been removed (which was replaced by the kebab-case `--disable-progress-bar` more than a year ago). * Plugins are now loaded progressively, and with error handling. If a plugin fails to load, SQLFluff will now continue onward and try to run regardless, while also showing a more helpful error message. On top of these changes, there has been a whole host of dialect improvements and additions, in particular the inclusion of a `vertica` dialect for the first time. There's also: * A new rule (`aliasing.self_alias.column`) which prevents aliasing a column as itself. * A change which disables AL01 (`aliasing.table`) by default for Oracle. * A change to AL05 to allow aliasing for a `VALUES` clause. For more specifics please take a look at the [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html). Thanks to the community for its patience during the release cycle for 3.0.0, which has taken a little longer than expected. Thanks also to the **TWENTY SEVEN** new contributors whose changes are included in this release.
🎉🎉🏆🎉🎉 ## What’s Changed * Progressively load plugins [#5661](https://github.com/sqlfluff/sqlfluff/pull/5661) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: AL05, ignore aliases in values clause [#5669](https://github.com/sqlfluff/sqlfluff/pull/5669) [@keraion](https://github.com/keraion) * Add Postgres CREATE FOREIGN TABLE statement [#5657](https://github.com/sqlfluff/sqlfluff/pull/5657) [@edpft](https://github.com/edpft) * Lexer: Handle escaped curly brace slices from the python templater [#5666](https://github.com/sqlfluff/sqlfluff/pull/5666) [@keraion](https://github.com/keraion) * [CI]: Update pre-commit hook versions [#5665](https://github.com/sqlfluff/sqlfluff/pull/5665) [@keraion](https://github.com/keraion) * Resolves #5624: Snowflake unparsable unset table options [#5664](https://github.com/sqlfluff/sqlfluff/pull/5664) [@andychannery](https://github.com/andychannery) * Revert Ruff Changes [#5662](https://github.com/sqlfluff/sqlfluff/pull/5662) [@alanmcruickshank](https://github.com/alanmcruickshank) * Complete the memory overhead work on cli fix [#5653](https://github.com/sqlfluff/sqlfluff/pull/5653) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #5647: Snowflake unparsable variant access after cast [#5658](https://github.com/sqlfluff/sqlfluff/pull/5658) [@andychannery](https://github.com/andychannery) * BQ PK and FK [#5654](https://github.com/sqlfluff/sqlfluff/pull/5654) [@OTooleMichael](https://github.com/OTooleMichael) * Prep version 3.0.0a6 [#5652](https://github.com/sqlfluff/sqlfluff/pull/5652) [@github-actions](https://github.com/github-actions) * Add Support for Databricks `CREATE FUNCTION` Syntax in SparkSQL Parser [#5615](https://github.com/sqlfluff/sqlfluff/pull/5615) [@mitchellvanrijkom](https://github.com/mitchellvanrijkom) * Swap fix `--force` for `--check` [#5650](https://github.com/sqlfluff/sqlfluff/pull/5650) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove `DeprecatedOption` [#5649](https://github.com/sqlfluff/sqlfluff/pull/5649) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve broken loop limit test [#5651](https://github.com/sqlfluff/sqlfluff/pull/5651) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Move NOTIFY to non-reserved words [#5645](https://github.com/sqlfluff/sqlfluff/pull/5645) [@greg-finley](https://github.com/greg-finley) * BigQuery: GROUP BY ALL [#5646](https://github.com/sqlfluff/sqlfluff/pull/5646) [@greg-finley](https://github.com/greg-finley) * chore: use pre-calculated `_code_indices` in `BaseSegment::raw_segmen… [#5644](https://github.com/sqlfluff/sqlfluff/pull/5644) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Fix Snowflake Semistructured identifier parsing regex-expression [#5635](https://github.com/sqlfluff/sqlfluff/pull/5635) [@DannyMor](https://github.com/DannyMor) * Postgres: Update ReferentialActionGrammar to support sets of columns [#5628](https://github.com/sqlfluff/sqlfluff/pull/5628) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add syntax for masking policy force [#5629](https://github.com/sqlfluff/sqlfluff/pull/5629) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Allow nested block comments [#5630](https://github.com/sqlfluff/sqlfluff/pull/5630) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Add Create, Alter, Drop Statistics [#5631](https://github.com/sqlfluff/sqlfluff/pull/5631) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL Fix relative sql filepath 
lexer [#5632](https://github.com/sqlfluff/sqlfluff/pull/5632) [@WittierDinosaur](https://github.com/WittierDinosaur) * Tech Debt: Replace some sequences with their Ref equivalents [#5633](https://github.com/sqlfluff/sqlfluff/pull/5633) [@WittierDinosaur](https://github.com/WittierDinosaur) * ANSI/MYSQL: Support Create Role If Not Exists [#5634](https://github.com/sqlfluff/sqlfluff/pull/5634) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add Vertica dialect [#5640](https://github.com/sqlfluff/sqlfluff/pull/5640) [@PolitePp](https://github.com/PolitePp) * Add Support for Snowflake Materialised View and Column Masking Policy [#5637](https://github.com/sqlfluff/sqlfluff/pull/5637) [@ulixius9](https://github.com/ulixius9) * [snowflake dialect] support ALTER TABLE ... ADD COLUMN IF NOT EXISTS [#5621](https://github.com/sqlfluff/sqlfluff/pull/5621) [@gshen7](https://github.com/gshen7) * SQLite: Make `DISTINCT FROM` optional; SQLite/TSQL/Exasol: Nothing'd `NanLiteralSegment` [#5620](https://github.com/sqlfluff/sqlfluff/pull/5620) [@keraion](https://github.com/keraion) * Upgrade greenplum dialect [#5546](https://github.com/sqlfluff/sqlfluff/pull/5546) [@kkozhakin](https://github.com/kkozhakin) * Oracle: parse length qualifier in types [#5613](https://github.com/sqlfluff/sqlfluff/pull/5613) [@Jefffrey](https://github.com/Jefffrey) * Multiple Dialects: Fix handling of nested sets expressions [#5606](https://github.com/sqlfluff/sqlfluff/pull/5606) [@keraion](https://github.com/keraion) * DB2: Add labeled durations and special registers [#5612](https://github.com/sqlfluff/sqlfluff/pull/5612) [@keraion](https://github.com/keraion) * Sparksql: Fix `LATERAL VIEW` following `JOIN`; `CLUSTER|SORT|DISTRIBUTE BY` or `QUALIFY` without `FROM` [#5602](https://github.com/sqlfluff/sqlfluff/pull/5602) [@keraion](https://github.com/keraion) * File helpers and config test parameterisation. 
[#5579](https://github.com/sqlfluff/sqlfluff/pull/5579) [@alanmcruickshank](https://github.com/alanmcruickshank) * Memory overhead optimisations during linting [#5585](https://github.com/sqlfluff/sqlfluff/pull/5585) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix: multiple columns foreign key constraint (#5592) [#5594](https://github.com/sqlfluff/sqlfluff/pull/5594) [@maoxingda](https://github.com/maoxingda) * [CI] Add `no_implicit_reexport` mypy check [#5509](https://github.com/sqlfluff/sqlfluff/pull/5509) [@Koyaani](https://github.com/Koyaani) * Prep version 3.0.0a5 [#5512](https://github.com/sqlfluff/sqlfluff/pull/5512) [@github-actions](https://github.com/github-actions) * Add support & test for postgres alter policy with multiple clauses [#5577](https://github.com/sqlfluff/sqlfluff/pull/5577) [@fnimick](https://github.com/fnimick) * Update github actions to latest versions [#5584](https://github.com/sqlfluff/sqlfluff/pull/5584) [@alanmcruickshank](https://github.com/alanmcruickshank) * Allows using dbt cross project ref in jinja templater [#5574](https://github.com/sqlfluff/sqlfluff/pull/5574) [@alangner](https://github.com/alangner) * Improve support for Jinja templater plugins with custom tags [#5543](https://github.com/sqlfluff/sqlfluff/pull/5543) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Databricks: fix `EXCEPT` with qualified column reference [#5557](https://github.com/sqlfluff/sqlfluff/pull/5557) [@keraion](https://github.com/keraion) * Stricter recommended config for not_equals convention [#5580](https://github.com/sqlfluff/sqlfluff/pull/5580) [@alanmcruickshank](https://github.com/alanmcruickshank) * CV01: Add options for ANSI and consistent style. [#5539](https://github.com/sqlfluff/sqlfluff/pull/5539) [@keraion](https://github.com/keraion) * DuckDB: Fix `REPLACE` after `EXCLUDE`. Fix AL03 linting for wildcard like expression. [#5556](https://github.com/sqlfluff/sqlfluff/pull/5556) [@keraion](https://github.com/keraion) * Clickhouse: Add `GLOBAL JOIN`, `GLOBAL IN`, and `PASTE JOIN` [#5560](https://github.com/sqlfluff/sqlfluff/pull/5560) [@keraion](https://github.com/keraion) * [Docs] Use extended policy for identifier capitalisation in starter config [#5562](https://github.com/sqlfluff/sqlfluff/pull/5562) [@j-svensmark](https://github.com/j-svensmark) * Build: linting black 24.1.0 rules update [#5573](https://github.com/sqlfluff/sqlfluff/pull/5573) [@keraion](https://github.com/keraion) * Snowflake: Updating Snowflake dialect to pass acceptable RLS policy objects [#5559](https://github.com/sqlfluff/sqlfluff/pull/5559) [@k1drobot](https://github.com/k1drobot) * Redshift Syntax: ALTER APPEND [#5545](https://github.com/sqlfluff/sqlfluff/pull/5545) [@OTooleMichael](https://github.com/OTooleMichael) * DuckDB: Add ANTI, SEMI, ASOF, and POSITIONAL joins [#5544](https://github.com/sqlfluff/sqlfluff/pull/5544) [@keraion](https://github.com/keraion) * MySQL: fix FIRST keyword in ALTER ADD/MODIFY [#5537](https://github.com/sqlfluff/sqlfluff/pull/5537) [@archer62](https://github.com/archer62) * Postgres/DB2/Oracle: Fix comma join `LATERAL`. 
[#5533](https://github.com/sqlfluff/sqlfluff/pull/5533) [@keraion](https://github.com/keraion) * Add new Rule AL09: Avoid Self Alias [#5528](https://github.com/sqlfluff/sqlfluff/pull/5528) [@aayushr7](https://github.com/aayushr7) * Rule AL01: disabled for Oracle dialect [#5517](https://github.com/sqlfluff/sqlfluff/pull/5517) [@keraion](https://github.com/keraion) * Postgres ALTER EXTENSION support: dialect & tests [#5527](https://github.com/sqlfluff/sqlfluff/pull/5527) [@remy-gohiring](https://github.com/remy-gohiring) * SparkSQL: Add `UNPIVOT` syntax. Fix `TABLESAMPLE` aliases. [#5524](https://github.com/sqlfluff/sqlfluff/pull/5524) [@keraion](https://github.com/keraion) * DuckDB: Added support for `PIVOT` and `UNPIVOT` [#5514](https://github.com/sqlfluff/sqlfluff/pull/5514) [@keraion](https://github.com/keraion) * Fix parse error databricks window function starts with order by [#5493](https://github.com/sqlfluff/sqlfluff/pull/5493) [@snkekorfus](https://github.com/snkekorfus) * Hive: allow UDTF to return multiple column aliases in SELECT [#5495](https://github.com/sqlfluff/sqlfluff/pull/5495) [@reata](https://github.com/reata) * DB2: Add support for `DECLARE GLOBAL TEMPORARY TABLES`, `OFFSET`, `CALL`, and non-bracketed `VALUES` [#5508](https://github.com/sqlfluff/sqlfluff/pull/5508) [@keraion](https://github.com/keraion) * DuckDB: Add CREATE OR REPLACE TABLE syntax [#5511](https://github.com/sqlfluff/sqlfluff/pull/5511) [@keraion](https://github.com/keraion) * TSQL: Top and Distinct in same query [#5491](https://github.com/sqlfluff/sqlfluff/pull/5491) [@greg-finley](https://github.com/greg-finley) * [Spark/Databricks] Fix: make COLUMNS in APPLY CHANGES INTO optional [#5498](https://github.com/sqlfluff/sqlfluff/pull/5498) [@rocwang](https://github.com/rocwang) * SparkSQL: exclamation mark as logical not [#5500](https://github.com/sqlfluff/sqlfluff/pull/5500) [@reata](https://github.com/reata) * SparkSQL: allow value in set_statement to be Java class name [#5504](https://github.com/sqlfluff/sqlfluff/pull/5504) [@reata](https://github.com/reata) * SparkSQL: allow distribute/sort/cluster by at end of set operation [#5502](https://github.com/sqlfluff/sqlfluff/pull/5502) [@reata](https://github.com/reata) * [CI] Add a few more mypy checks [#5505](https://github.com/sqlfluff/sqlfluff/pull/5505) [@Koyaani](https://github.com/Koyaani) * Snowflake dialect: Add support for DATABASE ROLE in GRANT/REVOKE [#5490](https://github.com/sqlfluff/sqlfluff/pull/5490) [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) * DuckDB: Qualify and From-First [#5485](https://github.com/sqlfluff/sqlfluff/pull/5485) [@keraion](https://github.com/keraion) * MySql: create table: allow null/not null in any position [#5473](https://github.com/sqlfluff/sqlfluff/pull/5473) [@archer62](https://github.com/archer62) * Snowflake dialect: Support for CREATE DATABASE ROLE [#5475](https://github.com/sqlfluff/sqlfluff/pull/5475) [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) * Clickhouse Dialect - Support BackQuoted Identifiers [#5457](https://github.com/sqlfluff/sqlfluff/pull/5457) [@kaiyannameighu](https://github.com/kaiyannameighu) * Change Color.lightgrey to have a white background - dark theme friendly [#5458](https://github.com/sqlfluff/sqlfluff/pull/5458) [@ryaminal](https://github.com/ryaminal) * Fix indentation for single cube clause [#5462](https://github.com/sqlfluff/sqlfluff/pull/5462) [@tunetheweb](https://github.com/tunetheweb) * Prep version 3.0.0a4 [#5455](https://github.com/sqlfluff/sqlfluff/pull/5455) 
[@github-actions](https://github.com/github-actions) * Build out rule and fix serialisation [#5364](https://github.com/sqlfluff/sqlfluff/pull/5364) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add warning about github actions annotations limit [#5450](https://github.com/sqlfluff/sqlfluff/pull/5450) [@alanmcruickshank](https://github.com/alanmcruickshank) * chore: remove unused line initialization in ParseContext [#5448](https://github.com/sqlfluff/sqlfluff/pull/5448) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Refine CLI testing fixture [#5446](https://github.com/sqlfluff/sqlfluff/pull/5446) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update black linting [#5447](https://github.com/sqlfluff/sqlfluff/pull/5447) [@alanmcruickshank](https://github.com/alanmcruickshank) * Prep version 3.0.0a3 [#5444](https://github.com/sqlfluff/sqlfluff/pull/5444) [@github-actions](https://github.com/github-actions) * fix assertion in `test__api__lint_string_specific_exclude_single` [#5437](https://github.com/sqlfluff/sqlfluff/pull/5437) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Databricks CLUSTER BY and OPTIMIZE [#5436](https://github.com/sqlfluff/sqlfluff/pull/5436) [@greg-finley](https://github.com/greg-finley) * TSQL: ON DELETE NO ACTION [#5434](https://github.com/sqlfluff/sqlfluff/pull/5434) [@greg-finley](https://github.com/greg-finley) * Snowflake dynamic table [#5435](https://github.com/sqlfluff/sqlfluff/pull/5435) [@greg-finley](https://github.com/greg-finley) * Support parsing CONSTRAINT definitions when creating Delta Live Tables in SparkSQL/Databricks [#5438](https://github.com/sqlfluff/sqlfluff/pull/5438) [@rocwang](https://github.com/rocwang) * adds few fixes for databricks/sparksql [#5431](https://github.com/sqlfluff/sqlfluff/pull/5431) [@markbaas](https://github.com/markbaas) * TSQL: CREATE USER {FOR|FROM} LOGIN [#5426](https://github.com/sqlfluff/sqlfluff/pull/5426) [@greg-finley](https://github.com/greg-finley) * Snowflake Create table order/noorder [#5421](https://github.com/sqlfluff/sqlfluff/pull/5421) [@greg-finley](https://github.com/greg-finley) * Simplify Snowflake regexes [#5419](https://github.com/sqlfluff/sqlfluff/pull/5419) [@greg-finley](https://github.com/greg-finley) * Permit .* after each tbl_name in multi-table delete syntax [#5408](https://github.com/sqlfluff/sqlfluff/pull/5408) [@yoichi](https://github.com/yoichi) * Fix snowflake add search optimization grant [#5412](https://github.com/sqlfluff/sqlfluff/pull/5412) [@jongracecox](https://github.com/jongracecox) * ANSI: Allow combination of UNION clause and WITH clause [#5413](https://github.com/sqlfluff/sqlfluff/pull/5413) [@yoichi](https://github.com/yoichi) * SQLite: Allow block comments to be terminated by end of input [#5400](https://github.com/sqlfluff/sqlfluff/pull/5400) [@WittierDinosaur](https://github.com/WittierDinosaur) * Update publish github action to use kebab case [#5392](https://github.com/sqlfluff/sqlfluff/pull/5392) [@alanmcruickshank](https://github.com/alanmcruickshank) * Prep version 3.0.0a2 [#5391](https://github.com/sqlfluff/sqlfluff/pull/5391) [@github-actions](https://github.com/github-actions) * Update publish actions and Dockerfile. 
[#5390](https://github.com/sqlfluff/sqlfluff/pull/5390) [@alanmcruickshank](https://github.com/alanmcruickshank) * Prep version 3.0.0a1 [#5381](https://github.com/sqlfluff/sqlfluff/pull/5381) [@github-actions](https://github.com/github-actions) * Move the rest of pytest over to `pyproject.toml` [#5383](https://github.com/sqlfluff/sqlfluff/pull/5383) [@alanmcruickshank](https://github.com/alanmcruickshank) * Move doc8 over to pyproject [#5385](https://github.com/sqlfluff/sqlfluff/pull/5385) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove exceptions for typing stubs that now exist. [#5382](https://github.com/sqlfluff/sqlfluff/pull/5382) [@alanmcruickshank](https://github.com/alanmcruickshank) * Migrate to `pyproject.toml` for the core project. [#5373](https://github.com/sqlfluff/sqlfluff/pull/5373) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix the snippet from a pyproject.toml in configuration.rst [#5378](https://github.com/sqlfluff/sqlfluff/pull/5378) [@ishiis](https://github.com/ishiis) * Snowflake: Support AlterNetworkPolicy Statements [#5377](https://github.com/sqlfluff/sqlfluff/pull/5377) [@WittierDinosaur](https://github.com/WittierDinosaur) * postgres: add support for bodies of "language sql" functions [#5376](https://github.com/sqlfluff/sqlfluff/pull/5376) [@65278](https://github.com/65278) * Add support for SET NAMES statement in MySQL [#5374](https://github.com/sqlfluff/sqlfluff/pull/5374) [@joaostorrer](https://github.com/joaostorrer) * Fix GRANT ALL PRIVILEGES statement in MySQL [#5375](https://github.com/sqlfluff/sqlfluff/pull/5375) [@joaostorrer](https://github.com/joaostorrer) * Another extraction of fixing logic. [#5365](https://github.com/sqlfluff/sqlfluff/pull/5365) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove root requirements.txt [#5372](https://github.com/sqlfluff/sqlfluff/pull/5372) [@alanmcruickshank](https://github.com/alanmcruickshank) * Drop support of python 3.7 [#5288](https://github.com/sqlfluff/sqlfluff/pull/5288) [@zhongjiajie](https://github.com/zhongjiajie) * API configuration documentation [#5369](https://github.com/sqlfluff/sqlfluff/pull/5369) [@golergka](https://github.com/golergka) * add listagg extras support in trino dialect [#5368](https://github.com/sqlfluff/sqlfluff/pull/5368) [@wjhrdy](https://github.com/wjhrdy) * Allow ignoring of comments from indentation entirely #3311 [#5363](https://github.com/sqlfluff/sqlfluff/pull/5363) [@alanmcruickshank](https://github.com/alanmcruickshank) * Trino: Support Analyze statements [#5361](https://github.com/sqlfluff/sqlfluff/pull/5361) [@WittierDinosaur](https://github.com/WittierDinosaur) * Resolve #5327 (logging to stdout) [#5362](https://github.com/sqlfluff/sqlfluff/pull/5362) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@edpft](https://github.com/edpft) made their first contribution in [#5657](https://github.com/sqlfluff/sqlfluff/pull/5657) * [@maoxingda](https://github.com/maoxingda) made their first contribution in [#5594](https://github.com/sqlfluff/sqlfluff/pull/5594) * [@Jefffrey](https://github.com/Jefffrey) made their first contribution in [#5613](https://github.com/sqlfluff/sqlfluff/pull/5613) * [@kkozhakin](https://github.com/kkozhakin) made their first contribution in [#5546](https://github.com/sqlfluff/sqlfluff/pull/5546) * [@gshen7](https://github.com/gshen7) made their first contribution in [#5621](https://github.com/sqlfluff/sqlfluff/pull/5621) * [@PolitePp](https://github.com/PolitePp) made their first 
contribution in [#5640](https://github.com/sqlfluff/sqlfluff/pull/5640) * [@DannyMor](https://github.com/DannyMor) made their first contribution in [#5635](https://github.com/sqlfluff/sqlfluff/pull/5635) * [@mitchellvanrijkom](https://github.com/mitchellvanrijkom) made their first contribution in [#5615](https://github.com/sqlfluff/sqlfluff/pull/5615) * [@ryaminal](https://github.com/ryaminal) made their first contribution in [#5458](https://github.com/sqlfluff/sqlfluff/pull/5458) * [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) made their first contribution in [#5475](https://github.com/sqlfluff/sqlfluff/pull/5475) * [@archer62](https://github.com/archer62) made their first contribution in [#5473](https://github.com/sqlfluff/sqlfluff/pull/5473) * [@keraion](https://github.com/keraion) made their first contribution in [#5485](https://github.com/sqlfluff/sqlfluff/pull/5485) * [@Koyaani](https://github.com/Koyaani) made their first contribution in [#5505](https://github.com/sqlfluff/sqlfluff/pull/5505) * [@snkekorfus](https://github.com/snkekorfus) made their first contribution in [#5493](https://github.com/sqlfluff/sqlfluff/pull/5493) * [@remy-gohiring](https://github.com/remy-gohiring) made their first contribution in [#5527](https://github.com/sqlfluff/sqlfluff/pull/5527) * [@aayushr7](https://github.com/aayushr7) made their first contribution in [#5528](https://github.com/sqlfluff/sqlfluff/pull/5528) * [@k1drobot](https://github.com/k1drobot) made their first contribution in [#5559](https://github.com/sqlfluff/sqlfluff/pull/5559) * [@alangner](https://github.com/alangner) made their first contribution in [#5574](https://github.com/sqlfluff/sqlfluff/pull/5574) * [@fnimick](https://github.com/fnimick) made their first contribution in [#5577](https://github.com/sqlfluff/sqlfluff/pull/5577) * [@jongracecox](https://github.com/jongracecox) made their first contribution in [#5412](https://github.com/sqlfluff/sqlfluff/pull/5412) * [@markbaas](https://github.com/markbaas) made their first contribution in [#5431](https://github.com/sqlfluff/sqlfluff/pull/5431) * [@rocwang](https://github.com/rocwang) made their first contribution in [#5438](https://github.com/sqlfluff/sqlfluff/pull/5438) * [@gvozdvmozgu](https://github.com/gvozdvmozgu) made their first contribution in [#5437](https://github.com/sqlfluff/sqlfluff/pull/5437) * [@wjhrdy](https://github.com/wjhrdy) made their first contribution in [#5368](https://github.com/sqlfluff/sqlfluff/pull/5368) * [@golergka](https://github.com/golergka) made their first contribution in [#5369](https://github.com/sqlfluff/sqlfluff/pull/5369) * [@65278](https://github.com/65278) made their first contribution in [#5376](https://github.com/sqlfluff/sqlfluff/pull/5376) * [@ishiis](https://github.com/ishiis) made their first contribution in [#5378](https://github.com/sqlfluff/sqlfluff/pull/5378) ## [3.0.0a6] - 2024-03-05 ## Highlights This introduces some memory optimisations in the linting operation which prevent a major cause of crashes when linting large projects. As part of that we've also deprecated the `--force` option on `sqlfluff fix` and made that the default behaviour (the associated memory optimisations will come shortly). This also removes the long since deprecated `--disable_progress_bar` option (which was replaced by the kebab-case `--disable-progress-bar` more than a year ago). On top of that this release also introduces the `vertica` dialect for the first time, and a whole host of bugfixes and improvements to other dialects.
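As a rough illustration of what "default behaviour" means here: the simple Python API has the same non-interactive character, applying fixes and returning the result directly (a sketch only; the SQL string is illustrative):

```python
import sqlfluff

# Like the new CLI default (formerly `--force`), the simple API applies
# fixes without an interactive confirmation step and returns a string.
fixed = sqlfluff.fix("SELECT a  ,  b FROM tbl", dialect="ansi")
print(fixed)  # expect something like: SELECT a, b FROM tbl
```

On the CLI itself, a prompt-style preview workflow is intended to remain available via the new `--check` flag (see [#5650](https://github.com/sqlfluff/sqlfluff/pull/5650) below).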
This release should be considered a release candidate for the final `3.0.0` release which will follow shortly in the next few days unless any other major issues are found. Thanks particularly to the **seven** new contributors we saw in this release 🏆🎉. ## What’s Changed * Add Support for Databricks `CREATE FUNCTION` Syntax in SparkSQL Parser [#5615](https://github.com/sqlfluff/sqlfluff/pull/5615) [@mitchellvanrijkom](https://github.com/mitchellvanrijkom) * Swap fix `--force` for `--check` [#5650](https://github.com/sqlfluff/sqlfluff/pull/5650) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove `DeprecatedOption` [#5649](https://github.com/sqlfluff/sqlfluff/pull/5649) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve broken loop limit test [#5651](https://github.com/sqlfluff/sqlfluff/pull/5651) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Move NOTIFY to non-reserved words [#5645](https://github.com/sqlfluff/sqlfluff/pull/5645) [@greg-finley](https://github.com/greg-finley) * BigQuery: GROUP BY ALL [#5646](https://github.com/sqlfluff/sqlfluff/pull/5646) [@greg-finley](https://github.com/greg-finley) * chore: use pre-calculated `_code_indices` in `BaseSegment::raw_segmen… [#5644](https://github.com/sqlfluff/sqlfluff/pull/5644) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Fix Snowflake Semistructured identifier parsing regex-expression [#5635](https://github.com/sqlfluff/sqlfluff/pull/5635) [@DannyMor](https://github.com/DannyMor) * Postgres: Update ReferentialActionGrammar to support sets of columns [#5628](https://github.com/sqlfluff/sqlfluff/pull/5628) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add syntax for masking policy force [#5629](https://github.com/sqlfluff/sqlfluff/pull/5629) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Allow nested block comments [#5630](https://github.com/sqlfluff/sqlfluff/pull/5630) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Add Create, Alter, Drop Statistics [#5631](https://github.com/sqlfluff/sqlfluff/pull/5631) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL Fix relative sql filepath lexer [#5632](https://github.com/sqlfluff/sqlfluff/pull/5632) [@WittierDinosaur](https://github.com/WittierDinosaur) * Tech Debt: Replace some sequences with their Ref equivalents [#5633](https://github.com/sqlfluff/sqlfluff/pull/5633) [@WittierDinosaur](https://github.com/WittierDinosaur) * ANSI/MYSQL: Support Create Role If Not Exists [#5634](https://github.com/sqlfluff/sqlfluff/pull/5634) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add Vertica dialect [#5640](https://github.com/sqlfluff/sqlfluff/pull/5640) [@PolitePp](https://github.com/PolitePp) * Add Support for Snowflake Materialised View and Column Masking Policy [#5637](https://github.com/sqlfluff/sqlfluff/pull/5637) [@ulixius9](https://github.com/ulixius9) * [snowflake dialect] support ALTER TABLE ... 
ADD COLUMN IF NOT EXISTS [#5621](https://github.com/sqlfluff/sqlfluff/pull/5621) [@gshen7](https://github.com/gshen7) * SQLite: Make `DISTINCT FROM` optional; SQLite/TSQL/Exasol: Nothing'd `NanLiteralSegment` [#5620](https://github.com/sqlfluff/sqlfluff/pull/5620) [@keraion](https://github.com/keraion) * Upgrade greenplum dialect [#5546](https://github.com/sqlfluff/sqlfluff/pull/5546) [@kkozhakin](https://github.com/kkozhakin) * Oracle: parse length qualifier in types [#5613](https://github.com/sqlfluff/sqlfluff/pull/5613) [@Jefffrey](https://github.com/Jefffrey) * Multiple Dialects: Fix handling of nested sets expressions [#5606](https://github.com/sqlfluff/sqlfluff/pull/5606) [@keraion](https://github.com/keraion) * DB2: Add labeled durations and special registers [#5612](https://github.com/sqlfluff/sqlfluff/pull/5612) [@keraion](https://github.com/keraion) * Sparksql: Fix `LATERAL VIEW` following `JOIN`; `CLUSTER|SORT|DISTRIBUTE BY` or `QUALIFY` without `FROM` [#5602](https://github.com/sqlfluff/sqlfluff/pull/5602) [@keraion](https://github.com/keraion) * File helpers and config test parameterisation. [#5579](https://github.com/sqlfluff/sqlfluff/pull/5579) [@alanmcruickshank](https://github.com/alanmcruickshank) * Memory overhead optimisations during linting [#5585](https://github.com/sqlfluff/sqlfluff/pull/5585) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix: multiple columns foreign key constraint (#5592) [#5594](https://github.com/sqlfluff/sqlfluff/pull/5594) [@maoxingda](https://github.com/maoxingda) * [CI] Add `no_implicit_reexport` mypy check [#5509](https://github.com/sqlfluff/sqlfluff/pull/5509) [@Koyaani](https://github.com/Koyaani) ## New Contributors * [@maoxingda](https://github.com/maoxingda) made their first contribution in [#5594](https://github.com/sqlfluff/sqlfluff/pull/5594) * [@Jefffrey](https://github.com/Jefffrey) made their first contribution in [#5613](https://github.com/sqlfluff/sqlfluff/pull/5613) * [@kkozhakin](https://github.com/kkozhakin) made their first contribution in [#5546](https://github.com/sqlfluff/sqlfluff/pull/5546) * [@gshen7](https://github.com/gshen7) made their first contribution in [#5621](https://github.com/sqlfluff/sqlfluff/pull/5621) * [@PolitePp](https://github.com/PolitePp) made their first contribution in [#5640](https://github.com/sqlfluff/sqlfluff/pull/5640) * [@DannyMor](https://github.com/DannyMor) made their first contribution in [#5635](https://github.com/sqlfluff/sqlfluff/pull/5635) * [@mitchellvanrijkom](https://github.com/mitchellvanrijkom) made their first contribution in [#5615](https://github.com/sqlfluff/sqlfluff/pull/5615) ## [3.0.0a5] - 2024-01-30 ## Highlights This release primarily brings through a large set of dialect improvements and bugfixes from over the holiday period. Notably also: * A change in the default behaviour for `convention.not_equals`. The new default is to be `consistent`, which is slightly more relaxed than the original behaviour. * A new rule (`aliasing.self_alias.column`) which prevents aliasing a column as itself. * Disables `AL01` (`aliasing.table`) by default for Oracle. This release also saw **ELEVEN** new contributors 🎉🎉🏆🏆🎉🎉. Great to see so many new people getting involved with the project. Thank You 🙏.
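As a quick sketch of the new self-alias rule through the simple Python API (the query string and table name are illustrative):

```python
import sqlfluff

# `aliasing.self_alias.column` (AL09) flags a column aliased back to itself.
violations = sqlfluff.lint(
    "SELECT foo AS foo FROM my_table\n",
    dialect="ansi",
    rules=["AL09"],
)
for violation in violations:
    # Since the 3.0 serialisation changes, position fields are named
    # `start_line_no` / `start_line_pos` (with matching `end_*` fields).
    print(violation["code"], violation["description"])
```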
## What’s Changed * Add support & test for postgres alter policy with multiple clauses [#5577](https://github.com/sqlfluff/sqlfluff/pull/5577) [@fnimick](https://github.com/fnimick) * Update github actions to latest versions [#5584](https://github.com/sqlfluff/sqlfluff/pull/5584) [@alanmcruickshank](https://github.com/alanmcruickshank) * Allows using dbt cross project ref in jinja templater [#5574](https://github.com/sqlfluff/sqlfluff/pull/5574) [@alangner](https://github.com/alangner) * Improve support for Jinja templater plugins with custom tags [#5543](https://github.com/sqlfluff/sqlfluff/pull/5543) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Databricks: fix `EXCEPT` with qualified column reference [#5557](https://github.com/sqlfluff/sqlfluff/pull/5557) [@keraion](https://github.com/keraion) * Stricter recommended config for not_equals convention [#5580](https://github.com/sqlfluff/sqlfluff/pull/5580) [@alanmcruickshank](https://github.com/alanmcruickshank) * CV01: Add options for ANSI and consistent style. [#5539](https://github.com/sqlfluff/sqlfluff/pull/5539) [@keraion](https://github.com/keraion) * DuckDB: Fix `REPLACE` after `EXCLUDE`. Fix AL03 linting for wildcard like expression. [#5556](https://github.com/sqlfluff/sqlfluff/pull/5556) [@keraion](https://github.com/keraion) * Clickhouse: Add `GLOBAL JOIN`, `GLOBAL IN`, and `PASTE JOIN` [#5560](https://github.com/sqlfluff/sqlfluff/pull/5560) [@keraion](https://github.com/keraion) * [Docs] Use extended policy for identifier capitalisation in starter config [#5562](https://github.com/sqlfluff/sqlfluff/pull/5562) [@j-svensmark](https://github.com/j-svensmark) * Build: linting black 24.1.0 rules update [#5573](https://github.com/sqlfluff/sqlfluff/pull/5573) [@keraion](https://github.com/keraion) * Snowflake: Updating Snowflake dialect to pass acceptable RLS policy objects [#5559](https://github.com/sqlfluff/sqlfluff/pull/5559) [@k1drobot](https://github.com/k1drobot) * Redshift Syntax: ALTER APPEND [#5545](https://github.com/sqlfluff/sqlfluff/pull/5545) [@OTooleMichael](https://github.com/OTooleMichael) * DuckDB: Add ANTI, SEMI, ASOF, and POSITIONAL joins [#5544](https://github.com/sqlfluff/sqlfluff/pull/5544) [@keraion](https://github.com/keraion) * MySQL: fix FIRST keyword in ALTER ADD/MODIFY [#5537](https://github.com/sqlfluff/sqlfluff/pull/5537) [@archer62](https://github.com/archer62) * Postgres/DB2/Oracle: Fix comma join `LATERAL`. [#5533](https://github.com/sqlfluff/sqlfluff/pull/5533) [@keraion](https://github.com/keraion) * Add new Rule AL09: Avoid Self Alias [#5528](https://github.com/sqlfluff/sqlfluff/pull/5528) [@aayushr7](https://github.com/aayushr7) * Rule AL01: disabled for Oracle dialect [#5517](https://github.com/sqlfluff/sqlfluff/pull/5517) [@keraion](https://github.com/keraion) * Postgres ALTER EXTENSION support: dialect & tests [#5527](https://github.com/sqlfluff/sqlfluff/pull/5527) [@remy-gohiring](https://github.com/remy-gohiring) * SparkSQL: Add `UNPIVOT` syntax. Fix `TABLESAMPLE` aliases. 
[#5524](https://github.com/sqlfluff/sqlfluff/pull/5524) [@keraion](https://github.com/keraion) * DuckDB: Added support for `PIVOT` and `UNPIVOT` [#5514](https://github.com/sqlfluff/sqlfluff/pull/5514) [@keraion](https://github.com/keraion) * Fix parse error databricks window function starts with order by [#5493](https://github.com/sqlfluff/sqlfluff/pull/5493) [@snkekorfus](https://github.com/snkekorfus) * Hive: allow UDTF to return multiple column aliases in SELECT [#5495](https://github.com/sqlfluff/sqlfluff/pull/5495) [@reata](https://github.com/reata) * DB2: Add support for `DECLARE GLOBAL TEMPORARY TABLES`, `OFFSET`, `CALL`, and non-bracketed `VALUES` [#5508](https://github.com/sqlfluff/sqlfluff/pull/5508) [@keraion](https://github.com/keraion) * DuckDB: Add CREATE OR REPLACE TABLE syntax [#5511](https://github.com/sqlfluff/sqlfluff/pull/5511) [@keraion](https://github.com/keraion) * TSQL: Top and Distinct in same query [#5491](https://github.com/sqlfluff/sqlfluff/pull/5491) [@greg-finley](https://github.com/greg-finley) * [Spark/Databricks] Fix: make COLUMNS in APPLY CHANGES INTO optional [#5498](https://github.com/sqlfluff/sqlfluff/pull/5498) [@rocwang](https://github.com/rocwang) * SparkSQL: exclamation mark as logical not [#5500](https://github.com/sqlfluff/sqlfluff/pull/5500) [@reata](https://github.com/reata) * SparkSQL: allow value in set_statement to be Java class name [#5504](https://github.com/sqlfluff/sqlfluff/pull/5504) [@reata](https://github.com/reata) * SparkSQL: allow distribute/sort/cluster by at end of set operation [#5502](https://github.com/sqlfluff/sqlfluff/pull/5502) [@reata](https://github.com/reata) * [CI] Add a few more mypy checks [#5505](https://github.com/sqlfluff/sqlfluff/pull/5505) [@Koyaani](https://github.com/Koyaani) * Snowflake dialect: Add support for DATABASE ROLE in GRANT/REVOKE [#5490](https://github.com/sqlfluff/sqlfluff/pull/5490) [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) * DuckDB: Qualify and From-First [#5485](https://github.com/sqlfluff/sqlfluff/pull/5485) [@keraion](https://github.com/keraion) * MySql: create table: allow null/not null in any position [#5473](https://github.com/sqlfluff/sqlfluff/pull/5473) [@archer62](https://github.com/archer62) * Snowflake dialect: Support for CREATE DATABASE ROLE [#5475](https://github.com/sqlfluff/sqlfluff/pull/5475) [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) * Clickhouse Dialect - Support BackQuoted Identifiers [#5457](https://github.com/sqlfluff/sqlfluff/pull/5457) [@kaiyannameighu](https://github.com/kaiyannameighu) * Change Color.lightgrey to have a white background - dark theme friendly [#5458](https://github.com/sqlfluff/sqlfluff/pull/5458) [@ryaminal](https://github.com/ryaminal) * Fix indentation for single cube clause [#5462](https://github.com/sqlfluff/sqlfluff/pull/5462) [@tunetheweb](https://github.com/tunetheweb) ## New Contributors * [@ryaminal](https://github.com/ryaminal) made their first contribution in [#5458](https://github.com/sqlfluff/sqlfluff/pull/5458) * [@sfc-gh-dgupta](https://github.com/sfc-gh-dgupta) made their first contribution in [#5475](https://github.com/sqlfluff/sqlfluff/pull/5475) * [@archer62](https://github.com/archer62) made their first contribution in [#5473](https://github.com/sqlfluff/sqlfluff/pull/5473) * [@keraion](https://github.com/keraion) made their first contribution in [#5485](https://github.com/sqlfluff/sqlfluff/pull/5485) * [@Koyaani](https://github.com/Koyaani) made their first contribution in 
[#5505](https://github.com/sqlfluff/sqlfluff/pull/5505) * [@snkekorfus](https://github.com/snkekorfus) made their first contribution in [#5493](https://github.com/sqlfluff/sqlfluff/pull/5493) * [@remy-gohiring](https://github.com/remy-gohiring) made their first contribution in [#5527](https://github.com/sqlfluff/sqlfluff/pull/5527) * [@aayushr7](https://github.com/aayushr7) made their first contribution in [#5528](https://github.com/sqlfluff/sqlfluff/pull/5528) * [@k1drobot](https://github.com/k1drobot) made their first contribution in [#5559](https://github.com/sqlfluff/sqlfluff/pull/5559) * [@alangner](https://github.com/alangner) made their first contribution in [#5574](https://github.com/sqlfluff/sqlfluff/pull/5574) * [@fnimick](https://github.com/fnimick) made their first contribution in [#5577](https://github.com/sqlfluff/sqlfluff/pull/5577) ## [3.0.0a4] - 2023-12-05 ## Highlights This release makes a breaking change to the serialized output of the CLI (and by extension, any of the serialized outputs of the API). * The serialised output for `sqlfluff lint` now contains more information about the span of linting issues and initial proposed fixes. Besides the *new* fields, the original fields of `line_pos` and `line_no` have been renamed to `start_line_pos` and `start_line_no`, to distinguish them from the new fields starting `end_*`. * The default `annotation_level` set by the `--annotation-level` option on the `sqlfluff lint` command has been changed from `notice` to `warning`, to better distinguish linting errors from warnings, which now always have the level of `notice`. This is only relevant when using the `github-annotation` or `github-annotation-native` formats. ## What’s Changed * Build out rule and fix serialisation [#5364](https://github.com/sqlfluff/sqlfluff/pull/5364) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add warning about github actions annotations limit [#5450](https://github.com/sqlfluff/sqlfluff/pull/5450) [@alanmcruickshank](https://github.com/alanmcruickshank) * chore: remove unused line initialization in ParseContext [#5448](https://github.com/sqlfluff/sqlfluff/pull/5448) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Refine CLI testing fixture [#5446](https://github.com/sqlfluff/sqlfluff/pull/5446) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update black linting [#5447](https://github.com/sqlfluff/sqlfluff/pull/5447) [@alanmcruickshank](https://github.com/alanmcruickshank) ## [3.0.0a3] - 2023-11-29 ## Highlights This brings no further _breaking_ changes on top of `3.0.0a2`, but instead releases a few of the more minor fixes flowing through while other breaking changes are staged in. In particular there are a few dialect improvements for Snowflake, TSQL, SQLite and Databricks, alongside a few further improvements to deployment scripts off the back of earlier changes for `3.x`.
## What’s Changed * fix assertion in `test__api__lint_string_specific_exclude_single` [#5437](https://github.com/sqlfluff/sqlfluff/pull/5437) [@gvozdvmozgu](https://github.com/gvozdvmozgu) * Databricks CLUSTER BY and OPTIMIZE [#5436](https://github.com/sqlfluff/sqlfluff/pull/5436) [@greg-finley](https://github.com/greg-finley) * TSQL: ON DELETE NO ACTION [#5434](https://github.com/sqlfluff/sqlfluff/pull/5434) [@greg-finley](https://github.com/greg-finley) * Snowflake dynamic table [#5435](https://github.com/sqlfluff/sqlfluff/pull/5435) [@greg-finley](https://github.com/greg-finley) * Support parsing CONSTRAINT definitions when creating Delta Live Tables in SparkSQL/Databricks [#5438](https://github.com/sqlfluff/sqlfluff/pull/5438) [@rocwang](https://github.com/rocwang) * adds few fixes for databricks/sparksql [#5431](https://github.com/sqlfluff/sqlfluff/pull/5431) [@markbaas](https://github.com/markbaas) * TSQL: CREATE USER {FOR|FROM} LOGIN [#5426](https://github.com/sqlfluff/sqlfluff/pull/5426) [@greg-finley](https://github.com/greg-finley) * Snowflake Create table order/noorder [#5421](https://github.com/sqlfluff/sqlfluff/pull/5421) [@greg-finley](https://github.com/greg-finley) * Simplify Snowflake regexes [#5419](https://github.com/sqlfluff/sqlfluff/pull/5419) [@greg-finley](https://github.com/greg-finley) * Permit .* after each tbl_name in multi-table delete syntax [#5408](https://github.com/sqlfluff/sqlfluff/pull/5408) [@yoichi](https://github.com/yoichi) * Fix snowflake add search optimization grant [#5412](https://github.com/sqlfluff/sqlfluff/pull/5412) [@jongracecox](https://github.com/jongracecox) * ANSI: Allow combination of UNION clause and WITH clause [#5413](https://github.com/sqlfluff/sqlfluff/pull/5413) [@yoichi](https://github.com/yoichi) * SQLite: Allow block comments to be terminated by end of input [#5400](https://github.com/sqlfluff/sqlfluff/pull/5400) [@WittierDinosaur](https://github.com/WittierDinosaur) * Update publish github action to use kebab case [#5392](https://github.com/sqlfluff/sqlfluff/pull/5392) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@jongracecox](https://github.com/jongracecox) made their first contribution in [#5412](https://github.com/sqlfluff/sqlfluff/pull/5412) * [@markbaas](https://github.com/markbaas) made their first contribution in [#5431](https://github.com/sqlfluff/sqlfluff/pull/5431) * [@rocwang](https://github.com/rocwang) made their first contribution in [#5438](https://github.com/sqlfluff/sqlfluff/pull/5438) * [@gvozdvmozgu](https://github.com/gvozdvmozgu) made their first contribution in [#5437](https://github.com/sqlfluff/sqlfluff/pull/5437) ## [3.0.0a2] - 2023-11-09 ## Highlights The initial 3.0.0a1 release failed to build a docker image; this release resolves that issue. ## What’s Changed * Update publish actions and Dockerfile. [#5390](https://github.com/sqlfluff/sqlfluff/pull/5390) [@alanmcruickshank](https://github.com/alanmcruickshank) ## [3.0.0a1] - 2023-11-08 ## Highlights This release makes a couple of potentially breaking changes: * It drops support for python 3.7, which reached end of life in June 2023. * It migrates to `pyproject.toml` rather than `setup.cfg` as the python packaging configuration file (although keeping `setuptools` as the default backend). Further breaking changes may be made as part of the full 3.0.0 release, but this alpha release is designed to test the new packaging changes for any issues before releasing a stable version.
## What’s Changed * Move the rest of pytest over to `pyproject.toml` [#5383](https://github.com/sqlfluff/sqlfluff/pull/5383) [@alanmcruickshank](https://github.com/alanmcruickshank) * Move doc8 over to pyproject [#5385](https://github.com/sqlfluff/sqlfluff/pull/5385) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove exceptions for typing stubs that now exist. [#5382](https://github.com/sqlfluff/sqlfluff/pull/5382) [@alanmcruickshank](https://github.com/alanmcruickshank) * Migrate to `pyproject.toml` for the core project. [#5373](https://github.com/sqlfluff/sqlfluff/pull/5373) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix the snippet from a pyproject.toml in configuration.rst [#5378](https://github.com/sqlfluff/sqlfluff/pull/5378) [@ishiis](https://github.com/ishiis) * Snowflake: Support AlterNetworkPolicy Statements [#5377](https://github.com/sqlfluff/sqlfluff/pull/5377) [@WittierDinosaur](https://github.com/WittierDinosaur) * postgres: add support for bodies of "language sql" functions [#5376](https://github.com/sqlfluff/sqlfluff/pull/5376) [@65278](https://github.com/65278) * Add support for SET NAMES statement in MySQL [#5374](https://github.com/sqlfluff/sqlfluff/pull/5374) [@joaostorrer](https://github.com/joaostorrer) * Fix GRANT ALL PRIVILEGES statement in MySQL [#5375](https://github.com/sqlfluff/sqlfluff/pull/5375) [@joaostorrer](https://github.com/joaostorrer) * Another extraction of fixing logic. [#5365](https://github.com/sqlfluff/sqlfluff/pull/5365) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove root requirements.txt [#5372](https://github.com/sqlfluff/sqlfluff/pull/5372) [@alanmcruickshank](https://github.com/alanmcruickshank) * Drop support of python 3.7 [#5288](https://github.com/sqlfluff/sqlfluff/pull/5288) [@zhongjiajie](https://github.com/zhongjiajie) * API configuration documentation [#5369](https://github.com/sqlfluff/sqlfluff/pull/5369) [@golergka](https://github.com/golergka) * add listagg extras support in trino dialect [#5368](https://github.com/sqlfluff/sqlfluff/pull/5368) [@wjhrdy](https://github.com/wjhrdy) * Allow ignoring of comments from indentation entirely #3311 [#5363](https://github.com/sqlfluff/sqlfluff/pull/5363) [@alanmcruickshank](https://github.com/alanmcruickshank) * Trino: Support Analyze statements [#5361](https://github.com/sqlfluff/sqlfluff/pull/5361) [@WittierDinosaur](https://github.com/WittierDinosaur) * Resolve #5327 (logging to stdout) [#5362](https://github.com/sqlfluff/sqlfluff/pull/5362) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@wjhrdy](https://github.com/wjhrdy) made their first contribution in [#5368](https://github.com/sqlfluff/sqlfluff/pull/5368) * [@golergka](https://github.com/golergka) made their first contribution in [#5369](https://github.com/sqlfluff/sqlfluff/pull/5369) * [@65278](https://github.com/65278) made their first contribution in [#5376](https://github.com/sqlfluff/sqlfluff/pull/5376) * [@ishiis](https://github.com/ishiis) made their first contribution in [#5378](https://github.com/sqlfluff/sqlfluff/pull/5378) ## [2.3.5] - 2023-10-27 ## Highlights This is a fairly minor release, primarily bugfixes and dialect improvements. For python API users, there's the addition of a public method on the `FluffConfig` object allowing the construction of a config object from multiple strings to mimic the effect of nested config files in the CLI. 
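A sketch of how that can be used, assuming the new public method is `FluffConfig.from_strings` as added in [#5349](https://github.com/sqlfluff/sqlfluff/pull/5349) (check the API documentation for the exact signature; the config fragments here are illustrative):

```python
from sqlfluff.core import FluffConfig

# Later strings take precedence over earlier ones, mimicking the way a
# nested `.sqlfluff` file overrides one further up the directory tree.
config = FluffConfig.from_strings(
    "[sqlfluff]\ndialect = ansi\n",
    "[sqlfluff]\ndialect = postgres\n",  # the "nested" override
)
print(config.get("dialect"))  # expected: postgres
```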
This release also includes a selection of internal refactoring and reorganisation to support future development work. This also sees the first contributions by [@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000) & [@kang8](https://github.com/kang8), particularly notable in that both were contributions to SQLFluff documentation! 🎉🎉🏆🎉🎉 ## What’s Changed * One (very) small typing improvements [#5355](https://github.com/sqlfluff/sqlfluff/pull/5355) [@alanmcruickshank](https://github.com/alanmcruickshank) * Unpick dependencies between modules in `sqlfluff.core` [#5348](https://github.com/sqlfluff/sqlfluff/pull/5348) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve SparkSQL re-parsing issue + test validation in test suite. [#5351](https://github.com/sqlfluff/sqlfluff/pull/5351) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Support ALTER MASKING POLICY [#5350](https://github.com/sqlfluff/sqlfluff/pull/5350) [@jmks](https://github.com/jmks) * Add a public API for nesting config strings. [#5349](https://github.com/sqlfluff/sqlfluff/pull/5349) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update handling of dbt compilation errors [#5345](https://github.com/sqlfluff/sqlfluff/pull/5345) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake - Extend Column Default Constraint [#5343](https://github.com/sqlfluff/sqlfluff/pull/5343) [@WittierDinosaur](https://github.com/WittierDinosaur) * Fix the dbt anchor link in the realworld documentation [#5341](https://github.com/sqlfluff/sqlfluff/pull/5341) [@kang8](https://github.com/kang8) * Update README.md [#5340](https://github.com/sqlfluff/sqlfluff/pull/5340) [@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000) * Logic to render variants of Jinja templates for more coverage. [#5339](https://github.com/sqlfluff/sqlfluff/pull/5339) [@alanmcruickshank](https://github.com/alanmcruickshank) * Templater slicing refactoring of `RawFileSlice` [#5338](https://github.com/sqlfluff/sqlfluff/pull/5338) [@alanmcruickshank](https://github.com/alanmcruickshank) * BigQuery: Support multiple statements in the `BEGIN..EXCEPTION..END` [#5322](https://github.com/sqlfluff/sqlfluff/pull/5322) [@abdel](https://github.com/abdel) * Remove codecov traces [#5337](https://github.com/sqlfluff/sqlfluff/pull/5337) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000) made their first contribution in [#5340](https://github.com/sqlfluff/sqlfluff/pull/5340) * [@kang8](https://github.com/kang8) made their first contribution in [#5341](https://github.com/sqlfluff/sqlfluff/pull/5341) ## [2.3.4] - 2023-10-17 ## Highlights This is a fairly small bugfix release, mostly to resolve a bug introduced in 2.3.3 with commas and LT09. This also includes a couple of additional small performance improvements and some dialect improvements for Oracle, BigQuery and MySQL. Thanks in particular to [@bonnal-enzo](https://github.com/bonnal-enzo) who made their first contribution as part of this release 🎉🎉🏆🎉🎉. 
## What’s Changed * Commas fix in LT09 [#5335](https://github.com/sqlfluff/sqlfluff/pull/5335) [@alanmcruickshank](https://github.com/alanmcruickshank) * UUID Comparisons [#5332](https://github.com/sqlfluff/sqlfluff/pull/5332) [@alanmcruickshank](https://github.com/alanmcruickshank) * Two depth map performance improvements [#5333](https://github.com/sqlfluff/sqlfluff/pull/5333) [@alanmcruickshank](https://github.com/alanmcruickshank) * Stash parent idx with parent reference [#5331](https://github.com/sqlfluff/sqlfluff/pull/5331) [@alanmcruickshank](https://github.com/alanmcruickshank) * `Set` to `FrozenSet` in segment class_types [#5334](https://github.com/sqlfluff/sqlfluff/pull/5334) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add support for ANY_VALUE( _ HAVING MIN/MAX _ ) to BigQuery dialect [#5321](https://github.com/sqlfluff/sqlfluff/pull/5321) [@bonnal-enzo](https://github.com/bonnal-enzo) * Fix parsing error when using quoted slash in Oracle [#5323](https://github.com/sqlfluff/sqlfluff/pull/5323) [@joaostorrer](https://github.com/joaostorrer) * Add support for functions and procedures calls via database link in Oracle [#5326](https://github.com/sqlfluff/sqlfluff/pull/5326) [@joaostorrer](https://github.com/joaostorrer) * Fix parsing error with table name '_' in MySQL [#5324](https://github.com/sqlfluff/sqlfluff/pull/5324) [@joaostorrer](https://github.com/joaostorrer) ## New Contributors * [@bonnal-enzo](https://github.com/bonnal-enzo) made their first contribution in [#5321](https://github.com/sqlfluff/sqlfluff/pull/5321) ## [2.3.3] - 2023-10-13 ## Highlights There's a *lot* in this release. Most of it is under the covers and so shouldn't cause any breaking changes for most users. If your use case depends on some of the internals of SQLFluff, you may find some breaking changes. The bigger changes are: - Python 3.12 support is now official (although older releases may also work, as only a few changes were required for full 3.12 support). - We've done a significant re-write of the parsing engine to remove some unnecessary segment manipulation and get us closer to "single pass" parsing. This changes the internal API being used on any `.match()` methods, and also removes the `parse_grammar` attribute on any dialect segments. We are not aware of any 3rd party libraries which rely on these APIs, however, and so have not triggered a more major release. These changes lead to significant performance improvements during parsing. - Standardisation of terminators in the parser, and the introduction of the `ParseMode` option, have enabled the removal of the `StartsWith`, `GreedyUntil` and `EphemeralSegment` parser classes. - Several validation checks have been revised in this release, which should both improve performance (by reducing duplication) and be more effective in preventing the application of any fixes which would result in unparsable files. Alongside the big things this also includes a host of bugfixes, dialect improvements and CI/testing improvements. This release also sees a bumper crop of new contributors, thanks to [@dehume](https://github.com/dehume), [@andychannery](https://github.com/andychannery), [@Kylea650](https://github.com/Kylea650), [@robin-alphasophia](https://github.com/robin-alphasophia), [@jtbg](https://github.com/jtbg), [@r-petit](https://github.com/r-petit), [@bpfaust](https://github.com/bpfaust) & [@freewaydev](https://github.com/freewaydev) who all made their first contributions in this release!
🎉🎉🎉 ## What’s Changed * Oracle space between alias and column reference [#5313](https://github.com/sqlfluff/sqlfluff/pull/5313) [@joaostorrer](https://github.com/joaostorrer) * Don't apply LT05 on templated rebreak locations #5096 [#5318](https://github.com/sqlfluff/sqlfluff/pull/5318) [@alanmcruickshank](https://github.com/alanmcruickshank) * Disable JJ01 unless jinja active [#5319](https://github.com/sqlfluff/sqlfluff/pull/5319) [@alanmcruickshank](https://github.com/alanmcruickshank) * Cache the `BaseSegment` hash in reflow [#5320](https://github.com/sqlfluff/sqlfluff/pull/5320) [@alanmcruickshank](https://github.com/alanmcruickshank) * Better error reporting for invalid macros [#5317](https://github.com/sqlfluff/sqlfluff/pull/5317) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add support for begin atomic functions in Postgres [#5316](https://github.com/sqlfluff/sqlfluff/pull/5316) [@joaostorrer](https://github.com/joaostorrer) * Fix parsing when statement uses plus_sign_join and function in Oracle [#5315](https://github.com/sqlfluff/sqlfluff/pull/5315) [@joaostorrer](https://github.com/joaostorrer) * Update rule docs with correct config [#5314](https://github.com/sqlfluff/sqlfluff/pull/5314) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #5258. More robust algorithm for multiline fix. [#5309](https://github.com/sqlfluff/sqlfluff/pull/5309) [@alanmcruickshank](https://github.com/alanmcruickshank) * BigQuery: Add support for `BEGIN..EXCEPTION...END` block [#5307](https://github.com/sqlfluff/sqlfluff/pull/5307) [@abdel](https://github.com/abdel) * Refine placement of metas around templated blocks [#5294](https://github.com/sqlfluff/sqlfluff/pull/5294) [@alanmcruickshank](https://github.com/alanmcruickshank) * Extend ruff checking to docstring rules [#5302](https://github.com/sqlfluff/sqlfluff/pull/5302) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix for strange TSQL bugs [#5306](https://github.com/sqlfluff/sqlfluff/pull/5306) [@alanmcruickshank](https://github.com/alanmcruickshank) * Staging PR for #5282 [#5305](https://github.com/sqlfluff/sqlfluff/pull/5305) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve some strange whitespace indentation behaviour [#5292](https://github.com/sqlfluff/sqlfluff/pull/5292) [@alanmcruickshank](https://github.com/alanmcruickshank) * Simplify `_process_lint_result` [#5304](https://github.com/sqlfluff/sqlfluff/pull/5304) [@alanmcruickshank](https://github.com/alanmcruickshank) * Performance improvement on segment comparison [#5303](https://github.com/sqlfluff/sqlfluff/pull/5303) [@alanmcruickshank](https://github.com/alanmcruickshank) * Refactor LT09 [#5299](https://github.com/sqlfluff/sqlfluff/pull/5299) [@alanmcruickshank](https://github.com/alanmcruickshank) * Change drop function to allow DropBehaviourGrammar with space after function name [#5295](https://github.com/sqlfluff/sqlfluff/pull/5295) [@joaostorrer](https://github.com/joaostorrer) * Resolve click import options on autocomplete [#5293](https://github.com/sqlfluff/sqlfluff/pull/5293) [@alanmcruickshank](https://github.com/alanmcruickshank) * Updated docstrings with missing args/returns/etc info, added missing docstrings, minor formatting fixes. 
* Use ruff rule I replace isort [#5289](https://github.com/sqlfluff/sqlfluff/pull/5289) [@zhongjiajie](https://github.com/zhongjiajie)
* Snowflake: Parse ALTER DATABASE statement [#5284](https://github.com/sqlfluff/sqlfluff/pull/5284) [@jmks](https://github.com/jmks)
* Snowflake: Parse ALTER ACCOUNT statements [#5283](https://github.com/sqlfluff/sqlfluff/pull/5283) [@jmks](https://github.com/jmks)
* Snowflake: create AlterProcedureStatementSegment [#5291](https://github.com/sqlfluff/sqlfluff/pull/5291) [@moreaupascal56](https://github.com/moreaupascal56)
* Rewrite of matching interface [#5230](https://github.com/sqlfluff/sqlfluff/pull/5230) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Follow noqa in block comments [#5133](https://github.com/sqlfluff/sqlfluff/pull/5133) [@daviewales](https://github.com/daviewales)
* Fix insert on conflict with function in Postgres [#5286](https://github.com/sqlfluff/sqlfluff/pull/5286) [@joaostorrer](https://github.com/joaostorrer)
* Add support for Pivot and Unpivot clauses in Oracle [#5285](https://github.com/sqlfluff/sqlfluff/pull/5285) [@joaostorrer](https://github.com/joaostorrer)
* Adding "create table as" for greenplum dialect [#5173](https://github.com/sqlfluff/sqlfluff/pull/5173) [@bpfaust](https://github.com/bpfaust)
* Update CI to python 3.12 [#5267](https://github.com/sqlfluff/sqlfluff/pull/5267) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Add CreateResourceMonitorStatementSegment & AlterResourceMonitorStatementSegment [#5272](https://github.com/sqlfluff/sqlfluff/pull/5272) [@moreaupascal56](https://github.com/moreaupascal56)
* [TSQL] Add create fulltext index statement segment class [#5274](https://github.com/sqlfluff/sqlfluff/pull/5274) [@r-petit](https://github.com/r-petit)
* Snowflake: Add CreateSequenceStatementSegment & AlterSequenceStatementSegment [#5270](https://github.com/sqlfluff/sqlfluff/pull/5270) [@moreaupascal56](https://github.com/moreaupascal56)
* Add CommaSegment to AlterWarehouseStatementSegment SET clause [#5268](https://github.com/sqlfluff/sqlfluff/pull/5268) [@moreaupascal56](https://github.com/moreaupascal56)
* Snowflake: Parse EXECUTE IMMEDIATE clause [#5275](https://github.com/sqlfluff/sqlfluff/pull/5275) [@jmks](https://github.com/jmks)
* TSQL: Add missing `HISTORY_RETENTION_PERIOD` sequence to the table option segment [#5273](https://github.com/sqlfluff/sqlfluff/pull/5273) [@r-petit](https://github.com/r-petit)
* Snowflake: Fix ScalingPolicy and WarehouseType Refs in WarehouseObjectProperties and use ObjectReferenceSegment in AlterWarehouseStatementSegment [#5264](https://github.com/sqlfluff/sqlfluff/pull/5264) [@moreaupascal56](https://github.com/moreaupascal56)
* Finish the removal of `GreedyUntil` [#5263](https://github.com/sqlfluff/sqlfluff/pull/5263) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for date operations with intervals in Oracle [#5262](https://github.com/sqlfluff/sqlfluff/pull/5262) [@joaostorrer](https://github.com/joaostorrer)
* Change RawSegment `type` to `instance_types` [#5253](https://github.com/sqlfluff/sqlfluff/pull/5253) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Revise MatchableType -> Matchable [#5252](https://github.com/sqlfluff/sqlfluff/pull/5252) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Unnest fixing and re-address validation triggers [#5249](https://github.com/sqlfluff/sqlfluff/pull/5249) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolves #5174: Snowflake alter table constraint [#5247](https://github.com/sqlfluff/sqlfluff/pull/5247) [@andychannery](https://github.com/andychannery)
* Bring together the generic segments [#5243](https://github.com/sqlfluff/sqlfluff/pull/5243) [@alanmcruickshank](https://github.com/alanmcruickshank)
* minor: update docs with correct link to airflow ds_filter [#5244](https://github.com/sqlfluff/sqlfluff/pull/5244) [@jtbg](https://github.com/jtbg)
* #5245 - Snowflake dialect: Adds support for variable definitions in scripting blocks [#5246](https://github.com/sqlfluff/sqlfluff/pull/5246) [@robin-alphasophia](https://github.com/robin-alphasophia)
* Introduce "word" segment [#5234](https://github.com/sqlfluff/sqlfluff/pull/5234) [@alanmcruickshank](https://github.com/alanmcruickshank)
* #5239 Added (basic) support for properly linted Snowflake scripting [#5242](https://github.com/sqlfluff/sqlfluff/pull/5242) [@robin-alphasophia](https://github.com/robin-alphasophia)
* Allow Snowflake pipe integration to be a quoted or unquoted [#5241](https://github.com/sqlfluff/sqlfluff/pull/5241) [@Kylea650](https://github.com/Kylea650)
* Fix LT01 alignment regression #4023 [#5238](https://github.com/sqlfluff/sqlfluff/pull/5238) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for oracle non ansi joins [#5231](https://github.com/sqlfluff/sqlfluff/pull/5231) [@joaostorrer](https://github.com/joaostorrer)
* add azure_storage_queue and quoted providers [#5236](https://github.com/sqlfluff/sqlfluff/pull/5236) [@Kylea650](https://github.com/Kylea650)
* Set type automatically within the lexer. [#5232](https://github.com/sqlfluff/sqlfluff/pull/5232) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #5225: Snowflake unparsable select replace [#5227](https://github.com/sqlfluff/sqlfluff/pull/5227) [@andychannery](https://github.com/andychannery)
* Spark Accessor Grammars [#5226](https://github.com/sqlfluff/sqlfluff/pull/5226) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Test Script Timing [#5228](https://github.com/sqlfluff/sqlfluff/pull/5228) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Unify lexer names and types for brackets [#5229](https://github.com/sqlfluff/sqlfluff/pull/5229) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #3176: Snowflake unparsable array casting [#5224](https://github.com/sqlfluff/sqlfluff/pull/5224) [@andychannery](https://github.com/andychannery)
* BigQuery system time syntax [#5220](https://github.com/sqlfluff/sqlfluff/pull/5220) [@greg-finley](https://github.com/greg-finley)
* Parser test nits [#5217](https://github.com/sqlfluff/sqlfluff/pull/5217) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove `parse_grammar` [#5189](https://github.com/sqlfluff/sqlfluff/pull/5189) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Revise segment whitespace validation [#5194](https://github.com/sqlfluff/sqlfluff/pull/5194) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for bpchar datatype in postgres [#5215](https://github.com/sqlfluff/sqlfluff/pull/5215) [@joaostorrer](https://github.com/joaostorrer)
* Resolve #5203: `BaseSegment.copy()` isolation [#5206](https://github.com/sqlfluff/sqlfluff/pull/5206) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Materialize Syntax [#5210](https://github.com/sqlfluff/sqlfluff/pull/5210) [@dehume](https://github.com/dehume)
* Validate fix parsing based on match_grammar [#5196](https://github.com/sqlfluff/sqlfluff/pull/5196) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Position assertions in BaseSegment [#5209](https://github.com/sqlfluff/sqlfluff/pull/5209) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL bracketed column constraint [#5208](https://github.com/sqlfluff/sqlfluff/pull/5208) [@greg-finley](https://github.com/greg-finley)
* Dialect spacing & quoting issues [#5205](https://github.com/sqlfluff/sqlfluff/pull/5205) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update comment workflow again [#5201](https://github.com/sqlfluff/sqlfluff/pull/5201) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stash PR Number and hydrate later [#5200](https://github.com/sqlfluff/sqlfluff/pull/5200) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix API issues in github comment action [#5199](https://github.com/sqlfluff/sqlfluff/pull/5199) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Consistency check in root parse [#5191](https://github.com/sqlfluff/sqlfluff/pull/5191) [@alanmcruickshank](https://github.com/alanmcruickshank)
* PR Comment action [#5192](https://github.com/sqlfluff/sqlfluff/pull/5192) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Cache python dependencies in GHA [#5193](https://github.com/sqlfluff/sqlfluff/pull/5193) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for create server, create user mapping and import foreign schema in postgres [#5185](https://github.com/sqlfluff/sqlfluff/pull/5185) [@joaostorrer](https://github.com/joaostorrer)
* Terminators on `Anything()` + Strip _most_ of the other `parse_grammar` [#5186](https://github.com/sqlfluff/sqlfluff/pull/5186) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Parse modes for `AnyNumberOf` [#5187](https://github.com/sqlfluff/sqlfluff/pull/5187) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Introduce `parse_mode` and remove `StartsWith` & `EphemeralSegment`. [#5167](https://github.com/sqlfluff/sqlfluff/pull/5167) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@dehume](https://github.com/dehume) made their first contribution in [#5210](https://github.com/sqlfluff/sqlfluff/pull/5210)
* [@andychannery](https://github.com/andychannery) made their first contribution in [#5224](https://github.com/sqlfluff/sqlfluff/pull/5224)
* [@Kylea650](https://github.com/Kylea650) made their first contribution in [#5236](https://github.com/sqlfluff/sqlfluff/pull/5236)
* [@robin-alphasophia](https://github.com/robin-alphasophia) made their first contribution in [#5242](https://github.com/sqlfluff/sqlfluff/pull/5242)
* [@jtbg](https://github.com/jtbg) made their first contribution in [#5244](https://github.com/sqlfluff/sqlfluff/pull/5244)
* [@r-petit](https://github.com/r-petit) made their first contribution in [#5273](https://github.com/sqlfluff/sqlfluff/pull/5273)
* [@bpfaust](https://github.com/bpfaust) made their first contribution in [#5173](https://github.com/sqlfluff/sqlfluff/pull/5173)
* [@freewaydev](https://github.com/freewaydev) made their first contribution in [#5278](https://github.com/sqlfluff/sqlfluff/pull/5278)
* [@abdel](https://github.com/abdel) made their first contribution in [#5307](https://github.com/sqlfluff/sqlfluff/pull/5307)

## [2.3.2] - 2023-09-10

## Highlights

Much of this release is internal optimisations and refactoring. We're in the process of upgrading some quite old code in the parser, most of which should not be visible to end users (apart from perhaps some performance improvements!).

This release also allows missing template variables in the placeholder templater to be automatically filled with the name of the variable rather than raising an error (see: [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101), and the sketch at the end of this section).

Beyond that this includes some dialect improvements for DuckDB, SparkSQL, Snowflake, Redshift & Postgres.

Thanks particularly to [@shyaginuma](https://github.com/shyaginuma), [@Fullcure3](https://github.com/Fullcure3), [@adilkhanekt](https://github.com/adilkhanekt) & [@pilou-komoot](https://github.com/pilou-komoot) who made their first contributions as part of this release. 🎉🎉🎉
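As a rough illustration of the placeholder change, here is a minimal sketch. The query, the choice of the colon `param_style`, and the deliberately unset `:user_id` parameter are all illustrative assumptions, not from the original release notes:

```python
from sqlfluff.core import FluffConfig, Linter

# A minimal sketch, assuming the colon `param_style` and an unset
# `:user_id` parameter. From this release, the missing value is filled
# with the parameter's own name instead of raising a templating error.
config = FluffConfig.from_string(
    "[sqlfluff]\n"
    "dialect = ansi\n"
    "templater = placeholder\n"
    "\n"
    "[sqlfluff:templater:placeholder]\n"
    "param_style = colon\n"
)
linted = Linter(config=config).lint_string(
    "SELECT * FROM my_table WHERE user_id = :user_id\n"
)
print(linted.get_violations())
```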
## What’s Changed

* Allow not specifying parameters names when using placeholder templater [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101) [@shyaginuma](https://github.com/shyaginuma)
* Update coverage job to run in the right conditions [#5183](https://github.com/sqlfluff/sqlfluff/pull/5183) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Duckdb: UNION BY NAME [#5176](https://github.com/sqlfluff/sqlfluff/pull/5176) [@greg-finley](https://github.com/greg-finley)
* Output coverage report direct to PR [#5180](https://github.com/sqlfluff/sqlfluff/pull/5180) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Upgrades to the parse fixture generation script [#5182](https://github.com/sqlfluff/sqlfluff/pull/5182) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor of Sequence match [#5177](https://github.com/sqlfluff/sqlfluff/pull/5177) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Simplify Greedy Match [#5178](https://github.com/sqlfluff/sqlfluff/pull/5178) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Quality of life improvements on parse fixture script [#5179](https://github.com/sqlfluff/sqlfluff/pull/5179) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Lift and shift matching algorithms [#5170](https://github.com/sqlfluff/sqlfluff/pull/5170) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Capitalise boolean values in example configs, for consistency [#5175](https://github.com/sqlfluff/sqlfluff/pull/5175) [@pilou-komoot](https://github.com/pilou-komoot)
* Pull terminator setting up into the base grammar [#5172](https://github.com/sqlfluff/sqlfluff/pull/5172) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Flip the if in sequence and un-nest [#5171](https://github.com/sqlfluff/sqlfluff/pull/5171) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Support CACHE TABLE without query [#5165](https://github.com/sqlfluff/sqlfluff/pull/5165) [@reata](https://github.com/reata)
* Remove configurable `enforce_whitespace_preceding_terminator` [#5162](https://github.com/sqlfluff/sqlfluff/pull/5162) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adding optional sequence block for columns parsing in Snowflake external tables [#5157](https://github.com/sqlfluff/sqlfluff/pull/5157) [@adilkhanekt](https://github.com/adilkhanekt)
* SparkSQL: Support ALTER TABLE SET LOCATION without partition spec [#5168](https://github.com/sqlfluff/sqlfluff/pull/5168) [@reata](https://github.com/reata)
* Tighten terminators on `Delimited` [#5161](https://github.com/sqlfluff/sqlfluff/pull/5161) [@alanmcruickshank](https://github.com/alanmcruickshank)
* `terminator` > `terminators` on StartsWith [#5152](https://github.com/sqlfluff/sqlfluff/pull/5152) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Redshift: Support SELECT INTO [#5159](https://github.com/sqlfluff/sqlfluff/pull/5159) [@reata](https://github.com/reata)
* Duckdb: Integer division [#5154](https://github.com/sqlfluff/sqlfluff/pull/5154) [@greg-finley](https://github.com/greg-finley)
* `terminator` > `terminators` on Delimited grammar [#5150](https://github.com/sqlfluff/sqlfluff/pull/5150) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tests for unparsable sections [#5149](https://github.com/sqlfluff/sqlfluff/pull/5149) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Un-nest the delimited match method [#5147](https://github.com/sqlfluff/sqlfluff/pull/5147) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Grammar .copy() assert no unexpected kwargs [#5148](https://github.com/sqlfluff/sqlfluff/pull/5148) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: CLUSTER [#5146](https://github.com/sqlfluff/sqlfluff/pull/5146) [@greg-finley](https://github.com/greg-finley)
* Postgres alter policy [#5138](https://github.com/sqlfluff/sqlfluff/pull/5138) [@Fullcure3](https://github.com/Fullcure3)

## New Contributors

* [@Fullcure3](https://github.com/Fullcure3) made their first contribution in [#5138](https://github.com/sqlfluff/sqlfluff/pull/5138)
* [@adilkhanekt](https://github.com/adilkhanekt) made their first contribution in [#5157](https://github.com/sqlfluff/sqlfluff/pull/5157)
* [@pilou-komoot](https://github.com/pilou-komoot) made their first contribution in [#5175](https://github.com/sqlfluff/sqlfluff/pull/5175)
* [@shyaginuma](https://github.com/shyaginuma) made their first contribution in [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101)

## [2.3.1] - 2023-08-29

## Highlights

This release is primarily a performance release, with most major changes aimed at the linting and fixing phases of operation. Most of the longest duration rules (excepting the layout rules) should see noticeable speed improvements.

Alongside those changes, there is a selection of bugfixes and dialect improvements for Oracle, PostgreSQL, Snowflake & TSQL.

## What’s Changed

* Postgres: Update returning with alias [#5137](https://github.com/sqlfluff/sqlfluff/pull/5137) [@greg-finley](https://github.com/greg-finley)
* Reduce copying on _position_segments (improves `fix`) [#5119](https://github.com/sqlfluff/sqlfluff/pull/5119) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Import rationalisation [#5135](https://github.com/sqlfluff/sqlfluff/pull/5135) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 3 [#5115](https://github.com/sqlfluff/sqlfluff/pull/5115) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for comparison operators with space in Oracle [#5132](https://github.com/sqlfluff/sqlfluff/pull/5132) [@joaostorrer](https://github.com/joaostorrer)
* Snowflake support for bracketed query after `EXCEPT` [#5126](https://github.com/sqlfluff/sqlfluff/pull/5126) [@ulixius9](https://github.com/ulixius9)
* Treatment of null literals. #5099 [#5125](https://github.com/sqlfluff/sqlfluff/pull/5125) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow double-quoted parameters in create procedure [#5131](https://github.com/sqlfluff/sqlfluff/pull/5131) [@greg-finley](https://github.com/greg-finley)
* Fix coverage & mypy [#5134](https://github.com/sqlfluff/sqlfluff/pull/5134) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Ensure Unparsable can be given position. [#5117](https://github.com/sqlfluff/sqlfluff/pull/5117) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reduce copying in LintFix instantiation [#5118](https://github.com/sqlfluff/sqlfluff/pull/5118) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Optimise crawl behaviour of JJ01 [#5116](https://github.com/sqlfluff/sqlfluff/pull/5116) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Simplify rules with improvement to SegmentSeeker [#5113](https://github.com/sqlfluff/sqlfluff/pull/5113) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor AM07 [#5112](https://github.com/sqlfluff/sqlfluff/pull/5112) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 2 [#5110](https://github.com/sqlfluff/sqlfluff/pull/5110) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to Hierarchical Queries in Oracle [#5108](https://github.com/sqlfluff/sqlfluff/pull/5108) [@joaostorrer](https://github.com/joaostorrer)
* ✅ Strict MyPy for sqlfluff.core.parser [#5107](https://github.com/sqlfluff/sqlfluff/pull/5107) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Free up pydocstyle again [#5109](https://github.com/sqlfluff/sqlfluff/pull/5109) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Allow CREATE TABLE INHERITS with no new columns [#5100](https://github.com/sqlfluff/sqlfluff/pull/5100) [@greg-finley](https://github.com/greg-finley)
* Strict mypy in parser.segments [#5094](https://github.com/sqlfluff/sqlfluff/pull/5094) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 1 [#5104](https://github.com/sqlfluff/sqlfluff/pull/5104) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF01 & recursive_crawl improvements [#5102](https://github.com/sqlfluff/sqlfluff/pull/5102) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix new more restrictive tox [#5103](https://github.com/sqlfluff/sqlfluff/pull/5103) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make Day as Non Reserved Keyword [#5062](https://github.com/sqlfluff/sqlfluff/pull/5062) [@ulixius9](https://github.com/ulixius9)

## [2.3.0] - 2023-08-14

## Highlights

This release brings one new dialect, two new rules and some changes to the CLI:

- We now support the [trino](https://trino.io/) dialect. This is a first version of support, so do post any issues on GitHub in the usual way. This was also the first contribution to the project from [@efung](https://github.com/efung) 🏆.
- `ST09` / `structure.join_condition_order`: checks whether tables referenced in `JOIN` clauses are referenced in the order of their definition. By default this means that in the `ON` clause, the column referencing the table in the `FROM` clause should come before the column referencing the table in the `JOIN` clause (e.g. `... FROM a JOIN b on a.c = b.c`). This rule was also the first contribution to the project from [@thibonacci](https://github.com/thibonacci) 🏆.
- `AL08` / `aliasing.unique.column`: checks that column aliases and names are not repeated within the same `SELECT` clause. This is normally an error, as it implies the same column has been imported twice, or that two expressions have been given the same alias. (Both new rules are sketched in the example at the end of this section.)
- The `--profiler` option on `sqlfluff parse` has been removed. It was only present on the `parse` command and not `lint` or `fix`, and it is just as simple to invoke the python `cProfiler` directly.
- The `--recurse` cli option and `sqlfluff.recurse` configuration option have both been removed. They both existed purely for debugging the parser, and were never used in a production setting. The improvement in other debugging messages when unparsable sections are found means that this option is no longer necessary.

Alongside these more significant changes, this release also includes:

- Performance optimisations for `AL04`, `AL05`, `AM04`, `RF01` & `ST05`, which cumulatively may save up to 30% of the total time spent in the linting phase for some projects.
- Dialect improvements for Oracle & TSQL.
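Here is a rough illustration of the two new rules via the Python simple API. The table and column names are hypothetical, chosen only to trigger each rule:

```python
import sqlfluff

# A minimal sketch, using hypothetical table/column names.
# ST09: in the ON clause, `b.c` (the JOIN-clause table) is referenced
# before `a.c` (the FROM-clause table), so the default config flags it.
st09 = sqlfluff.lint(
    "SELECT a.c FROM a JOIN b ON b.c = a.c\n",
    dialect="ansi",
    rules=["ST09"],
)

# AL08: the alias `x` is used twice in the same SELECT clause.
al08 = sqlfluff.lint(
    "SELECT col_1 AS x, col_2 AS x FROM tbl\n",
    dialect="ansi",
    rules=["AL08"],
)

# Expected to include 'ST09' and 'AL08' respectively.
print([v["code"] for v in st09 + al08])
```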
## What’s Changed

* Remove IdentitySet [#5093](https://github.com/sqlfluff/sqlfluff/pull/5093) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stricter typing in smaller sqlfluff.core.parser [#5088](https://github.com/sqlfluff/sqlfluff/pull/5088) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Preliminary support of Trino dialect [#4913](https://github.com/sqlfluff/sqlfluff/pull/4913) [@efung](https://github.com/efung)
* Rename ST09 [#5091](https://github.com/sqlfluff/sqlfluff/pull/5091) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Fix Clustered Index asc/desc [#5090](https://github.com/sqlfluff/sqlfluff/pull/5090) [@greg-finley](https://github.com/greg-finley)
* Parent references and more efficient path_to [#5076](https://github.com/sqlfluff/sqlfluff/pull/5076) [@alanmcruickshank](https://github.com/alanmcruickshank)
* New Rule: AL08 - column aliases must be unique [#5079](https://github.com/sqlfluff/sqlfluff/pull/5079) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for fetch first row(s) only in Oracle [#5089](https://github.com/sqlfluff/sqlfluff/pull/5089) [@joaostorrer](https://github.com/joaostorrer)
* Fix bug around quoted identifiers for ST09 [#5087](https://github.com/sqlfluff/sqlfluff/pull/5087) [@thibonacci](https://github.com/thibonacci)
* Add strict typing to the templating tracer [#5085](https://github.com/sqlfluff/sqlfluff/pull/5085) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove recurse config [#5065](https://github.com/sqlfluff/sqlfluff/pull/5065) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ✅ Strictly type dialect [#5067](https://github.com/sqlfluff/sqlfluff/pull/5067) [@pwildenhain](https://github.com/pwildenhain)
* Add new rule ST09: Joins should list the table referenced earlier (default)/later first [#4974](https://github.com/sqlfluff/sqlfluff/pull/4974) [@thibonacci](https://github.com/thibonacci)
* Remove the internal cProfiler option [#5081](https://github.com/sqlfluff/sqlfluff/pull/5081) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Optimisation on select analysis [#5082](https://github.com/sqlfluff/sqlfluff/pull/5082) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@thibonacci](https://github.com/thibonacci) made their first contribution in [#4974](https://github.com/sqlfluff/sqlfluff/pull/4974)
* [@efung](https://github.com/efung) made their first contribution in [#4913](https://github.com/sqlfluff/sqlfluff/pull/4913)
## [2.2.1] - 2023-08-09

## Highlights

This is primarily a bugfix release for 2.2.0, which introduced a bug in the `exit_code` returned by linting commands that ignored errors when running with `processes > 1`.

In addition to that, this release introduces bugfixes for:

- Errors raised by two specific `dbt` exceptions.
- Issues with unwanted logging output when using `-f yaml` or `-f json` alongside the `dbt` templater.

This also introduces dialect improvements for Oracle and for `LIMIT` clauses.

Thanks also to [@adityapat3l](https://github.com/adityapat3l) who made their first contribution as part of this release! 🎉🎉🎉

## What’s Changed

* Split apart the grammar tests [#5078](https://github.com/sqlfluff/sqlfluff/pull/5078) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve pickling of errors #5066 [#5074](https://github.com/sqlfluff/sqlfluff/pull/5074) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Better context based tracking [#5064](https://github.com/sqlfluff/sqlfluff/pull/5064) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fixing limit handling for bracketed arithmathic operations [#5068](https://github.com/sqlfluff/sqlfluff/pull/5068) [@adityapat3l](https://github.com/adityapat3l)
* Never run in multiprocessing mode with only 1 file. [#5071](https://github.com/sqlfluff/sqlfluff/pull/5071) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add dbt 1.6 tests [#5073](https://github.com/sqlfluff/sqlfluff/pull/5073) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle two kinds of dbt errors more gracefully [#5072](https://github.com/sqlfluff/sqlfluff/pull/5072) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Try to silence dbt logging #5054 [#5070](https://github.com/sqlfluff/sqlfluff/pull/5070) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Move `_prune_options` within `_longest_trimmed_match`. [#5063](https://github.com/sqlfluff/sqlfluff/pull/5063) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix issue 4998 - Add backup and auto refresh grammar to redshift materialized view [#5060](https://github.com/sqlfluff/sqlfluff/pull/5060) [@adityapat3l](https://github.com/adityapat3l)
* Add mypy strict typing for sqlfluff.core.rules [#5048](https://github.com/sqlfluff/sqlfluff/pull/5048) [@pwildenhain](https://github.com/pwildenhain)
* :arrow_up: Bump mypy version in pre-commit [#5055](https://github.com/sqlfluff/sqlfluff/pull/5055) [@pwildenhain](https://github.com/pwildenhain)
* Add SQL Plus bind variable support (Oracle) [#5053](https://github.com/sqlfluff/sqlfluff/pull/5053) [@joaostorrer](https://github.com/joaostorrer)

## New Contributors

* [@adityapat3l](https://github.com/adityapat3l) made their first contribution in [#5060](https://github.com/sqlfluff/sqlfluff/pull/5060)

## [2.2.0] - 2023-08-04

## Highlights

This release changes some of the interfaces between SQLFluff core and our plugin ecosystem. The only *breaking* change is in the interface between SQLFluff and *templater* plugins (which are not common in the ecosystem, hence why this is only a minor and not a major release). For all plugins, we also recommend a different structure for their imports (especially for rule plugins, which are more common in the ecosystem) - for performance and stability reasons. Some users had been experiencing very long import times with previous releases as a result of the layout of plugin imports.

Users with affected plugins will begin to see a warning from this release onward, which can be resolved for their plugin by updating to a new version of that plugin which follows the guidelines. For more details (especially if you're a plugin maintainer) see our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html).

Additionally this release includes:

- Some internal performance gains which may cumulatively save roughly 10% of the time spent in the parsing phase of larger files.
- Improvements to the Simple API, including the ability to pass in a `FluffConfig` object directly, and better support for parsing config files directly from strings (see [the included example](examples/05_simple_api_config.py), and the sketch at the end of this section).
- A bugfix for `AM06`.
- A new `--warn-unused-ignores` CLI option (and corresponding config setting) to allow warnings to be shown if any `noqa` comments in SQL files are unused.
- Improvements to Redshift, Oracle, Clickhouse, Materialize & MySQL dialects.
- A selection of internal improvements, documentation and type hints.

Thanks also to [@kaiyannameighu](https://github.com/kaiyannameighu), [@josef-v](https://github.com/josef-v), [@aglebov](https://github.com/aglebov) & [@joaostorrer](https://github.com/joaostorrer) who made their first contributions as part of this release! 🎉🎉🎉
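A rough sketch of the Simple API changes follows. The config content and the SQL string are illustrative assumptions, not from the release itself:

```python
import sqlfluff
from sqlfluff.core import FluffConfig

# A minimal sketch: build a config from a string (rather than a file on
# disk) and pass the resulting FluffConfig directly to the simple API.
config = FluffConfig.from_string(
    "[sqlfluff]\n"
    "dialect = bigquery\n"
    "rules = core\n"
)
violations = sqlfluff.lint("SELECT 1 from tbl\n", config=config)
print([v["code"] for v in violations])
```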
## What’s Changed

* Mypy: Ephemeral + Tuple Return on .parse() [#5044](https://github.com/sqlfluff/sqlfluff/pull/5044) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to oracle's global and private temporary tables [#5039](https://github.com/sqlfluff/sqlfluff/pull/5039) [@joaostorrer](https://github.com/joaostorrer)
* Redshift-dialect: Support GRANT USAGE ON DATASHARE [#5007](https://github.com/sqlfluff/sqlfluff/pull/5007) [@josef-v](https://github.com/josef-v)
* :white_check_mark: Add strict typing for errors module [#5047](https://github.com/sqlfluff/sqlfluff/pull/5047) [@pwildenhain](https://github.com/pwildenhain)
* Less copying in the ParseContext [#5046](https://github.com/sqlfluff/sqlfluff/pull/5046) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adding support to use `ADD COLUMN IF NOT EXISTS` syntax on `ALTER TABLE` [#5035](https://github.com/sqlfluff/sqlfluff/pull/5035) [@wfelipew](https://github.com/wfelipew)
* Closes #4815 [#5042](https://github.com/sqlfluff/sqlfluff/pull/5042) [@joaostorrer](https://github.com/joaostorrer)
* Fix for multiprocessing warnings. [#5032](https://github.com/sqlfluff/sqlfluff/pull/5032) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Mypy gain: Remove unnecessary tuple construction in MatchResult [#5045](https://github.com/sqlfluff/sqlfluff/pull/5045) [@alanmcruickshank](https://github.com/alanmcruickshank)
* mypy strict in config [#5036](https://github.com/sqlfluff/sqlfluff/pull/5036) [@pwildenhain](https://github.com/pwildenhain)
* strict mypy: match_wrapper & match_logging [#5033](https://github.com/sqlfluff/sqlfluff/pull/5033) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MyPy on errors, helpers, markers & context + remove ParseContext.denylist [#5030](https://github.com/sqlfluff/sqlfluff/pull/5030) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Warn on unused `noqa` directives [#5029](https://github.com/sqlfluff/sqlfluff/pull/5029) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Even more mypy strict [#5023](https://github.com/sqlfluff/sqlfluff/pull/5023) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Handle windows paths better in config files. [#5022](https://github.com/sqlfluff/sqlfluff/pull/5022) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for parsing of Oracle functions with named arguments [#5027](https://github.com/sqlfluff/sqlfluff/pull/5027) [@joaostorrer](https://github.com/joaostorrer)
* DOC: Fix .sqlfluff example in Getting Started [#5026](https://github.com/sqlfluff/sqlfluff/pull/5026) [@aglebov](https://github.com/aglebov)
* Fix: Add exception to the warning & config for the BaseRule. [#5025](https://github.com/sqlfluff/sqlfluff/pull/5025) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Move from `make_template` to `render_func` in jinja and dbt [#4942](https://github.com/sqlfluff/sqlfluff/pull/4942) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Streamline imports to reduce initial load times #4917 [#5020](https://github.com/sqlfluff/sqlfluff/pull/5020) [@alanmcruickshank](https://github.com/alanmcruickshank)
* More mypy strict [#5019](https://github.com/sqlfluff/sqlfluff/pull/5019) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Simple API config and examples [#5018](https://github.com/sqlfluff/sqlfluff/pull/5018) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix some new linting issues [#5021](https://github.com/sqlfluff/sqlfluff/pull/5021) [@alanmcruickshank](https://github.com/alanmcruickshank)
* A step towards mypy strict [#5014](https://github.com/sqlfluff/sqlfluff/pull/5014) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Materialize: Make RETURNING a reserved keyword [#5017](https://github.com/sqlfluff/sqlfluff/pull/5017) [@bobbyiliev](https://github.com/bobbyiliev)
* Config from string and load default_config as resource [#5012](https://github.com/sqlfluff/sqlfluff/pull/5012) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Documentation for the test suite (#2180) [#5011](https://github.com/sqlfluff/sqlfluff/pull/5011) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to oracle's listagg function [#4999](https://github.com/sqlfluff/sqlfluff/pull/4999) [@joaostorrer](https://github.com/joaostorrer)
* Assorted typehints [#5013](https://github.com/sqlfluff/sqlfluff/pull/5013) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor: Extract noqa methods and tests. [#5010](https://github.com/sqlfluff/sqlfluff/pull/5010) [@alanmcruickshank](https://github.com/alanmcruickshank)
* AM06 to ignore aggregate ORDER BY clauses [#5008](https://github.com/sqlfluff/sqlfluff/pull/5008) [@tunetheweb](https://github.com/tunetheweb)
* Bugfix: Treat Function name properly in grants [#5006](https://github.com/sqlfluff/sqlfluff/pull/5006) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Redshift: Add Qualify Clause [#5002](https://github.com/sqlfluff/sqlfluff/pull/5002) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Clickhouse Dialect - Support Dollar Quoted Literals [#5003](https://github.com/sqlfluff/sqlfluff/pull/5003) [@kaiyannameighu](https://github.com/kaiyannameighu)

## New Contributors

* [@kaiyannameighu](https://github.com/kaiyannameighu) made their first contribution in [#5003](https://github.com/sqlfluff/sqlfluff/pull/5003)
* [@joaostorrer](https://github.com/joaostorrer) made their first contribution in [#4999](https://github.com/sqlfluff/sqlfluff/pull/4999)
* [@aglebov](https://github.com/aglebov) made their first contribution in [#5026](https://github.com/sqlfluff/sqlfluff/pull/5026)
* [@josef-v](https://github.com/josef-v) made their first contribution in [#5007](https://github.com/sqlfluff/sqlfluff/pull/5007)

## [2.1.4] - 2023-07-25

## Highlights

This release brings some meaningful performance improvements to the parsing of complex SQL statements. In files with deeply nested expressions, we have seen up to a 50% reduction in time spent in the parsing phase. These changes are all internal optimisations and have minimal implications for the parser. In a few isolated cases they did highlight inconsistencies in the parsing of literals, and so if your use case relies on the specific structure of literal and expression parsing you may find some small differences in how some expressions are parsed.

Additionally, this release brings new validation steps to configuration. Layout configuration is now validated on load (so users with invalid layout configurations may now see these being caught - there's a sketch of this at the end of this section), and inline configuration statements in files are also now validated, both for their layout rules and for any removed or deprecated settings.

On top of both, we've seen dialect improvements to Databricks, PostgreSQL, BigQuery, Snowflake & Athena.
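A rough sketch of the new layout validation follows. The deliberately misspelt `line_position` value, and the expectation that a `SQLFluffUserError` is what gets raised, are both assumptions for illustration:

```python
from sqlfluff.core import FluffConfig
from sqlfluff.core.errors import SQLFluffUserError

# A minimal sketch, assuming an invalid layout value now fails on load
# rather than being silently accepted.
try:
    FluffConfig.from_string(
        "[sqlfluff]\n"
        "dialect = ansi\n"
        "\n"
        "[sqlfluff:layout:type:comma]\n"
        "line_position = trialing\n"  # deliberate typo of "trailing"
    )
except SQLFluffUserError as err:
    print(f"Invalid layout config caught on load: {err}")
```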
## What’s Changed

* Databricks set time zone [#5000](https://github.com/sqlfluff/sqlfluff/pull/5000) [@greg-finley](https://github.com/greg-finley)
* Terminator inheritance [#4981](https://github.com/sqlfluff/sqlfluff/pull/4981) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reduce copying in the parse phase [#4988](https://github.com/sqlfluff/sqlfluff/pull/4988) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Validate layout configs #4578 [#4997](https://github.com/sqlfluff/sqlfluff/pull/4997) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix handling of keywords for roles in ALTER ROLE statement [#4994](https://github.com/sqlfluff/sqlfluff/pull/4994) [@anzelpwj](https://github.com/anzelpwj)
* BigQuery: fixes parse error on some literals with data type and quoted [#4992](https://github.com/sqlfluff/sqlfluff/pull/4992) [@yoichi](https://github.com/yoichi)
* Correct Snowflake `CROSS JOIN` syntax [#4996](https://github.com/sqlfluff/sqlfluff/pull/4996) [@tunetheweb](https://github.com/tunetheweb)
* Remove broken 'fork me' banner from docs [#4989](https://github.com/sqlfluff/sqlfluff/pull/4989) [@greg-finley](https://github.com/greg-finley)
* feat: support athena optional WITH ORDINALITY post UNNEST function [#4991](https://github.com/sqlfluff/sqlfluff/pull/4991) [@reata](https://github.com/reata)

## [2.1.3] - 2023-07-19

## Highlights

This release is a fairly standard incremental release. Highlights include bugfixes to `RF05` and dialect improvements to Snowflake, Teradata, MySQL, TSQL, SparkSQL & Postgres.

Internally, the last few weeks have brought several improvements to developer tooling. We've also moved over to GitHub sponsorships - so if you previously used the old flattr link, you can find our new profile page at https://github.com/sponsors/sqlfluff.
## What’s Changed

* Add the which dbt flag to DbtConfigArgs with default as "compile" [#4982](https://github.com/sqlfluff/sqlfluff/pull/4982) [@moreaupascal56](https://github.com/moreaupascal56)
* feat: support tsql COPY INTO [#4985](https://github.com/sqlfluff/sqlfluff/pull/4985) [@reata](https://github.com/reata)
* fix: sparksql lateral view parse tree for multiple column alias [#4980](https://github.com/sqlfluff/sqlfluff/pull/4980) [@reata](https://github.com/reata)
* Revert "Ignore click mypy issues" [#4967](https://github.com/sqlfluff/sqlfluff/pull/4967) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Parse column named cross [#4975](https://github.com/sqlfluff/sqlfluff/pull/4975) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Group by all [#4976](https://github.com/sqlfluff/sqlfluff/pull/4976) [@greg-finley](https://github.com/greg-finley)
* Update funding yaml to use github sponsors [#4973](https://github.com/sqlfluff/sqlfluff/pull/4973) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Added DEL keyword [#4962](https://github.com/sqlfluff/sqlfluff/pull/4962) [@dflem97](https://github.com/dflem97)
* Remove mypy ignores [#4972](https://github.com/sqlfluff/sqlfluff/pull/4972) [@greg-finley](https://github.com/greg-finley)
* Allow running one rule test locally [#4963](https://github.com/sqlfluff/sqlfluff/pull/4963) [@greg-finley](https://github.com/greg-finley)
* Postgres support underscore array data type syntax [#4959](https://github.com/sqlfluff/sqlfluff/pull/4959) [@greg-finley](https://github.com/greg-finley)
* Bump issue-labeler [#4958](https://github.com/sqlfluff/sqlfluff/pull/4958) [@greg-finley](https://github.com/greg-finley)
* Standardize test fixture names [#4955](https://github.com/sqlfluff/sqlfluff/pull/4955) [@greg-finley](https://github.com/greg-finley)
* RF05 BigQuery empty identifier bug [#4953](https://github.com/sqlfluff/sqlfluff/pull/4953) [@keitherskine](https://github.com/keitherskine)
* New GitHub issue labeler library [#4952](https://github.com/sqlfluff/sqlfluff/pull/4952) [@greg-finley](https://github.com/greg-finley)
* Ignore click mypy issues [#4954](https://github.com/sqlfluff/sqlfluff/pull/4954) [@greg-finley](https://github.com/greg-finley)
* MySQL: Rename index [#4950](https://github.com/sqlfluff/sqlfluff/pull/4950) [@greg-finley](https://github.com/greg-finley)
* Adding support to ALTER TABLE with RENAME COLUMN on MySQL dialect [#4948](https://github.com/sqlfluff/sqlfluff/pull/4948) [@jrballot](https://github.com/jrballot)

## New Contributors

* [@jrballot](https://github.com/jrballot) made their first contribution in [#4948](https://github.com/sqlfluff/sqlfluff/pull/4948)
* [@keitherskine](https://github.com/keitherskine) made their first contribution in [#4953](https://github.com/sqlfluff/sqlfluff/pull/4953)
* [@reata](https://github.com/reata) made their first contribution in [#4980](https://github.com/sqlfluff/sqlfluff/pull/4980)

## [2.1.2] - 2023-07-03

## Highlights

This release resolves compatibility issues with a set of `dbt-core` versions:

- `dbt-core` 1.5.2 onwards is now properly supported.
- Support for `dbt-core` 1.1 to 1.4 has now been re-enabled, after support had to be abandoned a few releases ago.

NOTE: We cannot guarantee that SQLFluff will always remain compatible with all dbt versions, particularly as the folks at dbt-labs have often backported breaking changes to their internal APIs to previous versions of `dbt-core`.
This release does at least bring more extensive internal testing, to catch when this does occur and allow our community to react.

This release also resolves a potential security issue when using external libraries (and the `library_path` config setting), and contains various dialect improvements.

## What’s Changed

* docs(templater): Add documentation for `SQLFLUFF_JINJA_FILTERS` [#4932](https://github.com/sqlfluff/sqlfluff/pull/4932) [@dmohns](https://github.com/dmohns)
* Re-enable dbt 1.1 & 1.2 [#4944](https://github.com/sqlfluff/sqlfluff/pull/4944) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Re-enable dbt 1.4 & 1.3 [#4941](https://github.com/sqlfluff/sqlfluff/pull/4941) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix compatibility with dbt 1.5.2+ [#4939](https://github.com/sqlfluff/sqlfluff/pull/4939) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Security option for library path [#4925](https://github.com/sqlfluff/sqlfluff/pull/4925) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove extra code escapes from release notes docs [#4921](https://github.com/sqlfluff/sqlfluff/pull/4921) [@tunetheweb](https://github.com/tunetheweb)
* Postgres frame_clause quoted interval [#4915](https://github.com/sqlfluff/sqlfluff/pull/4915) [@greg-finley](https://github.com/greg-finley)
* Snowflake: CREATE TAG [#4914](https://github.com/sqlfluff/sqlfluff/pull/4914) [@greg-finley](https://github.com/greg-finley)
* TSQL: support for `DROP EXTERNAL TABLE` [#4919](https://github.com/sqlfluff/sqlfluff/pull/4919) [@keen85](https://github.com/keen85)
* fix(dialect-clickhouse): Support create database [#4620](https://github.com/sqlfluff/sqlfluff/pull/4620) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Snowflake: Actualize the CreateProcedureStatementSegment and CreateFunctionStatementSegment [#4908](https://github.com/sqlfluff/sqlfluff/pull/4908) [@moreaupascal56](https://github.com/moreaupascal56)
* Oracle: Add support for `$` and `#` in identifier [#4903](https://github.com/sqlfluff/sqlfluff/pull/4903) [@ulixius9](https://github.com/ulixius9)
* docs(templater): Refactor templater configuration docs [#4835](https://github.com/sqlfluff/sqlfluff/pull/4835) [@dmohns](https://github.com/dmohns)
* Handle brackets in from clause with joins [#4890](https://github.com/sqlfluff/sqlfluff/pull/4890) [@ulixius9](https://github.com/ulixius9)
* Postgres: Add support for dollar literal & mark collation as non-reserved [#4883](https://github.com/sqlfluff/sqlfluff/pull/4883) [@ulixius9](https://github.com/ulixius9)
* MySQL: ON UPDATE NOW [#4898](https://github.com/sqlfluff/sqlfluff/pull/4898) [@greg-finley](https://github.com/greg-finley)
* Support ROLLUP/CUBE in AM06 [#4892](https://github.com/sqlfluff/sqlfluff/pull/4892) [@tunetheweb](https://github.com/tunetheweb)

## [2.1.1] - 2023-05-25

## Highlights

This release fixes a compatibility issue with the latest version of dbt. It also ships various dialect improvements.
## What’s Changed

* profiles dir env var or default [#4886](https://github.com/sqlfluff/sqlfluff/pull/4886) [@JasonGluck](https://github.com/JasonGluck)
* Bigquery: Allow empty `struct` in `TO_JSON` [#4879](https://github.com/sqlfluff/sqlfluff/pull/4879) [@dimitris-flyr](https://github.com/dimitris-flyr)
* Set type of ARRAY function for BigQuery [#4880](https://github.com/sqlfluff/sqlfluff/pull/4880) [@tunetheweb](https://github.com/tunetheweb)
* Full athena SHOW coverage [#4876](https://github.com/sqlfluff/sqlfluff/pull/4876) [@dogversioning](https://github.com/dogversioning)
* Sparksql add star support in multiparameter functions [#4874](https://github.com/sqlfluff/sqlfluff/pull/4874) [@spex66](https://github.com/spex66)
* Oracle create view with EDITIONING & FORCE [#4872](https://github.com/sqlfluff/sqlfluff/pull/4872) [@ulixius9](https://github.com/ulixius9)
* Fixes pip installation link on Getting Started [#4867](https://github.com/sqlfluff/sqlfluff/pull/4867) [@segoldma](https://github.com/segoldma)
* Athena: add "weird" test cases for `group by` [#4869](https://github.com/sqlfluff/sqlfluff/pull/4869) [@KulykDmytro](https://github.com/KulykDmytro)
* Athena: add support for `CUBE` `ROLLUP` `GROUPING SETS` [#4862](https://github.com/sqlfluff/sqlfluff/pull/4862) [@KulykDmytro](https://github.com/KulykDmytro)
* Add show tables/views to athena [#4854](https://github.com/sqlfluff/sqlfluff/pull/4854) [@dogversioning](https://github.com/dogversioning)
* Adding support for NOCOPY and INSTANT algorithm on CREATE INDEX on MySQL dialect [#4865](https://github.com/sqlfluff/sqlfluff/pull/4865) [@wfelipew](https://github.com/wfelipew)
* Add link to Trino keywords (Athena v3) [#4858](https://github.com/sqlfluff/sqlfluff/pull/4858) [@KulykDmytro](https://github.com/KulykDmytro)
* TSQL: Create Role Authorization [#4852](https://github.com/sqlfluff/sqlfluff/pull/4852) [@greg-finley](https://github.com/greg-finley)
* TSQL: DEADLOCK_PRIORITY [#4853](https://github.com/sqlfluff/sqlfluff/pull/4853) [@greg-finley](https://github.com/greg-finley)
* fix(dialect-clickhouse): Support SYSTEM queries [#4625](https://github.com/sqlfluff/sqlfluff/pull/4625) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Fix #4807: LT02 & LT12 issues with empty files. [#4834](https://github.com/sqlfluff/sqlfluff/pull/4834) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Sqlite: COLLATE column constraint [#4845](https://github.com/sqlfluff/sqlfluff/pull/4845) [@greg-finley](https://github.com/greg-finley)
* Hive: Support REGEXP and IREGEXP [#4846](https://github.com/sqlfluff/sqlfluff/pull/4846) [@greg-finley](https://github.com/greg-finley)

## New Contributors

* [@dogversioning](https://github.com/dogversioning) made their first contribution in [#4854](https://github.com/sqlfluff/sqlfluff/pull/4854)
* [@segoldma](https://github.com/segoldma) made their first contribution in [#4867](https://github.com/sqlfluff/sqlfluff/pull/4867)
* [@spex66](https://github.com/spex66) made their first contribution in [#4874](https://github.com/sqlfluff/sqlfluff/pull/4874)
* [@dimitris-flyr](https://github.com/dimitris-flyr) made their first contribution in [#4879](https://github.com/sqlfluff/sqlfluff/pull/4879)
* [@JasonGluck](https://github.com/JasonGluck) made their first contribution in [#4886](https://github.com/sqlfluff/sqlfluff/pull/4886)

## [2.1.0] - 2023-05-03

## Highlights

This release brings support for dbt 1.5+. Some internals of dbt mean that SQLFluff versions prior to this release may experience errors with dbt versions post 1.5.

In addition to that, there are some dialect and templating improvements bundled too:

* Support for custom Jinja filters.
* An additional configurable indent behaviour within `CASE WHEN` clauses (sketched at the end of this section).
* Additional support for bracket quoted literals in TSQL and RF06.
* Dialect improvements to Snowflake, Hive, Redshift, Postgres, Clickhouse, Oracle and SQLite.
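A rough sketch of the new `CASE WHEN` indent setting follows. The nested config layout, the sample SQL, and the choice to disable the flag are all illustrative assumptions:

```python
from sqlfluff.core import FluffConfig, Linter

# A minimal sketch, assuming this nested config layout: the new
# behaviour is controlled by the `indented_then_contents` setting in
# the `[sqlfluff:indentation]` section (disabled here).
config = FluffConfig(
    configs={
        "core": {"dialect": "ansi"},
        "indentation": {"indented_then_contents": False},
    }
)
linted = Linter(config=config).lint_string(
    "SELECT\n"
    "    CASE\n"
    "        WHEN a > 1 THEN 'big'\n"
    "        ELSE 'small'\n"
    "    END AS size_label\n"
    "FROM tbl\n",
    fix=True,
)
print(linted.fix_string()[0])  # the reformatted SQL
```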
## What’s Changed

* Add support for Jinja filters [#4810](https://github.com/sqlfluff/sqlfluff/pull/4810) [@dmohns](https://github.com/dmohns)
* Postgres: Allow INSERT RETURNING [#4820](https://github.com/sqlfluff/sqlfluff/pull/4820) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support partial index [#4833](https://github.com/sqlfluff/sqlfluff/pull/4833) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Make SQLFluff compatible with DBT 1.5 [#4828](https://github.com/sqlfluff/sqlfluff/pull/4828) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake dialect: Add support for comment clause in the create warehouse statement [#4823](https://github.com/sqlfluff/sqlfluff/pull/4823) [@moreaupascal56](https://github.com/moreaupascal56)
* fix(dialect-clickhouse): Support DROP statements [#4821](https://github.com/sqlfluff/sqlfluff/pull/4821) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Hive: INSERT INTO without TABLE keyword [#4819](https://github.com/sqlfluff/sqlfluff/pull/4819) [@greg-finley](https://github.com/greg-finley)
* Fix: Small typo in error message [#4814](https://github.com/sqlfluff/sqlfluff/pull/4814) [@JavierMonton](https://github.com/JavierMonton)
* Redshift: Support with no schema binding [#4813](https://github.com/sqlfluff/sqlfluff/pull/4813) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Detect tsql square bracket quotes for RF06 #4724 [#4781](https://github.com/sqlfluff/sqlfluff/pull/4781) [@daviewales](https://github.com/daviewales)
* Apply implicit indents to `WHEN` blocks and introduce `indented_then_contents` [#4755](https://github.com/sqlfluff/sqlfluff/pull/4755) [@borchero](https://github.com/borchero)
* Oracle: Update Drop Behaviour [#4803](https://github.com/sqlfluff/sqlfluff/pull/4803) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Oracle: Update bare functions [#4804](https://github.com/sqlfluff/sqlfluff/pull/4804) [@WittierDinosaur](https://github.com/WittierDinosaur)

## New Contributors

* [@daviewales](https://github.com/daviewales) made their first contribution in [#4781](https://github.com/sqlfluff/sqlfluff/pull/4781)

## [2.0.7] - 2023-04-20

## Highlights

This is a bugfix release, to resolve two regressions included in 2.0.6 related to implicit indents. This also includes a bugfix for the config file location on macOS, contributed by first-time contributor [@jpuris](https://github.com/jpuris) 🎉.
## What’s Changed

* Fix regression in implicit indents [#4798](https://github.com/sqlfluff/sqlfluff/pull/4798) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix bug with brackets and implicit indents [#4797](https://github.com/sqlfluff/sqlfluff/pull/4797) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix: correct macos/osx config file location [#4795](https://github.com/sqlfluff/sqlfluff/pull/4795) [@jpuris](https://github.com/jpuris)

## New Contributors

* [@jpuris](https://github.com/jpuris) made their first contribution in [#4795](https://github.com/sqlfluff/sqlfluff/pull/4795)

## [2.0.6] - 2023-04-19

## Highlights

* Introduction of a `--quiet` option for the CLI, for situations where less output is useful.
* When the `--force` option is used with `sqlfluff fix`, each file is now fixed during the linting process rather than at the end.
* Bugfixes to comment and templated section indentation.
* Performance improvements to parsing.
* Bugfix to macros triggering LT01.
* Renaming `layout.end-of-file` to `layout.end_of_file`, in line with other rules.
* Dialect improvements to SparkSQL, BigQuery, Hive & Snowflake.

## What’s Changed

* Snowflake: Support Temporary View [#4789](https://github.com/sqlfluff/sqlfluff/pull/4789) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Inroduce `SAFE` prefix segment [#4773](https://github.com/sqlfluff/sqlfluff/pull/4773) [@dmohns](https://github.com/dmohns)
* Fix #4660: Better handling of empty files. [#4780](https://github.com/sqlfluff/sqlfluff/pull/4780) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #3538: (Fix files as we go) [#4777](https://github.com/sqlfluff/sqlfluff/pull/4777) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #2855: (Tech debt: check consistency in TemplatedFile init) [#4776](https://github.com/sqlfluff/sqlfluff/pull/4776) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add a --quiet option for fix [#4764](https://github.com/sqlfluff/sqlfluff/pull/4764) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4603 indent after Jinja 'do' directive [#4778](https://github.com/sqlfluff/sqlfluff/pull/4778) [@fredriv](https://github.com/fredriv)
* Snowflake Execute Task with Schema [#4771](https://github.com/sqlfluff/sqlfluff/pull/4771) [@Thashin](https://github.com/Thashin)
* SQLite: Support CreateTrigger [#4767](https://github.com/sqlfluff/sqlfluff/pull/4767) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix #2865 (AL05 exception for Redshift Semi-structured) [#4775](https://github.com/sqlfluff/sqlfluff/pull/4775) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4540: Untaken indents evaluation order. [#4768](https://github.com/sqlfluff/sqlfluff/pull/4768) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use the new CollationReferenceSegment everywhere [#4770](https://github.com/sqlfluff/sqlfluff/pull/4770) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SQLite: Fix multiple parse issues in Expression_A_Grammar [#4769](https://github.com/sqlfluff/sqlfluff/pull/4769) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SQLite: Remove refs to RESPECT and QUALIFY [#4765](https://github.com/sqlfluff/sqlfluff/pull/4765) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support STRICT [#4766](https://github.com/sqlfluff/sqlfluff/pull/4766) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Support hive set syntax [#4763](https://github.com/sqlfluff/sqlfluff/pull/4763) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4582: Comments after end of line [#4760](https://github.com/sqlfluff/sqlfluff/pull/4760) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow comment match with preceding line [#4758](https://github.com/sqlfluff/sqlfluff/pull/4758) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove the majority of greedy matchers [#4761](https://github.com/sqlfluff/sqlfluff/pull/4761) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix #4745: (max() error in reindent) [#4752](https://github.com/sqlfluff/sqlfluff/pull/4752) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix issue with macros triggering LT01 [#4757](https://github.com/sqlfluff/sqlfluff/pull/4757) [@alanmcruickshank](https://github.com/alanmcruickshank)
* end-of-file > end_of_file [#4753](https://github.com/sqlfluff/sqlfluff/pull/4753) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [2.0.5] - 2023-04-14

## Highlights

This is a relatively swift bugfix release, to refine some of the changes made to window function indentation in `2.0.4`. In addition, there are two dialect refinements also made since that release.

## What’s Changed

* Refactor PG segments to reuse new common segments [#4726](https://github.com/sqlfluff/sqlfluff/pull/4726) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Recognize quoted data types [#4747](https://github.com/sqlfluff/sqlfluff/pull/4747) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)

## [2.0.4] - 2023-04-14

## Highlights

This is primarily a _bugfix_ and _dialect_ release:

* Several bugfixes related to templating and indentation, in particular some improvements to the indentation of aliases and window functions.
* Performance improvements to the parser.
* The `--persist-timing` option is now also available on `sqlfluff fix`.
* A refresh of the getting started and rule documentation.
* Dialect improvements to PostgreSQL, Athena, SparkSQL, MySQL & Snowflake.

Thanks also to [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) and [@Thashin](https://github.com/Thashin) who made their first contributions in this release. In particular, [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) made **twenty-one** contributions in their first month! 🎉🎉🎉

## What’s Changed

* SparkSQL: Improvements to lateral view, hints, sort by [#4731](https://github.com/sqlfluff/sqlfluff/pull/4731) [@bmorck](https://github.com/bmorck)
* Add ExpressionSegment to CREATE TABLE ... DEFAULT / Fix multiple parse issues in Expression_A_Grammar [#4717](https://github.com/sqlfluff/sqlfluff/pull/4717) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for the PG VACUUM statement [#4742](https://github.com/sqlfluff/sqlfluff/pull/4742) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Simplify and fix PG array accessor segment & support expressions [#4748](https://github.com/sqlfluff/sqlfluff/pull/4748) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SparkSQL: Allow for any ordering of create table clauses [#4721](https://github.com/sqlfluff/sqlfluff/pull/4721) [@bmorck](https://github.com/bmorck)
* Suggested started config file [#4702](https://github.com/sqlfluff/sqlfluff/pull/4702) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Indents on window functions [#4560](https://github.com/sqlfluff/sqlfluff/pull/4560) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Fix Group By Clause [#4732](https://github.com/sqlfluff/sqlfluff/pull/4732) [@bmorck](https://github.com/bmorck)
* Improve support for EXCLUDE table constraints in PG [#4725](https://github.com/sqlfluff/sqlfluff/pull/4725) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for dropping multiple indexes in PG [#4737](https://github.com/sqlfluff/sqlfluff/pull/4737) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Recognize "on" value and integers for PG SET statement [#4740](https://github.com/sqlfluff/sqlfluff/pull/4740) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Improve interval expressions on MySQL [#4746](https://github.com/sqlfluff/sqlfluff/pull/4746) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Keep out zero length keywords [#4723](https://github.com/sqlfluff/sqlfluff/pull/4723) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add PG support for CREATE SCHEMA AUTHORIZATION [#4735](https://github.com/sqlfluff/sqlfluff/pull/4735) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for dropping multiple views with PostgreSQL [#4736](https://github.com/sqlfluff/sqlfluff/pull/4736) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add CHAR VARYING data type for PG [#4738](https://github.com/sqlfluff/sqlfluff/pull/4738) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* fix(athena): map type matching failed, array type only contains a datatype [#4739](https://github.com/sqlfluff/sqlfluff/pull/4739) [@timcosta](https://github.com/timcosta)
* Allow DML queries to be selectable in CTEs on PG [#4741](https://github.com/sqlfluff/sqlfluff/pull/4741) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add the CREATE/DROP CAST statements to ANSI and PG [#4744](https://github.com/sqlfluff/sqlfluff/pull/4744) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for PG SET ROLE / RESET ROLE [#4734](https://github.com/sqlfluff/sqlfluff/pull/4734) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Support Spark Iceberg DDL [#4690](https://github.com/sqlfluff/sqlfluff/pull/4690) [@bmorck](https://github.com/bmorck)
* Fix #4680 [#4707](https://github.com/sqlfluff/sqlfluff/pull/4707) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Indent Aliases [#4706](https://github.com/sqlfluff/sqlfluff/pull/4706) [@alanmcruickshank](https://github.com/alanmcruickshank)
[#4706](https://github.com/sqlfluff/sqlfluff/pull/4706) [@alanmcruickshank](https://github.com/alanmcruickshank) * SparkSQL: Improve window frame bounds [#4722](https://github.com/sqlfluff/sqlfluff/pull/4722) [@bmorck](https://github.com/bmorck) * Add support for PG CREATE/ALTER/DROP PUBLICATION stmts [#4716](https://github.com/sqlfluff/sqlfluff/pull/4716) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * SparkSQL: Create external table support [#4692](https://github.com/sqlfluff/sqlfluff/pull/4692) [@bmorck](https://github.com/bmorck) * SparkSQL: Fix file literal lexing [#4718](https://github.com/sqlfluff/sqlfluff/pull/4718) [@bmorck](https://github.com/bmorck) * Add PG DROP/REASSIGN OWNED statements [#4720](https://github.com/sqlfluff/sqlfluff/pull/4720) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * SparkSQL: Add distinct to comparison operator [#4719](https://github.com/sqlfluff/sqlfluff/pull/4719) [@bmorck](https://github.com/bmorck) * Rethink Rule Docs [#4695](https://github.com/sqlfluff/sqlfluff/pull/4695) [@alanmcruickshank](https://github.com/alanmcruickshank) * Performance: Reduce calls to _prune_options [#4705](https://github.com/sqlfluff/sqlfluff/pull/4705) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Add ReferencedVariableNameSegment to sample function [#4712](https://github.com/sqlfluff/sqlfluff/pull/4712) [@WittierDinosaur](https://github.com/WittierDinosaur) * Mark AM02 as fix compatible [#4714](https://github.com/sqlfluff/sqlfluff/pull/4714) [@yoichi](https://github.com/yoichi) * Fix LT01 spacing check in templated areas [#4698](https://github.com/sqlfluff/sqlfluff/pull/4698) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Don't do newline conversion on write [#4703](https://github.com/sqlfluff/sqlfluff/pull/4703) [@alanmcruickshank](https://github.com/alanmcruickshank) * MySQL: CREATE/ALTER VIEW may take UNION [#4713](https://github.com/sqlfluff/sqlfluff/pull/4713) [@yoichi](https://github.com/yoichi) * Preserve zero-length template segments [#4708](https://github.com/sqlfluff/sqlfluff/pull/4708) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * CV06: don't flag files that don't have code [#4709](https://github.com/sqlfluff/sqlfluff/pull/4709) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Add a no-output option [#4704](https://github.com/sqlfluff/sqlfluff/pull/4704) [@alanmcruickshank](https://github.com/alanmcruickshank) * Jinja templater: treat "import" and "from" as templated [#4696](https://github.com/sqlfluff/sqlfluff/pull/4696) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Capitalization rules ignore templated code only if configured to [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) * Update getting started docs [#4700](https://github.com/sqlfluff/sqlfluff/pull/4700) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add a default for config_keywords and remove noisy error. 
[#4701](https://github.com/sqlfluff/sqlfluff/pull/4701) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake Select System Functions [#4687](https://github.com/sqlfluff/sqlfluff/pull/4687) [@Thashin](https://github.com/Thashin) * SparkSQL: Add using and options clause to create view statement [#4691](https://github.com/sqlfluff/sqlfluff/pull/4691) [@bmorck](https://github.com/bmorck) * MySQL: Add RETURN Statement [#4693](https://github.com/sqlfluff/sqlfluff/pull/4693) [@yoichi](https://github.com/yoichi) * Safety valve for fixes in CV03 [#4685](https://github.com/sqlfluff/sqlfluff/pull/4685) [@alanmcruickshank](https://github.com/alanmcruickshank) * Allow persist timing on `fix` too. [#4679](https://github.com/sqlfluff/sqlfluff/pull/4679) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix{dialect-snowflake}:Alter Table Column Set/Unset Tag [#4682](https://github.com/sqlfluff/sqlfluff/pull/4682) [@Thashin](https://github.com/Thashin) * fix{dialect-snowflake}:Execute Task [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683) [@Thashin](https://github.com/Thashin) * Make version number an argument not an option in release script. [#4677](https://github.com/sqlfluff/sqlfluff/pull/4677) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@Thashin](https://github.com/Thashin) made their first contribution in [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683) * [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) made their first contribution in [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697) ## [2.0.3] - 2023-04-05 ## Highlights This is primarily a _bugfix_ and _dialect_ release: * Several bugfixes related to templating and indentation. * Configurable indentation before `THEN` in `CASE` statements (see [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598) and the config sketch below). * Performance improvements to `TypedParser`, `LT03` & `LT04`. * Rule timings now appear in the `--persist-timing` option for deeper performance understanding. * The introduction of a Greenplum dialect. * Dialect improvements to TSQL, Athena, Snowflake, MySQL, SparkSQL, BigQuery, Databricks, Clickhouse & Postgres. We also saw a _huge number of first-time contributors_ with **9** contributing in this release 🎉🏆🎉.
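For illustration, a minimal `.sqlfluff` sketch of the new `CASE` indentation option. The flag name here (`indented_then`, in the same section as the existing `indented_joins` style options) is our reading of [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598); check the layout documentation for the exact spelling and default:

```ini
[sqlfluff:indentation]
# Assumed flag from #4598: set to False to no longer require an
# indent before THEN in CASE expressions.
indented_then = False
```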
## What’s Changed * Better error message for missing keywords [#4676](https://github.com/sqlfluff/sqlfluff/pull/4676) [@tunetheweb](https://github.com/tunetheweb) * Add performance shortcuts to LT03 & LT04 [#4672](https://github.com/sqlfluff/sqlfluff/pull/4672) [@alanmcruickshank](https://github.com/alanmcruickshank) * Clickhouse: Add support for [LEFT] ARRAY JOIN [#4618](https://github.com/sqlfluff/sqlfluff/pull/4618) [@simpl1g](https://github.com/simpl1g) * Postgres - allow untyped OVERLAPS clauses [#4674](https://github.com/sqlfluff/sqlfluff/pull/4674) [@tunetheweb](https://github.com/tunetheweb) * Mark `is_alias_required` as a private class so it doesn't appear in docs [#4673](https://github.com/sqlfluff/sqlfluff/pull/4673) [@tunetheweb](https://github.com/tunetheweb) * Fix bug in templated with clauses LT07 [#4671](https://github.com/sqlfluff/sqlfluff/pull/4671) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: `OPENJSON()` [#4652](https://github.com/sqlfluff/sqlfluff/pull/4652) [@keen85](https://github.com/keen85) * fix(RF06/L059): allows configuring prefer_quoted_keywords to deconflict with L029 [#4396](https://github.com/sqlfluff/sqlfluff/pull/4396) [@timcosta](https://github.com/timcosta) * TSQL: `Create External Table` [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642) [@aly76](https://github.com/aly76) * Consistent indentation in `MERGE` `INSERT` clause [#4666](https://github.com/sqlfluff/sqlfluff/pull/4666) [@dmohns](https://github.com/dmohns) * BigQuery: Fix null assignment in options segment [#4669](https://github.com/sqlfluff/sqlfluff/pull/4669) [@greg-finley](https://github.com/greg-finley) * BigQuery: Delete table reference [#4668](https://github.com/sqlfluff/sqlfluff/pull/4668) [@greg-finley](https://github.com/greg-finley) * TSQL: `CREATE EXTERNAL FILE FORMAT` [#4647](https://github.com/sqlfluff/sqlfluff/pull/4647) [@keen85](https://github.com/keen85) * Remove TIME as reserved keyword in SparkSQL [#4662](https://github.com/sqlfluff/sqlfluff/pull/4662) [@bmorck](https://github.com/bmorck) * Start of the Greenplum dialect implementation [#4661](https://github.com/sqlfluff/sqlfluff/pull/4661) [@JackWolverson](https://github.com/JackWolverson) * Enable configuring whether to require indent before THEN [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598) [@fredriv](https://github.com/fredriv) * Sequence Meta Handling [#4622](https://github.com/sqlfluff/sqlfluff/pull/4622) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add support for non-quoted file paths in SparkSQL [#4650](https://github.com/sqlfluff/sqlfluff/pull/4650) [@bmorck](https://github.com/bmorck) * Remove three RegexParsers [#4658](https://github.com/sqlfluff/sqlfluff/pull/4658) [@alanmcruickshank](https://github.com/alanmcruickshank) * Make parse test readout more helpful [#4657](https://github.com/sqlfluff/sqlfluff/pull/4657) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: support for `sqlcmd` commands `:r` and `:setvar` [#4653](https://github.com/sqlfluff/sqlfluff/pull/4653) [@keen85](https://github.com/keen85) * Update README with Databricks note [#4632](https://github.com/sqlfluff/sqlfluff/pull/4632) [@liamperritt](https://github.com/liamperritt) * Athena: Fix parsing error with aliases starting with underscore [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636) [@maiarareinaldo](https://github.com/maiarareinaldo) * Snowflake: Stop ever-increasing indent in CREATE USER [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638) 
[@roman-ef](https://github.com/roman-ef) * TSQL: `PERIOD FOR SYSTEM_TIME` (temporal tables) [#4654](https://github.com/sqlfluff/sqlfluff/pull/4654) [@keen85](https://github.com/keen85) * MySQL: SelectStatementSegment in CREATE/ALTER VIEW may be bracketed [#4655](https://github.com/sqlfluff/sqlfluff/pull/4655) [@yoichi](https://github.com/yoichi) * TSQL: `CREATE EXTERNAL DATA SOURCE` [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634) [@keen85](https://github.com/keen85) * Safety valve on source fixes [#4640](https://github.com/sqlfluff/sqlfluff/pull/4640) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add SparkSQL support for LONG primitive type [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639) [@bmorck](https://github.com/bmorck) * Fix PIVOT clauses for BigQuery and SparkSQL [#4630](https://github.com/sqlfluff/sqlfluff/pull/4630) [@tunetheweb](https://github.com/tunetheweb) * Correct BigQuery WINDOW parsing [#4629](https://github.com/sqlfluff/sqlfluff/pull/4629) [@tunetheweb](https://github.com/tunetheweb) * Add Databricks dialect support for Unity Catalog [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568) [@liamperritt](https://github.com/liamperritt) * .simple() matching for TypedMatcher [#4612](https://github.com/sqlfluff/sqlfluff/pull/4612) [@alanmcruickshank](https://github.com/alanmcruickshank) * --bench output with rule timings [#4601](https://github.com/sqlfluff/sqlfluff/pull/4601) [@alanmcruickshank](https://github.com/alanmcruickshank) * MySQL: Unnamed constraints [#4616](https://github.com/sqlfluff/sqlfluff/pull/4616) [@greg-finley](https://github.com/greg-finley) * TSQL: Create database scoped credential [#4615](https://github.com/sqlfluff/sqlfluff/pull/4615) [@greg-finley](https://github.com/greg-finley) * fix(dialect-clickhouse): Add materialized view statement [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605) [@germainlefebvre4](https://github.com/germainlefebvre4) * Nicer formatted dbt errors [#4606](https://github.com/sqlfluff/sqlfluff/pull/4606) [@alanmcruickshank](https://github.com/alanmcruickshank) * add parse lambda function Clickhouse [#4611](https://github.com/sqlfluff/sqlfluff/pull/4611) [@konnectr](https://github.com/konnectr) * Support `WITH ORDINALITY` clauses in Postgres [#4599](https://github.com/sqlfluff/sqlfluff/pull/4599) [@tunetheweb](https://github.com/tunetheweb) ## New Contributors * [@germainlefebvre4](https://github.com/germainlefebvre4) made their first contribution in [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605) * [@liamperritt](https://github.com/liamperritt) made their first contribution in [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568) * [@bmorck](https://github.com/bmorck) made their first contribution in [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639) * [@keen85](https://github.com/keen85) made their first contribution in [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634) * [@roman-ef](https://github.com/roman-ef) made their first contribution in [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638) * [@maiarareinaldo](https://github.com/maiarareinaldo) made their first contribution in [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636) * [@fredriv](https://github.com/fredriv) made their first contribution in [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598) * [@aly76](https://github.com/aly76) made their first contribution in [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642) * [@simpl1g](https://github.com/simpl1g) made their first contribution in 
[#4618](https://github.com/sqlfluff/sqlfluff/pull/4618) ## [2.0.2] - 2023-03-23 ## Highlights This is primarily a _bugfix_ release. Most notably, this solves some of the issues introduced in 2.0.1 around spacing within datatypes. Expressions like `1.0::double precision` should now be spaced correctly. Beyond that, this contains a selection of smaller bugfixes and dialect improvements. Even for a relatively small release, we saw three new contributors (thanks [@aurany](https://github.com/aurany), [@JackWolverson](https://github.com/JackWolverson) & [@mikaeltw](https://github.com/mikaeltw) 🎉). The one new _feature_ (as such) is now being able to configure `LT05` (aka `layout.long_lines`) to optionally move trailing comments _after_ the line they are found on, rather than the default behaviour of moving them up and _before_. Users can enable this with the `trailing_comments` configuration setting in the `indentation` section (see the sketch after the change list below). This release _also_ contains some performance optimisations in the parser, especially on queries with heavily nested expressions. There will be more to come in this space, but we hope this leads to a better experience for many users. 🚀 ## What’s Changed * Parse Caching [#4576](https://github.com/sqlfluff/sqlfluff/pull/4576) [@alanmcruickshank](https://github.com/alanmcruickshank) * Data type spacing [#4592](https://github.com/sqlfluff/sqlfluff/pull/4592) [@alanmcruickshank](https://github.com/alanmcruickshank) * MySQL: allow quoted literal in alias name [#4591](https://github.com/sqlfluff/sqlfluff/pull/4591) [@yoichi](https://github.com/yoichi) * Make implicit indents visible in the parse tree [#4584](https://github.com/sqlfluff/sqlfluff/pull/4584) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4559: TSQL implicit indents on WHERE [#4583](https://github.com/sqlfluff/sqlfluff/pull/4583) [@alanmcruickshank](https://github.com/alanmcruickshank) * Added keywords to DB2 dialect from IBM docs [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575) [@aurany](https://github.com/aurany) * Remove matches_target_tuples (#3873) [#4561](https://github.com/sqlfluff/sqlfluff/pull/4561) [@alanmcruickshank](https://github.com/alanmcruickshank) * Use terminators in BaseExpression [#4577](https://github.com/sqlfluff/sqlfluff/pull/4577) [@alanmcruickshank](https://github.com/alanmcruickshank) * Address #1630: Optionally move comments after long line [#4558](https://github.com/sqlfluff/sqlfluff/pull/4558) [@alanmcruickshank](https://github.com/alanmcruickshank) * Added schema to set statement [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580) [@JackWolverson](https://github.com/JackWolverson) * Refactor lint_line_length and fix comma bug [#4564](https://github.com/sqlfluff/sqlfluff/pull/4564) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix untaken indent bug [#4562](https://github.com/sqlfluff/sqlfluff/pull/4562) [@alanmcruickshank](https://github.com/alanmcruickshank) * SQLite: Fix SELECT LIMIT [#4566](https://github.com/sqlfluff/sqlfluff/pull/4566) [@greg-finley](https://github.com/greg-finley) * Fix #4453: Snowflake semi-structured casts in CV11 [#4571](https://github.com/sqlfluff/sqlfluff/pull/4571) [@alanmcruickshank](https://github.com/alanmcruickshank) * Name of LT07 [#4557](https://github.com/sqlfluff/sqlfluff/pull/4557) [@alanmcruickshank](https://github.com/alanmcruickshank) * Patch fetch and over [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555) [@mikaeltw](https://github.com/mikaeltw)
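As promised above, a sketch of the new trailing comment configuration for `LT05`. The value names are assumptions based on the description in the highlights (with `before` as the default behaviour):

```ini
[sqlfluff:indentation]
# "before" (the default) moves a trailing comment up, onto the line
# before the long line; "after" moves it onto the line following.
trailing_comments = after
```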
## New Contributors * [@mikaeltw](https://github.com/mikaeltw) made their first contribution in [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555) * [@JackWolverson](https://github.com/JackWolverson) made their first contribution in [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580) * [@aurany](https://github.com/aurany) made their first contribution in [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575) ## [2.0.1] - 2023-03-17 ## Highlights This is mostly a bugfix release addressing some of the issues from the recent 2.0 release. Notable fixes are: - Spacing (as applied by `LT01`) for datatypes, hyphenated identifiers and casting operators. - Several bugs in the indentation routines (`LT02`), in particular with implicit indents. - Fixing a conflict between `LT09` and `LT02`, by limiting `LT09` to bringing targets onto a single line only if there is a single select target **and** it contains no newlines. - Supporting arrays and the new rules configuration more effectively in `pyproject.toml`. - Configuring dialects on a file-by-file basis using inline comments now works. This release also brings one small new feature: allowing additional flags to be passed to SQLFluff when called as a `pre-commit` hook. Thanks especially to [@JavierMonton](https://github.com/JavierMonton) and [@LauraRichter](https://github.com/LauraRichter) who made their first contributions to the project as part of this release! 🎉🏆 ## What’s Changed * Add support for arrays in TOML configuration [#4387](https://github.com/sqlfluff/sqlfluff/pull/4387) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Rethink test segregation in CI [#4547](https://github.com/sqlfluff/sqlfluff/pull/4547) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4515 and add more test cases [#4525](https://github.com/sqlfluff/sqlfluff/pull/4525) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add additional flags to `sqlfluff` invocations in pre-commit hooks [#4546](https://github.com/sqlfluff/sqlfluff/pull/4546) [@borchero](https://github.com/borchero) * Resolve #4484 (issues with indented_joins indents) [#4544](https://github.com/sqlfluff/sqlfluff/pull/4544) [@alanmcruickshank](https://github.com/alanmcruickshank) * Per file dialect selection fix [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518) [@LauraRichter](https://github.com/LauraRichter) * MySQL: Add CREATE INDEX [#4538](https://github.com/sqlfluff/sqlfluff/pull/4538) [@yoichi](https://github.com/yoichi) * Resolve implicit indent issues when catching negative indents [#4543](https://github.com/sqlfluff/sqlfluff/pull/4543) [@alanmcruickshank](https://github.com/alanmcruickshank) * Github Action Deprecations [#4545](https://github.com/sqlfluff/sqlfluff/pull/4545) [@alanmcruickshank](https://github.com/alanmcruickshank) * LT09 and multiline select targets [#4529](https://github.com/sqlfluff/sqlfluff/pull/4529) [@alanmcruickshank](https://github.com/alanmcruickshank) * Remove Codecov from CI [#4535](https://github.com/sqlfluff/sqlfluff/pull/4535) [@alanmcruickshank](https://github.com/alanmcruickshank) * Bigquery hyphenated identifiers [#4530](https://github.com/sqlfluff/sqlfluff/pull/4530) [@alanmcruickshank](https://github.com/alanmcruickshank) * Attempt in-house coverage [#4532](https://github.com/sqlfluff/sqlfluff/pull/4532) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres datatype spacing issues [#4528](https://github.com/sqlfluff/sqlfluff/pull/4528) [@alanmcruickshank](https://github.com/alanmcruickshank) * Support new rules
config in toml files. [#4526](https://github.com/sqlfluff/sqlfluff/pull/4526) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #1146 (log propagation) [#4513](https://github.com/sqlfluff/sqlfluff/pull/4513) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Optional quotes for `create user` statement [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514) [@JavierMonton](https://github.com/JavierMonton) ## New Contributors * [@JavierMonton](https://github.com/JavierMonton) made their first contribution in [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514) * [@LauraRichter](https://github.com/LauraRichter) made their first contribution in [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518) ## [2.0.0] - 2023-03-13 ## Highlights Upgrading to 2.0 brings several important **breaking changes**: * All bundled rules have been recoded: moved from the generic `L00X` format into groups with related codes (e.g. an *aliasing* group with codes of the format `AL0X`), and also given *names* to allow much clearer referencing (e.g. `aliasing.column`). * [Configuring rules](https://docs.sqlfluff.com/en/latest/configuration.html#rule-configuration) now uses the rule *name* rather than the rule *code* to specify the section (a sketch of this change follows below). Any unrecognised references in config files (whether they are references which *do* match existing rules by code or alias, or whether they match no rules at all) will raise warnings at runtime. * A complete re-write of layout and whitespace handling rules (see [layout](https://docs.sqlfluff.com/en/latest/layout.html)), and with that a change in how layout is configured (see [configuring layout](https://docs.sqlfluff.com/en/latest/layout.html#configuring-layout)) and the combination of some rules that were previously separate. One example of this is that the legacy rules `L001`, `L005`, `L006`, `L008`, `L023`, `L024`, `L039`, `L048` & `L071` have been combined simply into [LT01](https://docs.sqlfluff.com/en/latest/rules.html#sqlfluff.rules.sphinx.Rule_LT01). * Dropping support for dbt versions before `1.1`. To help users upgrade to 2.0, we've put together a recommended process as part of our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html#upgrading-from-1-x-to-2-0). Beyond the breaking changes, this release brings *a load* of additional changes: * Introduces the `sqlfluff format` CLI command (a la `sqlfmt` or `black`) to auto-format sql files using a known set of _fairly safe_ rules. * Databricks as a distinct new dialect (rather than as previously an alias for `sparksql`). * Performance improvements in our parsing engine. * Dialect improvements to _almost all of them_. As a new major release, especially with significant rewrites of large portions of the codebase, we recommend using [compatible release](https://peps.python.org/pep-0440/#compatible-release) specifiers in your dependencies (i.e. `sqlfluff~=2.0.0`) so that you can automatically take advantage of any bugfix releases in the coming weeks. The alpha releases of 2.0.0 have been tested on a range of large projects, but we know that the range of use cases _"in the wild"_ is very diverse. If you do experience issues, please post them [on GitHub](https://github.com/sqlfluff/sqlfluff/issues/new/choose) in the usual manner.
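As referenced in the breaking changes above, a sketch of what the rule configuration change looks like in practice, using keyword capitalisation as an example (where `capitalisation.keywords` is the new name for the legacy `L010`):

```ini
# Before 2.0, configuration sections referenced rule *codes*:
# [sqlfluff:rules:L010]
# capitalisation_policy = upper

# From 2.0, sections reference rule *names* instead. Unrecognised
# section names will raise warnings at runtime.
[sqlfluff:rules:capitalisation.keywords]
capitalisation_policy = upper
```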
Finally thanks to everyone who has worked on this release, especially [@konnectr](https://github.com/konnectr), [@ValentinCrr](https://github.com/ValentinCrr), [@FabianScheidt](https://github.com/FabianScheidt), [@dflem97](https://github.com/dflem97), [@timcosta](https://github.com/timcosta), [@AidanHarveyNelson](https://github.com/AidanHarveyNelson), [@joar](https://github.com/joar), [@jmpfar](https://github.com/jmpfar), [@jared-rimmer](https://github.com/jared-rimmer), [@vesatoivonen](https://github.com/vesatoivonen), [@briankravec](https://github.com/briankravec), [@saintamh](https://github.com/saintamh), [@tdurieux](https://github.com/tdurieux), [@baa-ableton](https://github.com/baa-ableton), & [@WillAyd](https://github.com/WillAyd) who made their first contributions during the development of 2.0.0. Thanks for your contributions, and especially your patience in the slightly slower release of your efforts into the wild. 🙏🎉 ## What’s Changed * Revise templating and lexing of calls. [#4506](https://github.com/sqlfluff/sqlfluff/pull/4506) [@alanmcruickshank](https://github.com/alanmcruickshank) * Struct Access Spacing [#4512](https://github.com/sqlfluff/sqlfluff/pull/4512) [@alanmcruickshank](https://github.com/alanmcruickshank) * Array and Struct Spacing [#4511](https://github.com/sqlfluff/sqlfluff/pull/4511) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add a deprecation warning for removed config option. [#4509](https://github.com/sqlfluff/sqlfluff/pull/4509) [@alanmcruickshank](https://github.com/alanmcruickshank) * Bigquery spacing [#4508](https://github.com/sqlfluff/sqlfluff/pull/4508) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4433 (more untaken positive indents) [#4499](https://github.com/sqlfluff/sqlfluff/pull/4499) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix parse error on double parentheses [#4504](https://github.com/sqlfluff/sqlfluff/pull/4504) [@yoichi](https://github.com/yoichi) * 2.0.0 Migration Guide [#4498](https://github.com/sqlfluff/sqlfluff/pull/4498) [@alanmcruickshank](https://github.com/alanmcruickshank) * Handle missing aliases and align constraints better [#4493](https://github.com/sqlfluff/sqlfluff/pull/4493) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: Add support For Clause [#4501](https://github.com/sqlfluff/sqlfluff/pull/4501) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Allow Jinja rule to loop safely [#4495](https://github.com/sqlfluff/sqlfluff/pull/4495) [@alanmcruickshank](https://github.com/alanmcruickshank) * Trigger CI tests for merge groups [#4503](https://github.com/sqlfluff/sqlfluff/pull/4503) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update Readme and Contributing [#4502](https://github.com/sqlfluff/sqlfluff/pull/4502) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update layout docs [#4500](https://github.com/sqlfluff/sqlfluff/pull/4500) [@alanmcruickshank](https://github.com/alanmcruickshank) * Bug in operator precedence [#4497](https://github.com/sqlfluff/sqlfluff/pull/4497) [@alanmcruickshank](https://github.com/alanmcruickshank) * BigQuery: correct query syntax for single column `UNPIVOT` clauses [#4494](https://github.com/sqlfluff/sqlfluff/pull/4494) [@imrehg](https://github.com/imrehg) * Fix #4485 [#4491](https://github.com/sqlfluff/sqlfluff/pull/4491) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update reserved keywords in Athena language [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490) [@ValentinCrr](https://github.com/ValentinCrr) * Clickhouse
support all join types [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488) [@konnectr](https://github.com/konnectr) * Snowflake semi-structured spacing [#4487](https://github.com/sqlfluff/sqlfluff/pull/4487) [@alanmcruickshank](https://github.com/alanmcruickshank) * Prep version 2.0.0a6 [#4476](https://github.com/sqlfluff/sqlfluff/pull/4476) [@github-actions](https://github.com/github-actions) * Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank) * Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97) * Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank) * Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank) * Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank) * Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank) * ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur) * Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank) * Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta) * Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: EXEC string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36) * Teradata: Added SET QUERY_BAND statement [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97) * Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97) * Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97) * Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer) * Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank) * References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank) * Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36) * Pull dbt CI tests 
forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur) * Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97) * Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur) * Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank) * SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur) * Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank) * DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur) * SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur) * L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank) * Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt) * Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank) * Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank) * Prep version 2.0.0a5 [#4419](https://github.com/sqlfluff/sqlfluff/pull/4419) [@github-actions](https://github.com/github-actions) * Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank) * Handle untaken positive indents with taken negative pair. 
[#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh) * Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec) * BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi) * Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank) * Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur) * Rule names, aliases and more complicated selection. [#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero) * L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen) * TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer) * Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer) * Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak) * Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer) * Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar) * Resolve #4291: Comments forcing unexpected indents. 
[#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley) * Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak) * Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang) * TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley) * reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) [@imrehg](https://github.com/imrehg) * Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar) * Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add support 
for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak) * Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak) * Prep version 2.0.0a4 [#4322](https://github.com/sqlfluff/sqlfluff/pull/4322) [@github-actions](https://github.com/github-actions) * BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley) * Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley) * Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur) * MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi) * BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi) * toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh) * Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan) * Prep version 2.0.0a3 [#4290](https://github.com/sqlfluff/sqlfluff/pull/4290) [@github-actions](https://github.com/github-actions) * Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi) * Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart) * Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank) * Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur) * Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur) * Hive: Add Table constraints DISABLE
VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur) * SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur) * BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi) * Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur) * Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux) * Prep version 2.0.0a2 [#4247](https://github.com/sqlfluff/sqlfluff/pull/4247) [@github-actions](https://github.com/github-actions) * Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank) * Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley) * postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao) * Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres) * Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank) * diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart) * Prep version 2.0.0a1 [#4203](https://github.com/sqlfluff/sqlfluff/pull/4203) [@github-actions](https://github.com/github-actions) * Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd) * Fix #4215 [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank) * don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank) * show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank) * don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank) * MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi) * Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) 
[@barrywhart](https://github.com/barrywhart) * Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank) * Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart) * In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart) * Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres) * Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart) * Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank) * Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton) * Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart) * Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank) * Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@konnectr](https://github.com/konnectr) made their first contribution in [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488) * [@ValentinCrr](https://github.com/ValentinCrr) made their first contribution in [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490) * [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) * [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) * [@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) * [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) * [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) * [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) * [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) * [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) * [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) * [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) * [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) * [@baa-ableton](https://github.com/baa-ableton) made their first contribution in 
[#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) * [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) ## [2.0.0a6] - 2023-03-06 > NOTE: This is effectively a release candidate for testing purposes. > There are several new features here, and breaking changes to > configuration. We welcome testing feedback from the community, and > the intent is that following this release there will be no more > major breaking changes before the 2.0.0 release. ## Highlights This is the sixth alpha release for 2.0.0, and effectively the first release candidate. All the intended breaking changes for the upcoming release have now been made and only bugfixes and non-breaking feature changes should happen between this release and the full release. It contains: * A reorganisation of rules. All rules have been recoded, and can now be referred to by their name, code, alias or group. The legacy code for the rule is included as an alias for each rule to support some backward compatibility. * Configuration files (and inline configuration flags) should now use the **name** of the rule rather than the **code**. Any configuration files which reference rules using legacy codes (or which reference unknown rules) should now display warnings. * Introduces the `sqlfluff format` CLI command (a la `sqlfmt` or `black`) to auto-format sql files using a known set of _fairly safe_ rules. * Databricks as a distinct new dialect (rather than as previously an alias for `sparksql`). There are also numerous dialect improvements to ANSI, Athena, TSQL, Teradata, SQLite & MySQL. ## What’s Changed * Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank) * Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97) * Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank) * Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank) * Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank) * Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank) * ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur) * Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank) * Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta) * Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: EXEC
string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36) * Teradata: Added SET QUERY_BAND statement [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97) * Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97) * Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97) * Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer) * Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank) * References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank) * Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36) * Pull dbt CI tests forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur) * Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97) * Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur) * Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank) * SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur) * Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank) * DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur) * SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur) * L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank) * Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank) * Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt) * Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank) * Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) * [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) * 
[@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) ## [2.0.0a5] - 2023-02-24 > NOTE: This is an alpha release for testing purposes. There are several new features > here, and breaking changes to configuration. We welcome testing feedback from the > community, but know that this release may feel less polished than usual. ## Highlights This is the fifth alpha release for 2.0.0. It contains: * Significant rework to rule naming and categorisation. * Several performance improvements. * Improvements to many dialects. * Bugfixes to many of the issues raised in 2.0.0a4. There will likely be more changes to rule classification before a full release of 2.0.0, so anticipate that configuration files may change slightly again in future alpha releases. ## What’s Changed * Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank) * Handle untaken positive indents with taken negative pair. [#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh) * Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec) * BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi) * Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank) * Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur) * Rule names, aliases and more complicated selection.
[#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero) * L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen) * TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer) * Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer) * Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak) * Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer) * Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar) * Resolve #4291: Comments forcing unexpected indents. [#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley) * Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak) * Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang) * TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley) * reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) 
[@imrehg](https://github.com/imrehg) * Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar) * Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add support for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak) * Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak) ## New Contributors * [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) * [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) * [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) * [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) * [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) * [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) * [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) ## [2.0.0a4] - 2023-01-26 ## Highlights This is the fourth alpha release for 2.0.0. It contains a fix for the renamed dbt exceptions in dbt version 1.4.0, a fix for a major performance issue with the 2.0 dbt templater, and improvements to parse performance of large SQL files. 
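For anyone testing the reworked dbt templater in these alphas, a minimal `.sqlfluff` sketch for enabling it looks something like the following (the `dialect` value and the directory paths are placeholders for your own project, not values mandated by this release):

```ini
[sqlfluff]
templater = dbt
dialect = snowflake

[sqlfluff:templater:dbt]
# Both paths are illustrative; point these at your own dbt project.
project_dir = ./
profiles_dir = ./
```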
## What’s Changed

* BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley)
* Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley)
* Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur)
* MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi)
* BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi)
* toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh)
* Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan)

## [2.0.0a3] - 2023-01-16

> NOTE: This is an alpha release for testing purposes. There are several new features
> here, and breaking changes to configuration. We welcome testing feedback from the
> community, but know that this release may feel less polished than usual.

## Highlights

This is the third alpha release for 2.0.0. It contains primarily bugfixes from 2.0.0a2 to allow continued testing. In particular, some of the recent 2.0.0-related changes to the dbt templater have been reverted, primarily due to performance and other issues. If those issues can be resolved, the changes will be re-introduced. The long-term goal of this work is to ease maintenance of the templater by separating dbt integration concerns from SQLFluff concerns.

There will likely be more changes to rule classification before a full release of 2.0.0, so anticipate that configuration files may change slightly again in future alpha releases.
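Because this is a pre-release, most installers will not pick it up by default; testing it means opting in to the exact version. A minimal sketch using pip (with `my_model.sql` as a stand-in for your own file):

```console
$ pip install sqlfluff==2.0.0a3
$ sqlfluff lint my_model.sql
```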
## What’s Changed * Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi) * Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart) * Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank) * Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur) * Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur) * Hive: Add Table constraints DISABLE VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur) * Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur) * SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur) * BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi) * Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur) * Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux) ## New Contributors * [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) ## [2.0.0a2] - 2023-01-07 ## Highlights This is the second alpha release for 2.0.0. It contains primarily bugfixes from 2.0.0a1 to allow continued testing along with dialect improvements for Snowflake, Postgres and DB2. 
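Among the Postgres additions listed below is support for row-level locking clauses on `SELECT` statements. As an illustrative sketch (table and column names invented), a query like this should now parse under the `postgres` dialect:

```sql
SELECT balance
FROM accounts
WHERE account_id = 42
FOR UPDATE SKIP LOCKED;
```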
## What’s Changed

* Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley)
* postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao)
* Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres)
* Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank)
* diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart)

## [2.0.0a1] - 2022-12-28

## Highlights

This is the first alpha version for 2.0.0. It brings all of the changes to whitespace handling, including a total rewrite of indentation and long line logic (L003 & L016). That brings several breaking changes to the configuration of layout; see the [layout docs](https://docs.sqlfluff.com/en/stable/layout.html) for more details and familiarise yourself with the new [default configuration](https://docs.sqlfluff.com/en/stable/configuration.html#default-configuration).

In addition, for the dbt templater, this introduces a large re-write of the codebase, dropping support for dbt versions before 1.0.0. This leverages functionality from [dbt-osmosis](https://github.com/z3z1ma/dbt-osmosis) to reduce the amount of functionality supported directly by SQLFluff, and performance during testing of the new version has been reported as significantly faster.
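To give a flavour of the new layout configuration style, here is a short `.sqlfluff` sketch (the section names follow the new scheme; the values shown are illustrative, so check the linked default configuration for the authoritative defaults):

```ini
[sqlfluff:layout:type:comma]
spacing_before = touch
line_position = trailing

[sqlfluff:layout:type:binary_operator]
spacing_within = touch
line_position = leading
```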
## What’s Changed

* Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd)
* Indentation bug [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi)
* Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) [@barrywhart](https://github.com/barrywhart)
* Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart)
* In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart)
* Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres)
* Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart)
* Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton)
* Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart)
* Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@baa-ableton](https://github.com/baa-ableton) made their first contribution in [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182)
* [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198)

## [1.4.5] - 2022-12-18

## Highlights

This is a bugfix release, primarily for diff-quality. In addition, a new rule for spacing around parentheses is also included. This is also the final 1.x.x release. Following releases will be a series of alpha releases for 2.x.x.

If you are affected by any outstanding bugs or regressions from this release, consider either rolling _backward_ to a previous release without those issues, or failing _forward_ to an alpha release for 2.x.x (or a full release if that's out). Note that 2.x.x will bring a selection of breaking changes to config file structure, rule categorisation and dbt support.
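The new spacing rule mentioned above is easiest to see by example. A hypothetical snippet (the exact behaviour is defined in [#4131](https://github.com/sqlfluff/sqlfluff/pull/4131), listed below):

```sql
-- Likely flagged: padding inside the parentheses.
SELECT order_id FROM orders WHERE status IN ( 'open', 'held' );

-- Preferred: contents touch the parentheses.
SELECT order_id FROM orders WHERE status IN ('open', 'held');
```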
## What’s Changed * Add rule for space around parenthesis [#4131](https://github.com/sqlfluff/sqlfluff/pull/4131) [@NelsonTorres](https://github.com/NelsonTorres) * diff-quality plugin: Print invalid JSON on parse failure [#4176](https://github.com/sqlfluff/sqlfluff/pull/4176) [@barrywhart](https://github.com/barrywhart) * Ensure diff-quality runs the correct SQLFluff [#4175](https://github.com/sqlfluff/sqlfluff/pull/4175) [@barrywhart](https://github.com/barrywhart) ## New Contributors * [@NelsonTorres](https://github.com/NelsonTorres) made their first contribution in [#4131](https://github.com/sqlfluff/sqlfluff/pull/4131) ## [1.4.4] - 2022-12-14 ## Highlights Bug fix for 1.4.3 which was incorrectly flagging L006 for concat operators (`||`) and other two-symbol binary operators. ## What’s Changed * Recognise || as an operator to avoid rule L006 flagging it [#4168](https://github.com/sqlfluff/sqlfluff/pull/4168) [@tunetheweb](https://github.com/tunetheweb) * :bug: Check verbosity level of pytest run before running certain tests [#4167](https://github.com/sqlfluff/sqlfluff/pull/4167) [@pwildenhain](https://github.com/pwildenhain) * [snowflake] Add support for snowflake select * exclude/replace syntax [#4160](https://github.com/sqlfluff/sqlfluff/pull/4160) [@moreaupascal56](https://github.com/moreaupascal56) ## [1.4.3] - 2022-12-13 ## Highlights * Rewrote `diff-quality` plugin to run SQLFluff as a subprocess. More reliable, easier to switch between `diff-quality` and running `sqlfluff lint` directly. * New rule L067 enforces consistent syntax for type casts. * New rule L068 enforces a consistent number of columns in set queries (e.g. UNION). * Initial support for Materialize dialect. ## What's Changed * Add flyway variables support via placeholder templater [#4026](https://github.com/sqlfluff/sqlfluff/pull/4026) [@srjonemed](https://github.com/srjonemed) * Fix Spark comparison parsing [#4066](https://github.com/sqlfluff/sqlfluff/pull/4066) [@ms32035](https://github.com/ms32035) * Add errors and fails to pytest summary [#4076](https://github.com/sqlfluff/sqlfluff/pull/4076) [@alanmcruickshank](https://github.com/alanmcruickshank) * Storage reference segment [#4057](https://github.com/sqlfluff/sqlfluff/pull/4057) [@YilangHe](https://github.com/YilangHe) * New rule L069: Consistent syntax for sql type casting [#3747](https://github.com/sqlfluff/sqlfluff/pull/3747) [@bolajiwahab](https://github.com/bolajiwahab) * Postgres: Views and named notations [#4073](https://github.com/sqlfluff/sqlfluff/pull/4073) [@davetapley](https://github.com/davetapley) * Switch reflow buffer from LintFix to LintResult [#4083](https://github.com/sqlfluff/sqlfluff/pull/4083) [@alanmcruickshank](https://github.com/alanmcruickshank) * Support parallel linting when many individual files specified [#4084](https://github.com/sqlfluff/sqlfluff/pull/4084) [@barrywhart](https://github.com/barrywhart) * Rule: check number of columns in set operations match [#4028](https://github.com/sqlfluff/sqlfluff/pull/4028) [@erevear](https://github.com/erevear) * Fix syntax in indentation sample code docs [#4087](https://github.com/sqlfluff/sqlfluff/pull/4087) [@PBalsdon](https://github.com/PBalsdon) * Remove "mystery" binary file added in PR #2923 [#4088](https://github.com/sqlfluff/sqlfluff/pull/4088) [@barrywhart](https://github.com/barrywhart) * Fix mypy issue with regex [#4097](https://github.com/sqlfluff/sqlfluff/pull/4097) [@barrywhart](https://github.com/barrywhart) * Enable variable reference names to have leading underscore for 
snowflake dialect [#4098](https://github.com/sqlfluff/sqlfluff/pull/4098) [@gavin-tsang](https://github.com/gavin-tsang) * Fix flake8 issue with segment_predicates.py [#4101](https://github.com/sqlfluff/sqlfluff/pull/4101) [@barrywhart](https://github.com/barrywhart) * Fix bug in example rule plugin [#4103](https://github.com/sqlfluff/sqlfluff/pull/4103) [@barrywhart](https://github.com/barrywhart) * Fix bug where L034 should ignore INSERT or "CREATE TABLE AS SELECT" with CTE [#4108](https://github.com/sqlfluff/sqlfluff/pull/4108) [@barrywhart](https://github.com/barrywhart) * Postgres: Alter type rename value [#4100](https://github.com/sqlfluff/sqlfluff/pull/4100) [@greg-finley](https://github.com/greg-finley) * Bug fix: dbt templater ignores .sqlfluff file encoding on Windows [#4109](https://github.com/sqlfluff/sqlfluff/pull/4109) [@barrywhart](https://github.com/barrywhart) * Add initial Materialize dialect [#4112](https://github.com/sqlfluff/sqlfluff/pull/4112) [@bobbyiliev](https://github.com/bobbyiliev) * L015: Handle COUNT(DISTINCT(col)) [#4110](https://github.com/sqlfluff/sqlfluff/pull/4110) [@barrywhart](https://github.com/barrywhart) * [Snowflake] format type options extensions for copy_into_location [#4129](https://github.com/sqlfluff/sqlfluff/pull/4129) [@YilangHe](https://github.com/YilangHe) * Fix tox arguments [#4144](https://github.com/sqlfluff/sqlfluff/pull/4144) [@greg-finley](https://github.com/greg-finley) * [DB2] Fix parsing of string identifiers [#4134](https://github.com/sqlfluff/sqlfluff/pull/4134) [@borchero](https://github.com/borchero) * BigQuery: Allow double quoted literal in export_option_list [#4126](https://github.com/sqlfluff/sqlfluff/pull/4126) [@yoichi](https://github.com/yoichi) * Only upload 3 sets of test results to codecov (possible workaround for hanging builds) [#4147](https://github.com/sqlfluff/sqlfluff/pull/4147) [@barrywhart](https://github.com/barrywhart) * SparkSQL: ILIKE [#4138](https://github.com/sqlfluff/sqlfluff/pull/4138) [@greg-finley](https://github.com/greg-finley) * SparkSQL: Mark `AS` as optional keyword for CTE & CTS [#4127](https://github.com/sqlfluff/sqlfluff/pull/4127) [@ulixius9](https://github.com/ulixius9) * Fix passenv to work with tox 4 [#4154](https://github.com/sqlfluff/sqlfluff/pull/4154) [@tunetheweb](https://github.com/tunetheweb) * Allow deprecated --disable_progress_bar flag for fix command [#4151](https://github.com/sqlfluff/sqlfluff/pull/4151) [@pdebelak](https://github.com/pdebelak) * Implement diff_quality_plugin using command-line rather than Python [#4148](https://github.com/sqlfluff/sqlfluff/pull/4148) [@barrywhart](https://github.com/barrywhart) * L037: insert ASC just after column_reference [#4149](https://github.com/sqlfluff/sqlfluff/pull/4149) [@yoichi](https://github.com/yoichi) ## New Contributors * [@srjonemed](https://github.com/srjonemed) made their first contribution in [#4026](https://github.com/sqlfluff/sqlfluff/pull/4026) * [@ms32035](https://github.com/ms32035) made their first contribution in [#4066](https://github.com/sqlfluff/sqlfluff/pull/4066) * [@davetapley](https://github.com/davetapley) made their first contribution in [#4073](https://github.com/sqlfluff/sqlfluff/pull/4073) * [@PBalsdon](https://github.com/PBalsdon) made their first contribution in [#4087](https://github.com/sqlfluff/sqlfluff/pull/4087) * [@gavin-tsang](https://github.com/gavin-tsang) made their first contribution in [#4098](https://github.com/sqlfluff/sqlfluff/pull/4098) * [@bobbyiliev](https://github.com/bobbyiliev) made 
their first contribution in [#4112](https://github.com/sqlfluff/sqlfluff/pull/4112) * [@ulixius9](https://github.com/ulixius9) made their first contribution in [#4127](https://github.com/sqlfluff/sqlfluff/pull/4127) ## [1.4.2] - 2022-11-13 ## Highlights This release is less about internals and much more about some quality of life improvements and dialect changes. The most notable are: - The introduction of a `sqlfluff render` command to preview the results of templated sql. - Linting errors within templated loops should now only appear once in the linting output. - Indentation around jinja `{% set %}` statements should now be more consistent. - Linting errors around unparsable code are now more appropriately handled (with more to come soon on that front). - Error messages when specified files aren't found are now more specific. We've also got dialect improvements for Redshift, SOQL & SparkSQL. ## What’s Changed * Fix type error in `get_rules` hook of plugin example [#4060](https://github.com/sqlfluff/sqlfluff/pull/4060) [@Samyak2](https://github.com/Samyak2) * L003: Add missing "pragma: no cover" [#4058](https://github.com/sqlfluff/sqlfluff/pull/4058) [@barrywhart](https://github.com/barrywhart) * Fix bug in sparksql SELECT statement termination at UNION #4050 [#4052](https://github.com/sqlfluff/sqlfluff/pull/4052) [@anna-azizian](https://github.com/anna-azizian) * Deduplicate violations in the source space [#4041](https://github.com/sqlfluff/sqlfluff/pull/4041) [@alanmcruickshank](https://github.com/alanmcruickshank) * Use "docker compose", not "docker-compose" [#4055](https://github.com/sqlfluff/sqlfluff/pull/4055) [@barrywhart](https://github.com/barrywhart) * Allow warnings for specific rules [#4053](https://github.com/sqlfluff/sqlfluff/pull/4053) [@alanmcruickshank](https://github.com/alanmcruickshank) * Better file not found error #1023 [#4051](https://github.com/sqlfluff/sqlfluff/pull/4051) [@alanmcruickshank](https://github.com/alanmcruickshank) * Filter out issues in unparsable sections [#4032](https://github.com/sqlfluff/sqlfluff/pull/4032) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: ADD and DROP without COLUMN [#4049](https://github.com/sqlfluff/sqlfluff/pull/4049) [@greg-finley](https://github.com/greg-finley) * Make render command [#4043](https://github.com/sqlfluff/sqlfluff/pull/4043) [@alanmcruickshank](https://github.com/alanmcruickshank) * Bump after_n_builds to 10 [#4046](https://github.com/sqlfluff/sqlfluff/pull/4046) [@greg-finley](https://github.com/greg-finley) * Redshift: allows for parenthesis around FROM content [#3962](https://github.com/sqlfluff/sqlfluff/pull/3962) [@adam-tokarski](https://github.com/adam-tokarski) * Update CI to use Python 3.11 [#4038](https://github.com/sqlfluff/sqlfluff/pull/4038) [@greg-finley](https://github.com/greg-finley) * Classify self contained set statements as templated [#4034](https://github.com/sqlfluff/sqlfluff/pull/4034) [@alanmcruickshank](https://github.com/alanmcruickshank) * Date and Datetime literals in SOQL [#4037](https://github.com/sqlfluff/sqlfluff/pull/4037) [@alanmcruickshank](https://github.com/alanmcruickshank) * mypy edits for 0.990 [#4035](https://github.com/sqlfluff/sqlfluff/pull/4035) [@alanmcruickshank](https://github.com/alanmcruickshank) * sparksql: support for create/remove widget clause [#4021](https://github.com/sqlfluff/sqlfluff/pull/4021) [@Coola4kov](https://github.com/Coola4kov) * Redshift CREATE EXTERNAL FUNCTION statement 
[#4011](https://github.com/sqlfluff/sqlfluff/pull/4011) [@rpr-ableton](https://github.com/rpr-ableton)
* Update Redshift bare functions [#4012](https://github.com/sqlfluff/sqlfluff/pull/4012) [@rpr-ableton](https://github.com/rpr-ableton)

## New Contributors

* [@Coola4kov](https://github.com/Coola4kov) made their first contribution in [#4021](https://github.com/sqlfluff/sqlfluff/pull/4021)
* [@anna-azizian](https://github.com/anna-azizian) made their first contribution in [#4052](https://github.com/sqlfluff/sqlfluff/pull/4052)

## [1.4.1] - 2022-10-31

## Highlights

This is a fix to the configuration migration from 1.4.0. In that release, the configuration of leading/trailing operators would be migrated the wrong way around and precedence between new and old configuration values behaved unexpectedly.

## What’s Changed

* Config precedence [#4007](https://github.com/sqlfluff/sqlfluff/pull/4007) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Redshift CREATE/ATTACH/DETACH/DROP RLS POLICY statements [#4004](https://github.com/sqlfluff/sqlfluff/pull/4004) [@rpr-ableton](https://github.com/rpr-ableton)
* Redshift: Add support for APPROXIMATE functions [#3997](https://github.com/sqlfluff/sqlfluff/pull/3997) [@rpr-ableton](https://github.com/rpr-ableton)
* hotfix to config migration [#4005](https://github.com/sqlfluff/sqlfluff/pull/4005) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [1.4.0] - 2022-10-31

## Highlights

This release brings several internal changes, and acts as a prelude to 2.0.0 which will be released fairly soon. In particular, the following config values have changed:
- `sqlfluff:rules:L007:operator_new_lines` has been changed to `sqlfluff:layout:type:binary_operator:line_position`.
- `sqlfluff:rules:comma_style` and `sqlfluff:rules:L019:comma_style` have both been consolidated into `sqlfluff:layout:type:comma:line_position`.

If any of these values have been set in your config, they will be automatically translated to the new values at runtime, and a warning will be shown. To silence the warning, update your config file to the new values, as sketched below. For more details on configuring layout (including some changes yet to come in future versions) see [the docs](https://docs.sqlfluff.com/en/latest/layout.html#configuring-layout).

These changes are driven by underlying centralisation in the routines which control layout. While for this release, no breaking changes are expected - you may find slight differences in how SQLFluff handles edge cases. We believe in the majority of cases these are _more_ consistent, but if you find any which are problematic then do post an issue on GitHub as usual.

Other highlights from this release:
- Better dbt support for graph nodes and avoiding dependency conflicts.
- Numerous dialect improvements to T-SQL, MySQL, SparkSQL, SQLite, Athena, Snowflake, Hive, Postgres & Databricks.

There have also been first-time contributions from **10 new contributors**!
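As a sketch of that migration (using the renames listed above; the value mapping shown is our reading of the new keys, so verify against the linked layout docs):

```ini
# Old 1.x style configuration:
[sqlfluff:rules]
comma_style = trailing

[sqlfluff:rules:L007]
operator_new_lines = after

# Equivalent 1.4.0+ layout configuration:
[sqlfluff:layout:type:comma]
line_position = trailing

[sqlfluff:layout:type:binary_operator]
line_position = trailing
```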
🎉🎉🎉 ## What’s Changed * Snowflake partition nonreserved keyword [#3972](https://github.com/sqlfluff/sqlfluff/pull/3972) [@YilangHe](https://github.com/YilangHe) * Hive: Add support for EXCHANGE PARTITION in ALTER TABLE [#3991](https://github.com/sqlfluff/sqlfluff/pull/3991) [@nahuelverdugo](https://github.com/nahuelverdugo) * Resolve parse error on multiple bracketed statements [#3994](https://github.com/sqlfluff/sqlfluff/pull/3994) [@yoichi](https://github.com/yoichi) * Enable parsing of CLONE keyword in bigquery dialect [#3984](https://github.com/sqlfluff/sqlfluff/pull/3984) [@realLyans](https://github.com/realLyans) * BigQuery: allow nesting of SetExpressionSegment [#3990](https://github.com/sqlfluff/sqlfluff/pull/3990) [@yoichi](https://github.com/yoichi) * feat(clickhouse): Support non-standard CREATE TABLE statement [#3986](https://github.com/sqlfluff/sqlfluff/pull/3986) [@tomasfarias](https://github.com/tomasfarias) * Fix Windows CI check [#3992](https://github.com/sqlfluff/sqlfluff/pull/3992) [@greg-finley](https://github.com/greg-finley) * Snowflake tag reference segment [#3985](https://github.com/sqlfluff/sqlfluff/pull/3985) [@WittierDinosaur](https://github.com/WittierDinosaur) * Fix Jinja templater issue where undefined callable threw an exception [#3982](https://github.com/sqlfluff/sqlfluff/pull/3982) [@barrywhart](https://github.com/barrywhart) * Reflow Documentation V1 [#3970](https://github.com/sqlfluff/sqlfluff/pull/3970) [@alanmcruickshank](https://github.com/alanmcruickshank) * Allow lambda argument columns to be unqualified [#3971](https://github.com/sqlfluff/sqlfluff/pull/3971) [@olagjo](https://github.com/olagjo) * document inline configuration [#3981](https://github.com/sqlfluff/sqlfluff/pull/3981) [@alanmcruickshank](https://github.com/alanmcruickshank) * [BUGFIX] Changing cwd temporarily on manifest load as dbt is not using project_dir to read/write target folder [#3979](https://github.com/sqlfluff/sqlfluff/pull/3979) [@barrywhart](https://github.com/barrywhart) * Fix type annotation of user_rules in `Linter` [#3977](https://github.com/sqlfluff/sqlfluff/pull/3977) [@Samyak2](https://github.com/Samyak2) * Unpin `markupsafe` [#3967](https://github.com/sqlfluff/sqlfluff/pull/3967) [@judahrand](https://github.com/judahrand) * Snowflake frame clause variables [#3969](https://github.com/sqlfluff/sqlfluff/pull/3969) [@WittierDinosaur](https://github.com/WittierDinosaur) * SparkSQL: added support for : (colon sign) operator (Databricks SQL) [#3956](https://github.com/sqlfluff/sqlfluff/pull/3956) [@karabulute](https://github.com/karabulute) * Athena: Add support for using underscore aliases [#3965](https://github.com/sqlfluff/sqlfluff/pull/3965) [@hectcastro](https://github.com/hectcastro) * Snowflake: ALTER TABLE constraint actions [#3959](https://github.com/sqlfluff/sqlfluff/pull/3959) [@erevear](https://github.com/erevear) * MySQL: Support REPLACE statement [#3964](https://github.com/sqlfluff/sqlfluff/pull/3964) [@yoichi](https://github.com/yoichi) * TSQL: Add support for UPDATE STATISTICS option FULLSCAN [#3950](https://github.com/sqlfluff/sqlfluff/pull/3950) [@hectcastro](https://github.com/hectcastro) * ANSI: fixed typos in docstrings and comments [#3953](https://github.com/sqlfluff/sqlfluff/pull/3953) [@karabulute](https://github.com/karabulute) * Postgres: ALTER PROCEDURE [#3949](https://github.com/sqlfluff/sqlfluff/pull/3949) [@krokofant](https://github.com/krokofant) * T-SQL: Allow arbitrary expressions in PARTITION BY clause 
[#3939](https://github.com/sqlfluff/sqlfluff/pull/3939) [@borchero](https://github.com/borchero) * Enable dumping of performance information to csv. [#3937](https://github.com/sqlfluff/sqlfluff/pull/3937) [@alanmcruickshank](https://github.com/alanmcruickshank) * Consolidate comma style configs [#3945](https://github.com/sqlfluff/sqlfluff/pull/3945) [@alanmcruickshank](https://github.com/alanmcruickshank) * Adding missing KeywordSegments for different file types in Athena dialect [#3898](https://github.com/sqlfluff/sqlfluff/pull/3898) [@CommonCrisis](https://github.com/CommonCrisis) * Add templated block uuids [#3936](https://github.com/sqlfluff/sqlfluff/pull/3936) [@alanmcruickshank](https://github.com/alanmcruickshank) * Load the full dbt manifest [#3926](https://github.com/sqlfluff/sqlfluff/pull/3926) [@davajm](https://github.com/davajm) * MySQL: Support optional "IF NOT EXISTS" with CREATE TRIGGER [#3943](https://github.com/sqlfluff/sqlfluff/pull/3943) [@yoichi](https://github.com/yoichi) * T-SQL: Allow to parse SYNONYM statements [#3941](https://github.com/sqlfluff/sqlfluff/pull/3941) [@borchero](https://github.com/borchero) * Hive: Add support for LATERAL VIEW clause [#3935](https://github.com/sqlfluff/sqlfluff/pull/3935) [@hectcastro](https://github.com/hectcastro) * Fix crash in L042 on "UNION" or other "set" queries [#3931](https://github.com/sqlfluff/sqlfluff/pull/3931) [@barrywhart](https://github.com/barrywhart) * Refactor Lexer: Split apart elements_to_segments and refine placement of meta segments. [#3925](https://github.com/sqlfluff/sqlfluff/pull/3925) [@alanmcruickshank](https://github.com/alanmcruickshank) * Add INSERT INTO <> DEFAULT VALUES to ANSI SQL and T-SQL [#3934](https://github.com/sqlfluff/sqlfluff/pull/3934) [@borchero](https://github.com/borchero) * Break apart reflow classes and extract methods [#3919](https://github.com/sqlfluff/sqlfluff/pull/3919) [@alanmcruickshank](https://github.com/alanmcruickshank) * T-SQL: Fix indendentation of OUTER APPLY [#3932](https://github.com/sqlfluff/sqlfluff/pull/3932) [@borchero](https://github.com/borchero) * MySQL: Fix create trigger [#3928](https://github.com/sqlfluff/sqlfluff/pull/3928) [@adam-tokarski](https://github.com/adam-tokarski) * SparkSQL: Fixed bug with `QUALIFY` usage without `WHERE` clause (applies also for Databricks dialect) [#3930](https://github.com/sqlfluff/sqlfluff/pull/3930) [@karabulute](https://github.com/karabulute) * T-SQL: Allow specifying join hints [#3921](https://github.com/sqlfluff/sqlfluff/pull/3921) [@borchero](https://github.com/borchero) * SQLite: Add support for table-level CHECK constraint [#3923](https://github.com/sqlfluff/sqlfluff/pull/3923) [@hectcastro](https://github.com/hectcastro) * sparksql: added * EXCEPT for SELECT clause [#3922](https://github.com/sqlfluff/sqlfluff/pull/3922) [@adam-tokarski](https://github.com/adam-tokarski) * Map old configs to new configs [#3915](https://github.com/sqlfluff/sqlfluff/pull/3915) [@alanmcruickshank](https://github.com/alanmcruickshank) * [issue_3794] allow to use 'usage' as identifier for postgres [#3914](https://github.com/sqlfluff/sqlfluff/pull/3914) [@adam-tokarski](https://github.com/adam-tokarski) * `DATABRICKS`: Add Support for Delta Live Tables (DLT) Syntax [#3899](https://github.com/sqlfluff/sqlfluff/pull/3899) [@R7L208](https://github.com/R7L208) * Postgres Revoke fix [#3912](https://github.com/sqlfluff/sqlfluff/pull/3912) [@greg-finley](https://github.com/greg-finley) * fix: Click output to stderr on errors 
[#3902](https://github.com/sqlfluff/sqlfluff/pull/3902) [@KingMichaelPark](https://github.com/KingMichaelPark) * fix issue with empty enum for postgres [#3910](https://github.com/sqlfluff/sqlfluff/pull/3910) [@adam-tokarski](https://github.com/adam-tokarski) * feat: Optional numerics for postgres arrays [#3903](https://github.com/sqlfluff/sqlfluff/pull/3903) [@KingMichaelPark](https://github.com/KingMichaelPark) * fix(test): Return ParseExample namedtuple in get_parse_fixtures [#3911](https://github.com/sqlfluff/sqlfluff/pull/3911) [@tomasfarias](https://github.com/tomasfarias) * Fix typos [#3901](https://github.com/sqlfluff/sqlfluff/pull/3901) [@kianmeng](https://github.com/kianmeng) * provide custom DeprecatedOption [#3904](https://github.com/sqlfluff/sqlfluff/pull/3904) [@adam-tokarski](https://github.com/adam-tokarski) * fix(redshift): Allow keywords in qualified references [#3905](https://github.com/sqlfluff/sqlfluff/pull/3905) [@tomasfarias](https://github.com/tomasfarias) * Reflow centralisation 2: Rebreak (operators & commas) [#3847](https://github.com/sqlfluff/sqlfluff/pull/3847) [@alanmcruickshank](https://github.com/alanmcruickshank) * Bring L008 into reflow work [#3908](https://github.com/sqlfluff/sqlfluff/pull/3908) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: Create network policy ip lists [#3888](https://github.com/sqlfluff/sqlfluff/pull/3888) [@greg-finley](https://github.com/greg-finley) * MySQL: Implement (key_part, ...) in index definitions [#3887](https://github.com/sqlfluff/sqlfluff/pull/3887) [@yoichi](https://github.com/yoichi) * Reflow centralisation 1: Scope + Respace [#3824](https://github.com/sqlfluff/sqlfluff/pull/3824) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update github badge and add docker badge [#3884](https://github.com/sqlfluff/sqlfluff/pull/3884) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@kianmeng](https://github.com/kianmeng) made their first contribution in [#3901](https://github.com/sqlfluff/sqlfluff/pull/3901) * [@KingMichaelPark](https://github.com/KingMichaelPark) made their first contribution in [#3903](https://github.com/sqlfluff/sqlfluff/pull/3903) * [@hectcastro](https://github.com/hectcastro) made their first contribution in [#3923](https://github.com/sqlfluff/sqlfluff/pull/3923) * [@karabulute](https://github.com/karabulute) made their first contribution in [#3930](https://github.com/sqlfluff/sqlfluff/pull/3930) * [@davajm](https://github.com/davajm) made their first contribution in [#3926](https://github.com/sqlfluff/sqlfluff/pull/3926) * [@CommonCrisis](https://github.com/CommonCrisis) made their first contribution in [#3898](https://github.com/sqlfluff/sqlfluff/pull/3898) * [@krokofant](https://github.com/krokofant) made their first contribution in [#3949](https://github.com/sqlfluff/sqlfluff/pull/3949) * [@Samyak2](https://github.com/Samyak2) made their first contribution in [#3977](https://github.com/sqlfluff/sqlfluff/pull/3977) * [@realLyans](https://github.com/realLyans) made their first contribution in [#3984](https://github.com/sqlfluff/sqlfluff/pull/3984) * [@nahuelverdugo](https://github.com/nahuelverdugo) made their first contribution in [#3991](https://github.com/sqlfluff/sqlfluff/pull/3991) * [@YilangHe](https://github.com/YilangHe) made their first contribution in [#3972](https://github.com/sqlfluff/sqlfluff/pull/3972) ## [1.3.2] - 2022-09-27 ## Highlights This is primarily a release for dialect fixes and improvements with additions and changes to 
TSQL, Snowflake, MySQL & Redshift. The other changes of note are: 1. Support for warnings when users set old removed config values. This supports future change work by allowing a mechanism to warn if they are used. 2. Improvements to the fix routines for L014 and L042 to handle some trickier cases. ## What’s Changed * Add CreateUserSegment for Snowflake dialect [#3880](https://github.com/sqlfluff/sqlfluff/pull/3880) [@Gal40n04ek](https://github.com/Gal40n04ek) * raw_segments_with_ancestors [#3878](https://github.com/sqlfluff/sqlfluff/pull/3878) [@alanmcruickshank](https://github.com/alanmcruickshank) * Adjust TSQL Operators [#3877](https://github.com/sqlfluff/sqlfluff/pull/3877) [@alanmcruickshank](https://github.com/alanmcruickshank) * Refactor path_to [#3875](https://github.com/sqlfluff/sqlfluff/pull/3875) [@alanmcruickshank](https://github.com/alanmcruickshank) * Support Removed warning on configs [#3874](https://github.com/sqlfluff/sqlfluff/pull/3874) [@alanmcruickshank](https://github.com/alanmcruickshank) * MySQL: Support column-path operator for JSON type [#3864](https://github.com/sqlfluff/sqlfluff/pull/3864) [@yoichi](https://github.com/yoichi) * T-SQL: ALTER FUNCTION/PROCEDURE/VIEW parsing [#3867](https://github.com/sqlfluff/sqlfluff/pull/3867) [@differgroup](https://github.com/differgroup) * MySQL: Support hexadecimal literals and bit value literals [#3869](https://github.com/sqlfluff/sqlfluff/pull/3869) [@yoichi](https://github.com/yoichi) * MySQL: Treat double quotes the same as single quotes [#3871](https://github.com/sqlfluff/sqlfluff/pull/3871) [@yoichi](https://github.com/yoichi) * Snowflake: COMMIT WORK [#3862](https://github.com/sqlfluff/sqlfluff/pull/3862) [@rglbr](https://github.com/rglbr) * Snowflake: AlterShareStatementSegment and CreateDatabaseFromShareStatementSegment [#3858](https://github.com/sqlfluff/sqlfluff/pull/3858) [@moreaupascal56](https://github.com/moreaupascal56) * MySQL: Add CREATE/ALTER VIEW [#3859](https://github.com/sqlfluff/sqlfluff/pull/3859) [@wfelipew](https://github.com/wfelipew) * Redshift: CREATE TABLE LIKE enhancements [#3853](https://github.com/sqlfluff/sqlfluff/pull/3853) [@greg-finley](https://github.com/greg-finley) * L014 leading underscore capitalization inference fix [#3841](https://github.com/sqlfluff/sqlfluff/pull/3841) [@j-svensmark](https://github.com/j-svensmark) * MySQL: Add extended DROP TRIGGER functionality [#3846](https://github.com/sqlfluff/sqlfluff/pull/3846) [@yoichi](https://github.com/yoichi) * Allow standalone aliases in L027 [#3848](https://github.com/sqlfluff/sqlfluff/pull/3848) [@olagjo](https://github.com/olagjo) * L042: Enable autofix for some tricky cases [#3700](https://github.com/sqlfluff/sqlfluff/pull/3700) [@barrywhart](https://github.com/barrywhart) * Snowflake: CREATE FUNCTION IF NOT EXISTS [#3845](https://github.com/sqlfluff/sqlfluff/pull/3845) [@greg-finley](https://github.com/greg-finley) * ignore functions with more than one element ... 
[#3792](https://github.com/sqlfluff/sqlfluff/pull/3792) [@fmms](https://github.com/fmms) * MySQL: support remaining constraint parts of CREATE/ALTER TABLE [#3827](https://github.com/sqlfluff/sqlfluff/pull/3827) [@yoichi](https://github.com/yoichi) ## New Contributors * [@olagjo](https://github.com/olagjo) made their first contribution in [#3848](https://github.com/sqlfluff/sqlfluff/pull/3848) * [@j-svensmark](https://github.com/j-svensmark) made their first contribution in [#3841](https://github.com/sqlfluff/sqlfluff/pull/3841) * [@wfelipew](https://github.com/wfelipew) made their first contribution in [#3859](https://github.com/sqlfluff/sqlfluff/pull/3859) * [@moreaupascal56](https://github.com/moreaupascal56) made their first contribution in [#3858](https://github.com/sqlfluff/sqlfluff/pull/3858) * [@rglbr](https://github.com/rglbr) made their first contribution in [#3862](https://github.com/sqlfluff/sqlfluff/pull/3862) * [@differgroup](https://github.com/differgroup) made their first contribution in [#3867](https://github.com/sqlfluff/sqlfluff/pull/3867) ## [1.3.1] - 2022-09-09 ## Highlights * More refactoring of parse structures in preparation for upcoming refactor of formatting/whitespace rules. * Fixes some bugs in L003 (indentation). * New config flag `large_file_skip_byte_limit` which applies **prior to** loading the file. ## What’s Changed * Snowflake: Fix syntax errors in tests [#3834](https://github.com/sqlfluff/sqlfluff/pull/3834) [@JamesRTaylor](https://github.com/JamesRTaylor) * Add support for additional magic methods on DummyUndefined [#3835](https://github.com/sqlfluff/sqlfluff/pull/3835) [@barrywhart](https://github.com/barrywhart) * MySQL: support variable assignments by assignment operator := [#3829](https://github.com/sqlfluff/sqlfluff/pull/3829) [@yoichi](https://github.com/yoichi) * MYSQL: improve lexing for single-quoted strings [#3831](https://github.com/sqlfluff/sqlfluff/pull/3831) [@mdahlman](https://github.com/mdahlman) * MySQL: More support for index definition in CREATE TABLE [#3826](https://github.com/sqlfluff/sqlfluff/pull/3826) [@yoichi](https://github.com/yoichi) * Typed matching and ripping out the rest of .name [#3819](https://github.com/sqlfluff/sqlfluff/pull/3819) [@alanmcruickshank](https://github.com/alanmcruickshank) * sparksql dialect to support lambda expressions (->) [#3821](https://github.com/sqlfluff/sqlfluff/pull/3821) [@juhoautio](https://github.com/juhoautio) * Fixed broken main branch [#3825](https://github.com/sqlfluff/sqlfluff/pull/3825) [@alanmcruickshank](https://github.com/alanmcruickshank) * Enable file name logging for multi-files w/ --show-lint-violations flag [#3788](https://github.com/sqlfluff/sqlfluff/pull/3788) [@thechopkins](https://github.com/thechopkins) * Take database and schema out of Snowflake reserved keywords list [#3818](https://github.com/sqlfluff/sqlfluff/pull/3818) [@NiallRees](https://github.com/NiallRees) * Remove a chunk of name references [#3814](https://github.com/sqlfluff/sqlfluff/pull/3814) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fix typo in Snowflake dialect [#3813](https://github.com/sqlfluff/sqlfluff/pull/3813) [@Gal40n04ek](https://github.com/Gal40n04ek) * Allow the use of libraries in macro definitions [#3803](https://github.com/sqlfluff/sqlfluff/pull/3803) [@bjgbeelen](https://github.com/bjgbeelen) * Indentation fixes and rule logging improvements [#3808](https://github.com/sqlfluff/sqlfluff/pull/3808) [@alanmcruickshank](https://github.com/alanmcruickshank) * Fixes a recursion error 
in JinjaTemplater handling of undefined values [#3809](https://github.com/sqlfluff/sqlfluff/pull/3809) [@barrywhart](https://github.com/barrywhart) * Snowflake: extend `GRANT` syntax [#3807](https://github.com/sqlfluff/sqlfluff/pull/3807) [@Gal40n04ek](https://github.com/Gal40n04ek) * add warehouse_type in snowflake dialect [#3805](https://github.com/sqlfluff/sqlfluff/pull/3805) [@Gal40n04ek](https://github.com/Gal40n04ek) * add Create Notification Integration syntax [#3801](https://github.com/sqlfluff/sqlfluff/pull/3801) [@Gal40n04ek](https://github.com/Gal40n04ek) * T-SQL: fix parsing PARTITION BY NULL in window function [#3790](https://github.com/sqlfluff/sqlfluff/pull/3790) [@fmms](https://github.com/fmms) * SparkSQL: Update L014 rule to not flag Delta Change Data Feed Session & Table Property [#3689](https://github.com/sqlfluff/sqlfluff/pull/3689) [@R7L208](https://github.com/R7L208) * Snowflake: OVER (ORDER BY) clause required for first_value (fixes #3797) [#3798](https://github.com/sqlfluff/sqlfluff/pull/3798) [@JamesRTaylor](https://github.com/JamesRTaylor) * add Alter Pipe syntax for snowflake dialect [#3796](https://github.com/sqlfluff/sqlfluff/pull/3796) [@Gal40n04ek](https://github.com/Gal40n04ek) * BigQuery: Parse WEEK() in date_part [#3787](https://github.com/sqlfluff/sqlfluff/pull/3787) [@yoichi](https://github.com/yoichi) * Postgres: Support setting user properties using intrinsic ON & OFF values [#3793](https://github.com/sqlfluff/sqlfluff/pull/3793) [@chris-codaio](https://github.com/chris-codaio) * extend SF dialect for File Format statements [#3774](https://github.com/sqlfluff/sqlfluff/pull/3774) [@Gal40n04ek](https://github.com/Gal40n04ek) * Add QUALIFY to SparkSQL dialect [#3778](https://github.com/sqlfluff/sqlfluff/pull/3778) [@ThijsKoot](https://github.com/ThijsKoot) * fix regex for S3Path [#3782](https://github.com/sqlfluff/sqlfluff/pull/3782) [@Gal40n04ek](https://github.com/Gal40n04ek) * Snowflake: add Optional parameter ERROR INTEGRATION for PIPE [#3785](https://github.com/sqlfluff/sqlfluff/pull/3785) [@Gal40n04ek](https://github.com/Gal40n04ek) * Add a file size check in bytes [#3770](https://github.com/sqlfluff/sqlfluff/pull/3770) [@alanmcruickshank](https://github.com/alanmcruickshank) * Require importlib_metadata >=1.0.0 [#3769](https://github.com/sqlfluff/sqlfluff/pull/3769) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@Gal40n04ek](https://github.com/Gal40n04ek) made their first contribution in [#3785](https://github.com/sqlfluff/sqlfluff/pull/3785) * [@ThijsKoot](https://github.com/ThijsKoot) made their first contribution in [#3778](https://github.com/sqlfluff/sqlfluff/pull/3778) * [@chris-codaio](https://github.com/chris-codaio) made their first contribution in [#3793](https://github.com/sqlfluff/sqlfluff/pull/3793) * [@JamesRTaylor](https://github.com/JamesRTaylor) made their first contribution in [#3798](https://github.com/sqlfluff/sqlfluff/pull/3798) * [@fmms](https://github.com/fmms) made their first contribution in [#3790](https://github.com/sqlfluff/sqlfluff/pull/3790) * [@bjgbeelen](https://github.com/bjgbeelen) made their first contribution in [#3803](https://github.com/sqlfluff/sqlfluff/pull/3803) * [@thechopkins](https://github.com/thechopkins) made their first contribution in [#3788](https://github.com/sqlfluff/sqlfluff/pull/3788) ## [1.3.0] - 2022-08-21 ## Highlights This release brings several potentially breaking changes to the underlying parse tree. 
Users of the CLI tool in a linting context should notice no change. If, however, your application relies on the structure of the SQLFluff parse tree or the naming of certain elements within the YAML format, then this may not be a drop-in replacement. Specifically:
- The addition of a new `end_of_file` meta segment at the end of the parse structure.
- The addition of a `template_loop` meta segment to signify a jump backward in the source file within a loop structure (e.g. a jinja for loop).
- Much more specific types on some raw segments, in particular `identifier` and `literal` type segments will now appear in the parse tree with their more specific type (which used to be called `name`) e.g. `naked_identifier`, `quoted_identifier`, `numeric_literal` etc...

If using the Python API, the _parent_ type (such as `identifier`) will still register if you call `.is_type("identifier")`, as this function checks all inherited types. However, the eventual type returned by `.get_type()` will now be (in most cases) what used to be accessible at `.name`. The `name` attribute will be deprecated in a future release.

Other highlights:
* New command-line option `--show-lint-violations` to show details on unfixable errors when running `sqlfluff fix`.
* Improved consistency of process exit codes.
* Short CLI options for many common options.
* Jinja templater: When `--ignore=templating` is enabled, undefined Jinja variables now take on "reasonable" default values rather than blank string (`""`). This can streamline initial rollout of SQLFluff by reducing or eliminating the need to configure templater variables.

There are also a _ton_ of other features and bug fixes in this release, including first-time contributions from **11 new contributors**! 🎉

## What’s Changed

* T-SQL: ALTER TABLE DROP COLUMN [#3749](https://github.com/sqlfluff/sqlfluff/pull/3749) [@greg-finley](https://github.com/greg-finley)
* Add "# pragma: no cover" to work around sporadic, spurious coverage failure [#3767](https://github.com/sqlfluff/sqlfluff/pull/3767) [@barrywhart](https://github.com/barrywhart)
* Add end_of_file and template_loop markers [#3766](https://github.com/sqlfluff/sqlfluff/pull/3766) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Provide usage examples for new users [#3765](https://github.com/sqlfluff/sqlfluff/pull/3765) [@sirlark](https://github.com/sirlark)
* SQLite: deferrable in create table statement [#3757](https://github.com/sqlfluff/sqlfluff/pull/3757) [@RossOkuno](https://github.com/RossOkuno)
* When ignore=templating and fix_even_unparsable=True, provide defaults for missing vars [#3753](https://github.com/sqlfluff/sqlfluff/pull/3753) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Support Materialized Views [#3759](https://github.com/sqlfluff/sqlfluff/pull/3759) [@yoichi](https://github.com/yoichi)
* Enhance L062 to ignore blocked words in comments [#3754](https://github.com/sqlfluff/sqlfluff/pull/3754) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where undefined Jinja variable in macro file crashes linter [#3751](https://github.com/sqlfluff/sqlfluff/pull/3751) [@barrywhart](https://github.com/barrywhart)
* Migrate analysis, functional and testing to utils [#3743](https://github.com/sqlfluff/sqlfluff/pull/3743) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Build out rule crawling mechanisms [#3717](https://github.com/sqlfluff/sqlfluff/pull/3717) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add current_timestamp to Redshift as a bare function
[#3741](https://github.com/sqlfluff/sqlfluff/pull/3741) [@RossOkuno](https://github.com/RossOkuno) * BigQuery: Fix parsing parameterized data types [#3735](https://github.com/sqlfluff/sqlfluff/pull/3735) [@yoichi](https://github.com/yoichi) * Change MySQL Create Statement Equals Segment to Optional [#3730](https://github.com/sqlfluff/sqlfluff/pull/3730) [@keyem4251](https://github.com/keyem4251) * SQLite: add parsing of INSERT statement [#3734](https://github.com/sqlfluff/sqlfluff/pull/3734) [@imrehg](https://github.com/imrehg) * SPARKSQL: Support Delta Lake Drop Column Clause in `ALTER TABLE` [#3727](https://github.com/sqlfluff/sqlfluff/pull/3727) [@R7L208](https://github.com/R7L208) * Add short versions of several cli options [#3732](https://github.com/sqlfluff/sqlfluff/pull/3732) [@alanmcruickshank](https://github.com/alanmcruickshank) * Build out type hints in Grammars [#3718](https://github.com/sqlfluff/sqlfluff/pull/3718) [@alanmcruickshank](https://github.com/alanmcruickshank) * dbt 1.3.0 compatibility [#3708](https://github.com/sqlfluff/sqlfluff/pull/3708) [@edgarrmondragon](https://github.com/edgarrmondragon) * Revise no cover direction and remove unused code. [#3723](https://github.com/sqlfluff/sqlfluff/pull/3723) [@alanmcruickshank](https://github.com/alanmcruickshank) * Update broken flattr link [#3720](https://github.com/sqlfluff/sqlfluff/pull/3720) [@alanmcruickshank](https://github.com/alanmcruickshank) * BigQuery: remove `key` from unreserved keywords list [#3719](https://github.com/sqlfluff/sqlfluff/pull/3719) [@sabrikaragonen](https://github.com/sabrikaragonen) * Bigquery reset primary and foreign keys [#3714](https://github.com/sqlfluff/sqlfluff/pull/3714) [@sabrikaragonen](https://github.com/sabrikaragonen) * Name Deprecation (Part 1) [#3701](https://github.com/sqlfluff/sqlfluff/pull/3701) [@alanmcruickshank](https://github.com/alanmcruickshank) * Teradata: Add two TdTableConstraints [#3690](https://github.com/sqlfluff/sqlfluff/pull/3690) [@greg-finley](https://github.com/greg-finley) * Redshift: support expressions in array accessors [#3706](https://github.com/sqlfluff/sqlfluff/pull/3706) [@chronitis](https://github.com/chronitis) * Handle logging issues at teardown [#3703](https://github.com/sqlfluff/sqlfluff/pull/3703) [@alanmcruickshank](https://github.com/alanmcruickshank) * L028, L032: Fix bug where fixes were copying templated table names [#3699](https://github.com/sqlfluff/sqlfluff/pull/3699) [@barrywhart](https://github.com/barrywhart) * L042: Autofix sometimes results in "fix looping", hitting the linter "loop limit" [#3697](https://github.com/sqlfluff/sqlfluff/pull/3697) [@barrywhart](https://github.com/barrywhart) * L042: Address corner cases where fix corrupts the SQL [#3694](https://github.com/sqlfluff/sqlfluff/pull/3694) [@barrywhart](https://github.com/barrywhart) * T-SQL: Properly parse collation names [#3686](https://github.com/sqlfluff/sqlfluff/pull/3686) [@borchero](https://github.com/borchero) * Allow escaping single quotes in single-quoted literal with '' [#3682](https://github.com/sqlfluff/sqlfluff/pull/3682) [@pdebelak](https://github.com/pdebelak) * T-SQL: Fix indentation after JOIN/APPLY clauses with no ON statement [#3684](https://github.com/sqlfluff/sqlfluff/pull/3684) [@borchero](https://github.com/borchero) * T-SQL: Parse `DATEPART` date type as date type instead of column name [#3681](https://github.com/sqlfluff/sqlfluff/pull/3681) [@borchero](https://github.com/borchero) * T-SQL: Allow `COLLATE` clause in `JOIN` conditions 
There are also a _ton_ of other features and bug fixes in this release, including first-time contributions from **11 new contributors**! 🎉

## What’s Changed

* T-SQL: ALTER TABLE DROP COLUMN [#3749](https://github.com/sqlfluff/sqlfluff/pull/3749) [@greg-finley](https://github.com/greg-finley)
* Add "# pragma: no cover" to work around sporadic, spurious coverage failure [#3767](https://github.com/sqlfluff/sqlfluff/pull/3767) [@barrywhart](https://github.com/barrywhart)
* Add end_of_file and template_loop markers [#3766](https://github.com/sqlfluff/sqlfluff/pull/3766) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Provide usage examples for new users [#3765](https://github.com/sqlfluff/sqlfluff/pull/3765) [@sirlark](https://github.com/sirlark)
* SQLite: deferrable in create table statement [#3757](https://github.com/sqlfluff/sqlfluff/pull/3757) [@RossOkuno](https://github.com/RossOkuno)
* When ignore=templating and fix_even_unparsable=True, provide defaults for missing vars [#3753](https://github.com/sqlfluff/sqlfluff/pull/3753) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Support Materialized Views [#3759](https://github.com/sqlfluff/sqlfluff/pull/3759) [@yoichi](https://github.com/yoichi)
* Enhance L062 to ignore blocked words in comments [#3754](https://github.com/sqlfluff/sqlfluff/pull/3754) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where undefined Jinja variable in macro file crashes linter [#3751](https://github.com/sqlfluff/sqlfluff/pull/3751) [@barrywhart](https://github.com/barrywhart)
* Migrate analysis, functional and testing to utils [#3743](https://github.com/sqlfluff/sqlfluff/pull/3743) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Build out rule crawling mechanisms [#3717](https://github.com/sqlfluff/sqlfluff/pull/3717) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add current_timestamp to Redshift as a bare function [#3741](https://github.com/sqlfluff/sqlfluff/pull/3741) [@RossOkuno](https://github.com/RossOkuno)
* BigQuery: Fix parsing parameterized data types [#3735](https://github.com/sqlfluff/sqlfluff/pull/3735) [@yoichi](https://github.com/yoichi)
* Change MySQL Create Statement Equals Segment to Optional [#3730](https://github.com/sqlfluff/sqlfluff/pull/3730) [@keyem4251](https://github.com/keyem4251)
* SQLite: add parsing of INSERT statement [#3734](https://github.com/sqlfluff/sqlfluff/pull/3734) [@imrehg](https://github.com/imrehg)
* SPARKSQL: Support Delta Lake Drop Column Clause in `ALTER TABLE` [#3727](https://github.com/sqlfluff/sqlfluff/pull/3727) [@R7L208](https://github.com/R7L208)
* Add short versions of several cli options [#3732](https://github.com/sqlfluff/sqlfluff/pull/3732) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Build out type hints in Grammars [#3718](https://github.com/sqlfluff/sqlfluff/pull/3718) [@alanmcruickshank](https://github.com/alanmcruickshank)
* dbt 1.3.0 compatibility [#3708](https://github.com/sqlfluff/sqlfluff/pull/3708) [@edgarrmondragon](https://github.com/edgarrmondragon)
* Revise no cover direction and remove unused code. [#3723](https://github.com/sqlfluff/sqlfluff/pull/3723) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update broken flattr link [#3720](https://github.com/sqlfluff/sqlfluff/pull/3720) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: remove `key` from unreserved keywords list [#3719](https://github.com/sqlfluff/sqlfluff/pull/3719) [@sabrikaragonen](https://github.com/sabrikaragonen)
* Bigquery reset primary and foreign keys [#3714](https://github.com/sqlfluff/sqlfluff/pull/3714) [@sabrikaragonen](https://github.com/sabrikaragonen)
* Name Deprecation (Part 1) [#3701](https://github.com/sqlfluff/sqlfluff/pull/3701) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Teradata: Add two TdTableConstraints [#3690](https://github.com/sqlfluff/sqlfluff/pull/3690) [@greg-finley](https://github.com/greg-finley)
* Redshift: support expressions in array accessors [#3706](https://github.com/sqlfluff/sqlfluff/pull/3706) [@chronitis](https://github.com/chronitis)
* Handle logging issues at teardown [#3703](https://github.com/sqlfluff/sqlfluff/pull/3703) [@alanmcruickshank](https://github.com/alanmcruickshank)
* L028, L032: Fix bug where fixes were copying templated table names [#3699](https://github.com/sqlfluff/sqlfluff/pull/3699) [@barrywhart](https://github.com/barrywhart)
* L042: Autofix sometimes results in "fix looping", hitting the linter "loop limit" [#3697](https://github.com/sqlfluff/sqlfluff/pull/3697) [@barrywhart](https://github.com/barrywhart)
* L042: Address corner cases where fix corrupts the SQL [#3694](https://github.com/sqlfluff/sqlfluff/pull/3694) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Properly parse collation names [#3686](https://github.com/sqlfluff/sqlfluff/pull/3686) [@borchero](https://github.com/borchero)
* Allow escaping single quotes in single-quoted literal with '' [#3682](https://github.com/sqlfluff/sqlfluff/pull/3682) [@pdebelak](https://github.com/pdebelak)
* T-SQL: Fix indentation after JOIN/APPLY clauses with no ON statement [#3684](https://github.com/sqlfluff/sqlfluff/pull/3684) [@borchero](https://github.com/borchero)
* T-SQL: Parse `DATEPART` date type as date type instead of column name [#3681](https://github.com/sqlfluff/sqlfluff/pull/3681) [@borchero](https://github.com/borchero)
* T-SQL: Allow `COLLATE` clause in `JOIN` conditions [#3680](https://github.com/sqlfluff/sqlfluff/pull/3680) [@borchero](https://github.com/borchero)
* T-SQL: Fix parsing of CREATE VIEW statements with column name syntax [#3669](https://github.com/sqlfluff/sqlfluff/pull/3669) [@borchero](https://github.com/borchero)
* Fix typo in github issue template [#3674](https://github.com/sqlfluff/sqlfluff/pull/3674) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add Athena issue label [#3676](https://github.com/sqlfluff/sqlfluff/pull/3676) [@greg-finley](https://github.com/greg-finley)
* Set issue dialect labels via Github Actions [#3666](https://github.com/sqlfluff/sqlfluff/pull/3666) [@greg-finley](https://github.com/greg-finley)
* Allow configuration of processes from config [#3662](https://github.com/sqlfluff/sqlfluff/pull/3662) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reposition before recursion in fixes to avoid internal error [#3658](https://github.com/sqlfluff/sqlfluff/pull/3658) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use UUIDs for matching [#3661](https://github.com/sqlfluff/sqlfluff/pull/3661) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Add dialect-specific bare functions [#3660](https://github.com/sqlfluff/sqlfluff/pull/3660) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Add `CALL` Support [#3659](https://github.com/sqlfluff/sqlfluff/pull/3659) [@WittierDinosaur](https://github.com/WittierDinosaur)
* ANSI - Add support for `INTERSECT ALL` and `EXCEPT ALL` [#3657](https://github.com/sqlfluff/sqlfluff/pull/3657) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Option to show errors on fix [#3610](https://github.com/sqlfluff/sqlfluff/pull/3610) [@chaimt](https://github.com/chaimt)
* L042: Fix internal error "Attempted to make a parent marker from multiple files" [#3655](https://github.com/sqlfluff/sqlfluff/pull/3655) [@barrywhart](https://github.com/barrywhart)
* L026: Add support for `merge_statement` [#3654](https://github.com/sqlfluff/sqlfluff/pull/3654) [@barrywhart](https://github.com/barrywhart)
* Add handling for Redshift `CONVERT` function data type argument [#3653](https://github.com/sqlfluff/sqlfluff/pull/3653) [@pdebelak](https://github.com/pdebelak)
* Deduplicate files before and during templating [#3629](https://github.com/sqlfluff/sqlfluff/pull/3629) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Rationalise Rule Imports [#3631](https://github.com/sqlfluff/sqlfluff/pull/3631) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle Jinja `{% call ... %}` blocks [#3648](https://github.com/sqlfluff/sqlfluff/pull/3648) [@barrywhart](https://github.com/barrywhart)
* SPARKSQL: Add Delta Lake Constraints syntax to `ALTER TABLE` [#3643](https://github.com/sqlfluff/sqlfluff/pull/3643) [@R7L208](https://github.com/R7L208)
* Redshift: syntax for array unnesting with index [#3646](https://github.com/sqlfluff/sqlfluff/pull/3646) [@chronitis](https://github.com/chronitis)
* Snowflake - `ALTER TABLE IF EXISTS` and `WHEN SYSTEM$STREAM_HAS_DATA()` [#3641](https://github.com/sqlfluff/sqlfluff/pull/3641) [@chrisalexeev](https://github.com/chrisalexeev)
* L057: In BigQuery, allow hyphens by default [#3645](https://github.com/sqlfluff/sqlfluff/pull/3645) [@barrywhart](https://github.com/barrywhart)
* Better messages for partial indentation in L003 [#3634](https://github.com/sqlfluff/sqlfluff/pull/3634) [@pdebelak](https://github.com/pdebelak)
* Add `INTEGER` to `PrimitiveTypeSegment` for Sparksql [#3624](https://github.com/sqlfluff/sqlfluff/pull/3624) [@ciwassano](https://github.com/ciwassano)
* Bump version in gettingstarted.rst via the release script [#3642](https://github.com/sqlfluff/sqlfluff/pull/3642) [@greg-finley](https://github.com/greg-finley)
* Improve handling of BigQuery hyphenated table names [#3638](https://github.com/sqlfluff/sqlfluff/pull/3638) [@barrywhart](https://github.com/barrywhart)
* Update sqlfluff version in gettingstarted.rst [#3639](https://github.com/sqlfluff/sqlfluff/pull/3639) [@keyem4251](https://github.com/keyem4251)
* L016: Ignore jinja comments if `ignore_comment_clauses=True` [#3637](https://github.com/sqlfluff/sqlfluff/pull/3637) [@barrywhart](https://github.com/barrywhart)
* Add errors for redundant definitions. [#3626](https://github.com/sqlfluff/sqlfluff/pull/3626) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Object Literals [#3620](https://github.com/sqlfluff/sqlfluff/pull/3620) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Dialect Crumbs [#3625](https://github.com/sqlfluff/sqlfluff/pull/3625) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Consistent return codes [#3608](https://github.com/sqlfluff/sqlfluff/pull/3608) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@keyem4251](https://github.com/keyem4251) made their first contribution in [#3639](https://github.com/sqlfluff/sqlfluff/pull/3639)
* [@ciwassano](https://github.com/ciwassano) made their first contribution in [#3624](https://github.com/sqlfluff/sqlfluff/pull/3624)
* [@chronitis](https://github.com/chronitis) made their first contribution in [#3646](https://github.com/sqlfluff/sqlfluff/pull/3646)
* [@chaimt](https://github.com/chaimt) made their first contribution in [#3610](https://github.com/sqlfluff/sqlfluff/pull/3610)
* [@borchero](https://github.com/borchero) made their first contribution in [#3669](https://github.com/sqlfluff/sqlfluff/pull/3669)
* [@sabrikaragonen](https://github.com/sabrikaragonen) made their first contribution in [#3714](https://github.com/sqlfluff/sqlfluff/pull/3714)
* [@edgarrmondragon](https://github.com/edgarrmondragon) made their first contribution in [#3708](https://github.com/sqlfluff/sqlfluff/pull/3708)
* [@imrehg](https://github.com/imrehg) made their first contribution in [#3734](https://github.com/sqlfluff/sqlfluff/pull/3734)
* [@yoichi](https://github.com/yoichi) made their first contribution in [#3735](https://github.com/sqlfluff/sqlfluff/pull/3735)
* [@RossOkuno](https://github.com/RossOkuno) made their first contribution in [#3741](https://github.com/sqlfluff/sqlfluff/pull/3741)
* [@sirlark](https://github.com/sirlark) made their first contribution in [#3765](https://github.com/sqlfluff/sqlfluff/pull/3765)

## [1.2.1] - 2022-07-15

## Highlights

This is primarily a bugfix release to resolve an issue with the 1.2.0 release, where the new version of `sqlfluff-templater-dbt` relied on functionality from the new version of `sqlfluff`, but the package configuration had not been updated. Versions of the two packages are now pinned together.

## What’s Changed

* Pin sqlfluff-templater-dbt via release script [#3613](https://github.com/sqlfluff/sqlfluff/pull/3613) [@greg-finley](https://github.com/greg-finley)
* Specifying comma delimited is unnecessary [#3616](https://github.com/sqlfluff/sqlfluff/pull/3616) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle redshift temporary tables with # at the beginning of name [#3615](https://github.com/sqlfluff/sqlfluff/pull/3615) [@pdebelak](https://github.com/pdebelak)

## [1.2.0] - 2022-07-13

## Highlights

Major changes include:

* Adding AWS Athena as a dialect.
* A fix routine for L046 (whitespace in jinja tags), and the mechanisms for more source-only fixes in future.
* By default, large files (over 20k characters) are now skipped by sqlfluff. This limit is configurable and disable-able, but exists as a sensible default to avoid the performance overhead of linting *very* large files.
* For the dbt templater, fatal compilation errors no longer stop linting: the offending files are now skipped instead. This enables projects to continue linting beyond the offending file, with much better logging information to enable debugging.
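As a sketch of how the large file limit might be configured in `.sqlfluff` (the key name `large_file_skip_char_limit` and the disable-by-zero behaviour are our assumptions based on this release; verify against the configuration reference):

```ini
[sqlfluff]
# Skip any file longer than 20,000 characters (the assumed default).
large_file_skip_char_limit = 20000
# Assumed behaviour: setting the limit to 0 disables the check entirely.
# large_file_skip_char_limit = 0
```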
## What’s Changed

* Improve documentation for custom implemented rules [#3604](https://github.com/sqlfluff/sqlfluff/pull/3604) [@Aditya-Tripuraneni](https://github.com/Aditya-Tripuraneni)
* Add a skip and better logging for fatal dbt issues [#3603](https://github.com/sqlfluff/sqlfluff/pull/3603) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add large file check [#3600](https://github.com/sqlfluff/sqlfluff/pull/3600) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Oracle: extend support for `ALTER TABLE` [#3596](https://github.com/sqlfluff/sqlfluff/pull/3596) [@davidfuhr](https://github.com/davidfuhr)
* Immutability fixes [#3428](https://github.com/sqlfluff/sqlfluff/pull/3428) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Struct type should be a segment [#3591](https://github.com/sqlfluff/sqlfluff/pull/3591) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix Bracketed Struct issue [#3590](https://github.com/sqlfluff/sqlfluff/pull/3590) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow spaces and multiple signs for numeric literals [#3581](https://github.com/sqlfluff/sqlfluff/pull/3581) [@tunetheweb](https://github.com/tunetheweb)
* Add source fixing capability and fix routines for L046 [#3578](https://github.com/sqlfluff/sqlfluff/pull/3578) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Small grammar cleanup in team rollout docs [#3584](https://github.com/sqlfluff/sqlfluff/pull/3584) [@theianrobertson](https://github.com/theianrobertson)
* Postgres: `CREATE COLLATION` support [#3571](https://github.com/sqlfluff/sqlfluff/pull/3571) [@greg-finley](https://github.com/greg-finley)
* Redshift: Add `TOP X` to select clause modifiers [#3582](https://github.com/sqlfluff/sqlfluff/pull/3582) [@pdebelak](https://github.com/pdebelak)
* Postgres: Small fixes to `COMMENT ON` [#3566](https://github.com/sqlfluff/sqlfluff/pull/3566) [@greg-finley](https://github.com/greg-finley)
* Support MySQL system variables [#3576](https://github.com/sqlfluff/sqlfluff/pull/3576) [@qgallet](https://github.com/qgallet)
* Allow no alias for selects in CTEs with a column list [#3580](https://github.com/sqlfluff/sqlfluff/pull/3580) [@pdebelak](https://github.com/pdebelak)
* New dialect AWS Athena [#3551](https://github.com/sqlfluff/sqlfluff/pull/3551) [@cmotta](https://github.com/cmotta)
* Split apart `fix_string()`. [#3568](https://github.com/sqlfluff/sqlfluff/pull/3568) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix incorrect L022 with postgres dialect with CTE argument list [#3570](https://github.com/sqlfluff/sqlfluff/pull/3570) [@pdebelak](https://github.com/pdebelak)
* Simplify lint fixing (prep for source fixes) [#3567](https://github.com/sqlfluff/sqlfluff/pull/3567) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Exclude .coverage.py from linting [#3564](https://github.com/sqlfluff/sqlfluff/pull/3564) [@zidder](https://github.com/zidder)
* L016: `ignore_comment_clauses` not working for postgres dialect [#3549](https://github.com/sqlfluff/sqlfluff/pull/3549) [@barrywhart](https://github.com/barrywhart)
* Groundwork for a fix routine for L046 [#3552](https://github.com/sqlfluff/sqlfluff/pull/3552) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add better handling for SQLFluffUserError when running core cli commands [#3431](https://github.com/sqlfluff/sqlfluff/pull/3431) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@pdebelak](https://github.com/pdebelak) made their first contribution in [#3570](https://github.com/sqlfluff/sqlfluff/pull/3570)
* [@cmotta](https://github.com/cmotta) made their first contribution in [#3551](https://github.com/sqlfluff/sqlfluff/pull/3551)
* [@qgallet](https://github.com/qgallet) made their first contribution in [#3576](https://github.com/sqlfluff/sqlfluff/pull/3576)
* [@theianrobertson](https://github.com/theianrobertson) made their first contribution in [#3584](https://github.com/sqlfluff/sqlfluff/pull/3584)
* [@davidfuhr](https://github.com/davidfuhr) made their first contribution in [#3596](https://github.com/sqlfluff/sqlfluff/pull/3596)
* [@Aditya-Tripuraneni](https://github.com/Aditya-Tripuraneni) made their first contribution in [#3604](https://github.com/sqlfluff/sqlfluff/pull/3604)

## [1.1.0] - 2022-07-03

## Highlights

Major changes include:

* L066 - New rule to allow you to set min/max length requirements for aliases to ensure they are meaningful
* L062 - addition of `blocked_regex` as well as `blocked_words`
* L025 - fix several corner cases where aliases were removed inappropriately
* L059 is now disabled by default for Postgres
* Many more dialect improvements and bug fixes.
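As a sketch of how the first two of these might be configured in `.sqlfluff` (the option names below follow the rule descriptions but are assumptions; check the rules reference for the exact keys, and the values and regex are illustrative):

```ini
[sqlfluff:rules:L066]
# Require aliases to be meaningful: between 3 and 30 characters long.
min_alias_length = 3
max_alias_length = 30

[sqlfluff:rules:L062]
# Block specific words, and (new in this release) anything matching a regex.
blocked_words = deprecated_table
blocked_regex = .*_tmp$
```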
## What’s Changed

* L025: Derived query requires alias -- also handle UNION, etc. [#3548](https://github.com/sqlfluff/sqlfluff/pull/3548) [@barrywhart](https://github.com/barrywhart)
* L025 should not remove aliases from derived queries [#3546](https://github.com/sqlfluff/sqlfluff/pull/3546) [@barrywhart](https://github.com/barrywhart)
* T-SQL keyword functions should be treated as keywords [#3540](https://github.com/sqlfluff/sqlfluff/pull/3540) [@tunetheweb](https://github.com/tunetheweb)
* Fix issue where "--nocolor" is ignored [#3536](https://github.com/sqlfluff/sqlfluff/pull/3536) [@barrywhart](https://github.com/barrywhart)
* Clickhouse: allow `FINAL` modifier [#3534](https://github.com/sqlfluff/sqlfluff/pull/3534) [@ThomAub](https://github.com/ThomAub)
* L018 change to just check for newlines rather than alignment [#3499](https://github.com/sqlfluff/sqlfluff/pull/3499) [@zidder](https://github.com/zidder)
* SparkSQL: Update terminator grammar for `HAVING`, `WHERE`, `GROUP BY` [#3526](https://github.com/sqlfluff/sqlfluff/pull/3526) [@R7L208](https://github.com/R7L208)
* Fix L025 false positive for T-SQL `VALUES` clause [#3533](https://github.com/sqlfluff/sqlfluff/pull/3533) [@barrywhart](https://github.com/barrywhart)
* New rule L066 for enforcing table alias lengths [#3384](https://github.com/sqlfluff/sqlfluff/pull/3384) [@f0rk](https://github.com/f0rk)
* Redshift: `CALL` statement [#3529](https://github.com/sqlfluff/sqlfluff/pull/3529) [@greg-finley](https://github.com/greg-finley)
* Core: Compile regexes at init time to avoid overhead [#3511](https://github.com/sqlfluff/sqlfluff/pull/3511) [@judahrand](https://github.com/judahrand)
* Disable L059 by default for Postgres [#3528](https://github.com/sqlfluff/sqlfluff/pull/3528) [@tunetheweb](https://github.com/tunetheweb)
* Core: Add `MultiStringParser` to match a collection of strings [#3510](https://github.com/sqlfluff/sqlfluff/pull/3510) [@judahrand](https://github.com/judahrand)
* SQLite: `PRIMARY KEY AUTOINCREMENT` [#3527](https://github.com/sqlfluff/sqlfluff/pull/3527) [@greg-finley](https://github.com/greg-finley)
* MySQL: Support `LOAD DATA` [#3518](https://github.com/sqlfluff/sqlfluff/pull/3518) [@greg-finley](https://github.com/greg-finley)
* Redshift: `GRANT EXECUTE ON PROCEDURES` [#3516](https://github.com/sqlfluff/sqlfluff/pull/3516) [@greg-finley](https://github.com/greg-finley)
* Allow `DEFAULT` expression in Redshift `ALTER TABLE ADD COLUMN` statements [#3513](https://github.com/sqlfluff/sqlfluff/pull/3513) [@menzenski](https://github.com/menzenski)
* BigQuery: Fix parsing of Array creation from full subquery [#3502](https://github.com/sqlfluff/sqlfluff/pull/3502) [@judahrand](https://github.com/judahrand)
* SparkSQL: Allow dateparts as table aliases [#3500](https://github.com/sqlfluff/sqlfluff/pull/3500) [@R7L208](https://github.com/R7L208)
* Fix `load_macros_from_path` to actually support multiple paths [#3488](https://github.com/sqlfluff/sqlfluff/pull/3488) [@emancu](https://github.com/emancu)
* Allow linter to apply fixes spanning more than 2 slices [#3492](https://github.com/sqlfluff/sqlfluff/pull/3492) [@barrywhart](https://github.com/barrywhart)
* Fix L022 false positive when the CTE definition has a column list [#3490](https://github.com/sqlfluff/sqlfluff/pull/3490) [@barrywhart](https://github.com/barrywhart)
* SparkSQL: Support for Delta `RESTORE` statement [#3486](https://github.com/sqlfluff/sqlfluff/pull/3486) [@R7L208](https://github.com/R7L208)
* Add values function to `SET` clause [#3483](https://github.com/sqlfluff/sqlfluff/pull/3483) [@hgranthorner](https://github.com/hgranthorner)
* SparkSQL: Support for `CONVERT TO DELTA` command [#3482](https://github.com/sqlfluff/sqlfluff/pull/3482) [@R7L208](https://github.com/R7L208)
* BigQuery: Remaining procedural statements [#3473](https://github.com/sqlfluff/sqlfluff/pull/3473) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: support grouping sets [#3477](https://github.com/sqlfluff/sqlfluff/pull/3477) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Support for Delta syntax to create manifest files through the `GENERATE` command [#3478](https://github.com/sqlfluff/sqlfluff/pull/3478) [@R7L208](https://github.com/R7L208)
* Add config for optionally indenting contents of `ON` blocks [#3471](https://github.com/sqlfluff/sqlfluff/pull/3471) [@PeterBalsdon](https://github.com/PeterBalsdon)
* L026: check standalone aliases as well as table aliases [#3470](https://github.com/sqlfluff/sqlfluff/pull/3470) [@tunetheweb](https://github.com/tunetheweb)
* L045: Add handling for nested queries and CTEs [#3468](https://github.com/sqlfluff/sqlfluff/pull/3468) [@barrywhart](https://github.com/barrywhart)
* L062: add `blocked_regex` support [#3467](https://github.com/sqlfluff/sqlfluff/pull/3467) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Support for the Delta `DESCRIBE DETAIL` command [#3465](https://github.com/sqlfluff/sqlfluff/pull/3465) [@R7L208](https://github.com/R7L208)

## New Contributors

* [@PeterBalsdon](https://github.com/PeterBalsdon) made their first contribution in [#3471](https://github.com/sqlfluff/sqlfluff/pull/3471)
* [@hgranthorner](https://github.com/hgranthorner) made their first contribution in [#3483](https://github.com/sqlfluff/sqlfluff/pull/3483)
* [@emancu](https://github.com/emancu) made their first contribution in [#3488](https://github.com/sqlfluff/sqlfluff/pull/3488)
* [@judahrand](https://github.com/judahrand) made their first contribution in [#3502](https://github.com/sqlfluff/sqlfluff/pull/3502)
* [@f0rk](https://github.com/f0rk) made their first contribution in [#3384](https://github.com/sqlfluff/sqlfluff/pull/3384)
* [@zidder](https://github.com/zidder) made their first contribution in [#3499](https://github.com/sqlfluff/sqlfluff/pull/3499)
* [@ThomAub](https://github.com/ThomAub) made their first contribution in [#3534](https://github.com/sqlfluff/sqlfluff/pull/3534)

## [1.0.0] - 2022-06-17

## Highlights

This is the first _stable_ release of SQLFluff 🎉🎉🎉.

- _Does this mean there are no more bugs?_ **No.**
- _Does this mean we're going to stop developing new features?_ **No.**
- _Does this mean that this is a tool that is now broadly usable for many teams?_ **Yes.**

We've intentionally chosen to release 1.0.0 at a time of relative stability within SQLFluff, and not at a time when big new structural changes are being made. This means that there's a good chance that this release is broadly usable. It also recognises that, through the hard work of a _huge_ number of contributors, we've built this from a fringe tool into something which gets over 500k downloads a month and over 4k stars on Github.

There's still a lot to do, and some more exciting things on the horizon. If you want to be part of this and join the team of contributors, come and hang out in our [slack community](https://join.slack.com/t/sqlfluff/shared_invite/zt-o1f4x0e8-pZzarAIlQmKj_6ZwD16w0g) or on our [twitter account](https://twitter.com/SQLFluff) where people can help you get started. If you're a long time user, keep submitting bug reports and inputting on [issues on Github](https://github.com/sqlfluff/sqlfluff/issues). If you've never used SQLFluff before, or are hesitant about starting to use it in your day to day work, now might be a good time to try it. We have guides on how to [get started with the tool](https://docs.sqlfluff.com/en/stable/gettingstarted.html), and how to [get started with rolling out to a team](https://docs.sqlfluff.com/en/stable/teamrollout.html) in our docs.

## What’s Changed

* Swap to skip file if not found [#3464](https://github.com/sqlfluff/sqlfluff/pull/3464) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: treat `GENERATE_SERIES` as a value table function [#3463](https://github.com/sqlfluff/sqlfluff/pull/3463) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support multiple `CONSTRAINTS` in `CREATE DOMAIN` [#3460](https://github.com/sqlfluff/sqlfluff/pull/3460) [@tunetheweb](https://github.com/tunetheweb)
* Redshift: Add `ANYELEMENT` support [#3458](https://github.com/sqlfluff/sqlfluff/pull/3458) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Optional select clause elements and better `ON CONFLICT` support [#3452](https://github.com/sqlfluff/sqlfluff/pull/3452) [@tunetheweb](https://github.com/tunetheweb)
* Add ClickHouse as a dialect [#3448](https://github.com/sqlfluff/sqlfluff/pull/3448) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: allow keywords in qualified column references [#3450](https://github.com/sqlfluff/sqlfluff/pull/3450) [@tunetheweb](https://github.com/tunetheweb)
* Remove Baron Schwatz Dead Link [#3453](https://github.com/sqlfluff/sqlfluff/pull/3453) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Finish `ALTER TYPE` [#3451](https://github.com/sqlfluff/sqlfluff/pull/3451) [@greg-finley](https://github.com/greg-finley)
* SparkSQL: Add Delta Syntax for `DESCRIBE HISTORY` statement [#3447](https://github.com/sqlfluff/sqlfluff/pull/3447) [@R7L208](https://github.com/R7L208)
* Snowflake: Support Stage data file parameters in `FROM` clauses [#3446](https://github.com/sqlfluff/sqlfluff/pull/3446) [@tunetheweb](https://github.com/tunetheweb)
* Redshift: Support Object unpivoting [#3441](https://github.com/sqlfluff/sqlfluff/pull/3441) [@tunetheweb](https://github.com/tunetheweb)
* Python script to automate release [#3403](https://github.com/sqlfluff/sqlfluff/pull/3403) [@greg-finley](https://github.com/greg-finley)
* Remove Delta Lake Reference in README.md [#3444](https://github.com/sqlfluff/sqlfluff/pull/3444) [@R7L208](https://github.com/R7L208)
* Add `databricks` dialect as an alias for `sparksql` dialect [#3440](https://github.com/sqlfluff/sqlfluff/pull/3440) [@R7L208](https://github.com/R7L208)
* Make all Postgres identifiers quotable [#3442](https://github.com/sqlfluff/sqlfluff/pull/3442) [@tunetheweb](https://github.com/tunetheweb)
* Update JinjaAnalyzer and JinjaTracer to handle `{% block %}` [#3436](https://github.com/sqlfluff/sqlfluff/pull/3436) [@barrywhart](https://github.com/barrywhart)
* SparkSQL: Add support for Delta `VACUUM` statement [#3439](https://github.com/sqlfluff/sqlfluff/pull/3439) [@R7L208](https://github.com/R7L208)
* Warning for parsing errors extended to all dialects [#3411](https://github.com/sqlfluff/sqlfluff/pull/3411) [@mdahlman](https://github.com/mdahlman)
* Handle templater validation errors more gracefully [#3433](https://github.com/sqlfluff/sqlfluff/pull/3433) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MYSQL: allow for escaped single quotes [#3424](https://github.com/sqlfluff/sqlfluff/pull/3424) [@mdahlman](https://github.com/mdahlman)
* L027: Fix false positives by reverting the PR for issue #2992: Check table aliases exist [#3435](https://github.com/sqlfluff/sqlfluff/pull/3435) [@barrywhart](https://github.com/barrywhart)
* Allow `numeric_dollar` templater to have curly braces, update `dollar` + `numeric_dollar` templater examples in docs [#3432](https://github.com/sqlfluff/sqlfluff/pull/3432) [@menzenski](https://github.com/menzenski)
* Allow Redshift `IDENTITY` column `(seed, step)` to be optional [#3430](https://github.com/sqlfluff/sqlfluff/pull/3430) [@menzenski](https://github.com/menzenski)
* L036: Make wildcard behavior configurable [#3426](https://github.com/sqlfluff/sqlfluff/pull/3426) [@barrywhart](https://github.com/barrywhart)
* L034: Don't autofix if numeric column references [#3423](https://github.com/sqlfluff/sqlfluff/pull/3423) [@barrywhart](https://github.com/barrywhart)
* L036: Treat wildcard as multiple select targets (i.e. separate line) [#3422](https://github.com/sqlfluff/sqlfluff/pull/3422) [@barrywhart](https://github.com/barrywhart)
* Snowflake: IDENTIFIER pseudo-function [#3409](https://github.com/sqlfluff/sqlfluff/pull/3409) [@mdahlman](https://github.com/mdahlman)
* SNOWFLAKE: Fully referenced object names in clone statements [#3414](https://github.com/sqlfluff/sqlfluff/pull/3414) [@mdahlman](https://github.com/mdahlman)
* Unpin coverage now issue with 6.3 has been resolved [#3393](https://github.com/sqlfluff/sqlfluff/pull/3393) [@tunetheweb](https://github.com/tunetheweb)
* L045: handle `UPDATE` statements with CTEs [#3397](https://github.com/sqlfluff/sqlfluff/pull/3397) [@tunetheweb](https://github.com/tunetheweb)
* L027: Add support for `ignore_words` [#3398](https://github.com/sqlfluff/sqlfluff/pull/3398) [@dmohns](https://github.com/dmohns)
* Postgres: Allow `CREATE FUNCTION` to use Expressions in default values [#3408](https://github.com/sqlfluff/sqlfluff/pull/3408) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug in `apply_fixes()` with leading/trailing whitespace [#3407](https://github.com/sqlfluff/sqlfluff/pull/3407) [@barrywhart](https://github.com/barrywhart)
* Redshift: Correct `ALTER TABLE` syntax [#3395](https://github.com/sqlfluff/sqlfluff/pull/3395) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Parse index with column sort [#3405](https://github.com/sqlfluff/sqlfluff/pull/3405) [@greg-finley](https://github.com/greg-finley)
* MySQL: Improve SET Syntax for Variable Assignment [#3394](https://github.com/sqlfluff/sqlfluff/pull/3394) [@mdahlman](https://github.com/mdahlman)
* Handle Postgres-style type casts when using the `colon_nospaces` templating style [#3383](https://github.com/sqlfluff/sqlfluff/pull/3383) [@benji-york](https://github.com/benji-york)
* Capitalization in help message [#3385](https://github.com/sqlfluff/sqlfluff/pull/3385) [@mdahlman](https://github.com/mdahlman)
* MySQL: Update keywords [#3381](https://github.com/sqlfluff/sqlfluff/pull/3381) [@mdahlman](https://github.com/mdahlman)
* Teradata: Database statement and Set Session Database [#3382](https://github.com/sqlfluff/sqlfluff/pull/3382) [@mdahlman](https://github.com/mdahlman)

## New Contributors

* [@benji-york](https://github.com/benji-york) made their first contribution in [#3383](https://github.com/sqlfluff/sqlfluff/pull/3383)
* [@menzenski](https://github.com/menzenski) made their first contribution in [#3430](https://github.com/sqlfluff/sqlfluff/pull/3430)

## [0.13.2] - 2022-05-20

## Highlights

Major changes include:

* Fix bug causing L003 to report indentation errors for templated code - sorry, we know that one's caused many of you some grief :-(
* Initial support of SOQL (Salesforce Object Query Language).
* Additional Placeholder templating options.
* Start of BigQuery procedural language support (starting with simple `FOR` statements and `CREATE PROCEDURE` statements).
* New rule L065 to put set operators onto new lines.
* Many more dialect improvements and bug fixes.
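As an illustration of the placeholder templating options, a `.sqlfluff` sketch using the new `colon_nospaces` style (the `param_style` key and inline parameter values are how we understand the placeholder templater to be configured; `my_table` and its value are illustrative):

```ini
[sqlfluff:templater:placeholder]
# Substitute :my_table style placeholders, with no space allowed
# between the colon and the parameter name.
param_style = colon_nospaces
my_table = some_schema.some_table
```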
## What’s Changed

* All dialects: Allow `RESPECT NULLS`/`IGNORE NULLS` in window functions [#3376](https://github.com/sqlfluff/sqlfluff/pull/3376) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: correct `RETURNS TABLE` column type [#3379](https://github.com/sqlfluff/sqlfluff/pull/3379) [@tunetheweb](https://github.com/tunetheweb)
* L065: Add rule for set operators surrounded by newlines [#3330](https://github.com/sqlfluff/sqlfluff/pull/3330) [@dmohns](https://github.com/dmohns)
* L064: Apply preferred quote-style for partially templated quoted literals [#3300](https://github.com/sqlfluff/sqlfluff/pull/3300) [@dmohns](https://github.com/dmohns)
* BigQuery: Support Stored Procedures [#3369](https://github.com/sqlfluff/sqlfluff/pull/3369) [@tunetheweb](https://github.com/tunetheweb)
* MySQL extra Boolean operators (`&&`, `||`, `!`) [#3359](https://github.com/sqlfluff/sqlfluff/pull/3359) [@mdahlman](https://github.com/mdahlman)
* Postgres and Redshift: Support `LOCK [TABLE]` [#3350](https://github.com/sqlfluff/sqlfluff/pull/3350) [@tunetheweb](https://github.com/tunetheweb)
* Placeholder updates: Allow optional braces in dollar placeholders, add `colon_nospaces`, and cast to string [#3354](https://github.com/sqlfluff/sqlfluff/pull/3354) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Basic `FOR..IN..DO...END FOR` support [#3340](https://github.com/sqlfluff/sqlfluff/pull/3340) [@tunetheweb](https://github.com/tunetheweb)
* L025: exclude `VALUES` clauses [#3358](https://github.com/sqlfluff/sqlfluff/pull/3358) [@tunetheweb](https://github.com/tunetheweb)
* GitHub Actions: Update existing PR on new runs [#3367](https://github.com/sqlfluff/sqlfluff/pull/3367) [@greg-finley](https://github.com/greg-finley)
* GitHub Actions: Copy draft release notes to CHANGELOG [#3360](https://github.com/sqlfluff/sqlfluff/pull/3360) [@greg-finley](https://github.com/greg-finley)
* GitHub Action to set version number [#3347](https://github.com/sqlfluff/sqlfluff/pull/3347) [@greg-finley](https://github.com/greg-finley)
* Postgres and Redshift: support `ALTER SCHEMA` [#3346](https://github.com/sqlfluff/sqlfluff/pull/3346) [@mdahlman](https://github.com/mdahlman)
* MySQL: better `SELECT..INTO` support [#3351](https://github.com/sqlfluff/sqlfluff/pull/3351) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: support better function calls in `CREATE TRIGGER` [#3349](https://github.com/sqlfluff/sqlfluff/pull/3349) [@tunetheweb](https://github.com/tunetheweb)
* Misc rule doc updates [#3352](https://github.com/sqlfluff/sqlfluff/pull/3352) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Move `CASE` keyword to Unreserved list [#3353](https://github.com/sqlfluff/sqlfluff/pull/3353) [@tunetheweb](https://github.com/tunetheweb)
* MySQL: Added support for multiple variables in `SET` statement. [#3328](https://github.com/sqlfluff/sqlfluff/pull/3328) [@cgkoutzigiannis](https://github.com/cgkoutzigiannis)
* SOQL: Support `date_n_literal` [#3344](https://github.com/sqlfluff/sqlfluff/pull/3344) [@greg-finley](https://github.com/greg-finley)
* Update Docs: Getting Started and Index [#3339](https://github.com/sqlfluff/sqlfluff/pull/3339) [@mdahlman](https://github.com/mdahlman)
* SOQL: Disable L026 rule [#3338](https://github.com/sqlfluff/sqlfluff/pull/3338) [@greg-finley](https://github.com/greg-finley)
* Fix critical parse error logged after L003 fix [#3337](https://github.com/sqlfluff/sqlfluff/pull/3337) [@barrywhart](https://github.com/barrywhart)
* SOQL: Disallow non-`SELECT` statements [#3329](https://github.com/sqlfluff/sqlfluff/pull/3329) [@greg-finley](https://github.com/greg-finley)
* ci: bump github actions [#3336](https://github.com/sqlfluff/sqlfluff/pull/3336) [@Fdawgs](https://github.com/Fdawgs)
* Start SOQL dialect [#3312](https://github.com/sqlfluff/sqlfluff/pull/3312) [@greg-finley](https://github.com/greg-finley)
* Hive: support `CLUSTER`, `DISTRIBUTE`, `SORT BY` [#3304](https://github.com/sqlfluff/sqlfluff/pull/3304) [@barunpuri](https://github.com/barunpuri)
* Fix typo in Configuration documentation [#3319](https://github.com/sqlfluff/sqlfluff/pull/3319) [@mdahlman](https://github.com/mdahlman)
* L011: Support `MERGE` statements [#3292](https://github.com/sqlfluff/sqlfluff/pull/3292) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Add workaround to fix false-positives of L063 [#3306](https://github.com/sqlfluff/sqlfluff/pull/3306) [@dmohns](https://github.com/dmohns)
* Snowflake: `REMOVE` statement rework [#3308](https://github.com/sqlfluff/sqlfluff/pull/3308) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: `PUT` statement [#3307](https://github.com/sqlfluff/sqlfluff/pull/3307) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: `GET` statement [#3305](https://github.com/sqlfluff/sqlfluff/pull/3305) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: Support `ALTER EXTERNAL TABLE` [#3302](https://github.com/sqlfluff/sqlfluff/pull/3302) [@jmc-bbk](https://github.com/jmc-bbk)
* T-SQL: Fix `PIVOT` placement [#3298](https://github.com/sqlfluff/sqlfluff/pull/3298) [@jpers36](https://github.com/jpers36)
* Cleanup role references [#3287](https://github.com/sqlfluff/sqlfluff/pull/3287) [@tunetheweb](https://github.com/tunetheweb)
* Adding Typeform and videoask into inthewild.rst [#3296](https://github.com/sqlfluff/sqlfluff/pull/3296) [@omonereo-tf](https://github.com/omonereo-tf)
* Snowflake: `LIST` statement enhancement [#3295](https://github.com/sqlfluff/sqlfluff/pull/3295) [@jmc-bbk](https://github.com/jmc-bbk)
* MySQL: Support `CREATE USER` [#3289](https://github.com/sqlfluff/sqlfluff/pull/3289) [@greg-finley](https://github.com/greg-finley)
* Snowflake: CREATE STAGE grammar enhancement for file formats [#3293](https://github.com/sqlfluff/sqlfluff/pull/3293) [@jmc-bbk](https://github.com/jmc-bbk)
* T-SQL: Complete support for `DELETE` statement [#3285](https://github.com/sqlfluff/sqlfluff/pull/3285) [@pguyot](https://github.com/pguyot)
* MySQL: Support account names [#3286](https://github.com/sqlfluff/sqlfluff/pull/3286) [@greg-finley](https://github.com/greg-finley)
* L028: In T-SQL dialect, table variables cannot be used to qualify references [#3283](https://github.com/sqlfluff/sqlfluff/pull/3283) [@barrywhart](https://github.com/barrywhart)
* L007: An operator on a line by itself is okay [#3281](https://github.com/sqlfluff/sqlfluff/pull/3281) [@barrywhart](https://github.com/barrywhart)
* L046 (spaces around Jinja tags) should check all slices in a segment [#3279](https://github.com/sqlfluff/sqlfluff/pull/3279) [@barrywhart](https://github.com/barrywhart)
* L003 bug fix: Not ignoring templated newline [#3278](https://github.com/sqlfluff/sqlfluff/pull/3278) [@barrywhart](https://github.com/barrywhart)

## New Contributors

* [@omonereo-tf](https://github.com/omonereo-tf) made their first contribution in [#3296](https://github.com/sqlfluff/sqlfluff/pull/3296)
* [@mdahlman](https://github.com/mdahlman) made their first contribution in [#3319](https://github.com/sqlfluff/sqlfluff/pull/3319)
* [@cgkoutzigiannis](https://github.com/cgkoutzigiannis) made their first contribution in [#3328](https://github.com/sqlfluff/sqlfluff/pull/3328)

## [0.13.1] - 2022-05-06

## Highlights

Major changes include:

* Addition of "rule groups" (currently `core` and `all`) to allow ease of turning on and off groups of rules.
* Addition of the `db2` dialect.
* PRS errors are now highlighted in red.
* Many bug fixes and dialect improvements.
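For example, a minimal `.sqlfluff` sketch enabling only the `core` rule group (group names can be used wherever a list of rules is accepted):

```ini
[sqlfluff]
# Run only the "core" group of rules, rather than all rules.
rules = core
```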
## What’s Changed

* Allow optional `AS` in `MERGE` statements using `SELECT` [#3276](https://github.com/sqlfluff/sqlfluff/pull/3276) [@tunetheweb](https://github.com/tunetheweb)
* Add groups each rule is in to Rules documentation [#3272](https://github.com/sqlfluff/sqlfluff/pull/3272) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Enhanced `EXPORT DATA` statement [#3267](https://github.com/sqlfluff/sqlfluff/pull/3267) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: `CREATE TABLE` support for `COPY` and `LIKE` [#3266](https://github.com/sqlfluff/sqlfluff/pull/3266) [@tunetheweb](https://github.com/tunetheweb)
* Improve error on missing keywords [#3268](https://github.com/sqlfluff/sqlfluff/pull/3268) [@greg-finley](https://github.com/greg-finley)
* MySQL: Add `FLUSH` support [#3269](https://github.com/sqlfluff/sqlfluff/pull/3269) [@greg-finley](https://github.com/greg-finley)
* Postgres: Add `ALTER TYPE` support [#3265](https://github.com/sqlfluff/sqlfluff/pull/3265) [@greg-finley](https://github.com/greg-finley)
* Bug fix: L036 handle single-column `SELECT` with comment on same line as `SELECT` keyword [#3259](https://github.com/sqlfluff/sqlfluff/pull/3259) [@barrywhart](https://github.com/barrywhart)
* Put working example in the README [#3261](https://github.com/sqlfluff/sqlfluff/pull/3261) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Add `CREATE FILE FORMAT` Support [#3104](https://github.com/sqlfluff/sqlfluff/pull/3104) [@jmc-bbk](https://github.com/jmc-bbk)
* Bug fix: Disable L059 in snowflake dialect [#3260](https://github.com/sqlfluff/sqlfluff/pull/3260) [@barrywhart](https://github.com/barrywhart)
* Remove redundant `Anything()` from `match_grammars` [#3258](https://github.com/sqlfluff/sqlfluff/pull/3258) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Add `DOMAIN` support [#3257](https://github.com/sqlfluff/sqlfluff/pull/3257) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Allow optional brackets with `EXECUTE` [#3255](https://github.com/sqlfluff/sqlfluff/pull/3255) [@pguyot](https://github.com/pguyot)
* Add rule groups and a core rules group [#3142](https://github.com/sqlfluff/sqlfluff/pull/3142) [@pwildenhain](https://github.com/pwildenhain)
* MySQL: Better `UNSIGNED` support [#3250](https://github.com/sqlfluff/sqlfluff/pull/3250) [@tunetheweb](https://github.com/tunetheweb)
* MySQL (and others): Support `DROP TEMPORARY TABLE` [#3251](https://github.com/sqlfluff/sqlfluff/pull/3251) [@tunetheweb](https://github.com/tunetheweb)
* Add Db2 dialect [#3231](https://github.com/sqlfluff/sqlfluff/pull/3231) [@ddresslerlegalplans](https://github.com/ddresslerlegalplans)
* BigQuery: Add `CREATE EXTERNAL TABLE` statement [#3241](https://github.com/sqlfluff/sqlfluff/pull/3241) [@dmohns](https://github.com/dmohns)
* SQLite: Add support for expressions in `CREATE INDEX` columns [#3240](https://github.com/sqlfluff/sqlfluff/pull/3240) [@tunetheweb](https://github.com/tunetheweb)
* Fix exception in `check_still_complete` and matching in `StartsWith` [#3236](https://github.com/sqlfluff/sqlfluff/pull/3236) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Add Support for `DROP` Statements [#3238](https://github.com/sqlfluff/sqlfluff/pull/3238) [@chrisalexeev](https://github.com/chrisalexeev)
* Allow YAML generation script to accept arguments when run through `tox` [#3233](https://github.com/sqlfluff/sqlfluff/pull/3233) [@tunetheweb](https://github.com/tunetheweb)
* Bug fix: Cleanly catch and report errors during `load_macros_from_path` [#3239](https://github.com/sqlfluff/sqlfluff/pull/3239) [@barrywhart](https://github.com/barrywhart)
* Indent procedure parameters [#3234](https://github.com/sqlfluff/sqlfluff/pull/3234) [@fdw](https://github.com/fdw)
* Enhance `apply_fixes()` to automatically fix violations of `can_start_end_non_code` [#3232](https://github.com/sqlfluff/sqlfluff/pull/3232) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Fix `for xml path` identifier [#3230](https://github.com/sqlfluff/sqlfluff/pull/3230) [@fdw](https://github.com/fdw)
* SparkSQL: Additional Delta Merge Test Cases [#3228](https://github.com/sqlfluff/sqlfluff/pull/3228) [@R7L208](https://github.com/R7L208)
* Fix bug where L018 warns inappropriately if CTE definition includes a column list [#3227](https://github.com/sqlfluff/sqlfluff/pull/3227) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Better `STRUCT` support [#3217](https://github.com/sqlfluff/sqlfluff/pull/3217) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where L003 and L036 fixes caused a parse error [#3221](https://github.com/sqlfluff/sqlfluff/pull/3221) [@barrywhart](https://github.com/barrywhart)
* Make `IF EXISTS` work with `UNION` selects [#3218](https://github.com/sqlfluff/sqlfluff/pull/3218) [@fdw](https://github.com/fdw)
* Fix bug where the `fix_even_unparsable` setting was not being respected in `.sqlfluff` [#3220](https://github.com/sqlfluff/sqlfluff/pull/3220) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Better `DELETE` table support [#3224](https://github.com/sqlfluff/sqlfluff/pull/3224) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: `ALTER MATERIALIZED VIEW` statement [#3215](https://github.com/sqlfluff/sqlfluff/pull/3215) [@jmc-bbk](https://github.com/jmc-bbk)
* BigQuery: recognise `DATE`, `DATETIME` and `TIME` as date parts for `EXTRACT` [#3209](https://github.com/sqlfluff/sqlfluff/pull/3209) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: enhanced `UPDATE` statement support [#3203](https://github.com/sqlfluff/sqlfluff/pull/3203) [@tunetheweb](https://github.com/tunetheweb)
* Prevent Date Constructors from being changed to double quotes by L064 [#3212](https://github.com/sqlfluff/sqlfluff/pull/3212) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Fix `DROP EXTENSION` syntax [#3213](https://github.com/sqlfluff/sqlfluff/pull/3213) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Handle `FLATTEN()` table function aliases correctly in L025, L027, L028 [#3194](https://github.com/sqlfluff/sqlfluff/pull/3194) [@barrywhart](https://github.com/barrywhart)
* Snowflake: Function `LANGUAGE SQL` [#3202](https://github.com/sqlfluff/sqlfluff/pull/3202) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Enhanced `CREATE MATERIALIZED VIEW` [#3204](https://github.com/sqlfluff/sqlfluff/pull/3204) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support basic `FOR XML` statements [#3193](https://github.com/sqlfluff/sqlfluff/pull/3193) [@fdw](https://github.com/fdw)
* T-SQL: Fix cursor syntax [#3192](https://github.com/sqlfluff/sqlfluff/pull/3192) [@fdw](https://github.com/fdw)
* Snowflake: `REMOVE` statement enhancement [#3191](https://github.com/sqlfluff/sqlfluff/pull/3191) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: Moved `VIEW` to unreserved keywords [#3190](https://github.com/sqlfluff/sqlfluff/pull/3190) [@WittierDinosaur](https://github.com/WittierDinosaur)
* BigQuery: Support `EXPORT DATA` [#3177](https://github.com/sqlfluff/sqlfluff/pull/3177) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Fix exception when using variable names in `FROM` clause [#3175](https://github.com/sqlfluff/sqlfluff/pull/3175) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where `encoding` setting in .sqlfluff file was not being respected [#3170](https://github.com/sqlfluff/sqlfluff/pull/3170) [@barrywhart](https://github.com/barrywhart)
* Highlight `PRS` errors in red [#3168](https://github.com/sqlfluff/sqlfluff/pull/3168) [@OTooleMichael](https://github.com/OTooleMichael)
* Remove unnecessary `StartsWith` and make `terminator` mandatory when using it [#3165](https://github.com/sqlfluff/sqlfluff/pull/3165) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support Composite Types [#3167](https://github.com/sqlfluff/sqlfluff/pull/3167) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support opening, closing, deallocating and fetching cursors [#3166](https://github.com/sqlfluff/sqlfluff/pull/3166) [@fdw](https://github.com/fdw)
* T-SQL: Add declaration of cursors [#3164](https://github.com/sqlfluff/sqlfluff/pull/3164) [@fdw](https://github.com/fdw)
* Missed #3151 from CHANGELOG [#3163](https://github.com/sqlfluff/sqlfluff/pull/3163) [@tunetheweb](https://github.com/tunetheweb)
* Bug fix: L028 sometimes makes incorrect fix when there are subqueries [#3156](https://github.com/sqlfluff/sqlfluff/pull/3156) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Support `OUTPUT INTO` [#3162](https://github.com/sqlfluff/sqlfluff/pull/3162) [@fdw](https://github.com/fdw)
* T-SQL: Add `CREATE TYPE` statement [#3154](https://github.com/sqlfluff/sqlfluff/pull/3154) [@fdw](https://github.com/fdw)
* Hive: Support `TABLESAMPLE` [#3159](https://github.com/sqlfluff/sqlfluff/pull/3159) [@barunpuri](https://github.com/barunpuri)
* Hive: Support back quoted identifier and literal [#3158](https://github.com/sqlfluff/sqlfluff/pull/3158) [@barunpuri](https://github.com/barunpuri)
* T-SQL: Add table hints to `INSERT` and `DELETE` [#3155](https://github.com/sqlfluff/sqlfluff/pull/3155) [@fdw](https://github.com/fdw)

## New Contributors

* [@ddresslerlegalplans](https://github.com/ddresslerlegalplans) made their first contribution in [#3231](https://github.com/sqlfluff/sqlfluff/pull/3231)
* [@greg-finley](https://github.com/greg-finley) made their first contribution in [#3261](https://github.com/sqlfluff/sqlfluff/pull/3261)

## [0.13.0] - 2022-04-22

## Highlights

Major changes include:

* New Rule (L064) for preferred quotes for quoted literals
* Rule speed improvements and fixing performance regression from 0.12.0
* Add configuration option to disallow hanging indents in L003
* Add `ignore_words_regex` configuration option for rules
* New GitHub Annotations option
* Many bug fixes and dialect improvements
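A sketch of configuring two of these in `.sqlfluff` (we believe the L064 key is `preferred_quoted_literal_style`, per the rule documentation; placing `ignore_words_regex` under L014 and the pattern itself are illustrative assumptions):

```ini
[sqlfluff:rules:L064]
# Prefer single quotes for quoted literals.
preferred_quoted_literal_style = single_quotes

[sqlfluff:rules:L014]
# Per-rule option: ignore identifiers matching a regex, e.g. anything
# starting with an underscore (illustrative pattern).
ignore_words_regex = ^_
```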
## What’s Changed

* Redshift: Fix CREATE TABLE column constraints and COPY [#3151](https://github.com/sqlfluff/sqlfluff/pull/3151) [@tunetheweb](https://github.com/tunetheweb)
* New Rule L064: Consistent usage of preferred quotes for quoted literals [#3118](https://github.com/sqlfluff/sqlfluff/pull/3118) [@dmohns](https://github.com/dmohns)
* L025 bug fix: stop incorrectly flagging on nested inner joins [#3145](https://github.com/sqlfluff/sqlfluff/pull/3145) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Add labels, as well as `GRANT`/`DENY`/`REVOKE` [#3149](https://github.com/sqlfluff/sqlfluff/pull/3149) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: allow bracketless `VALUES` in `FROM` clauses [#3141](https://github.com/sqlfluff/sqlfluff/pull/3141) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support `TRY_CONVERT` [#3143](https://github.com/sqlfluff/sqlfluff/pull/3143) [@fdw](https://github.com/fdw)
* T-SQL: Support `NVARCHAR(MAX)` [#3130](https://github.com/sqlfluff/sqlfluff/pull/3130) [@fdw](https://github.com/fdw)
* Allow column-less `INSERT INTO` with bracketed `SELECT` in ANSI and BigQuery [#3139](https://github.com/sqlfluff/sqlfluff/pull/3139) [@tunetheweb](https://github.com/tunetheweb)
* Hive: Support dynamic partition insert [#3126](https://github.com/sqlfluff/sqlfluff/pull/3126) [@barunpuri](https://github.com/barunpuri)
* T-SQL - `ALTER TABLE` - add support for `WITH CHECK ADD CONSTRAINT` and `CHECK CONSTRAINT` [#3132](https://github.com/sqlfluff/sqlfluff/pull/3132) [@nevado](https://github.com/nevado)
* TSQL: Support names for transactions [#3129](https://github.com/sqlfluff/sqlfluff/pull/3129) [@fdw](https://github.com/fdw)
* Snowflake: `StartsWith()` in `FromExpressionElementSegment` caused performance issues for large queries [#3128](https://github.com/sqlfluff/sqlfluff/pull/3128) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix parsing of Compound Statements [#3121](https://github.com/sqlfluff/sqlfluff/pull/3121) [@jonyscathe](https://github.com/jonyscathe)
* SparkSQL: Update to support all valid Literal Types [#3102](https://github.com/sqlfluff/sqlfluff/pull/3102) [@R7L208](https://github.com/R7L208)
* TSQL: Exclude non-function-name keywords from function names [#3112](https://github.com/sqlfluff/sqlfluff/pull/3112) [@jpers36](https://github.com/jpers36)
* ANSI `AT TIME ZONE` parsing improvements [#3115](https://github.com/sqlfluff/sqlfluff/pull/3115) [@tunetheweb](https://github.com/tunetheweb)
* When fixing a file, preserve the input file's permissions [#3114](https://github.com/sqlfluff/sqlfluff/pull/3114) [@barrywhart](https://github.com/barrywhart)
* Bug: L058 (flatten nested `CASE`) triggers incorrectly (the `ELSE` contains additional code) [#3113](https://github.com/sqlfluff/sqlfluff/pull/3113) [@barrywhart](https://github.com/barrywhart)
* Bug fix: Handle "lint" human-format file output correctly [#3109](https://github.com/sqlfluff/sqlfluff/pull/3109) [@barrywhart](https://github.com/barrywhart)
* L003: Add configuration option to disallow hanging indents [#3063](https://github.com/sqlfluff/sqlfluff/pull/3063) [@dmohns](https://github.com/dmohns)
* Add native Github-actions output [#3107](https://github.com/sqlfluff/sqlfluff/pull/3107) [@dmohns](https://github.com/dmohns)
* Improved signed literal parsing [#3108](https://github.com/sqlfluff/sqlfluff/pull/3108) [@tunetheweb](https://github.com/tunetheweb)
* Don't allow fixes to span template blocks [#3105](https://github.com/sqlfluff/sqlfluff/pull/3105) [@barrywhart](https://github.com/barrywhart)
* Add `ignore_words_regex` configuration option [#3098](https://github.com/sqlfluff/sqlfluff/pull/3098) [@dmohns](https://github.com/dmohns)
* Redshift: Better `AT TIME ZONE` support [#3087](https://github.com/sqlfluff/sqlfluff/pull/3087) [@tunetheweb](https://github.com/tunetheweb)
* Fix In The Wild typo [#3100](https://github.com/sqlfluff/sqlfluff/pull/3100) [@sivaraam](https://github.com/sivaraam)
* Snowflake: Add Create Storage Integration grammar. [#3075](https://github.com/sqlfluff/sqlfluff/pull/3075) [@jmc-bbk](https://github.com/jmc-bbk)
* ANSI: Allow `indented_using_on` in `MERGE` statements `ON` [#3096](https://github.com/sqlfluff/sqlfluff/pull/3096) [@dmohns](https://github.com/dmohns)
* Postgres: Support `COLLATE` in more clauses [#3095](https://github.com/sqlfluff/sqlfluff/pull/3095) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Support `NORMALIZE` function [#3086](https://github.com/sqlfluff/sqlfluff/pull/3086) [@tunetheweb](https://github.com/tunetheweb)
* ANSI (and other dialects): Add `DROP FUNCTION` support [#3082](https://github.com/sqlfluff/sqlfluff/pull/3082) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support `DROP EXTENSION` [#3083](https://github.com/sqlfluff/sqlfluff/pull/3083) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Fix bug in Describe Statement [#3076](https://github.com/sqlfluff/sqlfluff/pull/3076) [@jmc-bbk](https://github.com/jmc-bbk)
* Update individual rules to take advantage of core rule processing changes [#3041](https://github.com/sqlfluff/sqlfluff/pull/3041) [@barrywhart](https://github.com/barrywhart)
* L003 forgives non misbalanced Jinja tags [#3065](https://github.com/sqlfluff/sqlfluff/pull/3065) [@OTooleMichael](https://github.com/OTooleMichael)
* Fix tsql dialect `EXEC = @Variable StoredProc` Failed Parsing Bug (#3070) [#3077](https://github.com/sqlfluff/sqlfluff/pull/3077) [@MartynJones87](https://github.com/MartynJones87)
* Snowflake Dialect: Add External Function DDL [#3071](https://github.com/sqlfluff/sqlfluff/pull/3071) [@chrisalexeev](https://github.com/chrisalexeev)
* SparkSQL: Support for Delta `UPDATE` statement syntax [#3073](https://github.com/sqlfluff/sqlfluff/pull/3073) [@R7L208](https://github.com/R7L208)
* SparkSQL: Test cases for Delta `DELETE FROM` syntax [#3072](https://github.com/sqlfluff/sqlfluff/pull/3072) [@R7L208](https://github.com/R7L208)
* Postgres: Support quoted `LANGUAGE` params [#3068](https://github.com/sqlfluff/sqlfluff/pull/3068) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug handling Jinja set with multiple vars, e.g.: `{% set a, b = 1, 2 %}` [#3066](https://github.com/sqlfluff/sqlfluff/pull/3066) [@barrywhart](https://github.com/barrywhart)
* L007 should ignore templated newlines [#3067](https://github.com/sqlfluff/sqlfluff/pull/3067) [@barrywhart](https://github.com/barrywhart)
* Allow aliases to pass L028 [#3062](https://github.com/sqlfluff/sqlfluff/pull/3062) [@tunetheweb](https://github.com/tunetheweb)
* Refactor core rule processing for flexibility and speed [#3061](https://github.com/sqlfluff/sqlfluff/pull/3061) [@barrywhart](https://github.com/barrywhart)
* Add editorconfig and precommit for SQL and YML files [#3058](https://github.com/sqlfluff/sqlfluff/pull/3058) [@tunetheweb](https://github.com/tunetheweb)
* Rule L003 performance: Cache the line number and last newline position [#3060](https://github.com/sqlfluff/sqlfluff/pull/3060) [@barrywhart](https://github.com/barrywhart)
* Fixed documentation for `sql_file_exts` example [#3059](https://github.com/sqlfluff/sqlfluff/pull/3059) [@KulykDmytro](https://github.com/KulykDmytro)
* BigQuery: Support `SAFE` functions [#3048](https://github.com/sqlfluff/sqlfluff/pull/3048) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Fix `UNNEST` for L025 [#3054](https://github.com/sqlfluff/sqlfluff/pull/3054) [@tunetheweb](https://github.com/tunetheweb)
* Exasol: `CREATE/DROP/ALTER USER/ROLE` clean up for consistency [#3045](https://github.com/sqlfluff/sqlfluff/pull/3045) [@tunetheweb](https://github.com/tunetheweb)
* Postgres add `ALTER ROLE`/`ALTER USER` support [#3043](https://github.com/sqlfluff/sqlfluff/pull/3043) [@mrf](https://github.com/mrf)
* Add CarePay to SQLFluff in the wild [#3038](https://github.com/sqlfluff/sqlfluff/pull/3038) [@pvonglehn](https://github.com/pvonglehn)
* Postgres: Add `ON CONFLICT` Grammar [#3027](https://github.com/sqlfluff/sqlfluff/pull/3027) [@jmc-bbk](https://github.com/jmc-bbk)
* Add dialect to Docker test [#3033](https://github.com/sqlfluff/sqlfluff/pull/3033) [@tunetheweb](https://github.com/tunetheweb)

## New Contributors

* [@chrisalexeev](https://github.com/chrisalexeev) made their first contribution in [#3071](https://github.com/sqlfluff/sqlfluff/pull/3071)
* [@MartynJones87](https://github.com/MartynJones87) made their first contribution in [#3077](https://github.com/sqlfluff/sqlfluff/pull/3077)
* [@sivaraam](https://github.com/sivaraam) made their first contribution in [#3100](https://github.com/sqlfluff/sqlfluff/pull/3100)
* [@jonyscathe](https://github.com/jonyscathe) made their first contribution in [#3121](https://github.com/sqlfluff/sqlfluff/pull/3121)
* [@barunpuri](https://github.com/barunpuri) made their first contribution in [#3126](https://github.com/sqlfluff/sqlfluff/pull/3126)

## [0.12.0] - 2022-04-07

## Highlights

Major changes include:

* Dialect is now mandatory, either on the command line or in config **BREAKING CHANGE**
* Rename `spark3` dialect to `sparksql` **BREAKING CHANGE**
* L027 now checks table references exist **BREAKING CHANGE**
* New rule L063 to allow Datatypes to have a different capitalisation policy from L010 **BREAKING CHANGE**
* Refactor and performance improvements of Delimited and L003
* Many dialect improvements and fixes
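Because the dialect is now mandatory, every invocation needs one, either on the command line (the file name here is illustrative):

```console
$ sqlfluff lint my_query.sql --dialect ansi
```

or equivalently in `.sqlfluff`:

```ini
[sqlfluff]
dialect = ansi
```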
[@tunetheweb](https://github.com/tunetheweb) * BigQuery: Allow quoted variable names in `DECLARE` [#3006](https://github.com/sqlfluff/sqlfluff/pull/3006) [@dmohns](https://github.com/dmohns) * T-SQL: allow optional `AS` keyword in parameters def [#3001](https://github.com/sqlfluff/sqlfluff/pull/3001) [@pguyot](https://github.com/pguyot) * T-SQL: add support for `RETURNS @var TABLE` syntax [#3000](https://github.com/sqlfluff/sqlfluff/pull/3000) [@pguyot](https://github.com/pguyot) * T-SQL: add support for parenthesized nested joins [#2993](https://github.com/sqlfluff/sqlfluff/pull/2993) [@pguyot](https://github.com/pguyot) * dbt: Read builtins from code [#2988](https://github.com/sqlfluff/sqlfluff/pull/2988) [@dmohns](https://github.com/dmohns) * T-SQL: fix table type in `DECLARE` statements [#2999](https://github.com/sqlfluff/sqlfluff/pull/2999) [@pguyot](https://github.com/pguyot) * T-SQL: allow leading `GO` [#2997](https://github.com/sqlfluff/sqlfluff/pull/2997) [@pguyot](https://github.com/pguyot) * T-SQL: add support for assignment operators [#2996](https://github.com/sqlfluff/sqlfluff/pull/2996) [@pguyot](https://github.com/pguyot) * BigQuery: Add more `MERGE` statement variants [#2989](https://github.com/sqlfluff/sqlfluff/pull/2989) [@dmohns](https://github.com/dmohns) * L041: Fix bug when there is a newline after the select clause modifier [#2981](https://github.com/sqlfluff/sqlfluff/pull/2981) [@jmc-bbk](https://github.com/jmc-bbk) * Rule L045 doesn't recognise CTE usage in a subquery when rule L042 is enabled [#2980](https://github.com/sqlfluff/sqlfluff/pull/2980) [@barrywhart](https://github.com/barrywhart) * dbt: Make `is_incremental()` defaults consistent [#2985](https://github.com/sqlfluff/sqlfluff/pull/2985) [@dmohns](https://github.com/dmohns) * Rename Grammars for consistency [#2986](https://github.com/sqlfluff/sqlfluff/pull/2986) [@tunetheweb](https://github.com/tunetheweb) * Added support for MySQL `UPDATE` Statements [#2982](https://github.com/sqlfluff/sqlfluff/pull/2982) [@WittierDinosaur](https://github.com/WittierDinosaur) * Redshift: Added `CREATE EXTERNAL SCHEMA`, bugfix in `PARTITION BY` [#2983](https://github.com/sqlfluff/sqlfluff/pull/2983) [@WittierDinosaur](https://github.com/WittierDinosaur) * Added `ALTER INDEX` and `REINDEX` to Postgres, Some Grammar Cleaning [#2979](https://github.com/sqlfluff/sqlfluff/pull/2979) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL grammar deduplication [#2967](https://github.com/sqlfluff/sqlfluff/pull/2967) [@tunetheweb](https://github.com/tunetheweb) * L003 Refactor [#2884](https://github.com/sqlfluff/sqlfluff/pull/2884) [@OTooleMichael](https://github.com/OTooleMichael) * Delimited Refactor [#2831](https://github.com/sqlfluff/sqlfluff/pull/2831) [@WittierDinosaur](https://github.com/WittierDinosaur) * SparkSQL: Support for querying snapshots when reading data with Delta Lake [#2972](https://github.com/sqlfluff/sqlfluff/pull/2972) [@R7L208](https://github.com/R7L208) * Fix bug in L063 for BigQuery `STRUCT` params [#2975](https://github.com/sqlfluff/sqlfluff/pull/2975) [@tunetheweb](https://github.com/tunetheweb) * Fix assertion error in dbt templater when file ends with whitespace strip (`-%}`) [#2976](https://github.com/sqlfluff/sqlfluff/pull/2976) [@barrywhart](https://github.com/barrywhart) * Pass dbt vars to dbt [#2923](https://github.com/sqlfluff/sqlfluff/pull/2923) [@tcholewik](https://github.com/tcholewik) * BigQuery: Add support for column `OPTIONS` [#2973](https://github.com/sqlfluff/sqlfluff/pull/2973) 
[@dmohns](https://github.com/dmohns) * BigQuery: Allow expressions in `OPTIONS` clauses [#2971](https://github.com/sqlfluff/sqlfluff/pull/2971) [@dmohns](https://github.com/dmohns) * Bump black to 22.3.0 on pre-commit [#2969](https://github.com/sqlfluff/sqlfluff/pull/2969) [@pguyot](https://github.com/pguyot) * T-SQL: Redefine `DatatypeIdentifierSegment` [#2959](https://github.com/sqlfluff/sqlfluff/pull/2959) [@alanmcruickshank](https://github.com/alanmcruickshank) * T-SQL: Add support for `WAITFOR` statement [#2968](https://github.com/sqlfluff/sqlfluff/pull/2968) [@pguyot](https://github.com/pguyot) * T-SQL: Add `WHILE` statement support [#2966](https://github.com/sqlfluff/sqlfluff/pull/2966) [@pguyot](https://github.com/pguyot) * T-SQL: `INTO` is optional within `INSERT` statement [#2963](https://github.com/sqlfluff/sqlfluff/pull/2963) [@pguyot](https://github.com/pguyot) * Add basic `IS (NOT) DISTINCT FROM` support in most dialects [#2962](https://github.com/sqlfluff/sqlfluff/pull/2962) [@tunetheweb](https://github.com/tunetheweb) * SparkSQL: Create Table Delta Lake Variant [#2954](https://github.com/sqlfluff/sqlfluff/pull/2954) [@R7L208](https://github.com/R7L208) * T-SQL: Add support for `CREATE`/`DROP`/`DISABLE` `TRIGGER` [#2957](https://github.com/sqlfluff/sqlfluff/pull/2957) [@tunetheweb](https://github.com/tunetheweb) * Bug: L042 modifies parse tree even during "lint" [#2955](https://github.com/sqlfluff/sqlfluff/pull/2955) [@barrywhart](https://github.com/barrywhart) * Allow multiple post function clauses in Postgres and Redshift [#2952](https://github.com/sqlfluff/sqlfluff/pull/2952) [@aviv](https://github.com/aviv) * Fix bug in L022 for trailing comments in CTE [#2946](https://github.com/sqlfluff/sqlfluff/pull/2946) [@tunetheweb](https://github.com/tunetheweb) * More dialect checking, fixes, inheritance cleanup [#2942](https://github.com/sqlfluff/sqlfluff/pull/2942) [@barrywhart](https://github.com/barrywhart) * T-SQL: Support `OUTPUT` Params and `GOTO` Statements [#2949](https://github.com/sqlfluff/sqlfluff/pull/2949) [@tunetheweb](https://github.com/tunetheweb) * BREAKING CHANGE: change existing dialect name from `spark3` to `sparksql` [#2924](https://github.com/sqlfluff/sqlfluff/pull/2924) [@R7L208](https://github.com/R7L208) * Add Symend to SQLFluff In The Wild [#2940](https://github.com/sqlfluff/sqlfluff/pull/2940) [@HeyZiko](https://github.com/HeyZiko) * Simplify segment creation and inheritance in dialects [#2933](https://github.com/sqlfluff/sqlfluff/pull/2933) [@barrywhart](https://github.com/barrywhart) * Snowflake: Add `ALTER STREAM` support [#2939](https://github.com/sqlfluff/sqlfluff/pull/2939) [@HeyZiko](https://github.com/HeyZiko) * T-SQL: Handle multiple nested joins [#2938](https://github.com/sqlfluff/sqlfluff/pull/2938) [@tunetheweb](https://github.com/tunetheweb) * Snowflake: Add `CREATE STREAM` support [#2936](https://github.com/sqlfluff/sqlfluff/pull/2936) [@HeyZiko](https://github.com/HeyZiko) * T-SQL: Support nested joins [#2928](https://github.com/sqlfluff/sqlfluff/pull/2928) [@tunetheweb](https://github.com/tunetheweb) * To replace base dialect segment class, must subclass or provide same stuff [#2930](https://github.com/sqlfluff/sqlfluff/pull/2930) [@barrywhart](https://github.com/barrywhart) * Add new rule L063 to allow separate capitalisation policy for Datatypes [#2931](https://github.com/sqlfluff/sqlfluff/pull/2931) [@tunetheweb](https://github.com/tunetheweb) * Adds support for column definitions in table alias expressions 
[#2932](https://github.com/sqlfluff/sqlfluff/pull/2932) [@derickl](https://github.com/derickl) * BigQuery: support numeric aliases in `UNPIVOT` clauses [#2925](https://github.com/sqlfluff/sqlfluff/pull/2925) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Supported nested `MERGE` statements [#2926](https://github.com/sqlfluff/sqlfluff/pull/2926) [@tunetheweb](https://github.com/tunetheweb) ## New Contributors * [@HeyZiko](https://github.com/HeyZiko) made their first contribution in [#2936](https://github.com/sqlfluff/sqlfluff/pull/2936) * [@aviv](https://github.com/aviv) made their first contribution in [#2952](https://github.com/sqlfluff/sqlfluff/pull/2952) * [@pguyot](https://github.com/pguyot) made their first contribution in [#2963](https://github.com/sqlfluff/sqlfluff/pull/2963) * [@dmohns](https://github.com/dmohns) made their first contribution in [#2971](https://github.com/sqlfluff/sqlfluff/pull/2971) * [@tcholewik](https://github.com/tcholewik) made their first contribution in [#2923](https://github.com/sqlfluff/sqlfluff/pull/2923) * [@jmc-bbk](https://github.com/jmc-bbk) made their first contribution in [#2981](https://github.com/sqlfluff/sqlfluff/pull/2981) * [@dnim](https://github.com/dnim) made their first contribution in [#3010](https://github.com/sqlfluff/sqlfluff/pull/3010) * [@kd2718](https://github.com/kd2718) made their first contribution in [#3014](https://github.com/sqlfluff/sqlfluff/pull/3014) * [@mrf](https://github.com/mrf) made their first contribution in [#3016](https://github.com/sqlfluff/sqlfluff/pull/3016) * [@zapion](https://github.com/zapion) made their first contribution in [#3031](https://github.com/sqlfluff/sqlfluff/pull/3031) ## [0.11.2] - 2022-03-25 ## What’s Changed * Added Support For Snowflake Inline Comments [#2919](https://github.com/sqlfluff/sqlfluff/pull/2919) [@WittierDinosaur](https://github.com/WittierDinosaur) * Spark3: Fix bug which did not allow multiple joins [#2917](https://github.com/sqlfluff/sqlfluff/pull/2917) [@tunetheweb](https://github.com/tunetheweb) * Added Snowflake Alter View Support [#2915](https://github.com/sqlfluff/sqlfluff/pull/2915) [@WittierDinosaur](https://github.com/WittierDinosaur) * Adjust L010 to ignore nulls and booleans covered by L040 [#2913](https://github.com/sqlfluff/sqlfluff/pull/2913) [@tunetheweb](https://github.com/tunetheweb) * Fix bug where L043 corrupts SQL [#2908](https://github.com/sqlfluff/sqlfluff/pull/2908) [@barrywhart](https://github.com/barrywhart) * Jinja: Add support for Block Assignments [#2907](https://github.com/sqlfluff/sqlfluff/pull/2907) [@barrywhart](https://github.com/barrywhart) * L042 fix fails with missing function `get_identifier()` on Postgres, Redshift dialects [#2899](https://github.com/sqlfluff/sqlfluff/pull/2899) [@barrywhart](https://github.com/barrywhart) * BigQuery: Better Set Operators support [#2901](https://github.com/sqlfluff/sqlfluff/pull/2901) [@tunetheweb](https://github.com/tunetheweb) * Hive: support for complex types in `cast` `rowtype` definition [#2896](https://github.com/sqlfluff/sqlfluff/pull/2896) [@KulykDmytro](https://github.com/KulykDmytro) * Hive: added `json` type support [#2894](https://github.com/sqlfluff/sqlfluff/pull/2894) [@KulykDmytro](https://github.com/KulykDmytro) * Hive: fix incorrect L027 error for rowtype attribute name [#2893](https://github.com/sqlfluff/sqlfluff/pull/2893) [@KulykDmytro](https://github.com/KulykDmytro) * Hive: Add `ARRAY` support [#2891](https://github.com/sqlfluff/sqlfluff/pull/2891) 
[@tunetheweb](https://github.com/tunetheweb) * Implemented `PIVOT`/`UNPIVOT` Redshift + Fixed Snowflake Bug + Standardized `PIVOT`/`UNPIVOT` Parsing [#2888](https://github.com/sqlfluff/sqlfluff/pull/2888) [@PLBMR](https://github.com/PLBMR) * Fix AssertionError in dbt templater when file ends with multiple newlines [#2887](https://github.com/sqlfluff/sqlfluff/pull/2887) [@barrywhart](https://github.com/barrywhart) * Hive: Row typecasting in `cast` function [#2889](https://github.com/sqlfluff/sqlfluff/pull/2889) [@KulykDmytro](https://github.com/KulykDmytro) * dbt templater should gracefully skip macro files [#2886](https://github.com/sqlfluff/sqlfluff/pull/2886) [@barrywhart](https://github.com/barrywhart) * Disable L031 on BigQuery due to complex backtick / project name behavior [#2882](https://github.com/sqlfluff/sqlfluff/pull/2882) [@barrywhart](https://github.com/barrywhart) * Documentation: Update dbt templater docs with more detail about pros and cons [#2885](https://github.com/sqlfluff/sqlfluff/pull/2885) [@barrywhart](https://github.com/barrywhart) * BigQuery: Better `STRUCT` Array Support [#2881](https://github.com/sqlfluff/sqlfluff/pull/2881) [@tunetheweb](https://github.com/tunetheweb) * L042: Detect violations when column is templated [#2879](https://github.com/sqlfluff/sqlfluff/pull/2879) [@barrywhart](https://github.com/barrywhart) * Improve parsing of `BETWEEN` statements [#2878](https://github.com/sqlfluff/sqlfluff/pull/2878) [@MarcAntoineSchmidtQC](https://github.com/MarcAntoineSchmidtQC) ## [0.11.1] - 2022-03-17 ## Highlights Major changes include: * A number of changes to `fix` code to make these more robust * Improvements to templating blocks * `generate_parse_fixture_yml` options to allow quicker, partial regeneration of YML files * Numerous rule fixes including adding auto fix to L042 * Numerous grammar changes ## What’s Changed * Spark3: Support for `SHOW` statements [#2864](https://github.com/sqlfluff/sqlfluff/pull/2864) [@R7L208](https://github.com/R7L208) * Add Markerr to list of organizations using SQLFluff in the wild [#2874](https://github.com/sqlfluff/sqlfluff/pull/2874) [@kdw2126](https://github.com/kdw2126) * Refactor JinjaTracer: Split into two classes, break up `_slice_template()` function [#2870](https://github.com/sqlfluff/sqlfluff/pull/2870) [@barrywhart](https://github.com/barrywhart) * BigQuery: support Parameterized Numeric Literals [#2872](https://github.com/sqlfluff/sqlfluff/pull/2872) [@tunetheweb](https://github.com/tunetheweb) * L042 autofix [#2860](https://github.com/sqlfluff/sqlfluff/pull/2860) [@OTooleMichael](https://github.com/OTooleMichael) * Redshift: transaction statement [#2852](https://github.com/sqlfluff/sqlfluff/pull/2852) [@rpr-ableton](https://github.com/rpr-ableton) * JinjaTracer fix for endif/endfor inside "set" or "macro" blocks [#2868](https://github.com/sqlfluff/sqlfluff/pull/2868) [@barrywhart](https://github.com/barrywhart) * L009: Handle adding newline after `{% endif %}` at end of file [#2862](https://github.com/sqlfluff/sqlfluff/pull/2862) [@barrywhart](https://github.com/barrywhart) * Redshift: Add support for `AT TIME ZONE` [#2863](https://github.com/sqlfluff/sqlfluff/pull/2863) [@tunetheweb](https://github.com/tunetheweb) * L032 bug fix and fix improvement [#2859](https://github.com/sqlfluff/sqlfluff/pull/2859) [@OTooleMichael](https://github.com/OTooleMichael) * Refactor JinjaTracer; store lex output as individual strings where possible [#2856](https://github.com/sqlfluff/sqlfluff/pull/2856) 
[@barrywhart](https://github.com/barrywhart) * Add ability to regenerate subsets of fixture YAMLs (by dialect, or new only) [#2850](https://github.com/sqlfluff/sqlfluff/pull/2850) [@OTooleMichael](https://github.com/OTooleMichael) * Fix bug with Jinja and dbt `{% set %}` blocks [#2849](https://github.com/sqlfluff/sqlfluff/pull/2849) [@barrywhart](https://github.com/barrywhart) * Bug fix: `ValueError: Position Not Found for lint/parse/fix` in JinjaTracer [#2846](https://github.com/sqlfluff/sqlfluff/pull/2846) [@barrywhart](https://github.com/barrywhart) * Reduce unnecessary setting run ci [#2847](https://github.com/sqlfluff/sqlfluff/pull/2847) [@zhongjiajie](https://github.com/zhongjiajie) * Spark3: statements to `SET` and `RESET` spark runtime configurations [#2839](https://github.com/sqlfluff/sqlfluff/pull/2839) [@R7L208](https://github.com/R7L208) * BigQuery - prevent L006 flagging hyphenated table references [#2842](https://github.com/sqlfluff/sqlfluff/pull/2842) [@tunetheweb](https://github.com/tunetheweb) * T-SQL fix `CONVERT` function definition [#2843](https://github.com/sqlfluff/sqlfluff/pull/2843) [@tunetheweb](https://github.com/tunetheweb) * Change rule test script from bash to python [#2840](https://github.com/sqlfluff/sqlfluff/pull/2840) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3: Support `DESCRIBE` statement [#2837](https://github.com/sqlfluff/sqlfluff/pull/2837) [@R7L208](https://github.com/R7L208) * Spark3: Refactor `REFRESH` statements into one class [#2838](https://github.com/sqlfluff/sqlfluff/pull/2838) [@R7L208](https://github.com/R7L208) * Prevent rules incorrectly returning conflicting fixes to same position [#2830](https://github.com/sqlfluff/sqlfluff/pull/2830) [@barrywhart](https://github.com/barrywhart) * Redshift and BigQuery: Update dateparts values and functions [#2829](https://github.com/sqlfluff/sqlfluff/pull/2829) [@rpr-ableton](https://github.com/rpr-ableton) * MySQL add `NOW` support [#2825](https://github.com/sqlfluff/sqlfluff/pull/2825) [@tunetheweb](https://github.com/tunetheweb) * MySQL `DELETE FROM` support [#2823](https://github.com/sqlfluff/sqlfluff/pull/2823) [@tunetheweb](https://github.com/tunetheweb) * Rule L059 bug with `IF` [#2824](https://github.com/sqlfluff/sqlfluff/pull/2824) [@tunetheweb](https://github.com/tunetheweb) * Prevent exceptions when running `fix` on dialect fixtures [#2818](https://github.com/sqlfluff/sqlfluff/pull/2818) [@tunetheweb](https://github.com/tunetheweb) * Spark3: Support to handle `CACHE` AND `UNCACHE` auxiliary statements [#2814](https://github.com/sqlfluff/sqlfluff/pull/2814) [@R7L208](https://github.com/R7L208) * Fix L036 error on `CREATE VIEW AS SELECT` [#2816](https://github.com/sqlfluff/sqlfluff/pull/2816) [@tunetheweb](https://github.com/tunetheweb) * Fixes for the new post-fix parse check [#2813](https://github.com/sqlfluff/sqlfluff/pull/2813) [@barrywhart](https://github.com/barrywhart) * Add initial `MERGE` syntax to most dialects [#2807](https://github.com/sqlfluff/sqlfluff/pull/2807) [@PhilippLange](https://github.com/PhilippLange) * Automated tests should fail if a lint fix introduces a parse error [#2809](https://github.com/sqlfluff/sqlfluff/pull/2809) [@barrywhart](https://github.com/barrywhart) ## New Contributors * [@kdw2126](https://github.com/kdw2126) made their first contribution in [#2874](https://github.com/sqlfluff/sqlfluff/pull/2874) ## [0.11.0] - 2022-03-07 ## Highlights Major changes include: * Changes rule L030 to use `extended_capitalisation_policy` to support PascalCase 
**BREAKING CHANGE** * Fixes dbt error on ephemeral models * Log warnings for fixes that seem to corrupt the parse SQL as may cause incorrect fixes in other rules. * Bug fix to rule L011 for `implicit` aliases * Bug fix to rule L019 for commas besides templated code * Rule L051 can now optionally be applied to `LEFT`/`RIGHT`/`OUTER JOIN`s * Improvements to Test Suite * Many dialect improvements ## What’s Changed * Exasol: Fix `INTERVAL` literals / expression [#2804](https://github.com/sqlfluff/sqlfluff/pull/2804) [@sti0](https://github.com/sti0) * Exasol: Add `IDLE_TIMEOUT` and `SNAPSHOT_MODE` [#2805](https://github.com/sqlfluff/sqlfluff/pull/2805) [@sti0](https://github.com/sti0) * Exasol: Support value range clause within `INSERT` statements (7.1+) [#2802](https://github.com/sqlfluff/sqlfluff/pull/2802) [@sti0](https://github.com/sti0) * Exasol: Add lua adapter scripts (7.1+) [#2801](https://github.com/sqlfluff/sqlfluff/pull/2801) [@sti0](https://github.com/sti0) * Exasol: Add openid support for create/alter user (7.1+) [#2800](https://github.com/sqlfluff/sqlfluff/pull/2800) [@sti0](https://github.com/sti0) * Exasol: New consumer group params and unreserved keywords (7.1+) [#2799](https://github.com/sqlfluff/sqlfluff/pull/2799) [@sti0](https://github.com/sti0) * Snowflake: Complete `INSERT` grammar [#2798](https://github.com/sqlfluff/sqlfluff/pull/2798) [@jpy-git](https://github.com/jpy-git) * Fix Postgres `VALUES`, make Spark3 `VALUES` consistent [#2797](https://github.com/sqlfluff/sqlfluff/pull/2797) [@jpy-git](https://github.com/jpy-git) * Postgres: `INSERT DEFAULT` value [#2796](https://github.com/sqlfluff/sqlfluff/pull/2796) [@jpy-git](https://github.com/jpy-git) * Postgres: Make `AS` optional in Postgres `DELETE` [#2794](https://github.com/sqlfluff/sqlfluff/pull/2794) [@jpy-git](https://github.com/jpy-git) * BigQuery support `UNEST` aliases [#2793](https://github.com/sqlfluff/sqlfluff/pull/2793) [@tunetheweb](https://github.com/tunetheweb) * Postgres: Add all range operators [#2789](https://github.com/sqlfluff/sqlfluff/pull/2789) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `DELETE FROM` grammar [#2792](https://github.com/sqlfluff/sqlfluff/pull/2792) [@jpy-git](https://github.com/jpy-git) * Postgres: Complete `DELETE FROM` grammar [#2791](https://github.com/sqlfluff/sqlfluff/pull/2791) [@jpy-git](https://github.com/jpy-git) * Postgres: Add `RETURNING` grammar to `INSERT INTO` statement [#2790](https://github.com/sqlfluff/sqlfluff/pull/2790) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `PATTERN` grammar [#2788](https://github.com/sqlfluff/sqlfluff/pull/2788) [@jpy-git](https://github.com/jpy-git) * Redshift: add `[ALTER/CREATE/DROP] PROCEDURE` segments [#2774](https://github.com/sqlfluff/sqlfluff/pull/2774) [@rpr-ableton](https://github.com/rpr-ableton) * Spark3: Support for `ANALYZE TABLE` statement [#2780](https://github.com/sqlfluff/sqlfluff/pull/2780) [@R7L208](https://github.com/R7L208) * Snowflake: Add `MATCH_RECOGNIZE` clause [#2781](https://github.com/sqlfluff/sqlfluff/pull/2781) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `LIMIT` grammar [#2784](https://github.com/sqlfluff/sqlfluff/pull/2784) [@jpy-git](https://github.com/jpy-git) * Rough autofix for L028 [#2757](https://github.com/sqlfluff/sqlfluff/pull/2757) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3 bug: Create with complex data types (#2761) [#2782](https://github.com/sqlfluff/sqlfluff/pull/2782) [@PhilippLange](https://github.com/PhilippLange) * 
Snowflake: Complete `LIKE` grammar [#2779](https://github.com/sqlfluff/sqlfluff/pull/2779) [@jpy-git](https://github.com/jpy-git) * Spark3: Auxiliary`FILE` and `JAR` statements [#2778](https://github.com/sqlfluff/sqlfluff/pull/2778) [@R7L208](https://github.com/R7L208) * Snowflake: Refine `SET`/`UNSET` `MASKING POLICY` grammar [#2775](https://github.com/sqlfluff/sqlfluff/pull/2775) [@jpy-git](https://github.com/jpy-git) * L049 bug: correct over zealous `=` --> `IS` [#2760](https://github.com/sqlfluff/sqlfluff/pull/2760) [@OTooleMichael](https://github.com/OTooleMichael) * Make extension case insensitive [#2773](https://github.com/sqlfluff/sqlfluff/pull/2773) [@tunetheweb](https://github.com/tunetheweb) * Snowflake: Add dollar quoted string literal [#2770](https://github.com/sqlfluff/sqlfluff/pull/2770) [@jpy-git](https://github.com/jpy-git) * Bug fix: L036 corrupts `SELECT DISTINCT id` query [#2768](https://github.com/sqlfluff/sqlfluff/pull/2768) [@barrywhart](https://github.com/barrywhart) * Snowflake: Add `CHANGES` clause [#2764](https://github.com/sqlfluff/sqlfluff/pull/2764) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `EXPLAIN` statement [#2767](https://github.com/sqlfluff/sqlfluff/pull/2767) [@R7L208](https://github.com/R7L208) * Snowflake: Add `CONNECT BY` clause [#2763](https://github.com/sqlfluff/sqlfluff/pull/2763) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `TRANSFORM` clause [#2762](https://github.com/sqlfluff/sqlfluff/pull/2762) [@R7L208](https://github.com/R7L208) * Snowflake: Fix `GROUP BY {CUBE|ROLLUP|GROUPING SETS}` parsing [#2759](https://github.com/sqlfluff/sqlfluff/pull/2759) [@jpy-git](https://github.com/jpy-git) * BigQuery: allow identifiers starting with dash [#2756](https://github.com/sqlfluff/sqlfluff/pull/2756) [@tunetheweb](https://github.com/tunetheweb) * Add `ignore_words` options to L057 and L059 [#2753](https://github.com/sqlfluff/sqlfluff/pull/2753) [@tunetheweb](https://github.com/tunetheweb) * L012 bug fix for T-SQL alternative alias types [#2750](https://github.com/sqlfluff/sqlfluff/pull/2750) [@tunetheweb](https://github.com/tunetheweb) * Spark3: Support for `PIVOT` clause [#2752](https://github.com/sqlfluff/sqlfluff/pull/2752) [@R7L208](https://github.com/R7L208) * Update Redshift reserved keywords list [#2751](https://github.com/sqlfluff/sqlfluff/pull/2751) [@rpr-ableton](https://github.com/rpr-ableton) * L007 autofix [#2735](https://github.com/sqlfluff/sqlfluff/pull/2735) [@OTooleMichael](https://github.com/OTooleMichael) * L032 fixable in easy cases [#2737](https://github.com/sqlfluff/sqlfluff/pull/2737) [@OTooleMichael](https://github.com/OTooleMichael) * Fix dbt templater runtime error in `inject_ctes_into_sql()` [#2748](https://github.com/sqlfluff/sqlfluff/pull/2748) [@barrywhart](https://github.com/barrywhart) * L059: Exasol: Allow quotes around passwords in `CREATE USER` [#2744](https://github.com/sqlfluff/sqlfluff/pull/2744) [@sti0](https://github.com/sti0) * Improve docs for `load_macros_from_path` [#2743](https://github.com/sqlfluff/sqlfluff/pull/2743) [@barrywhart](https://github.com/barrywhart) * Make L045 (Query defines a CTE but does not use it) case insensitive [#2746](https://github.com/sqlfluff/sqlfluff/pull/2746) [@barrywhart](https://github.com/barrywhart) * Add L049 test for T-SQL alternate alias syntax (`=`) [#2745](https://github.com/sqlfluff/sqlfluff/pull/2745) [@barrywhart](https://github.com/barrywhart) * `BaseSegment.pos_marker` is typed as non optional but sometimes set to `None` 
[#2741](https://github.com/sqlfluff/sqlfluff/pull/2741) [@barrywhart](https://github.com/barrywhart) * Support Pascal case for L030 [#2739](https://github.com/sqlfluff/sqlfluff/pull/2739) [@tunetheweb](https://github.com/tunetheweb) * Postgres, Redshift: Support `SIMILAR TO` pattern matching expressions [#2732](https://github.com/sqlfluff/sqlfluff/pull/2732) [@PLBMR](https://github.com/PLBMR) * Forgive shorthand cast only / bracket only expressions from L013 [#2729](https://github.com/sqlfluff/sqlfluff/pull/2729) [@OTooleMichael](https://github.com/OTooleMichael) * L052: Refactor `_eval()` into individual functions to improve readability [#2733](https://github.com/sqlfluff/sqlfluff/pull/2733) [@barrywhart](https://github.com/barrywhart) * L018: Move closing parenthesis to next line [#2734](https://github.com/sqlfluff/sqlfluff/pull/2734) [@barrywhart](https://github.com/barrywhart) * Improve rule yaml tests: assert that `fix_str` passes the rule [#2624](https://github.com/sqlfluff/sqlfluff/pull/2624) [@juhoautio](https://github.com/juhoautio) * Extend rule L051 to `LEFT`/`RIGHT`/`OUTER` `JOIN`s [#2719](https://github.com/sqlfluff/sqlfluff/pull/2719) [@rpr-ableton](https://github.com/rpr-ableton) * T-SQL: Allow aliases with `=` [#2727](https://github.com/sqlfluff/sqlfluff/pull/2727) [@fdw](https://github.com/fdw) * T-SQL: Support table variables [#2728](https://github.com/sqlfluff/sqlfluff/pull/2728) [@fdw](https://github.com/fdw) * Support for checking violations in YAML rule tests [#2718](https://github.com/sqlfluff/sqlfluff/pull/2718) [@juhoautio](https://github.com/juhoautio) * Roll back PR #2610 [#2726](https://github.com/sqlfluff/sqlfluff/pull/2726) [@barrywhart](https://github.com/barrywhart) * Redshift: Allow whitespace around cast operators [#2721](https://github.com/sqlfluff/sqlfluff/pull/2721) [@PLBMR](https://github.com/PLBMR) * Support database links in Oracle [#2725](https://github.com/sqlfluff/sqlfluff/pull/2725) [@tunetheweb](https://github.com/tunetheweb) * Rule L019: Ignore comma placement violations if the adjacent code is templated [#2717](https://github.com/sqlfluff/sqlfluff/pull/2717) [@barrywhart](https://github.com/barrywhart) * T-SQL: Add drop constraint syntax [#2724](https://github.com/sqlfluff/sqlfluff/pull/2724) [@fdw](https://github.com/fdw) * ANSI: Support optionally bracketed CTE [#2716](https://github.com/sqlfluff/sqlfluff/pull/2716) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3: Test cases for `CASE` clause [#2714](https://github.com/sqlfluff/sqlfluff/pull/2714) [@R7L208](https://github.com/R7L208) * Spark3: Support for `WINDOW` functions [#2711](https://github.com/sqlfluff/sqlfluff/pull/2711) [@R7L208](https://github.com/R7L208) * T-SQL: Add variables as options for `RAISERROR` parameters [#2709](https://github.com/sqlfluff/sqlfluff/pull/2709) [@jpers36](https://github.com/jpers36) * T-SQL: Add `OPTION` clause to `UPDATE` [#2707](https://github.com/sqlfluff/sqlfluff/pull/2707) [@jpers36](https://github.com/jpers36) * Spark3: Test cases for `WHERE` clause [#2704](https://github.com/sqlfluff/sqlfluff/pull/2704) [@R7L208](https://github.com/R7L208) * Spark3: test cases for Table-Valued Functions [#2703](https://github.com/sqlfluff/sqlfluff/pull/2703) [@R7L208](https://github.com/R7L208) * T-SQL: Allow for optionally bracketed `PARTITION BY` elements [#2702](https://github.com/sqlfluff/sqlfluff/pull/2702) [@jpers36](https://github.com/jpers36) * T-SQL: Fix `SET TRANSACTION ISOLATION LEVEL` parsing 
[#2701](https://github.com/sqlfluff/sqlfluff/pull/2701) [@jpers36](https://github.com/jpers36)
* Migrate tricky L004 tests to python [#2681](https://github.com/sqlfluff/sqlfluff/pull/2681) [@juhoautio](https://github.com/juhoautio)
* Core linter enhancement: Check for successful parse after applying fixes [#2657](https://github.com/sqlfluff/sqlfluff/pull/2657) [@barrywhart](https://github.com/barrywhart)
* Spark3: Support for `LATERAL VIEW` clause [#2687](https://github.com/sqlfluff/sqlfluff/pull/2687) [@R7L208](https://github.com/R7L208)
* Document python requirement for tox/mypy & remove basepython from conf [#2644](https://github.com/sqlfluff/sqlfluff/pull/2644) [@juhoautio](https://github.com/juhoautio)
* Fix rule L011 for implicit aliases [#2683](https://github.com/sqlfluff/sqlfluff/pull/2683) [@tunetheweb](https://github.com/tunetheweb)
* Pin markupsafe to prevent CI failures [#2685](https://github.com/sqlfluff/sqlfluff/pull/2685) [@tunetheweb](https://github.com/tunetheweb)
* Exasol: Allow `CROSS` joins [#2680](https://github.com/sqlfluff/sqlfluff/pull/2680) [@sti0](https://github.com/sti0)
* Exasol: Improve function formatting [#2678](https://github.com/sqlfluff/sqlfluff/pull/2678) [@sti0](https://github.com/sti0)
* T-SQL: Add indentation for `CREATE` `INDEX`/`STATISTICS` [#2679](https://github.com/sqlfluff/sqlfluff/pull/2679) [@jpers36](https://github.com/jpers36)
* Spark3: Support for `TABLESAMPLE` clause [#2674](https://github.com/sqlfluff/sqlfluff/pull/2674) [@R7L208](https://github.com/R7L208)
* T-SQL: Improve `RAISERROR` functionality [#2672](https://github.com/sqlfluff/sqlfluff/pull/2672) [@jpers36](https://github.com/jpers36)
* Snowflake dialect update for `MERGE INTO` predicates [#2670](https://github.com/sqlfluff/sqlfluff/pull/2670) [@The-Loud](https://github.com/The-Loud)
* Assert that fix_str is set [#2663](https://github.com/sqlfluff/sqlfluff/pull/2663) [@juhoautio](https://github.com/juhoautio)

## New Contributors
* [@The-Loud](https://github.com/The-Loud) made their first contribution in [#2670](https://github.com/sqlfluff/sqlfluff/pull/2670)
* [@OTooleMichael](https://github.com/OTooleMichael) made their first contribution in [#2716](https://github.com/sqlfluff/sqlfluff/pull/2716)
* [@PhilippLange](https://github.com/PhilippLange) made their first contribution in [#2782](https://github.com/sqlfluff/sqlfluff/pull/2782)

## [0.10.1] - 2022-02-15

## Highlights

Major changes include:

* Improvements to rules L023, L045, L048, L052, L059 to make them more accurate.
* If `sqlfluff fix` cannot find a stable fix after `runaway_limit` iterations (default 10) then no fixes will be applied (see the example below).
* Addition of the `--write-output` option to the command line to prevent errors corrupting output.
* Various dialect improvements
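A short sketch of how the two additions above can be used together; `10` is the stated default and the output file name is an illustrative assumption:

```cfg
[sqlfluff]
# Abandon fixing (rather than apply an unstable partial result) if no
# stable set of fixes is found within this many iterations
runaway_limit = 10

# On the command line, lint results can now be written directly to a
# file so that errors cannot corrupt the output stream, e.g.:
#   sqlfluff lint my_query.sql --write-output lint-results.txt
```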
## What’s Changed

* Redshift: Support DATETIME as a valid datatype [#2665](https://github.com/sqlfluff/sqlfluff/pull/2665) [@PLBMR](https://github.com/PLBMR)
* Support L033 for RedShift [#2661](https://github.com/sqlfluff/sqlfluff/pull/2661) [@tunetheweb](https://github.com/tunetheweb)
* Fix parsing types and add check to test in future [#2652](https://github.com/sqlfluff/sqlfluff/pull/2652) [@tunetheweb](https://github.com/tunetheweb)
* Spark3: Support for `SORT BY` Clause [#2651](https://github.com/sqlfluff/sqlfluff/pull/2651) [@R7L208](https://github.com/R7L208)
* Migrate issue template from markdown to yaml [#2626](https://github.com/sqlfluff/sqlfluff/pull/2626) [@zhongjiajie](https://github.com/zhongjiajie)
* L048 - handle more statements and exclude casting operators [#2642](https://github.com/sqlfluff/sqlfluff/pull/2642) [@tunetheweb](https://github.com/tunetheweb)
* MySQL support `CURRENT_TIMESTAMP()` in `CREATE TABLE` [#2648](https://github.com/sqlfluff/sqlfluff/pull/2648) [@tunetheweb](https://github.com/tunetheweb)
* Postgres enhanced `DELETE FROM` syntax [#2643](https://github.com/sqlfluff/sqlfluff/pull/2643) [@tunetheweb](https://github.com/tunetheweb)
* Bug fix: L025 should consider BigQuery `QUALIFY` clause [#2647](https://github.com/sqlfluff/sqlfluff/pull/2647) [@barrywhart](https://github.com/barrywhart)
* Bug fix: L025 overlooking `JOIN ON` clause if join expression in parentheses [#2645](https://github.com/sqlfluff/sqlfluff/pull/2645) [@barrywhart](https://github.com/barrywhart)
* L045 not reporting unused CTEs if the query uses templating [#2641](https://github.com/sqlfluff/sqlfluff/pull/2641) [@barrywhart](https://github.com/barrywhart)
* Fix IndexError in L001 [#2640](https://github.com/sqlfluff/sqlfluff/pull/2640) [@barrywhart](https://github.com/barrywhart)
* L052: If require_final_semicolon is set, ensure semicolon after ALL statements [#2610](https://github.com/sqlfluff/sqlfluff/pull/2610) [@barrywhart](https://github.com/barrywhart)
* L023 to also fix extra newlines in CTE [#2623](https://github.com/sqlfluff/sqlfluff/pull/2623) [@juhoautio](https://github.com/juhoautio)
* Spark3: Enhancements for Set Operators [#2622](https://github.com/sqlfluff/sqlfluff/pull/2622) [@R7L208](https://github.com/R7L208)
* Doc a better choice for default env [#2630](https://github.com/sqlfluff/sqlfluff/pull/2630) [@juhoautio](https://github.com/juhoautio)
* Ensure ordering of fix compatible and config in rules docs [#2620](https://github.com/sqlfluff/sqlfluff/pull/2620) [@zhongjiajie](https://github.com/zhongjiajie)
* Pin python version for tox -e mypy [#2629](https://github.com/sqlfluff/sqlfluff/pull/2629) [@juhoautio](https://github.com/juhoautio)
* Hitting the linter loop limit should be treated as an error [#2628](https://github.com/sqlfluff/sqlfluff/pull/2628) [@barrywhart](https://github.com/barrywhart)
* Allow file output directly from cli [#2625](https://github.com/sqlfluff/sqlfluff/pull/2625) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery `UNPIVOT` and `PIVOT` fixes [#2619](https://github.com/sqlfluff/sqlfluff/pull/2619) [@tunetheweb](https://github.com/tunetheweb)
* L059 quoted identifiers bug [#2614](https://github.com/sqlfluff/sqlfluff/pull/2614) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake dialect: Adjust snowflake array access [#2621](https://github.com/sqlfluff/sqlfluff/pull/2621) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Spark3: Test Cases for `ORDER BY` in `SELECT`
[#2618](https://github.com/sqlfluff/sqlfluff/pull/2618) [@R7L208](https://github.com/R7L208)
* Fix typos in 0.10.0 changelog [#2605](https://github.com/sqlfluff/sqlfluff/pull/2605) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Indent `IF` clause expression segments [#2615](https://github.com/sqlfluff/sqlfluff/pull/2615) [@jpers36](https://github.com/jpers36)
* Spark3: Enhancements for `LIMIT` Clause [#2612](https://github.com/sqlfluff/sqlfluff/pull/2612) [@R7L208](https://github.com/R7L208)
* Allow Bare Functions in column constraints [#2607](https://github.com/sqlfluff/sqlfluff/pull/2607) [@tunetheweb](https://github.com/tunetheweb)
* Add Oracle at and double at sign (execution symbol) [#2608](https://github.com/sqlfluff/sqlfluff/pull/2608) [@r0fls](https://github.com/r0fls)
* Spark3: Enhancements to `LIKE` clause [#2604](https://github.com/sqlfluff/sqlfluff/pull/2604) [@R7L208](https://github.com/R7L208)

## [0.10.0] - 2022-02-10

## Highlights

Major changes include:

* Dropping support of DBT < 0.20 **BREAKING CHANGE**
* `sqlfluff fix` will no longer fix SQL containing parsing or templating errors **BREAKING CHANGE**
* New rule L062 to allow blocking a configurable list of words, e.g. syntax, schemas, or tables you do not want people to use (see the example below)
* Lots and lots of docs improvements
* Looser requirements for `click` python package
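A hedged sketch of configuring the new L062 rule; the option name follows the rule's documentation, and the listed words are invented examples rather than defaults:

```cfg
[sqlfluff:rules:L062]
# Comma-separated list of words to flag wherever they appear
blocked_words = sysdate,regexp_like
```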
## What’s Changed

* L046: Detect Jinja spacing issues where segment begins with literal content [#2603](https://github.com/sqlfluff/sqlfluff/pull/2603) [@barrywhart](https://github.com/barrywhart)
* MySQL Add BINARY support [#2602](https://github.com/sqlfluff/sqlfluff/pull/2602) [@tunetheweb](https://github.com/tunetheweb)
* Support indenting WINDOWS clauses and (optionally) CTEs [#2601](https://github.com/sqlfluff/sqlfluff/pull/2601) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support expressions in arrays [#2599](https://github.com/sqlfluff/sqlfluff/pull/2599) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery support Array of Structs [#2598](https://github.com/sqlfluff/sqlfluff/pull/2598) [@tunetheweb](https://github.com/tunetheweb)
* Support wildcards in triggers [#2597](https://github.com/sqlfluff/sqlfluff/pull/2597) [@tunetheweb](https://github.com/tunetheweb)
* Support CTEs in CREATE VIEW statements [#2596](https://github.com/sqlfluff/sqlfluff/pull/2596) [@tunetheweb](https://github.com/tunetheweb)
* SQLite Support more CREATE TRIGGER options [#2594](https://github.com/sqlfluff/sqlfluff/pull/2594) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Support Column Comments in Alter Table statements [#2593](https://github.com/sqlfluff/sqlfluff/pull/2593) [@tunetheweb](https://github.com/tunetheweb)
* Redshift: Add DATETIME as reserved keyword [#2591](https://github.com/sqlfluff/sqlfluff/pull/2591) [@tunetheweb](https://github.com/tunetheweb)
* Support LIMIT and ORDER BY clauses in Values clauses [#2590](https://github.com/sqlfluff/sqlfluff/pull/2590) [@tunetheweb](https://github.com/tunetheweb)
* L016: New option "ignore_comment_clause" to ignore column COMMENTs, etc. [#2589](https://github.com/sqlfluff/sqlfluff/pull/2589) [@barrywhart](https://github.com/barrywhart)
* Bug fix: L016 ("Line is too long") should consider length of prior fixes [#2587](https://github.com/sqlfluff/sqlfluff/pull/2587) [@barrywhart](https://github.com/barrywhart)
* Add mysql INSERT ON DUPLICATE KEY [#2494](https://github.com/sqlfluff/sqlfluff/pull/2494) [@rpr-ableton](https://github.com/rpr-ableton)
* Snowflake ALTER TABLE: Add multiple columns [#2578](https://github.com/sqlfluff/sqlfluff/pull/2578) [@erevear](https://github.com/erevear)
* MySQL: UNIQUE KEY in CREATE TABLE [#2525](https://github.com/sqlfluff/sqlfluff/pull/2525) [@jpy-git](https://github.com/jpy-git)
* Spark3: JOIN clause enhancements [#2570](https://github.com/sqlfluff/sqlfluff/pull/2570) [@R7L208](https://github.com/R7L208)
* Bug fix: L003 should fix indentation for templated code [#2580](https://github.com/sqlfluff/sqlfluff/pull/2580) [@barrywhart](https://github.com/barrywhart)
* Exasol: Improve `COMMENT` and `WITH [NO] DATA` clause usage. [#2583](https://github.com/sqlfluff/sqlfluff/pull/2583) [@sti0](https://github.com/sti0)
* Exasol: Allow multiple `LOCAL` keywords in `WHERE` clause [#2582](https://github.com/sqlfluff/sqlfluff/pull/2582) [@sti0](https://github.com/sti0)
* Exasol: Allow `LOCAL` keyword within `PREFERRING` clause [#2579](https://github.com/sqlfluff/sqlfluff/pull/2579) [@sti0](https://github.com/sti0)
* Add/Improve docs for config settings: "ignore", "ignore_templated_areas" [#2574](https://github.com/sqlfluff/sqlfluff/pull/2574) [@barrywhart](https://github.com/barrywhart)
* Look for .sqlfluffignore in current directory [#2573](https://github.com/sqlfluff/sqlfluff/pull/2573) [@barrywhart](https://github.com/barrywhart)
* Snowflake: L054 should ignore "WITHIN GROUP" clauses [#2571](https://github.com/sqlfluff/sqlfluff/pull/2571) [@barrywhart](https://github.com/barrywhart)
* Redshift: Support Redshift SUPER Data Types [#2564](https://github.com/sqlfluff/sqlfluff/pull/2564) [@PLBMR](https://github.com/PLBMR)
* Capitalization rules (L010, L014, L030, L040) should ignore templated code [#2566](https://github.com/sqlfluff/sqlfluff/pull/2566) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Add Frame clause unreserved keywords [#2562](https://github.com/sqlfluff/sqlfluff/pull/2562) [@jpers36](https://github.com/jpers36)
* Simple API: Fix bug where omitted parameters still override .sqlfluff [#2563](https://github.com/sqlfluff/sqlfluff/pull/2563) [@barrywhart](https://github.com/barrywhart)
* Spark3: Add Direct File Query [#2553](https://github.com/sqlfluff/sqlfluff/pull/2553) [@R7L208](https://github.com/R7L208)
* Redshift dialect: replace AnyNumberOf with AnySetOf where it makes sense [#2561](https://github.com/sqlfluff/sqlfluff/pull/2561) [@rpr-ableton](https://github.com/rpr-ableton)
* jinja and dbt templaters: More robust handling of whitespace control [#2559](https://github.com/sqlfluff/sqlfluff/pull/2559) [@barrywhart](https://github.com/barrywhart)
* Improve how "sqlfluff fix" handles templating and parse errors [#2546](https://github.com/sqlfluff/sqlfluff/pull/2546) [@barrywhart](https://github.com/barrywhart)
* Jinja and dbt templater: Fix "list index out of range" error [#2555](https://github.com/sqlfluff/sqlfluff/pull/2555) [@barrywhart](https://github.com/barrywhart)
* Fix typo in sqlfluffignore docs [#2551](https://github.com/sqlfluff/sqlfluff/pull/2551) [@tunetheweb](https://github.com/tunetheweb)
* Correct parsing for BigQuery `SELECT REPLACE` clauses.
[#2550](https://github.com/sqlfluff/sqlfluff/pull/2550) [@elyobo](https://github.com/elyobo) * Rules documentation improvements [#2542](https://github.com/sqlfluff/sqlfluff/pull/2542) [@tunetheweb](https://github.com/tunetheweb) * Remove requirement for Click>=8 [#2547](https://github.com/sqlfluff/sqlfluff/pull/2547) [@tunetheweb](https://github.com/tunetheweb) * Allow L059 to be configured to always prefer quoted identifiers [#2537](https://github.com/sqlfluff/sqlfluff/pull/2537) [@niconoe-](https://github.com/niconoe-) * Adds new rule L062 to allow blocking of certain words [#2540](https://github.com/sqlfluff/sqlfluff/pull/2540) [@tunetheweb](https://github.com/tunetheweb) * Update to latest Black, drop support for dbt < 0.20 [#2536](https://github.com/sqlfluff/sqlfluff/pull/2536) [@barrywhart](https://github.com/barrywhart) * dbt templater: Fix bug where profile wasn't found if DBT_PROFILES_DIR contained uppercase letters [#2539](https://github.com/sqlfluff/sqlfluff/pull/2539) [@barrywhart](https://github.com/barrywhart) * Spark3: Added segments & grammar needed for hints [#2528](https://github.com/sqlfluff/sqlfluff/pull/2528) [@R7L208](https://github.com/R7L208) * Spark3: parse some VALUES clauses [#2245](https://github.com/sqlfluff/sqlfluff/pull/2245) [@mcannamela](https://github.com/mcannamela) * T-SQL: Allow multiple params in SET statements [#2535](https://github.com/sqlfluff/sqlfluff/pull/2535) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Add indentation for SET statement [#2531](https://github.com/sqlfluff/sqlfluff/pull/2531) [@jpers36](https://github.com/jpers36) * Add additional documentation on dbt-adapter in pre-commit [#2530](https://github.com/sqlfluff/sqlfluff/pull/2530) [@robertdefilippi](https://github.com/robertdefilippi) * T-SQL: Add indentation for UPDATE statement [#2532](https://github.com/sqlfluff/sqlfluff/pull/2532) [@jpers36](https://github.com/jpers36) * Fix Snowflake Unordered Select Clause [#2529](https://github.com/sqlfluff/sqlfluff/pull/2529) [@tunetheweb](https://github.com/tunetheweb) * Fix Quoted Literals for Postgres and Redshift affecting rule L039 [#2526](https://github.com/sqlfluff/sqlfluff/pull/2526) [@tunetheweb](https://github.com/tunetheweb) * Postgres specific CTEDefinitionSegment [#2524](https://github.com/sqlfluff/sqlfluff/pull/2524) [@jpy-git](https://github.com/jpy-git) ## New Contributors * [@robertdefilippi](https://github.com/robertdefilippi) made their first contribution in [#2530](https://github.com/sqlfluff/sqlfluff/pull/2530) * [@niconoe-](https://github.com/niconoe-) made their first contribution in [#2537](https://github.com/sqlfluff/sqlfluff/pull/2537) * [@elyobo](https://github.com/elyobo) made their first contribution in [#2550](https://github.com/sqlfluff/sqlfluff/pull/2550) * [@erevear](https://github.com/erevear) made their first contribution in [#2578](https://github.com/sqlfluff/sqlfluff/pull/2578) ## [0.9.4] - 2022-01-30 ## Highlights Major changes include: * dbt performance improvements * Fix `click` dependency error. * Better datepart versus identifier parsing. * Fix some Jinja errors. 
* Various grammar fixes and improvements

## What’s Changed

* Spark3: test cases for HAVING clause in SELECT statement [#2517](https://github.com/sqlfluff/sqlfluff/pull/2517) [@R7L208](https://github.com/R7L208)
* Update click version requirement in setup.cfg to match that in requirements.txt [#2518](https://github.com/sqlfluff/sqlfluff/pull/2518) [@barrywhart](https://github.com/barrywhart)
* Postgres: Implement DO Statements + Refactored Language Clause [#2511](https://github.com/sqlfluff/sqlfluff/pull/2511) [@PLBMR](https://github.com/PLBMR)
* Spark3: Support for Grouping Sets, `CUBE` and `ROLLUP` in `GROUP BY` clause of `SELECT` statement [#2505](https://github.com/sqlfluff/sqlfluff/pull/2505) [@R7L208](https://github.com/R7L208)
* Refactor date part functions [#2510](https://github.com/sqlfluff/sqlfluff/pull/2510) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: EXPLAIN ANALYSE allows British spelling [#2507](https://github.com/sqlfluff/sqlfluff/pull/2507) [@jpy-git](https://github.com/jpy-git)
* "noqa": Add support for ignoring template (TMP) and parse (PRS) errors [#2509](https://github.com/sqlfluff/sqlfluff/pull/2509) [@barrywhart](https://github.com/barrywhart)
* Freeze Black due to incompatibility between 22.1 and flake8-black [#2513](https://github.com/sqlfluff/sqlfluff/pull/2513) [@tunetheweb](https://github.com/tunetheweb)
* Support NATURAL JOINS [#2506](https://github.com/sqlfluff/sqlfluff/pull/2506) [@tunetheweb](https://github.com/tunetheweb)
* dbt Docker environment: Mount the test profiles.yml at ~/.dbt [#2502](https://github.com/sqlfluff/sqlfluff/pull/2502) [@barrywhart](https://github.com/barrywhart)
* Add dbt_artifacts package to in the wild docs [#2504](https://github.com/sqlfluff/sqlfluff/pull/2504) [@NiallRees](https://github.com/NiallRees)
* Spark3: Support `DISTRIBUTE BY` clause in `SELECT` statement [#2503](https://github.com/sqlfluff/sqlfluff/pull/2503) [@R7L208](https://github.com/R7L208)
* dbt templater: For performance reasons, cache the database connection across models [#2498](https://github.com/sqlfluff/sqlfluff/pull/2498) [@barrywhart](https://github.com/barrywhart)
* Bug fix: Defining and using Jinja macro in the same file causes runtime error [#2499](https://github.com/sqlfluff/sqlfluff/pull/2499) [@barrywhart](https://github.com/barrywhart)
* Spark3: Support `CLUSTER BY` clause in `SELECT` statement [#2491](https://github.com/sqlfluff/sqlfluff/pull/2491) [@R7L208](https://github.com/R7L208)
* Grammar: Adds support for COPY statement for Postgres dialect [#2481](https://github.com/sqlfluff/sqlfluff/pull/2481) [@derickl](https://github.com/derickl)
* Add raiserror for T-SQL [#2490](https://github.com/sqlfluff/sqlfluff/pull/2490) [@fdw](https://github.com/fdw)
* Enforce parentheses for function definitions in T-SQL [#2489](https://github.com/sqlfluff/sqlfluff/pull/2489) [@fdw](https://github.com/fdw)
* Add guards to prevent rule crashes [#2488](https://github.com/sqlfluff/sqlfluff/pull/2488) [@barrywhart](https://github.com/barrywhart)

## New Contributors
* [@PLBMR](https://github.com/PLBMR) made their first contribution in [#2511](https://github.com/sqlfluff/sqlfluff/pull/2511)

## [0.9.3] - 2022-01-26

## Highlights

Major changes include:

* Add `ignore_words` option for rules L010, L014, L029, L030, L040 (see the example below)
* Fix some issues in 0.9.2 preventing some queries linting
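A minimal sketch of the new `ignore_words` option, here applied to L010 (the policy and word list are illustrative assumptions, not defaults):

```cfg
[sqlfluff:rules:L010]
capitalisation_policy = upper
# Words this rule should skip entirely (illustrative values)
ignore_words = null,true,false
```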
## What’s Changed

* Prevent L031 throwing exception on unparsable code [#2486](https://github.com/sqlfluff/sqlfluff/pull/2486) [@tunetheweb](https://github.com/tunetheweb)
* Add linting of fixtures SQL for critical rules errors to tox [#2473](https://github.com/sqlfluff/sqlfluff/pull/2473) [@tunetheweb](https://github.com/tunetheweb)
* Fix L039 for T-SQL comparison operator using space [#2485](https://github.com/sqlfluff/sqlfluff/pull/2485) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug in get_alias causing rule Critical errors for T-SQL [#2479](https://github.com/sqlfluff/sqlfluff/pull/2479) [@tunetheweb](https://github.com/tunetheweb)
* Tweak GitHub templates [#2471](https://github.com/sqlfluff/sqlfluff/pull/2471) [@tunetheweb](https://github.com/tunetheweb)
* Small speed improvement to L054 [#2476](https://github.com/sqlfluff/sqlfluff/pull/2476) [@tunetheweb](https://github.com/tunetheweb)
* L003: Revisit recent change to improve speed [#2474](https://github.com/sqlfluff/sqlfluff/pull/2474) [@barrywhart](https://github.com/barrywhart)
* Fix select_crawler issue with some Exasol statements [#2470](https://github.com/sqlfluff/sqlfluff/pull/2470) [@tunetheweb](https://github.com/tunetheweb)
* Cleanup date logic by removing DatePartClause and using DatetimeUnitSegment instead [#2464](https://github.com/sqlfluff/sqlfluff/pull/2464) [@tunetheweb](https://github.com/tunetheweb)
* Fix L044 exception when final statement has no SELECT [#2468](https://github.com/sqlfluff/sqlfluff/pull/2468) [@tunetheweb](https://github.com/tunetheweb)
* Support T-SQL system variables (e.g. @@rowcount) [#2463](https://github.com/sqlfluff/sqlfluff/pull/2463) [@tunetheweb](https://github.com/tunetheweb)
* Add base rule to developing rules page [#2462](https://github.com/sqlfluff/sqlfluff/pull/2462) [@tunetheweb](https://github.com/tunetheweb)
* L003: Ignore indentation of lines that only exist in templated space [#2460](https://github.com/sqlfluff/sqlfluff/pull/2460) [@barrywhart](https://github.com/barrywhart)
* Ignore words for various rules [#2459](https://github.com/sqlfluff/sqlfluff/pull/2459) [@tunetheweb](https://github.com/tunetheweb)
* Support Foreign Key options for MySQL [#2461](https://github.com/sqlfluff/sqlfluff/pull/2461) [@tunetheweb](https://github.com/tunetheweb)
* Exclude WINDOW clauses from L054 [#2455](https://github.com/sqlfluff/sqlfluff/pull/2455) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug with L026 for simple deletes [#2458](https://github.com/sqlfluff/sqlfluff/pull/2458) [@tunetheweb](https://github.com/tunetheweb)
* Spark3: test cases for Common Table Expressions [#2454](https://github.com/sqlfluff/sqlfluff/pull/2454) [@R7L208](https://github.com/R7L208)
* Fix T-SQL's IDENTITY_INSERT syntax [#2452](https://github.com/sqlfluff/sqlfluff/pull/2452) [@fdw](https://github.com/fdw)
* T-SQL: Support stored procedures in insert statements [#2451](https://github.com/sqlfluff/sqlfluff/pull/2451) [@fdw](https://github.com/fdw)
* Spark3: Support for `LOAD DATA` statements [#2450](https://github.com/sqlfluff/sqlfluff/pull/2450) [@R7L208](https://github.com/R7L208)

## [0.9.2] - 2022-01-24

## Highlights

We are pleased to include 110 improvements and fixes in this release, and welcome 7 new contributors to the code.
Major changes include:

* Initial Oracle support (note: SQL, but not PL/SQL)
* Fix more dbt 1.0.0 connection issues
* Improved configuration documentation
* New rule (L059) to flag unnecessary quoted identifiers
* New rule (L060) to prefer `COALESCE` instead of `IFNULL` or `NVL`
* New rule (L061) to prefer `!=` over `<>` (see the example below)
* Many rule fixes
* Many dialect improvements
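A before/after SQL sketch of what the three new rules flag (the table and column names are invented):

```sql
-- Flagged: L059 (unnecessary quoted identifier), L060 (IFNULL), L061 (<>)
SELECT "col_a", IFNULL(col_b, 0) AS col_b
FROM my_table
WHERE col_c <> 'x';

-- Preferred form once all three are fixed:
SELECT col_a, COALESCE(col_b, 0) AS col_b
FROM my_table
WHERE col_c != 'x';
```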
## What’s Changed

* Add Postgres DROP PROCEDURE support [#2446](https://github.com/sqlfluff/sqlfluff/pull/2446) [@rpr-ableton](https://github.com/rpr-ableton)
* MySQL Alter table ADD/DROP/RENAME INDEX support [#2443](https://github.com/sqlfluff/sqlfluff/pull/2443) [@tunetheweb](https://github.com/tunetheweb)
* Add basic CREATE PROCEDURE support to Postgres [#2441](https://github.com/sqlfluff/sqlfluff/pull/2441) [@tunetheweb](https://github.com/tunetheweb)
* Indent T-SQL DECLARE and EXEC statements [#2439](https://github.com/sqlfluff/sqlfluff/pull/2439) [@tunetheweb](https://github.com/tunetheweb)
* Hive alternative types: INTEGER, DEC, NUMERIC [#2438](https://github.com/sqlfluff/sqlfluff/pull/2438) [@tunetheweb](https://github.com/tunetheweb)
* Implement Snowflake Dateparts [#2437](https://github.com/sqlfluff/sqlfluff/pull/2437) [@tunetheweb](https://github.com/tunetheweb)
* Fix rule L028 for T-SQL for params [#2442](https://github.com/sqlfluff/sqlfluff/pull/2442) [@tunetheweb](https://github.com/tunetheweb)
* Support CREATE UNIQUE INDEX [#2440](https://github.com/sqlfluff/sqlfluff/pull/2440) [@tunetheweb](https://github.com/tunetheweb)
* Make BigQuery typeless STRUCTs Expressions [#2435](https://github.com/sqlfluff/sqlfluff/pull/2435) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL support default params and no RETURN value [#2434](https://github.com/sqlfluff/sqlfluff/pull/2434) [@tunetheweb](https://github.com/tunetheweb)
* "sqlfluff fix" should report any parse errors found [#2423](https://github.com/sqlfluff/sqlfluff/pull/2423) [@barrywhart](https://github.com/barrywhart)
* Redshift VACUUM support [#2433](https://github.com/sqlfluff/sqlfluff/pull/2433) [@rpr-ableton](https://github.com/rpr-ableton)
* Add Oracle PROMPT statement [#2413](https://github.com/sqlfluff/sqlfluff/pull/2413) [@r0fls](https://github.com/r0fls)
* Spark3: Support for `INSERT OVERWRITE DIRECTORY` with Hive Format [#2389](https://github.com/sqlfluff/sqlfluff/pull/2389) [@R7L208](https://github.com/R7L208)
* Exasol: Fix escaped identifiers [#2431](https://github.com/sqlfluff/sqlfluff/pull/2431) [@sti0](https://github.com/sti0)
* Exasol: Fix `LOCAL.ALIAS` Syntax [#2430](https://github.com/sqlfluff/sqlfluff/pull/2430) [@sti0](https://github.com/sti0)
* Exasol: Allow quoted identifier for various statements. [#2428](https://github.com/sqlfluff/sqlfluff/pull/2428) [@sti0](https://github.com/sti0)
* Misc grammar improvements for Snowflake [#2421](https://github.com/sqlfluff/sqlfluff/pull/2421) [@chwiese](https://github.com/chwiese)
* New rule L061 to use != over <> [#2409](https://github.com/sqlfluff/sqlfluff/pull/2409) [@sti0](https://github.com/sti0)
* Correct TRANS to TRAN [#2425](https://github.com/sqlfluff/sqlfluff/pull/2425) [@fdw](https://github.com/fdw)
* Remove the "heuristic" slicer, as it was replaced by JinjaTracer [#2422](https://github.com/sqlfluff/sqlfluff/pull/2422) [@barrywhart](https://github.com/barrywhart)
* L060: More specific description [#2419](https://github.com/sqlfluff/sqlfluff/pull/2419) [@jpy-git](https://github.com/jpy-git)
* Fix code formatting in Rule docs [#2418](https://github.com/sqlfluff/sqlfluff/pull/2418) [@tunetheweb](https://github.com/tunetheweb)
* Allow UPDATE SET statements in RedShift [#2417](https://github.com/sqlfluff/sqlfluff/pull/2417) [@tunetheweb](https://github.com/tunetheweb)
* Add Redshift cursor DECLARE, FETCH & CLOSE support [#2414](https://github.com/sqlfluff/sqlfluff/pull/2414) [@rpr-ableton](https://github.com/rpr-ableton)
* Add Redshift ANALYZE COMPRESSION support [#2412](https://github.com/sqlfluff/sqlfluff/pull/2412) [@rpr-ableton](https://github.com/rpr-ableton)
* ANSI Values statement fixes [#2404](https://github.com/sqlfluff/sqlfluff/pull/2404) [@jpy-git](https://github.com/jpy-git)
* Exasol: Overhaul drop statements [#2407](https://github.com/sqlfluff/sqlfluff/pull/2407) [@sti0](https://github.com/sti0)
* L044, L045: Handle Exasol VALUES clause [#2400](https://github.com/sqlfluff/sqlfluff/pull/2400) [@barrywhart](https://github.com/barrywhart)
* L060: Use COALESCE instead of IFNULL or NVL. [#2405](https://github.com/sqlfluff/sqlfluff/pull/2405) [@jpy-git](https://github.com/jpy-git)
* Postgres: Fix Values alias regression [#2401](https://github.com/sqlfluff/sqlfluff/pull/2401) [@jpy-git](https://github.com/jpy-git)
* Align line length in Python code to 88 characters [#2264](https://github.com/sqlfluff/sqlfluff/pull/2264) [@chwiese](https://github.com/chwiese)
* Jinja templater: Allow "load_macros_from_path" to be a comma-separated list of paths [#2387](https://github.com/sqlfluff/sqlfluff/pull/2387) [@barrywhart](https://github.com/barrywhart)
* Add "TRANS" keyword for T-SQL [#2399](https://github.com/sqlfluff/sqlfluff/pull/2399) [@fdw](https://github.com/fdw)
* Docstrings: Replace double backticks with single quote for lint results.
[#2386](https://github.com/sqlfluff/sqlfluff/pull/2386) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `INSERT OVERWRITE DIRECTORY` statements [#2385](https://github.com/sqlfluff/sqlfluff/pull/2385) [@R7L208](https://github.com/R7L208) * Fix unnecessary white underline in doc site [#2383](https://github.com/sqlfluff/sqlfluff/pull/2383) [@tunetheweb](https://github.com/tunetheweb) * Rolls back some code cleanup that caused coverage report to show gaps [#2384](https://github.com/sqlfluff/sqlfluff/pull/2384) [@barrywhart](https://github.com/barrywhart) * Fix "connection already closed" issue with dbt 1.0 and dbt_utils [#2382](https://github.com/sqlfluff/sqlfluff/pull/2382) [@barrywhart](https://github.com/barrywhart) * Spark3: Support for `INSERT [TABLE]` data manipulation statements [#2290](https://github.com/sqlfluff/sqlfluff/pull/2290) [@R7L208](https://github.com/R7L208) * Comment out line in bug report template [#2378](https://github.com/sqlfluff/sqlfluff/pull/2378) [@jpy-git](https://github.com/jpy-git) * Postgres: EXPLAIN statement updates [#2374](https://github.com/sqlfluff/sqlfluff/pull/2374) [@jpy-git](https://github.com/jpy-git) * Make TABLE a non-reserved word in Postgres [#2377](https://github.com/sqlfluff/sqlfluff/pull/2377) [@tunetheweb](https://github.com/tunetheweb) * Snowflake COLUMN is not a reserved word [#2376](https://github.com/sqlfluff/sqlfluff/pull/2376) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Complete ASA Table Index Clause functionality [#2373](https://github.com/sqlfluff/sqlfluff/pull/2373) [@jpers36](https://github.com/jpers36) * Add support for Jinja import and include [#2355](https://github.com/sqlfluff/sqlfluff/pull/2355) [@barrywhart](https://github.com/barrywhart) * Add Redshift INTERVAL datatype support [#2366](https://github.com/sqlfluff/sqlfluff/pull/2366) [@rpr-ableton](https://github.com/rpr-ableton) * Whitespace concatenated string literals for MySQL, Postgres and Redshift [#2356](https://github.com/sqlfluff/sqlfluff/pull/2356) [@jpy-git](https://github.com/jpy-git) * Fix L026 false positive on "SELECT INTO" statement [#2371](https://github.com/sqlfluff/sqlfluff/pull/2371) [@barrywhart](https://github.com/barrywhart) * Exclude EMIT clauses from rule L013 [#2364](https://github.com/sqlfluff/sqlfluff/pull/2364) [@tunetheweb](https://github.com/tunetheweb) * Functional API: Segments.recursive_crawl [#2369](https://github.com/sqlfluff/sqlfluff/pull/2369) [@jpy-git](https://github.com/jpy-git) * Complete Redshift CREATE EXTERNAL TABLE support [#2354](https://github.com/sqlfluff/sqlfluff/pull/2354) [@rpr-ableton](https://github.com/rpr-ableton) * L041: Fix duplicate DISTINCT corruption [#2365](https://github.com/sqlfluff/sqlfluff/pull/2365) [@jpy-git](https://github.com/jpy-git) * Bigquery Create View with Options [#2359](https://github.com/sqlfluff/sqlfluff/pull/2359) [@tunetheweb](https://github.com/tunetheweb) * L026: Handle DML statements and multiple levels of nesting [#2336](https://github.com/sqlfluff/sqlfluff/pull/2336) [@barrywhart](https://github.com/barrywhart) * Postgres & MySQL: cleanup AliasExpressionSegment [#2353](https://github.com/sqlfluff/sqlfluff/pull/2353) [@jpy-git](https://github.com/jpy-git) * Redefine MySQL Interval segment [#2351](https://github.com/sqlfluff/sqlfluff/pull/2351) [@rpr-ableton](https://github.com/rpr-ableton) * Postgres: INSERT INTO table alias [#2349](https://github.com/sqlfluff/sqlfluff/pull/2349) [@jpy-git](https://github.com/jpy-git) * L043: Remove redundant CASE statement replacing 
NULLS with NULLS [#2346](https://github.com/sqlfluff/sqlfluff/pull/2346) [@jpy-git](https://github.com/jpy-git) * Add RedShift DATASHARE support [#2350](https://github.com/sqlfluff/sqlfluff/pull/2350) [@rpr-ableton](https://github.com/rpr-ableton) * Various documentation updates [#2347](https://github.com/sqlfluff/sqlfluff/pull/2347) [@tunetheweb](https://github.com/tunetheweb) * Snowflake ALTER TABLE: Drop multiple columns [#2348](https://github.com/sqlfluff/sqlfluff/pull/2348) [@jpy-git](https://github.com/jpy-git) * Configuration doc: add rule configuration section [#2291](https://github.com/sqlfluff/sqlfluff/pull/2291) [@juhoautio](https://github.com/juhoautio) * Redshift: create model, show model & data types [#2338](https://github.com/sqlfluff/sqlfluff/pull/2338) [@rpr-ableton](https://github.com/rpr-ableton) * L059: Unnecessary quoted identifier [#2341](https://github.com/sqlfluff/sqlfluff/pull/2341) [@jpy-git](https://github.com/jpy-git) * L043: Use simple replace to apply fixes [#2343](https://github.com/sqlfluff/sqlfluff/pull/2343) [@jpy-git](https://github.com/jpy-git) * T-SQL: Add functionality to PARTITION BY clause [#2335](https://github.com/sqlfluff/sqlfluff/pull/2335) [@jpers36](https://github.com/jpers36) * L039 casting operator postgres fix [#2334](https://github.com/sqlfluff/sqlfluff/pull/2334) [@jpy-git](https://github.com/jpy-git) * `AnySetOf` grammar [#2326](https://github.com/sqlfluff/sqlfluff/pull/2326) [@jpy-git](https://github.com/jpy-git) * Redshift: update CREATE TABLE AS match_grammar [#2333](https://github.com/sqlfluff/sqlfluff/pull/2333) [@rpr-ableton](https://github.com/rpr-ableton) * Redshift CREATE EXTERNAL TABLE: TABLE PROPERTIES [#2330](https://github.com/sqlfluff/sqlfluff/pull/2330) [@jpy-git](https://github.com/jpy-git) * Snowflake: Flush out `ALTER TABLE`'s `tableColumnAction` grammar [#2332](https://github.com/sqlfluff/sqlfluff/pull/2332) [@wong-codaio](https://github.com/wong-codaio) * Snowflake ALTER TABLE: Add clusteringAction [#2329](https://github.com/sqlfluff/sqlfluff/pull/2329) [@jpy-git](https://github.com/jpy-git) * Snowflake ALTER TABLE: Add searchOptimizationAction [#2328](https://github.com/sqlfluff/sqlfluff/pull/2328) [@jpy-git](https://github.com/jpy-git) * Fix numeric literal grammar for Postgres/MySQL/Exasol [#2324](https://github.com/sqlfluff/sqlfluff/pull/2324) [@jpy-git](https://github.com/jpy-git) * L039: Remove spaces between comparison operators (T-SQL) [#2325](https://github.com/sqlfluff/sqlfluff/pull/2325) [@jpy-git](https://github.com/jpy-git) * Enable setting a target of a dbt profile [#2236](https://github.com/sqlfluff/sqlfluff/pull/2236) [@yu-iskw](https://github.com/yu-iskw) * Snowflake: Add support for column rename [#2327](https://github.com/sqlfluff/sqlfluff/pull/2327) [@wong-codaio](https://github.com/wong-codaio) * Snowflake: Added `AlterTableStatement` specific for Snowflake [#2267](https://github.com/sqlfluff/sqlfluff/pull/2267) [@wong-codaio](https://github.com/wong-codaio) * Full REFERENCES grammar for CREATE TABLE statement [#2315](https://github.com/sqlfluff/sqlfluff/pull/2315) [@jpy-git](https://github.com/jpy-git) * Fix Spark numeric literals [#2317](https://github.com/sqlfluff/sqlfluff/pull/2317) [@jpy-git](https://github.com/jpy-git) * Change type of Snowflake stage paths to fix issues with L044 [#2320](https://github.com/sqlfluff/sqlfluff/pull/2320) [@chwiese](https://github.com/chwiese) * Add Bytes Quoted Literals to Spark dialect [#2312](https://github.com/sqlfluff/sqlfluff/pull/2312) 
[@jpy-git](https://github.com/jpy-git) * Fix L044 assertion failure with delete stmt & cte [#2321](https://github.com/sqlfluff/sqlfluff/pull/2321) [@barrywhart](https://github.com/barrywhart) * L003 should consider only *literal* leading whitespace (ignore templated) [#2304](https://github.com/sqlfluff/sqlfluff/pull/2304) [@barrywhart](https://github.com/barrywhart) * Redshift: update reserved keywords [#2318](https://github.com/sqlfluff/sqlfluff/pull/2318) [@rpr-ableton](https://github.com/rpr-ableton) * docs: Document how to run SQLFluff with local changes to test them [#2316](https://github.com/sqlfluff/sqlfluff/pull/2316) [@kayman-mk](https://github.com/kayman-mk) * Update redshift unreserved keywords [#2310](https://github.com/sqlfluff/sqlfluff/pull/2310) [@jpy-git](https://github.com/jpy-git) * Fix spark and hive quoted literals [#2311](https://github.com/sqlfluff/sqlfluff/pull/2311) [@jpy-git](https://github.com/jpy-git) * Oracle Dialect [#2293](https://github.com/sqlfluff/sqlfluff/pull/2293) [@r0fls](https://github.com/r0fls) * Redshift dialect: add COPY and UNLOAD statements [#2307](https://github.com/sqlfluff/sqlfluff/pull/2307) [@rpr-ableton](https://github.com/rpr-ableton) * L052: Fix case where no preceding segments and mulitline [#2279](https://github.com/sqlfluff/sqlfluff/pull/2279) [@jpy-git](https://github.com/jpy-git) * Update rule L049 to handle EXEC assignments [#2308](https://github.com/sqlfluff/sqlfluff/pull/2308) [@tunetheweb](https://github.com/tunetheweb) * Remove DATE, DATETIME and TIME from BigQuery DatePart [#2283](https://github.com/sqlfluff/sqlfluff/pull/2283) [@tunetheweb](https://github.com/tunetheweb) * Fix #1292: nocolor and verbose can work in config files [#2300](https://github.com/sqlfluff/sqlfluff/pull/2300) [@cympfh](https://github.com/cympfh) * Allow pyproject.toml as extra_config_path [#2305](https://github.com/sqlfluff/sqlfluff/pull/2305) [@jpy-git](https://github.com/jpy-git) * L009: Handle adding newline after trailing templated code [#2298](https://github.com/sqlfluff/sqlfluff/pull/2298) [@barrywhart](https://github.com/barrywhart) * added missing "t" in doc for Rule_L020 [#2294](https://github.com/sqlfluff/sqlfluff/pull/2294) [@Xilorole](https://github.com/Xilorole) * docs: Document configuration keyword for rule L054 [#2288](https://github.com/sqlfluff/sqlfluff/pull/2288) [@tomasfarias](https://github.com/tomasfarias) * Update L009 to operate in raw, not templated space [#2285](https://github.com/sqlfluff/sqlfluff/pull/2285) [@barrywhart](https://github.com/barrywhart) * Redshift CREATE LIBRARY statements [#2277](https://github.com/sqlfluff/sqlfluff/pull/2277) [@rpr-ableton](https://github.com/rpr-ableton) * L025 with 'bigquery' dialect: Correctly interpret calling functions with a table as a parameter [#2278](https://github.com/sqlfluff/sqlfluff/pull/2278) [@barrywhart](https://github.com/barrywhart) * Spark3: Coverage for `REFRESH` auxiliary statements [#2282](https://github.com/sqlfluff/sqlfluff/pull/2282) [@R7L208](https://github.com/R7L208) * Spark3: Coverage for `USE DATABASE` statement. 
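
For a quick feel for the new L061 rule above, here's a minimal sketch using the simple Python API. The `rules` selection argument and exact signatures are assumptions about the API surface rather than something stated in these notes, so treat it as illustrative:

```python
# Illustrative only: exercising the new rule L061 ("use != over <>")
# through the simple Python API. The rules= selection argument is an
# assumption about the API surface, not taken from these release notes.
import sqlfluff

sql = "SELECT * FROM my_table WHERE col_a <> col_b\n"

# lint() returns a list of violation records, each carrying a rule code.
for violation in sqlfluff.lint(sql, dialect="ansi", rules=["L061"]):
    print(violation)

# fix() should rewrite "<>" to "!=" if the rule provides an auto-fix.
print(sqlfluff.fix(sql, dialect="ansi", rules=["L061"]))
```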

## New Contributors

* [@barnett](https://github.com/barnett) made their first contribution in [#2280](https://github.com/sqlfluff/sqlfluff/pull/2280)
* [@tomasfarias](https://github.com/tomasfarias) made their first contribution in [#2288](https://github.com/sqlfluff/sqlfluff/pull/2288)
* [@Xilorole](https://github.com/Xilorole) made their first contribution in [#2294](https://github.com/sqlfluff/sqlfluff/pull/2294)
* [@cympfh](https://github.com/cympfh) made their first contribution in [#2300](https://github.com/sqlfluff/sqlfluff/pull/2300)
* [@r0fls](https://github.com/r0fls) made their first contribution in [#2293](https://github.com/sqlfluff/sqlfluff/pull/2293)
* [@yu-iskw](https://github.com/yu-iskw) made their first contribution in [#2236](https://github.com/sqlfluff/sqlfluff/pull/2236)
* [@fdw](https://github.com/fdw) made their first contribution in [#2399](https://github.com/sqlfluff/sqlfluff/pull/2399)

## [0.9.1] - 2022-01-08

## Highlights

* Fix dbt 1.0.0 connection issue
* Fix some SQL corruption issues with templated code
* New components to simplify creating rules (see the sketch after this list)
* Remove support for Python 3.6
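
The "new components" are the functional API added to `RuleContext` in [#2126](https://github.com/sqlfluff/sqlfluff/pull/2126). Below is a rough, hypothetical sketch of a custom rule written against it; the import paths and helper names are assumptions based on the 0.9-era layout, so check the documentation for your installed version:

```python
# A rough, hypothetical sketch of a custom rule using the functional API
# added to RuleContext in #2126. Import paths and helper names here are
# assumptions for the 0.9 series and may differ in your installed version.
from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
import sqlfluff.core.rules.functional.segment_predicates as sp


class Rule_Example_L900(BaseRule):
    """Hypothetical rule: flag bare wildcards in a SELECT clause."""

    def _eval(self, context: RuleContext):
        if not context.segment.is_type("select_clause"):
            return None
        # The functional API wraps child segments in a query-able collection,
        # replacing the hand-written loops over context.segment.segments that
        # rules previously needed.
        wildcards = context.functional.segment.children(
            sp.is_type("wildcard_expression")
        )
        if wildcards:
            return LintResult(anchor=wildcards[0])
        return None
```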

## What’s Changed

* Fix delimited identifier parsing for spark3 [#2111](https://github.com/sqlfluff/sqlfluff/pull/2111) [@mcannamela](https://github.com/mcannamela)
* Stop numeric literal from splitting valid naked identifiers. [#2114](https://github.com/sqlfluff/sqlfluff/pull/2114) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add CREATE USER/GROUP statement to Redshift dialect [#2115](https://github.com/sqlfluff/sqlfluff/pull/2115) [@jpy-git](https://github.com/jpy-git)
* Fix mypy type raise in L003 [#2127](https://github.com/sqlfluff/sqlfluff/pull/2127) [@barrywhart](https://github.com/barrywhart)
* Add ability to parse multiple GO/semicolon delimiters [#2124](https://github.com/sqlfluff/sqlfluff/pull/2124) [@jpy-git](https://github.com/jpy-git)
* Allowed array/struct values in `default` definition of `declare` [#2120](https://github.com/sqlfluff/sqlfluff/pull/2120) [@KulykDmytro](https://github.com/KulykDmytro)
* Normalise input newlines [#2128](https://github.com/sqlfluff/sqlfluff/pull/2128) [@jpy-git](https://github.com/jpy-git)
* Clean up all files using the pre-commit hook [#2123](https://github.com/sqlfluff/sqlfluff/pull/2123) [@kayman-mk](https://github.com/kayman-mk)
* Refined LintFix API [#2133](https://github.com/sqlfluff/sqlfluff/pull/2133) [@jpy-git](https://github.com/jpy-git)
* Hotfix for LintFix comparisons [#2138](https://github.com/sqlfluff/sqlfluff/pull/2138) [@jpy-git](https://github.com/jpy-git)
* Lint spaces in qualified names [#2130](https://github.com/sqlfluff/sqlfluff/pull/2130) [@jpers36](https://github.com/jpers36)
* Remove support for Python 3.6 (it's "end of life" December 23, 2021) [#2141](https://github.com/sqlfluff/sqlfluff/pull/2141) [@barrywhart](https://github.com/barrywhart)
* Fully remove python3.6 references [#2142](https://github.com/sqlfluff/sqlfluff/pull/2142) [@jpy-git](https://github.com/jpy-git)
* Fix L022 to not flag CTE column definitions [#2139](https://github.com/sqlfluff/sqlfluff/pull/2139) [@jpy-git](https://github.com/jpy-git)
* docs: set `dbt_modules` to `dbt_packages` [#2143](https://github.com/sqlfluff/sqlfluff/pull/2143) [@ciklista](https://github.com/ciklista)
* Hive: add INTERVAL syntax [#2144](https://github.com/sqlfluff/sqlfluff/pull/2144) [@juhoautio](https://github.com/juhoautio)
* Fix mypy error on python 3.7 [#2147](https://github.com/sqlfluff/sqlfluff/pull/2147) [@juhoautio](https://github.com/juhoautio)
* Update PR template to reference tox generate-fixture-yml command [#2148](https://github.com/sqlfluff/sqlfluff/pull/2148) [@jpy-git](https://github.com/jpy-git)
* Update index.rst notable changes with 0.9.0 details [#2132](https://github.com/sqlfluff/sqlfluff/pull/2132) [@jpy-git](https://github.com/jpy-git)
* Add ALTER USER and ALTER GROUP to redshift dialect [#2131](https://github.com/sqlfluff/sqlfluff/pull/2131) [@jpy-git](https://github.com/jpy-git)
* Add complete DESCRIBE grammar to Snowflake dialect [#2149](https://github.com/sqlfluff/sqlfluff/pull/2149) [@jpy-git](https://github.com/jpy-git)
* Fix bug with BigQuery UNPIVOT [#2156](https://github.com/sqlfluff/sqlfluff/pull/2156) [@tunetheweb](https://github.com/tunetheweb)
* Make L057 compatible with BigQuery [#2151](https://github.com/sqlfluff/sqlfluff/pull/2151) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Proper Indentation of ELSE IF [#2157](https://github.com/sqlfluff/sqlfluff/pull/2157) [@jpers36](https://github.com/jpers36)
* Linter Test Name Duplication [#2158](https://github.com/sqlfluff/sqlfluff/pull/2158) [@jpers36](https://github.com/jpers36)
* Add struct support for `hive` and `redshift` (L026, L028) [#2154](https://github.com/sqlfluff/sqlfluff/pull/2154) [@KulykDmytro](https://github.com/KulykDmytro)
* Postgres - Support functions prepended with _ and containing $ [#2159](https://github.com/sqlfluff/sqlfluff/pull/2159) [@jpy-git](https://github.com/jpy-git)
* T-SQL: function parsing/linting [#2155](https://github.com/sqlfluff/sqlfluff/pull/2155) [@jpers36](https://github.com/jpers36)
* T-SQL: Add THROW statement [#2163](https://github.com/sqlfluff/sqlfluff/pull/2163) [@jpers36](https://github.com/jpers36)
* Add yamllint to project [#2162](https://github.com/sqlfluff/sqlfluff/pull/2162) [@tunetheweb](https://github.com/tunetheweb)
* Fix outdated docstring in dialects_test [#2166](https://github.com/sqlfluff/sqlfluff/pull/2166) [@juhoautio](https://github.com/juhoautio)
* Minor comment fixes [#2179](https://github.com/sqlfluff/sqlfluff/pull/2179) [@juhoautio](https://github.com/juhoautio)
* L010 to apply to date_part (capitalization policy for time units) [#2167](https://github.com/sqlfluff/sqlfluff/pull/2167) [@juhoautio](https://github.com/juhoautio)
* ALTER GROUP fix to accommodate quoted objects [#2188](https://github.com/sqlfluff/sqlfluff/pull/2188) [@tdstark](https://github.com/tdstark)
* Lexer: add non-breaking spaces to whitespace [#2189](https://github.com/sqlfluff/sqlfluff/pull/2189) [@jpers36](https://github.com/jpers36)
* Grammar: Add COMMENT statement to Snowflake [#2173](https://github.com/sqlfluff/sqlfluff/pull/2173) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add DISCARD statement to Postgres dialect [#2175](https://github.com/sqlfluff/sqlfluff/pull/2175) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add UNDROP statement to Snowflake dialect [#2177](https://github.com/sqlfluff/sqlfluff/pull/2177) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add UNSET statement to Snowflake dialect [#2181](https://github.com/sqlfluff/sqlfluff/pull/2181) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add RESET statement to Postgres dialect [#2182](https://github.com/sqlfluff/sqlfluff/pull/2182) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add LOAD statement to Postgres dialect [#2183](https://github.com/sqlfluff/sqlfluff/pull/2183) [@jpy-git](https://github.com/jpy-git)
* Grammar: Fix TRUNCATE statement in Snowflake dialect [#2184](https://github.com/sqlfluff/sqlfluff/pull/2184) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add HELP statement to MySQL dialect [#2191](https://github.com/sqlfluff/sqlfluff/pull/2191) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add PURGE BINARY LOGS statement to MySQL dialect [#2193](https://github.com/sqlfluff/sqlfluff/pull/2193) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add RESET MASTER statement to MySQL dialect [#2194](https://github.com/sqlfluff/sqlfluff/pull/2194) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add RENAME TABLE statement to MySQL dialect [#2195](https://github.com/sqlfluff/sqlfluff/pull/2195) [@jpy-git](https://github.com/jpy-git)
* Grammar: Tidy up transaction statements in Snowflake dialect [#2196](https://github.com/sqlfluff/sqlfluff/pull/2196) [@jpy-git](https://github.com/jpy-git)
* Modifying Redshift USER/GROUP Statements To Use `ObjectReferenceSegment` [#2190](https://github.com/sqlfluff/sqlfluff/pull/2190) [@tdstark](https://github.com/tdstark)
* Grammar: Fix TRUNCATE statement in Postgres dialect [#2185](https://github.com/sqlfluff/sqlfluff/pull/2185) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add LISTEN, NOTIFY, and UNLISTEN statements to Postgres dialect [#2174](https://github.com/sqlfluff/sqlfluff/pull/2174) [@jpy-git](https://github.com/jpy-git)
* Grammar: Tidy up Snowflake/MySQL/HIVE USE statements [#2187](https://github.com/sqlfluff/sqlfluff/pull/2187) [@jpy-git](https://github.com/jpy-git)
* Make Snowflake keywords unreserved: account, organization, pivot [#2172](https://github.com/sqlfluff/sqlfluff/pull/2172) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add SET sql_log_bin statement to MySQL dialect [#2192](https://github.com/sqlfluff/sqlfluff/pull/2192) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add CALL statement to Snowflake dialect [#2176](https://github.com/sqlfluff/sqlfluff/pull/2176) [@jpy-git](https://github.com/jpy-git)
* L027 Fix self referring column alias edge case [#2171](https://github.com/sqlfluff/sqlfluff/pull/2171) [@jpy-git](https://github.com/jpy-git)
* T-SQL: Remove dependency on ANSI keyword lists [#2170](https://github.com/sqlfluff/sqlfluff/pull/2170) [@jpers36](https://github.com/jpers36)
* Grammar: Add Table Maintenance Statements to MySQL dialect [#2198](https://github.com/sqlfluff/sqlfluff/pull/2198) [@jpy-git](https://github.com/jpy-git)
* Adding CREATE TABLE AS to Redshift [#2205](https://github.com/sqlfluff/sqlfluff/pull/2205) [@tdstark](https://github.com/tdstark)
* T-SQL: Add support for ALTER TABLE ALTER COLUMN [#2208](https://github.com/sqlfluff/sqlfluff/pull/2208) [@jpers36](https://github.com/jpers36)
* Remove oyaml in favour of pyyaml [#2210](https://github.com/sqlfluff/sqlfluff/pull/2210) [@jpy-git](https://github.com/jpy-git)
* Support Spark `CREATE TABLE LIKE` syntax [#2207](https://github.com/sqlfluff/sqlfluff/pull/2207) [@R7L208](https://github.com/R7L208)
* Add override for linguist to include SQL in language statistics [#2214](https://github.com/sqlfluff/sqlfluff/pull/2214) [@jpy-git](https://github.com/jpy-git)
* Add type stubs for appdirs and colorama to improve mypy coverage [#2211](https://github.com/sqlfluff/sqlfluff/pull/2211) [@jpy-git](https://github.com/jpy-git)
* Remove cached-property in favour of stdlib functools implementation [#2212](https://github.com/sqlfluff/sqlfluff/pull/2212) [@jpy-git](https://github.com/jpy-git)
* Restructure CASE segment (extract WHEN and ELSE into their own segment types) [#2213](https://github.com/sqlfluff/sqlfluff/pull/2213) [@barrywhart](https://github.com/barrywhart)
* Add types-regex package for type checking [#2216](https://github.com/sqlfluff/sqlfluff/pull/2216) [@jpy-git](https://github.com/jpy-git)
* Snowflake: Split out `CREATE VIEW` into its own segment [#2217](https://github.com/sqlfluff/sqlfluff/pull/2217) [@wong-codaio](https://github.com/wong-codaio)
* Grammar: Fix multi-character comparison operators [#2197](https://github.com/sqlfluff/sqlfluff/pull/2197) [@jpy-git](https://github.com/jpy-git)
* Snowflake: Support TOP N select clause modifier [#2222](https://github.com/sqlfluff/sqlfluff/pull/2222) [@wong-codaio](https://github.com/wong-codaio)
* Fix CLI arguments to allow for autocompletion [#2218](https://github.com/sqlfluff/sqlfluff/pull/2218) [@jpy-git](https://github.com/jpy-git)
* Simplify rule creation by adding a functional API to RuleContext [#2126](https://github.com/sqlfluff/sqlfluff/pull/2126) [@barrywhart](https://github.com/barrywhart)
* Simplify nested cases [#2223](https://github.com/sqlfluff/sqlfluff/pull/2223) [@barrywhart](https://github.com/barrywhart)
* Reword lint message for L058 per review [#2226](https://github.com/sqlfluff/sqlfluff/pull/2226) [@barrywhart](https://github.com/barrywhart)
* Update BaseRule.discard_unsafe_fixes() to avoid touching templated code [#2220](https://github.com/sqlfluff/sqlfluff/pull/2220) [@barrywhart](https://github.com/barrywhart)
* Add L059 - Capitalization on Data Types [#2227](https://github.com/sqlfluff/sqlfluff/pull/2227) [@tdstark](https://github.com/tdstark)
* T-SQL: Table valued functions [#2233](https://github.com/sqlfluff/sqlfluff/pull/2233) [@jpers36](https://github.com/jpers36)
* Don't allow fixes to COPY code from templated regions [#2231](https://github.com/sqlfluff/sqlfluff/pull/2231) [@barrywhart](https://github.com/barrywhart)
* Fix several small issues with rule docs [#2234](https://github.com/sqlfluff/sqlfluff/pull/2234) [@barrywhart](https://github.com/barrywhart)
* postgres: Add datatypes [#2121](https://github.com/sqlfluff/sqlfluff/pull/2121) [@kayman-mk](https://github.com/kayman-mk)
* Combine L059 and L010 [#2238](https://github.com/sqlfluff/sqlfluff/pull/2238) [@tdstark](https://github.com/tdstark)
* Fix L044 assertion failure: "SELECT *" with no "FROM" clause [#2239](https://github.com/sqlfluff/sqlfluff/pull/2239) [@barrywhart](https://github.com/barrywhart)
* Docs: Make Specific Rules docstring more user friendly [#2241](https://github.com/sqlfluff/sqlfluff/pull/2241) [@jpy-git](https://github.com/jpy-git)
* Fix a bug handling Jinja "{% set %}" blocks with a templated block inside [#2240](https://github.com/sqlfluff/sqlfluff/pull/2240) [@barrywhart](https://github.com/barrywhart)
* Redshift lint create external table statements [#2229](https://github.com/sqlfluff/sqlfluff/pull/2229) [@tinder-albertyue](https://github.com/tinder-albertyue)
* Update tox.ini for best practices [#2243](https://github.com/sqlfluff/sqlfluff/pull/2243) [@jpy-git](https://github.com/jpy-git)
* Docs: Make code blocks consistent [#2242](https://github.com/sqlfluff/sqlfluff/pull/2242) [@jpy-git](https://github.com/jpy-git)
* Add support for nested Jinja macros [#2246](https://github.com/sqlfluff/sqlfluff/pull/2246) [@barrywhart](https://github.com/barrywhart)
* Support `DROP` DDL statements for Spark3 [#2215](https://github.com/sqlfluff/sqlfluff/pull/2215) [@R7L208](https://github.com/R7L208)
* Docker Compose environment for SQLFluff developers [#2254](https://github.com/sqlfluff/sqlfluff/pull/2254) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Add OFFSET unreserved keyword [#2258](https://github.com/sqlfluff/sqlfluff/pull/2258) [@jpers36](https://github.com/jpers36)
* Fix connection issue in dbt 1.0.0 [#2230](https://github.com/sqlfluff/sqlfluff/pull/2230) [@NiallRees](https://github.com/NiallRees)
* Redshift CREATE SCHEMA statements [#2252](https://github.com/sqlfluff/sqlfluff/pull/2252) [@rpr-ableton](https://github.com/rpr-ableton)
* Enhance Snowflake COPY INTO [#2250](https://github.com/sqlfluff/sqlfluff/pull/2250) [@chwiese](https://github.com/chwiese)
* Coverage for 'REPAIR' Statements for Hive & Spark3 dialect [#2256](https://github.com/sqlfluff/sqlfluff/pull/2256) [@R7L208](https://github.com/R7L208)

## New Contributors

* [@mcannamela](https://github.com/mcannamela) made their first contribution in [#2111](https://github.com/sqlfluff/sqlfluff/pull/2111)
* [@ciklista](https://github.com/ciklista) made their first contribution in [#2143](https://github.com/sqlfluff/sqlfluff/pull/2143)
* [@juhoautio](https://github.com/juhoautio) made their first contribution in [#2144](https://github.com/sqlfluff/sqlfluff/pull/2144)
* [@tinder-albertyue](https://github.com/tinder-albertyue) made their first contribution in [#2229](https://github.com/sqlfluff/sqlfluff/pull/2229)
* [@rpr-ableton](https://github.com/rpr-ableton) made their first contribution in [#2252](https://github.com/sqlfluff/sqlfluff/pull/2252)

## [0.9.0] - 2021-12-13

## What’s Changed

This release brings about several great new additions including:

- dbt 1.0.0 compatibility.
- CLI and Simple API parameters to provide custom paths to config files.
- Refinement to Simple API to return parse output in JSON format rather than as an internal SQLFluff object (**BREAKING CHANGE**) (see the sketch below).
- An [Official SQLFluff Docker Image](https://hub.docker.com/r/sqlfluff/sqlfluff).
- Grammar improvements across various dialects.
- A new rule (L057) to check for non-alphanumeric values in identifiers.

There have also been many bug fixes and improvements to the CI and development processes.
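
A short sketch of what those Simple API changes look like in practice; the signatures are inferred from the items below and the linked PRs, and the config path is a placeholder:

```python
# A short sketch of the revised Simple API. Signatures are inferred from
# the notes above; "path/to/.sqlfluff" is a placeholder, not a real file.
import json

import sqlfluff

# parse() now returns a JSON-style parse tree (plain dicts and lists)
# rather than an internal SQLFluff object -- the BREAKING CHANGE above.
tree = sqlfluff.parse("SELECT 1 AS foo\n", dialect="ansi")
print(json.dumps(tree, indent=2))

# lint() (like the CLI) can now be pointed at a custom config file.
violations = sqlfluff.lint(
    "SELECT 1 AS foo\n",
    dialect="ansi",
    config_path="path/to/.sqlfluff",
)
print(violations)
```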

## 🚀 Enhancements

* T-SQL: Reserved Keyword cleanup [#2100](https://github.com/sqlfluff/sqlfluff/pull/2100) [@jpers36](https://github.com/jpers36)
* Add wiki links to CONTRIBUTING.md [#2106](https://github.com/sqlfluff/sqlfluff/pull/2106) [@tunetheweb](https://github.com/tunetheweb)
* Add snowflake create stage and alter stage statements + RegexParser case fix [#2098](https://github.com/sqlfluff/sqlfluff/pull/2098) [@chwiese](https://github.com/chwiese)
* Allow for more value types in ALTER TABLE ALTER COLUMN SET DEFAULT statement [#2101](https://github.com/sqlfluff/sqlfluff/pull/2101) [@derickl](https://github.com/derickl)
* Grammar: Adds support for ALTER VIEW statement for Postgres dialect [#2096](https://github.com/sqlfluff/sqlfluff/pull/2096) [@derickl](https://github.com/derickl)
* Add example for using JSON output of Simple API parse function [#2099](https://github.com/sqlfluff/sqlfluff/pull/2099) [@jpy-git](https://github.com/jpy-git)
* Allow optional keywords in create table unique constraints [#2077](https://github.com/sqlfluff/sqlfluff/pull/2077) [@kayman-mk](https://github.com/kayman-mk)
* Grammar: Adds support for ALTER FUNCTION statement for Postgres dialect [#2090](https://github.com/sqlfluff/sqlfluff/pull/2090) [@derickl](https://github.com/derickl)
* Grammar: adds support for CREATE/ALTER/DROP DATABASE for Postgres dialect [#2081](https://github.com/sqlfluff/sqlfluff/pull/2081) [@derickl](https://github.com/derickl)
* Update parse method of Simple API to output JSON parse tree [#2082](https://github.com/sqlfluff/sqlfluff/pull/2082) [@jpy-git](https://github.com/jpy-git)
* T-SQL dialect: add parsing for MERGE statement [#2057](https://github.com/sqlfluff/sqlfluff/pull/2057) [@tkachenkomaria244](https://github.com/tkachenkomaria244)
* Simple API config path [#2080](https://github.com/sqlfluff/sqlfluff/pull/2080) [@jpy-git](https://github.com/jpy-git)
* dbt 1.0.0 compatibility [#2079](https://github.com/sqlfluff/sqlfluff/pull/2079) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Parse `on delete` and `on update` clause for create table constraints [#2076](https://github.com/sqlfluff/sqlfluff/pull/2076) [@kayman-mk](https://github.com/kayman-mk)
* Pre-commit: Add hook for doc8 [#2074](https://github.com/sqlfluff/sqlfluff/pull/2074) [@jpy-git](https://github.com/jpy-git)
* Grammar: Fix typo in Alter Table parser in Postgres dialect [#2072](https://github.com/sqlfluff/sqlfluff/pull/2072) [@derickl](https://github.com/derickl)
* Grammar: Adds support for materialized views for postgres dialect [#2041](https://github.com/sqlfluff/sqlfluff/pull/2041) [@derickl](https://github.com/derickl)
* Add basic pre-commit config [#2067](https://github.com/sqlfluff/sqlfluff/pull/2067) [@jpy-git](https://github.com/jpy-git)
* CLI: Add --ignore-local-config flag [#2061](https://github.com/sqlfluff/sqlfluff/pull/2061) [@jpy-git](https://github.com/jpy-git)
* T-SQL: INSERT INTO [#2054](https://github.com/sqlfluff/sqlfluff/pull/2054) [@jpers36](https://github.com/jpers36)
* Add --disable-noqa option to CLI and config [#2043](https://github.com/sqlfluff/sqlfluff/pull/2043) [@jpy-git](https://github.com/jpy-git)
* T-SQL: TRY/CATCH [#2044](https://github.com/sqlfluff/sqlfluff/pull/2044) [@jpers36](https://github.com/jpers36)
* enabled arrays support in `declare` and `set` statements for `bigquery` dialect [#2038](https://github.com/sqlfluff/sqlfluff/pull/2038) [@KulykDmytro](https://github.com/KulykDmytro)
* L008 refactor [#2004](https://github.com/sqlfluff/sqlfluff/pull/2004) [@jpy-git](https://github.com/jpy-git)
* Support __init__.py for library_path [#1976](https://github.com/sqlfluff/sqlfluff/pull/1976) [@Tonkonozhenko](https://github.com/Tonkonozhenko)
* L052: Redefine semi-colon newline to multiline newline [#2022](https://github.com/sqlfluff/sqlfluff/pull/2022) [@jpy-git](https://github.com/jpy-git)
* Grammar: Remove hash inline comment from Postgres [#2035](https://github.com/sqlfluff/sqlfluff/pull/2035) [@jpy-git](https://github.com/jpy-git)
* `noqa` enhancement: Enable glob rule matching for inline comments [#2002](https://github.com/sqlfluff/sqlfluff/pull/2002) [@jpy-git](https://github.com/jpy-git)
* T-SQL (ASA): Allow for table identifier in DELETE clause [#2031](https://github.com/sqlfluff/sqlfluff/pull/2031) [@jpers36](https://github.com/jpers36)
* T-SQL (ASA): Fix CTAS with WITH statement [#2028](https://github.com/sqlfluff/sqlfluff/pull/2028) [@jpers36](https://github.com/jpers36)
* Grammar: Parse multiple grants [#2023](https://github.com/sqlfluff/sqlfluff/pull/2023) [@jpy-git](https://github.com/jpy-git)
* Add tsql nested block comment support and add regex package dependency [#2027](https://github.com/sqlfluff/sqlfluff/pull/2027) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add complete Snowflake datetime units [#2026](https://github.com/sqlfluff/sqlfluff/pull/2026) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add DROP POLICY statement to postgres dialect [#2024](https://github.com/sqlfluff/sqlfluff/pull/2024) [@jpy-git](https://github.com/jpy-git)
* Grammar: Add complete datetime units to postgres dialect [#2025](https://github.com/sqlfluff/sqlfluff/pull/2025) [@jpy-git](https://github.com/jpy-git)
* Grammar: Postgres CREATE POLICY [#2021](https://github.com/sqlfluff/sqlfluff/pull/2021) [@jpy-git](https://github.com/jpy-git)
* Speed up CI [#1957](https://github.com/sqlfluff/sqlfluff/pull/1957) [@pwildenhain](https://github.com/pwildenhain)
* Add support for Snowflake create/alter SQL and js UDF [#1993](https://github.com/sqlfluff/sqlfluff/pull/1993) [@chwiese](https://github.com/chwiese)
* Add encoding CLI argument [#1994](https://github.com/sqlfluff/sqlfluff/pull/1994) [@jpy-git](https://github.com/jpy-git)
* T-SQL: Spaces allowed in comparison operators [#1965](https://github.com/sqlfluff/sqlfluff/pull/1965) [@jpers36](https://github.com/jpers36)
* Add Snowflake schema options [#1950](https://github.com/sqlfluff/sqlfluff/pull/1950) [@chwiese](https://github.com/chwiese)
* CLI/`.sqlfluff` enhancement: Rule globs [#1972](https://github.com/sqlfluff/sqlfluff/pull/1972) [@jpy-git](https://github.com/jpy-git)
* Add config CLI argument to lint, fix, and parse [#1986](https://github.com/sqlfluff/sqlfluff/pull/1986) [@jpy-git](https://github.com/jpy-git)
* Add type hints to simple API [#1951](https://github.com/sqlfluff/sqlfluff/pull/1951) [@jpy-git](https://github.com/jpy-git)
* New rule to flag special characters in identifiers [#1958](https://github.com/sqlfluff/sqlfluff/pull/1958) [@jpers36](https://github.com/jpers36)
* Allow column references in IN statement [#1971](https://github.com/sqlfluff/sqlfluff/pull/1971) [@tunetheweb](https://github.com/tunetheweb)
* Remove config.ini in favor of setup.cfg [#1966](https://github.com/sqlfluff/sqlfluff/pull/1966) [@jpy-git](https://github.com/jpy-git)
* Convert sqlfluff-templater-dbt setup.py to setup.cfg [#1963](https://github.com/sqlfluff/sqlfluff/pull/1963) [@jpy-git](https://github.com/jpy-git)
* Official Docker image: Dockerfile and Github Actions workflow [#1945](https://github.com/sqlfluff/sqlfluff/pull/1945) [@jpy-git](https://github.com/jpy-git)
* Move package metadata to setup.cfg [#1960](https://github.com/sqlfluff/sqlfluff/pull/1960) [@jpy-git](https://github.com/jpy-git)

## 🐛 Bug Fixes

* Fix tsql block comment close [#2095](https://github.com/sqlfluff/sqlfluff/pull/2095) [@jpy-git](https://github.com/jpy-git)
* Fix PlaceholderTemplater slice_type for templated code (substitutions) [#2085](https://github.com/sqlfluff/sqlfluff/pull/2085) [@barrywhart](https://github.com/barrywhart)
* Exasol: Fix UDF script syntax [#2083](https://github.com/sqlfluff/sqlfluff/pull/2083) [@sti0](https://github.com/sti0)
* Fix issues with placeholder templating docs [#2078](https://github.com/sqlfluff/sqlfluff/pull/2078) [@jpy-git](https://github.com/jpy-git)
* Update dbt templater docs to clarify that the profiles_dir setting is optional [#2070](https://github.com/sqlfluff/sqlfluff/pull/2070) [@barrywhart](https://github.com/barrywhart)
* Bug fix of L054 for Snowflake and Exasol [#2069](https://github.com/sqlfluff/sqlfluff/pull/2069) [@tunetheweb](https://github.com/tunetheweb)
* Fix L043 issue when trying to autofix functions [#2059](https://github.com/sqlfluff/sqlfluff/pull/2059) [@jpy-git](https://github.com/jpy-git)
* Add request for users dbt version in bug_report issue template [#2058](https://github.com/sqlfluff/sqlfluff/pull/2058) [@jpy-git](https://github.com/jpy-git)
* Fix parameters for Snowflake create tasks statement [#2037](https://github.com/sqlfluff/sqlfluff/pull/2037) [@chwiese](https://github.com/chwiese)
* Linguist: Include test/** in language statistics to better reflect use of SQL [#2034](https://github.com/sqlfluff/sqlfluff/pull/2034) [@jpy-git](https://github.com/jpy-git)
* L044 should handle nested CTEs [#1991](https://github.com/sqlfluff/sqlfluff/pull/1991) [@barrywhart](https://github.com/barrywhart)
* Add dbt adapter install advice to configuration documentation [#2011](https://github.com/sqlfluff/sqlfluff/pull/2011) [@jpy-git](https://github.com/jpy-git)
* Update pre-commit dbt instructions to reference separate dbt package [#2005](https://github.com/sqlfluff/sqlfluff/pull/2005) [@jpy-git](https://github.com/jpy-git)
* Fix config.get for iterable sections [#2020](https://github.com/sqlfluff/sqlfluff/pull/2020) [@jpy-git](https://github.com/jpy-git)
* Fix inline comment interactions with L052 [#2019](https://github.com/sqlfluff/sqlfluff/pull/2019) [@jpy-git](https://github.com/jpy-git)
* Make Snowflake tags DRY [#1992](https://github.com/sqlfluff/sqlfluff/pull/1992) [@chwiese](https://github.com/chwiese)
* Rename whitelist/blacklist to allowlist/denylist [#1989](https://github.com/sqlfluff/sqlfluff/pull/1989) [@jpy-git](https://github.com/jpy-git)
* Fix issue with inline ignores not respecting comment lines [#1985](https://github.com/sqlfluff/sqlfluff/pull/1985) [@jpy-git](https://github.com/jpy-git)
* Fix L009 FileSegment child + new create_before/create_after edit types [#1979](https://github.com/sqlfluff/sqlfluff/pull/1979) [@jpy-git](https://github.com/jpy-git)
* Adds extra check to L054 to avoid weird error messages [#1988](https://github.com/sqlfluff/sqlfluff/pull/1988) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Allow keywords in column reference components [#1987](https://github.com/sqlfluff/sqlfluff/pull/1987) [@tunetheweb](https://github.com/tunetheweb)
* L027: Remove unnecessary crawl in get_select_statement_info [#1974](https://github.com/sqlfluff/sqlfluff/pull/1974) [@jpy-git](https://github.com/jpy-git)
* Add __all__ attributes to __init__.py files to resolve F401 [#1949](https://github.com/sqlfluff/sqlfluff/pull/1949) [@jpy-git](https://github.com/jpy-git)
* Fix incorrect comment on L055 [#1967](https://github.com/sqlfluff/sqlfluff/pull/1967) [@jpy-git](https://github.com/jpy-git)
* Docs: fix docker hub link to public URL [#1964](https://github.com/sqlfluff/sqlfluff/pull/1964) [@kevinmarsh](https://github.com/kevinmarsh)
* Fix issue releasing dbt package: tox commands run relative to repo root [#1962](https://github.com/sqlfluff/sqlfluff/pull/1962) [@jpy-git](https://github.com/jpy-git)

## [0.8.2] - 2021-11-22

## What’s Changed

One of the biggest new features in this release is the support for SQLAlchemy and other "placeholder" templating within SQL queries. Check out [the documentation on how to set it up](https://docs.sqlfluff.com/en/latest/configuration.html#placeholder-templating).
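
If you want to try it, the templater is switched on from your `.sqlfluff` config. A minimal sketch, shown as a Python string for illustration (option names follow the linked documentation; the `user_id` parameter is a made-up example):

```python
# A minimal sketch of a .sqlfluff config for the new "placeholder"
# templater, handling SQLAlchemy-style ":param" binds. The user_id
# parameter and its value are made-up examples.
PLACEHOLDER_CONFIG = """\
[sqlfluff]
templater = placeholder

[sqlfluff:templater:placeholder]
param_style = colon
user_id = 42
"""

# With that config in place, a query like this becomes lintable:
SQL = "SELECT name FROM users WHERE user_id = :user_id\n"
```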

This release also adds **seven** new rules. Get some help with your leading whitespace, semi-colon placement, inconsistent column references in `GROUP BY/ORDER BY`, and getting rid of `RIGHT JOIN`s among other useful lints with our new rules! See our [rules documentation](https://docs.sqlfluff.com/en/stable/rules.html) for more details.
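
For example, the new L055 can be pointed at a lone `RIGHT JOIN` via the simple API (as in the earlier sketches, the `rules` selection argument is an assumption about the API surface):

```python
# Illustrative sketch: linting with only the new L055 rule
# ("Use LEFT JOIN instead of RIGHT JOIN") selected.
import sqlfluff

sql = (
    "SELECT foo.col_a, bar.col_b\n"
    "FROM foo\n"
    "RIGHT JOIN bar ON foo.id = bar.foo_id\n"
)
print(sqlfluff.lint(sql, dialect="ansi", rules=["L055"]))
```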

On top of those, we have made loads of grammar improvements across many dialects, improvements to the dbt templater (including issues where `sqlfluff fix` would corrupt the code :scream:), more fix routines, and lots more improvements.

## 🚀 Enhancements

* [many dialects] Implement generic placeholder templating [#1887](https://github.com/sqlfluff/sqlfluff/pull/1887) [@jacopofar](https://github.com/jacopofar)
* [many dialects] Add support for SQLAlchemy templating [#1878](https://github.com/sqlfluff/sqlfluff/pull/1878) [@jacopofar](https://github.com/jacopofar)
* Add DROP PROCEDURE statement to T-SQL [#1921](https://github.com/sqlfluff/sqlfluff/pull/1921) [@jpy-git](https://github.com/jpy-git)
* T-SQL dialect: fix index/tables creation options [#1955](https://github.com/sqlfluff/sqlfluff/pull/1955) [@tkachenkomaria244](https://github.com/tkachenkomaria244)
* Add DROP TYPE statement to ANSI dialect [#1919](https://github.com/sqlfluff/sqlfluff/pull/1919) [@jpy-git](https://github.com/jpy-git)
* Add INSERT INTO statements to Redshift Dialect [#1896](https://github.com/sqlfluff/sqlfluff/pull/1896) [@tdstark](https://github.com/tdstark)
* Added TABLESAMPLE support to Bigquery [#1897](https://github.com/sqlfluff/sqlfluff/pull/1897) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Add [LEFT] ANTI and [LEFT] SEMI joins to the Spark3 dialect [#1942](https://github.com/sqlfluff/sqlfluff/pull/1942) [@jpy-git](https://github.com/jpy-git)
* Parse UPDATE/INSERT within WITH clause [#1889](https://github.com/sqlfluff/sqlfluff/pull/1889) [@jpy-git](https://github.com/jpy-git)
* Add OVERRIDING SYSTEM/USER VALUE to insert statement in postgres dialect [#1869](https://github.com/sqlfluff/sqlfluff/pull/1869) [@jpy-git](https://github.com/jpy-git)
* Add support for DROP SCHEMA [IF EXISTS] name [ CASCADE | RESTRICT ] [#1865](https://github.com/sqlfluff/sqlfluff/pull/1865) [@gimmyxd](https://github.com/gimmyxd)
* Add CREATE TABLE Statement To Redshift [#1855](https://github.com/sqlfluff/sqlfluff/pull/1855) [@tdstark](https://github.com/tdstark)
* Add DROP TYPE statement in postgres dialect [#1870](https://github.com/sqlfluff/sqlfluff/pull/1870) [@jpy-git](https://github.com/jpy-git)
* Add SEQUENCE NAME to postgres sequence options [#1866](https://github.com/sqlfluff/sqlfluff/pull/1866) [@jpy-git](https://github.com/jpy-git)
* Added SET Statement to Postgres [#1877](https://github.com/sqlfluff/sqlfluff/pull/1877) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Allow use of quoted identifiers to ALTER TABLE OWNER TO [#1856](https://github.com/sqlfluff/sqlfluff/pull/1856) [@markpolyak](https://github.com/markpolyak)
* Updates to COPY INTO grammar in Snowflake [#1884](https://github.com/sqlfluff/sqlfluff/pull/1884) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres & T-SQL: Drop Function [#1924](https://github.com/sqlfluff/sqlfluff/pull/1924) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Add Expressions to SET syntax [#1852](https://github.com/sqlfluff/sqlfluff/pull/1852) [@tunetheweb](https://github.com/tunetheweb)
* Update DbtTemplater to use JinjaTracer [#1788](https://github.com/sqlfluff/sqlfluff/pull/1788) [@barrywhart](https://github.com/barrywhart)
* L043 refinement: Add autofix for common use of CASE to fill NULL values. [#1923](https://github.com/sqlfluff/sqlfluff/pull/1923) [@jpy-git](https://github.com/jpy-git)
* New Rule L050: No leading whitespace [#1840](https://github.com/sqlfluff/sqlfluff/pull/1840) [@jpy-git](https://github.com/jpy-git)
* L050: updating to target jinja templates [#1885](https://github.com/sqlfluff/sqlfluff/pull/1885) [@jpy-git](https://github.com/jpy-git)
* New rule L051 to forbid lone JOIN [#1879](https://github.com/sqlfluff/sqlfluff/pull/1879) [@jpy-git](https://github.com/jpy-git)
* New Rule L052: Semi colon alignment [#1902](https://github.com/sqlfluff/sqlfluff/pull/1902) [@jpy-git](https://github.com/jpy-git)
* New Rule L053: Remove outer brackets from top-level statements. [#1916](https://github.com/sqlfluff/sqlfluff/pull/1916) [@jpy-git](https://github.com/jpy-git)
* New Rule L054: Inconsistent column references in GROUP BY/ORDER BY clauses. [#1917](https://github.com/sqlfluff/sqlfluff/pull/1917) [@jpy-git](https://github.com/jpy-git)
* New Rule L055: Use LEFT JOIN instead of RIGHT JOIN. [#1931](https://github.com/sqlfluff/sqlfluff/pull/1931) [@jpy-git](https://github.com/jpy-git)
* New Rule L056: 'SP_' prefix should not be used for user-defined stored procedures [#1930](https://github.com/sqlfluff/sqlfluff/pull/1930) [@jpy-git](https://github.com/jpy-git)
* Tsql partition by multiple columns [#1906](https://github.com/sqlfluff/sqlfluff/pull/1906) [@jpers36](https://github.com/jpers36)
* Added bare functions to values clause [#1876](https://github.com/sqlfluff/sqlfluff/pull/1876) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove unnecessary context section, from code and the docs [#1905](https://github.com/sqlfluff/sqlfluff/pull/1905) [@jacopofar](https://github.com/jacopofar)
* L036 docstring refinements [#1903](https://github.com/sqlfluff/sqlfluff/pull/1903) [@jpy-git](https://github.com/jpy-git)
* Add `exclude_rules` option for the Simple API [#1850](https://github.com/sqlfluff/sqlfluff/pull/1850) [@tunetheweb](https://github.com/tunetheweb)
* Tox improvements: Streamline development/testing environments. [#1860](https://github.com/sqlfluff/sqlfluff/pull/1860) [@jpy-git](https://github.com/jpy-git)
* Add Tox publish commands [#1853](https://github.com/sqlfluff/sqlfluff/pull/1853) [@jpy-git](https://github.com/jpy-git)
* Documentation: Change inheritance dialect example to Redshift [#1900](https://github.com/sqlfluff/sqlfluff/pull/1900) [@chwiese](https://github.com/chwiese)
* Remove failing requires.io badge [#1898](https://github.com/sqlfluff/sqlfluff/pull/1898) [@jpy-git](https://github.com/jpy-git)
* [Snowflake] Allow naked AUTOINCREMENT [#1883](https://github.com/sqlfluff/sqlfluff/pull/1883) [@gordonhart](https://github.com/gordonhart)
* Add support for curly brackets in SnowSQL ampersand variables [#1901](https://github.com/sqlfluff/sqlfluff/pull/1901) [@chwiese](https://github.com/chwiese)
* Add short form help option (-h) [#1947](https://github.com/sqlfluff/sqlfluff/pull/1947) [@jpy-git](https://github.com/jpy-git)
* Remove plaintext API key from benchmark utility [#1863](https://github.com/sqlfluff/sqlfluff/pull/1863) [@jpy-git](https://github.com/jpy-git)
* Add `skip_install` to static analysis sections of tox.ini [#1851](https://github.com/sqlfluff/sqlfluff/pull/1851) [@jpy-git](https://github.com/jpy-git)
* Move typing_extensions from `requirements_dev.txt` to `requirements.txt` [#1956](https://github.com/sqlfluff/sqlfluff/pull/1956) [@jpy-git](https://github.com/jpy-git)

## 🐛 Bug Fixes

* Fix bug where "sqlfluff fix" deletes dbt "{% snapshot %}" line [#1907](https://github.com/sqlfluff/sqlfluff/pull/1907) [@barrywhart](https://github.com/barrywhart)
* Fix subquery bug in L026 [#1948](https://github.com/sqlfluff/sqlfluff/pull/1948) [@jpy-git](https://github.com/jpy-git)
* Fix bug where L041 was confused by L016's placement of newlines in the parse tree [#1904](https://github.com/sqlfluff/sqlfluff/pull/1904) [@barrywhart](https://github.com/barrywhart)
* Fix progressbar artifacts within linter errors [#1873](https://github.com/sqlfluff/sqlfluff/pull/1873) [@adam-tokarski](https://github.com/adam-tokarski)
* Correct Snowflake warehouse sizes [#1872](https://github.com/sqlfluff/sqlfluff/pull/1872) [@jpy-git](https://github.com/jpy-git)
* Fixed Delimited() logic, added T-SQL grammar [#1894](https://github.com/sqlfluff/sqlfluff/pull/1894) [@WittierDinosaur](https://github.com/WittierDinosaur)
* L036 refinement - FROM clause interaction [#1893](https://github.com/sqlfluff/sqlfluff/pull/1893) [@jpy-git](https://github.com/jpy-git)
* Add missing chardet install in setup.py [#1928](https://github.com/sqlfluff/sqlfluff/pull/1928) [@jpy-git](https://github.com/jpy-git)
* Fix misplaced TableAliasInfo in L031 documentation [#1946](https://github.com/sqlfluff/sqlfluff/pull/1946) [@jpy-git](https://github.com/jpy-git)
* Fix broken link to external SQL style guide [#1918](https://github.com/sqlfluff/sqlfluff/pull/1918) [@kevinmarsh](https://github.com/kevinmarsh)

## [0.8.1] - 2021-11-07

## What’s Changed

Fixes missing dependency issue with 0.8.0 for `tqdm`, plus adds a test to ensure this does not happen again.

## 🐛 Bug Fixes

* Fix: add tqdm to setup.py installation requirements [#1842](https://github.com/sqlfluff/sqlfluff/pull/1842) [@skykasko](https://github.com/skykasko)
* Add test to ensure pip install works [#1843](https://github.com/sqlfluff/sqlfluff/pull/1843) [@tunetheweb](https://github.com/tunetheweb)

## [0.8.0] - 2021-11-07

## What’s Changed

This release brings an improvement to the performance of the parser, a rebuild of the Jinja Templater, and a progress bar for the CLI.
Lots of dialect improvements have also been made. Full list of changes below:

## 🚀 Enhancements

* Updated L009 logic to only allow a single trailing newline. [#1838](https://github.com/sqlfluff/sqlfluff/pull/1838) [@jpy-git](https://github.com/jpy-git)
* Progressbar utility [#1609](https://github.com/sqlfluff/sqlfluff/pull/1609) [@adam-tokarski](https://github.com/adam-tokarski)
* Teradata dialect: Add support for SEL form of SELECT [#1776](https://github.com/sqlfluff/sqlfluff/pull/1776) [@samlader](https://github.com/samlader)
* Added trigger support in ANSI - and extended it in Postgres [#1818](https://github.com/sqlfluff/sqlfluff/pull/1818) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Exasol: Make references more strict [#1829](https://github.com/sqlfluff/sqlfluff/pull/1829) [@sti0](https://github.com/sti0)
* Hive: INSERT statement support [#1828](https://github.com/sqlfluff/sqlfluff/pull/1828) [@mifercre](https://github.com/mifercre)
* ANSI: Add TABLESAMPLE support [#1811](https://github.com/sqlfluff/sqlfluff/pull/1811) [@CrossNox](https://github.com/CrossNox)
* T-SQL: Support trailing commas in CREATE TABLE [#1817](https://github.com/sqlfluff/sqlfluff/pull/1817) [@tommydb](https://github.com/tommydb)
* Spark3: Add CREATE VIEW support [#1813](https://github.com/sqlfluff/sqlfluff/pull/1813) [@DipeshCS](https://github.com/DipeshCS)
* BigQuery: Support PIVOT and UNPIVOT [#1794](https://github.com/sqlfluff/sqlfluff/pull/1794) [@tunetheweb](https://github.com/tunetheweb)
* L029: Optionally check quoted identifiers in addition to naked identifiers [#1775](https://github.com/sqlfluff/sqlfluff/pull/1775) [@jpers36](https://github.com/jpers36)
* Add sysdate to Redshift as a bare function [#1789](https://github.com/sqlfluff/sqlfluff/pull/1789) [@tdstark](https://github.com/tdstark)
* Robust Jinja raw/template mapping [#1678](https://github.com/sqlfluff/sqlfluff/pull/1678) [@barrywhart](https://github.com/barrywhart)
* Add CREATE TABLE AS to Postgres and Redshift [#1785](https://github.com/sqlfluff/sqlfluff/pull/1785) [@tdstark](https://github.com/tdstark)
* Improve Parser Performance By Caching Values [#1744](https://github.com/sqlfluff/sqlfluff/pull/1744) [@WittierDinosaur](https://github.com/WittierDinosaur)
* templater-dbt: Change dbt dependency to dbt-core [#1786](https://github.com/sqlfluff/sqlfluff/pull/1786) [@amardeep](https://github.com/amardeep)
* T-SQL: Create Schema definition [#1773](https://github.com/sqlfluff/sqlfluff/pull/1773) [@jpers36](https://github.com/jpers36)
* T-SQL: allow optional brackets for column default constraints [#1760](https://github.com/sqlfluff/sqlfluff/pull/1760) [@nevado](https://github.com/nevado)
* Postgres: Support parameters and identifiers prepended with _ and containing $ [#1765](https://github.com/sqlfluff/sqlfluff/pull/1765) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Added support for double precision [#1764](https://github.com/sqlfluff/sqlfluff/pull/1764) [@WittierDinosaur](https://github.com/WittierDinosaur)
* "sqlfluff fix": Write to a temporary .sql file first [#1763](https://github.com/sqlfluff/sqlfluff/pull/1763) [@barrywhart](https://github.com/barrywhart)
* Update older dbt dependency [#1756](https://github.com/sqlfluff/sqlfluff/pull/1756) [@alanmcruickshank](https://github.com/alanmcruickshank)
* T-SQL: add IDENTITY column constraint [#1757](https://github.com/sqlfluff/sqlfluff/pull/1757) [@nevado](https://github.com/nevado)
* Update CI to run under Python 3.10 [#1739](https://github.com/sqlfluff/sqlfluff/pull/1739) [@rooterkyberian](https://github.com/rooterkyberian)
* MySQL: Add drop index support [#1738](https://github.com/sqlfluff/sqlfluff/pull/1738) [@fatelei](https://github.com/fatelei)
* Snowflake dialect improvements [#1737](https://github.com/sqlfluff/sqlfluff/pull/1737) [@tunetheweb](https://github.com/tunetheweb)
* Add missing test case [#1735](https://github.com/sqlfluff/sqlfluff/pull/1735) [@tunetheweb](https://github.com/tunetheweb)

## 🐛 Bug Fixes

* Fix: Add missing init file to sqlfluff.core.templaters.slicers [#1826](https://github.com/sqlfluff/sqlfluff/pull/1826) [@CrossNox](https://github.com/CrossNox)
* Hive: Fix order of CREATE TEMPORARY EXTERNAL TABLE [#1825](https://github.com/sqlfluff/sqlfluff/pull/1825) [@mifercre](https://github.com/mifercre)
* T-SQL: add AS keyword as optional in PIVOT-UNPIVOT [#1807](https://github.com/sqlfluff/sqlfluff/pull/1807) [@tkachenkomaria244](https://github.com/tkachenkomaria244)
* Prevent L019 plus L034 corrupting SQL [#1803](https://github.com/sqlfluff/sqlfluff/pull/1803) [@barrywhart](https://github.com/barrywhart)
* L028 fix - Allow SELECT column alias in WHERE clauses for certain dialects [#1796](https://github.com/sqlfluff/sqlfluff/pull/1796) [@tunetheweb](https://github.com/tunetheweb)
* Comment out instructions in GitHub templates [#1792](https://github.com/sqlfluff/sqlfluff/pull/1792) [@tunetheweb](https://github.com/tunetheweb)
* Fix internal error in L016 when template/whitespace-only line too long [#1795](https://github.com/sqlfluff/sqlfluff/pull/1795) [@barrywhart](https://github.com/barrywhart)
* Fix L049 to allow = NULL in SET clauses [#1791](https://github.com/sqlfluff/sqlfluff/pull/1791) [@tunetheweb](https://github.com/tunetheweb)
* Hive: Fix bug in CREATE TABLE WITH syntax [#1790](https://github.com/sqlfluff/sqlfluff/pull/1790) [@iajoiner](https://github.com/iajoiner)
* Fixed encoding error when linting to file [#1787](https://github.com/sqlfluff/sqlfluff/pull/1787) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix L012 documentation [#1782](https://github.com/sqlfluff/sqlfluff/pull/1782) [@jpers36](https://github.com/jpers36)
* T-SQL: fix quote alias [#1766](https://github.com/sqlfluff/sqlfluff/pull/1766) [@jpers36](https://github.com/jpers36)
* Fix incorrect indentation issue [#1733](https://github.com/sqlfluff/sqlfluff/pull/1733) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Fix OVER functionality for functions [#1731](https://github.com/sqlfluff/sqlfluff/pull/1731) [@jpers36](https://github.com/jpers36)

## [0.7.1] - 2021-10-22

## What’s Changed

Highlights of this release contain a lot of T-SQL dialect improvements (shout out to @jpers36 for most of these!). We also added Spark3 as a new dialect thanks to @R7L208. The complete list of changes is shown below.
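
To try the new dialect, pass its name when linting. A tiny sketch, assuming the dialect is registered as `spark3` as in this release:

```python
# A tiny sketch of linting against the newly added Spark3 dialect.
# The dialect name "spark3" is as introduced in this release.
import sqlfluff

print(sqlfluff.lint("SELECT col_a FROM db.tbl\n", dialect="spark3"))
```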

## 🚀 Enhancements

* T-SQL: Add rank functions [#1725](https://github.com/sqlfluff/sqlfluff/pull/1725) [@jpers36](https://github.com/jpers36)
* Spark3 Dialect Support [#1706](https://github.com/sqlfluff/sqlfluff/pull/1706) [@R7L208](https://github.com/R7L208)
* Postgres Array Support [#1722](https://github.com/sqlfluff/sqlfluff/pull/1722) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Hive: Add LEFT SEMI JOIN support [#1718](https://github.com/sqlfluff/sqlfluff/pull/1718) [@fatelei](https://github.com/fatelei)
* MySQL: Change and drop column in alter table [#1670](https://github.com/sqlfluff/sqlfluff/pull/1670) [@MontealegreLuis](https://github.com/MontealegreLuis)
* Added type hints to some rule files [#1616](https://github.com/sqlfluff/sqlfluff/pull/1616) [@ttomasz](https://github.com/ttomasz)
* Added Redshift to README [#1720](https://github.com/sqlfluff/sqlfluff/pull/1720) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Exasol: Fix create table statement [#1700](https://github.com/sqlfluff/sqlfluff/pull/1700) [@sti0](https://github.com/sti0)
* T-SQL: Add optional delimiter to SET [#1717](https://github.com/sqlfluff/sqlfluff/pull/1717) [@jpers36](https://github.com/jpers36)
* T-SQL: Escaped quotes [#1715](https://github.com/sqlfluff/sqlfluff/pull/1715) [@jpers36](https://github.com/jpers36)
* T-SQL: SELECT INTO [#1714](https://github.com/sqlfluff/sqlfluff/pull/1714) [@jpers36](https://github.com/jpers36)
* Postgres: Added support for psql variables [#1709](https://github.com/sqlfluff/sqlfluff/pull/1709) [@WittierDinosaur](https://github.com/WittierDinosaur)
* T-SQL: split location clause out from index clause [#1711](https://github.com/sqlfluff/sqlfluff/pull/1711) [@jpers36](https://github.com/jpers36)
* T-SQL: Override ANSI HAVING [#1707](https://github.com/sqlfluff/sqlfluff/pull/1707) [@jpers36](https://github.com/jpers36)
* T-SQL: Add UPDATE STATISTICS [#1703](https://github.com/sqlfluff/sqlfluff/pull/1703) [@jpers36](https://github.com/jpers36)
* T-SQL: CTAS Option Clause [#1705](https://github.com/sqlfluff/sqlfluff/pull/1705) [@jpers36](https://github.com/jpers36)
* T-SQL: DECLARE has optional AS [#1704](https://github.com/sqlfluff/sqlfluff/pull/1704) [@jpers36](https://github.com/jpers36)
* T-SQL: DROP STATISTICS and INDEX [#1698](https://github.com/sqlfluff/sqlfluff/pull/1698) [@jpers36](https://github.com/jpers36)
* T-SQL: CTAS select can be optionally bracketed [#1697](https://github.com/sqlfluff/sqlfluff/pull/1697) [@jpers36](https://github.com/jpers36)
* Exasol: Make function_script_terminator more strict [#1696](https://github.com/sqlfluff/sqlfluff/pull/1696) [@sti0](https://github.com/sti0)
* T-SQL distribution index location [#1695](https://github.com/sqlfluff/sqlfluff/pull/1695) [@jpers36](https://github.com/jpers36)
* T-SQL: allow for non-alphanumeric initial characters in delimited identifiers [#1693](https://github.com/sqlfluff/sqlfluff/pull/1693) [@jpers36](https://github.com/jpers36)
* T-SQL: allow for semi-colon after BEGIN in a BEGIN/END block [#1694](https://github.com/sqlfluff/sqlfluff/pull/1694) [@jpers36](https://github.com/jpers36)
* Exasol: Fix adapter script syntax [#1692](https://github.com/sqlfluff/sqlfluff/pull/1692) [@sti0](https://github.com/sti0)
* T-SQL: Basic EXECUTE functionality [#1691](https://github.com/sqlfluff/sqlfluff/pull/1691) [@jpers36](https://github.com/jpers36)
* T-SQL: Add #, @ to valid identifier characters [#1690](https://github.com/sqlfluff/sqlfluff/pull/1690) [@jpers36](https://github.com/jpers36)
* T-SQL - add support for Filegroups in table create [#1689](https://github.com/sqlfluff/sqlfluff/pull/1689) [@nevado](https://github.com/nevado)
* Exclude Exasol scripts from rule L003 [#1684](https://github.com/sqlfluff/sqlfluff/pull/1684) [@tunetheweb](https://github.com/tunetheweb)
* Added PostGIS keyword data types to Postgres [#1686](https://github.com/sqlfluff/sqlfluff/pull/1686) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Indent LIMIT values if on separate line [#1683](https://github.com/sqlfluff/sqlfluff/pull/1683) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Added support for SELECT INTO statements [#1676](https://github.com/sqlfluff/sqlfluff/pull/1676) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Allow :: casting of CASE statements [#1657](https://github.com/sqlfluff/sqlfluff/pull/1657) [@tunetheweb](https://github.com/tunetheweb)
* Add more keywords to Redshift and BigQuery to avoid errors [#1671](https://github.com/sqlfluff/sqlfluff/pull/1671) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL begin end delimiter [#1664](https://github.com/sqlfluff/sqlfluff/pull/1664) [@jpers36](https://github.com/jpers36)
* Teradata: Added date as bare function [#1663](https://github.com/sqlfluff/sqlfluff/pull/1663) [@anzelpwj](https://github.com/anzelpwj)
* T-SQL: CREATE STATISTICS [#1662](https://github.com/sqlfluff/sqlfluff/pull/1662) [@jpers36](https://github.com/jpers36)
* T-SQL table and query hints [#1661](https://github.com/sqlfluff/sqlfluff/pull/1661) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow spaces in qualified names [#1654](https://github.com/sqlfluff/sqlfluff/pull/1654) [@jpers36](https://github.com/jpers36)

## 🐛 Bug Fixes

* EXASOL: Fix typo in alter_table_statement [#1726](https://github.com/sqlfluff/sqlfluff/pull/1726) [@sti0](https://github.com/sti0)
* Fix markdown links in production.rst [#1721](https://github.com/sqlfluff/sqlfluff/pull/1721) [@asottile](https://github.com/asottile)
* Correct contributing testing information [#1702](https://github.com/sqlfluff/sqlfluff/pull/1702) [@adam-tokarski](https://github.com/adam-tokarski)
* More ORDER BY clarifications [#1681](https://github.com/sqlfluff/sqlfluff/pull/1681) [@tunetheweb](https://github.com/tunetheweb)
* Fix T-SQL L025 linter exception [#1677](https://github.com/sqlfluff/sqlfluff/pull/1677) [@tunetheweb](https://github.com/tunetheweb)
* Improve Jinja whitespace handling in rules [#1647](https://github.com/sqlfluff/sqlfluff/pull/1647) [@barrywhart](https://github.com/barrywhart)

## [0.7.0] - 2021-10-14

**BREAKING CHANGE**

This release extracts the dbt templater to a separately installable plugin [sqlfluff-templater-dbt](https://pypi.org/project/sqlfluff-templater-dbt/). For users who take advantage of the dbt templater see the [updated docs on how to migrate](https://docs.sqlfluff.com/en/latest/configuration.html#installation-configuration).

It also adds the `redshift` dialect and removes the `exasol_fs` dialect which has been merged into the `exasol` dialect.
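
A migration sketch for dbt users; the package name comes from the note above and the config fragment is illustrative:

```python
# Migration sketch for the extracted dbt templater. The plugin now needs
# a separate install (package name per the note above):
#
#     pip install sqlfluff-templater-dbt
#
# after which the existing templater selection in .sqlfluff keeps working:
DBT_TEMPLATER_CONFIG = """\
[sqlfluff]
templater = dbt
"""
```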
## What’s Changed

* src/sqlfluff/core/linter: Improve ignore file processing [#1650](https://github.com/sqlfluff/sqlfluff/pull/1650) [@CyberShadow](https://github.com/CyberShadow)
* Misc documentation updates [#1644](https://github.com/sqlfluff/sqlfluff/pull/1644) [@tunetheweb](https://github.com/tunetheweb)
* Segregate dbt plugin tests [#1610](https://github.com/sqlfluff/sqlfluff/pull/1610) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add initial Redshift support [#1641](https://github.com/sqlfluff/sqlfluff/pull/1641) [@tunetheweb](https://github.com/tunetheweb)
* Update docs for dbt templater, improve error messages when not installed. [#1583](https://github.com/sqlfluff/sqlfluff/pull/1583) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make templaters pluggable and move the dbt templater into a plugin [#1264](https://github.com/sqlfluff/sqlfluff/pull/1264) [@alanmcruickshank](https://github.com/alanmcruickshank)

## 🚀 Enhancements

* T-SQL: CTAS delimiter [#1652](https://github.com/sqlfluff/sqlfluff/pull/1652) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow for multiple variables DECLAREd in the same statement [#1651](https://github.com/sqlfluff/sqlfluff/pull/1651) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow DECLARE/SET statements to parse using ExpressionStatement [#1649](https://github.com/sqlfluff/sqlfluff/pull/1649) [@jpers36](https://github.com/jpers36)
* T-SQL PRINT statement parsing [#1648](https://github.com/sqlfluff/sqlfluff/pull/1648) [@jpers36](https://github.com/jpers36)
* Better date function for tsql [#1636](https://github.com/sqlfluff/sqlfluff/pull/1636) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Allow for multiple statements in a procedure [#1637](https://github.com/sqlfluff/sqlfluff/pull/1637) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow for !>, !< operators [#1640](https://github.com/sqlfluff/sqlfluff/pull/1640) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix GROUP BY delimiter [#1635](https://github.com/sqlfluff/sqlfluff/pull/1635) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix DROP delimiter [#1633](https://github.com/sqlfluff/sqlfluff/pull/1633) [@jpers36](https://github.com/jpers36)
* T-SQL: Add RENAME statement for Azure Synapse Analytics [#1631](https://github.com/sqlfluff/sqlfluff/pull/1631) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix CASTing variables [#1627](https://github.com/sqlfluff/sqlfluff/pull/1627) [@jpers36](https://github.com/jpers36)
* Snowflake: Add implementation for CREATE TASK statement [#1597](https://github.com/sqlfluff/sqlfluff/pull/1597) [#1603](https://github.com/sqlfluff/sqlfluff/pull/1603) [@JoeHut](https://github.com/JoeHut)
* Allow global config for rule testcases [#1580](https://github.com/sqlfluff/sqlfluff/pull/1580) [@sti0](https://github.com/sti0)
* Snowflake dollar sign literals [#1591](https://github.com/sqlfluff/sqlfluff/pull/1591) [@myschkyna](https://github.com/myschkyna)
* Rename test/fixtures/parser directory to test/fixtures/dialects [#1585](https://github.com/sqlfluff/sqlfluff/pull/1585) [@tunetheweb](https://github.com/tunetheweb)
* Rename keyword files [#1584](https://github.com/sqlfluff/sqlfluff/pull/1584) [@tunetheweb](https://github.com/tunetheweb)
* Add some more unreserved keywords to BigQuery [#1588](https://github.com/sqlfluff/sqlfluff/pull/1588) [@tunetheweb](https://github.com/tunetheweb)
* Increase minimum runs before coverage report is issued [#1596](https://github.com/sqlfluff/sqlfluff/pull/1596) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Support CURRENT_TIMESTAMP as a column default value [#1578](https://github.com/sqlfluff/sqlfluff/pull/1578) [@wong-codaio](https://github.com/wong-codaio)
* T-SQL temp tables [#1574](https://github.com/sqlfluff/sqlfluff/pull/1574) [@jpers36](https://github.com/jpers36)

## 🐛 Bug Fixes

* Fix NoneType exception in L031 [#1643](https://github.com/sqlfluff/sqlfluff/pull/1643) [@tunetheweb](https://github.com/tunetheweb)
* Stop rule L048 complaining if literal is followed by a semicolon [#1638](https://github.com/sqlfluff/sqlfluff/pull/1638) [@tunetheweb](https://github.com/tunetheweb)
* L031 desc updated to cover both 'from' and 'join' [#1625](https://github.com/sqlfluff/sqlfluff/pull/1625) [@nevado](https://github.com/nevado)
* Snowflake auto increment fixes [#1620](https://github.com/sqlfluff/sqlfluff/pull/1620) [@myschkyna](https://github.com/myschkyna)
* Fix DECLARE Delimitation [#1615](https://github.com/sqlfluff/sqlfluff/pull/1615) [@jpers36](https://github.com/jpers36)
* Snowflake drop column fixes [#1618](https://github.com/sqlfluff/sqlfluff/pull/1618) [@myschkyna](https://github.com/myschkyna)
* T-SQL: fix statement delimitation [#1612](https://github.com/sqlfluff/sqlfluff/pull/1612) [@jpers36](https://github.com/jpers36)
* Snowflake: Fixed data type casting not working in `SET` statement [#1604](https://github.com/sqlfluff/sqlfluff/pull/1604) [@wong-codaio](https://github.com/wong-codaio)
* Postgres dialect: Fix parse error for "on delete", "on update" clauses in column constraints [#1586](https://github.com/sqlfluff/sqlfluff/pull/1586) [@samlader](https://github.com/samlader)
* Fix AttributeError: 'NoneType' object has no attribute 'get_child' error with rule L031 [#1595](https://github.com/sqlfluff/sqlfluff/pull/1595) [@barrywhart](https://github.com/barrywhart)
* Fix zero length templated file bug. [#1577](https://github.com/sqlfluff/sqlfluff/pull/1577) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fully remove exasol_fs dialect and bump version [#1573](https://github.com/sqlfluff/sqlfluff/pull/1573) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [0.6.9] - 2021-10-08

Another dbt bugfix from 0.6.7 and 0.6.8, plus a host of dialect and syntax improvements.
## 🚀 Enhancements

* Correct and expand Snowflake CREATE TABLE syntax [#1567](https://github.com/sqlfluff/sqlfluff/pull/1567) [@tunetheweb](https://github.com/tunetheweb)
* Support brackets in Postgres Meta commands [#1548](https://github.com/sqlfluff/sqlfluff/pull/1548) [@tunetheweb](https://github.com/tunetheweb)
* Added type hints to rule files [#1515](https://github.com/sqlfluff/sqlfluff/pull/1515) [@ttomasz](https://github.com/ttomasz)
* Update Rule L028 to handle T-SQL PIVOT columns [#1545](https://github.com/sqlfluff/sqlfluff/pull/1545) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL IF/ELSE [#1564](https://github.com/sqlfluff/sqlfluff/pull/1564) [@jpers36](https://github.com/jpers36)
* Enums for format types and colors added [#1558](https://github.com/sqlfluff/sqlfluff/pull/1558) [@adam-tokarski](https://github.com/adam-tokarski)
* Add dbt 0.21.0 to the test suite [#1566](https://github.com/sqlfluff/sqlfluff/pull/1566) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Merge EXASOL_FS dialect into EXASOL dialect [#1498](https://github.com/sqlfluff/sqlfluff/pull/1498) [@sti0](https://github.com/sti0)
* T-SQL - BEGIN/END blocks [#1553](https://github.com/sqlfluff/sqlfluff/pull/1553) [@jpers36](https://github.com/jpers36)
* Small refactor with type hints and string formatting [#1525](https://github.com/sqlfluff/sqlfluff/pull/1525) [@adam-tokarski](https://github.com/adam-tokarski)
* Add Github Preview Image [#1557](https://github.com/sqlfluff/sqlfluff/pull/1557) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support SETOF in Postgres [#1522](https://github.com/sqlfluff/sqlfluff/pull/1522) [@tunetheweb](https://github.com/tunetheweb)
* Support Double Precision in ANSI [#1524](https://github.com/sqlfluff/sqlfluff/pull/1524) [@tunetheweb](https://github.com/tunetheweb)
* Support LATERAL joins in Postgres [#1519](https://github.com/sqlfluff/sqlfluff/pull/1519) [@adam-tokarski](https://github.com/adam-tokarski)
* Add a rule to warn on "= NULL" or "<> NULL" comparisons [#1527](https://github.com/sqlfluff/sqlfluff/pull/1527) [@barrywhart](https://github.com/barrywhart)
* Support Group and Groups as table names [#1546](https://github.com/sqlfluff/sqlfluff/pull/1546) [@tunetheweb](https://github.com/tunetheweb)
* Support more complex IN (...) expressions [#1550](https://github.com/sqlfluff/sqlfluff/pull/1550) [@tunetheweb](https://github.com/tunetheweb)
* Support CROSS APPLY and OUTER APPLY and TOP in T-SQL [#1551](https://github.com/sqlfluff/sqlfluff/pull/1551) [@tunetheweb](https://github.com/tunetheweb)
* Add support for WITHOUT ROWID to SQLite [#1531](https://github.com/sqlfluff/sqlfluff/pull/1531) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: add `CONCURRENTLY` and `FINALIZE` keywords to `DETACH PARTITION` [#1529](https://github.com/sqlfluff/sqlfluff/pull/1529) [@kevinmarsh](https://github.com/kevinmarsh)
* Better support of MySQL CREATE TABLE TIMESTAMP/DATESTAMP [#1530](https://github.com/sqlfluff/sqlfluff/pull/1530) [@tunetheweb](https://github.com/tunetheweb)
* "Found unparsable section" instead of stack trace when multiple semicolons provided [#1517](https://github.com/sqlfluff/sqlfluff/pull/1517) [@adam-tokarski](https://github.com/adam-tokarski)

## 🐛 Bug Fixes

* Fix test coverage [#1569](https://github.com/sqlfluff/sqlfluff/pull/1569) [@tunetheweb](https://github.com/tunetheweb)
* Remove lint_templated_tokens as it no longer does anything [#1570](https://github.com/sqlfluff/sqlfluff/pull/1570) [@tunetheweb](https://github.com/tunetheweb)
* Fix broken block comments in exasol [#1565](https://github.com/sqlfluff/sqlfluff/pull/1565) [@sti0](https://github.com/sti0)
* Rethink sequence_files in dbt templater. [#1563](https://github.com/sqlfluff/sqlfluff/pull/1563) [@alanmcruickshank](https://github.com/alanmcruickshank)
* T-SQL: fix STRING_AGG() WITHIN GROUP clause [#1559](https://github.com/sqlfluff/sqlfluff/pull/1559) [@jpers36](https://github.com/jpers36)
* Fix spelling: occurence → occurrence [#1507](https://github.com/sqlfluff/sqlfluff/pull/1507) [@jpers36](https://github.com/jpers36)

## [0.6.8] - 2021-10-05

Fixed a dbt bug introduced in 0.6.7 - apologies!

## What’s Changed

* SQLFluff can't find dbt models [#1513](https://github.com/sqlfluff/sqlfluff/pull/1513) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Support for unicode literals [#1511](https://github.com/sqlfluff/sqlfluff/pull/1511) [@adam-tokarski](https://github.com/adam-tokarski)

## [0.6.7] - 2021-10-04

Lots of fixes to our rules (particularly when running `sqlfluff fix`, and particularly for Jinja and dbt templates). We also have good improvements to the Exasol, Snowflake, and T-SQL dialects, amongst others. Plus we added Hive and SQLite as supported dialects!
## What’s Changed

* Snowflake better WAREHOUSE and CREATE (EXTERNAL) TABLES support [#1508](https://github.com/sqlfluff/sqlfluff/pull/1508) [@tunetheweb](https://github.com/tunetheweb)
* Exasol: Fix typo in `REORGANIZE` statement [#1509](https://github.com/sqlfluff/sqlfluff/pull/1509) [@sti0](https://github.com/sti0)
* Fix bug that can prevent linting ephemeral dbt models [#1496](https://github.com/sqlfluff/sqlfluff/pull/1496) [@barrywhart](https://github.com/barrywhart)
* Disable rules L026 and L028 for BigQuery by default, with option to re-enable [#1504](https://github.com/sqlfluff/sqlfluff/pull/1504) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery keywords [#1506](https://github.com/sqlfluff/sqlfluff/pull/1506) [@tunetheweb](https://github.com/tunetheweb)
* Inline --noqa not always honoured by "sqlfluff fix" [#1502](https://github.com/sqlfluff/sqlfluff/pull/1502) [@barrywhart](https://github.com/barrywhart)
* Snowflake - fix parsing of UNPIVOT [#1505](https://github.com/sqlfluff/sqlfluff/pull/1505) [@michael-the1](https://github.com/michael-the1)
* Better parsing of DATEADD function [#1486](https://github.com/sqlfluff/sqlfluff/pull/1486) [@jpers36](https://github.com/jpers36)
* Fix handling of ISNULL and NOTNULL keywords [#1483](https://github.com/sqlfluff/sqlfluff/pull/1483) [@leamingrad](https://github.com/leamingrad)
* Improved test case names [#1501](https://github.com/sqlfluff/sqlfluff/pull/1501) [@ttomasz](https://github.com/ttomasz)
* Exasol: Fix CREATE TABLE in-/outline constraint / Adjusted DISTRIBUTE/PARTITION clause [#1491](https://github.com/sqlfluff/sqlfluff/pull/1491) [@sti0](https://github.com/sti0)
* Add support for SnowSQL variables [#1497](https://github.com/sqlfluff/sqlfluff/pull/1497) [@samlader](https://github.com/samlader)
* Ignore erroneous newline segments in L016 (e.g. Jinja for loops) [#1494](https://github.com/sqlfluff/sqlfluff/pull/1494) [@tunetheweb](https://github.com/tunetheweb)
* Indentation error on Jinja templated test case [#1444](https://github.com/sqlfluff/sqlfluff/pull/1444) [@barrywhart](https://github.com/barrywhart)
* Improve EXASOL dialect [#1484](https://github.com/sqlfluff/sqlfluff/pull/1484) [@sti0](https://github.com/sti0)
* T-SQL dialect - add support for CONVERT() special function [#1489](https://github.com/sqlfluff/sqlfluff/pull/1489) [@jpers36](https://github.com/jpers36)
* Allow Postgres column references to use `AT TIME ZONE` [#1485](https://github.com/sqlfluff/sqlfluff/pull/1485) [@leamingrad](https://github.com/leamingrad)
* T-SQL dialect - provide alternate ASA PR incorporating ASA into T-SQL [#1478](https://github.com/sqlfluff/sqlfluff/pull/1478) [@jpers36](https://github.com/jpers36)
* Modest parser performance improvement [#1475](https://github.com/sqlfluff/sqlfluff/pull/1475) [@NathanHowell](https://github.com/NathanHowell)
* Disable rule L033 for dialects that do not support it (e.g. Exasol, Postgres) [#1482](https://github.com/sqlfluff/sqlfluff/pull/1482) [@tunetheweb](https://github.com/tunetheweb)
* Adding a new BaseFileSegment class for FileSegments to inherit from [#1473](https://github.com/sqlfluff/sqlfluff/pull/1473) [@sti0](https://github.com/sti0)
* EXASOL_FS: Fix adapter script type [#1480](https://github.com/sqlfluff/sqlfluff/pull/1480) [@sti0](https://github.com/sti0)
* Dialect/tsql update - added pivot / unpivot, view support, sequence support on table creation [#1469](https://github.com/sqlfluff/sqlfluff/pull/1469) [@ericmuijs](https://github.com/ericmuijs)
* Correct typo in SQLFluff name [#1470](https://github.com/sqlfluff/sqlfluff/pull/1470) [@tunetheweb](https://github.com/tunetheweb)
* Stop L008 from adding spaces for simple SELECTs [#1461](https://github.com/sqlfluff/sqlfluff/pull/1461) [@CyberShadow](https://github.com/CyberShadow)
* Add SQLite dialect [#1453](https://github.com/sqlfluff/sqlfluff/pull/1453) [@tunetheweb](https://github.com/tunetheweb)
* Fix Windows Clause for Exasol [#1463](https://github.com/sqlfluff/sqlfluff/pull/1463) [@tunetheweb](https://github.com/tunetheweb)
* Add CHECK constraint syntax to ANSI SQL [#1451](https://github.com/sqlfluff/sqlfluff/pull/1451) [@tunetheweb](https://github.com/tunetheweb)
* Move Exasol test statements fixtures from Python to SQL files [#1449](https://github.com/sqlfluff/sqlfluff/pull/1449) [@tunetheweb](https://github.com/tunetheweb)
* Fix spelling of "preceding" [#1455](https://github.com/sqlfluff/sqlfluff/pull/1455) [@jpers36](https://github.com/jpers36)
* Add NORMALIZE to Teradata dialect [#1448](https://github.com/sqlfluff/sqlfluff/pull/1448) [@tunetheweb](https://github.com/tunetheweb)
* Add @ and $ symbols to Exasol to avoid lexing errors [#1447](https://github.com/sqlfluff/sqlfluff/pull/1447) [@tunetheweb](https://github.com/tunetheweb)
* Stop fix adding then removing whitespace [#1443](https://github.com/sqlfluff/sqlfluff/pull/1443) [@barrywhart](https://github.com/barrywhart)
* Stop exception in L016 for long Jinja comments [#1440](https://github.com/sqlfluff/sqlfluff/pull/1440) [@tunetheweb](https://github.com/tunetheweb)
* Fix some issues where the SQL file is corrupted by lint "fixes" in or near Jinja loops [#1431](https://github.com/sqlfluff/sqlfluff/pull/1431) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Remove Limit and NamedWindow segments as not supported in T-SQL [#1420](https://github.com/sqlfluff/sqlfluff/pull/1420) [@jpers36](https://github.com/jpers36)
* Fix runtime error (IndexError) when linting file with jinja "if" [#1430](https://github.com/sqlfluff/sqlfluff/pull/1430) [@barrywhart](https://github.com/barrywhart)
* Add Hive dialect (#985) [@satish-ravi](https://github.com/satish-ravi)
* Further fix for L036 [#1428](https://github.com/sqlfluff/sqlfluff/pull/1428) [@tunetheweb](https://github.com/tunetheweb)
* Add default parameter to dbt "var" macro stub [#1426](https://github.com/sqlfluff/sqlfluff/pull/1426) [@CyberShadow](https://github.com/CyberShadow)

## [0.6.6] - 2021-09-20

Fixed some of our autofix rules where running `fix` sometimes made unintended changes. Added config to rules L011 and L012 to allow preferring implicit aliasing. Also further improved our Postgres support and documentation.
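That aliasing preference is set per rule in `.sqlfluff` — a minimal sketch,
assuming the `aliasing` option name these rules use (`explicit` is the default;
check the rule documentation for your version):

```ini
[sqlfluff:rules:L011]
aliasing = implicit

[sqlfluff:rules:L012]
aliasing = implicit
```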
### What’s Changed

* Rule L036 bug fixes [#1427](https://github.com/sqlfluff/sqlfluff/pull/1427) [@tunetheweb](https://github.com/tunetheweb)
* Added support for psql meta commands to Postgres [#1423](https://github.com/sqlfluff/sqlfluff/pull/1423) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remaining line endings [#1415](https://github.com/sqlfluff/sqlfluff/pull/1415) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Remove match possibilities for segments with no T-SQL equivalent [#1416](https://github.com/sqlfluff/sqlfluff/pull/1416) [@jpers36](https://github.com/jpers36)
* Fix generate error on test file with just a comment [#1413](https://github.com/sqlfluff/sqlfluff/pull/1413) [@tunetheweb](https://github.com/tunetheweb)
* Misc fixes to workflow files [#1412](https://github.com/sqlfluff/sqlfluff/pull/1412) [@tunetheweb](https://github.com/tunetheweb)
* Added support for escape character strings to Postgres [#1409](https://github.com/sqlfluff/sqlfluff/pull/1409) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Issue 845: L016 should compute line length prior to template expansion [#1411](https://github.com/sqlfluff/sqlfluff/pull/1411) [@barrywhart](https://github.com/barrywhart)
* Add .editorconfig config and enforce style rules [#1410](https://github.com/sqlfluff/sqlfluff/pull/1410) [@tunetheweb](https://github.com/tunetheweb)
* Allow optional enforcing of implicit aliasing of tables (L011) and columns (L012) [#1402](https://github.com/sqlfluff/sqlfluff/pull/1402) [@tunetheweb](https://github.com/tunetheweb)
* Better error messages on error [#1407](https://github.com/sqlfluff/sqlfluff/pull/1407) [@tunetheweb](https://github.com/tunetheweb)
* Add README on how to generate docs [#1403](https://github.com/sqlfluff/sqlfluff/pull/1403) [@tunetheweb](https://github.com/tunetheweb)
* Fix extra underscores in case rules (L010 and L014) [#1396](https://github.com/sqlfluff/sqlfluff/pull/1396) [@tunetheweb](https://github.com/tunetheweb)
* Remove unused deps in tox test docbuild [#1406](https://github.com/sqlfluff/sqlfluff/pull/1406) [@zhongjiajie](https://github.com/zhongjiajie)
* Prevent CodeCov commenting on coverage differences too early [#1404](https://github.com/sqlfluff/sqlfluff/pull/1404) [@tunetheweb](https://github.com/tunetheweb)
* Fix "sqlfluff fix compatible" rules indenting too much in documentation [#1405](https://github.com/sqlfluff/sqlfluff/pull/1405) [@tunetheweb](https://github.com/tunetheweb)
* Fix documentation SQL highlight error [#1393](https://github.com/sqlfluff/sqlfluff/pull/1393) [@zhongjiajie](https://github.com/zhongjiajie)
* Support TIMESTAMPTZ in TIME ZONE queries for Postgres [#1398](https://github.com/sqlfluff/sqlfluff/pull/1398) [@tunetheweb](https://github.com/tunetheweb)
* Improve datatypes: CHARACTER VARYING for ANSI and Postgres, and also TIMESTAMP AT TIME ZONE for Postgres [#1378](https://github.com/sqlfluff/sqlfluff/pull/1378) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve rules L003 and L019 by processing multi-line fixes in one pass. [#1391](https://github.com/sqlfluff/sqlfluff/pull/1391) [@barrywhart](https://github.com/barrywhart)
* Correct codecov badge for Docs website [#1390](https://github.com/sqlfluff/sqlfluff/pull/1390) [@tunetheweb](https://github.com/tunetheweb)
* Change fix to use non-zero exit code if unfixable [#1389](https://github.com/sqlfluff/sqlfluff/pull/1389) [@tunetheweb](https://github.com/tunetheweb)
* Bugfix, frame clauses in window functions were not working [#1381](https://github.com/sqlfluff/sqlfluff/pull/1381) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Handle template and unfixable errors when fixing stdin [#1385](https://github.com/sqlfluff/sqlfluff/pull/1385) [@nolanbconaway](https://github.com/nolanbconaway)
* CREATE, ALTER, DROP SEQUENCE support, with Postgres extensions [#1380](https://github.com/sqlfluff/sqlfluff/pull/1380) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres analyze [#1377](https://github.com/sqlfluff/sqlfluff/pull/1377) [@WittierDinosaur](https://github.com/WittierDinosaur)
* L016: "sqlfluff fix" adds too many newlines [#1382](https://github.com/sqlfluff/sqlfluff/pull/1382) [@barrywhart](https://github.com/barrywhart)
* L003 fix mixes hanging and clean indents [#1383](https://github.com/sqlfluff/sqlfluff/pull/1383) [@barrywhart](https://github.com/barrywhart)
* L034 should not fix inside "INSERT" or "CREATE TABLE AS SELECT" [#1384](https://github.com/sqlfluff/sqlfluff/pull/1384) [@barrywhart](https://github.com/barrywhart)

## [0.6.5] - 2021-09-10

### What’s Changed

This release includes initial support for Transact-SQL (T-SQL), much better Postgres and Snowflake support, improvements to our documentation, 100% coverage for Python code (with a small number of accepted exceptions), along with numerous other bug fixes and improvements.

Many thanks to all the [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors) helping to improve SQLFluff!
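With the new dialect in place, T-SQL files can be checked by selecting the
dialect explicitly on the command line (the dialect key is `tsql`); for
example:

```console
sqlfluff lint --dialect tsql my_query.sql
```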
### Complete list of changes

* Simplify rule L030 and fix recursion bug ([#1376](https://github.com/sqlfluff/sqlfluff/pull/1376)) ([@tunetheweb](https://github.com/tunetheweb))
* Move from CircleCI to GitHub Actions for Continuous Integration ([#1361](https://github.com/sqlfluff/sqlfluff/pull/1361)) ([@tunetheweb](https://github.com/tunetheweb))
* Postgres enhance create index ([#1375](https://github.com/sqlfluff/sqlfluff/pull/1375)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Initial support for Transact-SQL (T-SQL) dialect ([#1313](https://github.com/sqlfluff/sqlfluff/pull/1313)) ([@ericmuijs](https://github.com/ericmuijs))
* Handle initial whitespace lines in rule L001 ([#1372](https://github.com/sqlfluff/sqlfluff/pull/1372)) ([@tunetheweb](https://github.com/tunetheweb))
* Postgres Improved DEFAULT column constraint support ([#1373](https://github.com/sqlfluff/sqlfluff/pull/1373)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Minor grammar, spelling, and readability fixes ([#1370](https://github.com/sqlfluff/sqlfluff/pull/1370)) ([@Fdawgs](https://github.com/Fdawgs))
* Issues 854, 1321: Handle Jinja leading whitespace-only lines ([#1364](https://github.com/sqlfluff/sqlfluff/pull/1364)) ([@barrywhart](https://github.com/barrywhart))
* Enhanced the Postgres grammar for create table ([#1369](https://github.com/sqlfluff/sqlfluff/pull/1369)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Added ability to Grant and Revoke Grant to multiple users ([#1367](https://github.com/sqlfluff/sqlfluff/pull/1367)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Add BigQuery Parameter Lexing and Parsing ([#1363](https://github.com/sqlfluff/sqlfluff/pull/1363)) ([@rileyrunnoe](https://github.com/rileyrunnoe))
* Rule L030 bugfix ([#1360](https://github.com/sqlfluff/sqlfluff/pull/1360)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Add Postgres dialect for COMMENT ON ([#1358](https://github.com/sqlfluff/sqlfluff/pull/1358)) ([@miketheman](https://github.com/miketheman))
* Allow ORDER BY and LIMIT after QUALIFY in BigQuery ([#1362](https://github.com/sqlfluff/sqlfluff/pull/1362)) ([@tunetheweb](https://github.com/tunetheweb))
* Correct CircleCI badge reference ([#1359](https://github.com/sqlfluff/sqlfluff/pull/1359)) [@miketheman](https://github.com/miketheman)
* Minor grammar corrections to documentation ([#1355](https://github.com/sqlfluff/sqlfluff/pull/1355)) [@miketheman](https://github.com/miketheman)
* Pytest coverage exceptions to get us to 100% coverage! ([#1346](https://github.com/sqlfluff/sqlfluff/pull/1346)) [@tunetheweb](https://github.com/tunetheweb)
* Greatly improved Snowflake syntax support ([#1353](https://github.com/sqlfluff/sqlfluff/pull/1353)) [@tunetheweb](https://github.com/tunetheweb)
* Postgres keyword support ([#1347](https://github.com/sqlfluff/sqlfluff/pull/1347)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Added full support for Postgres's ALTER DEFAULT PRIVILEGES. ([#1350](https://github.com/sqlfluff/sqlfluff/pull/1350)) [@creste](https://github.com/creste)
* Show all LintResult in Rule_L020 ([#1348](https://github.com/sqlfluff/sqlfluff/pull/1348)) [@zhongjiajie](https://github.com/zhongjiajie)
* Enhance L010 error message based on configuration ([#1351](https://github.com/sqlfluff/sqlfluff/pull/1351)) [@zhongjiajie](https://github.com/zhongjiajie)
* Remove unused variable insert_str ([#1352](https://github.com/sqlfluff/sqlfluff/pull/1352)) [@zhongjiajie](https://github.com/zhongjiajie)
* Pytest coverage exceptions for Core code - part 1 ([#1343](https://github.com/sqlfluff/sqlfluff/pull/1343)) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Allow Qualify Clause for UnorderedSelectStatements ([#1341](https://github.com/sqlfluff/sqlfluff/pull/1341)) [@tunetheweb](https://github.com/tunetheweb)
* Postgres "ALTER TABLE" enhancement, and timestamp bug fix ([#1338](https://github.com/sqlfluff/sqlfluff/pull/1338)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve pytest coverage for non-core code ([#1319](https://github.com/sqlfluff/sqlfluff/pull/1319)) [@tunetheweb](https://github.com/tunetheweb)
* Support additional GRANTs in Postgres ([#1339](https://github.com/sqlfluff/sqlfluff/pull/1339)) [@creste](https://github.com/creste)
* Allow optional alias for BigQuery WITH OFFSET ([#1330](https://github.com/sqlfluff/sqlfluff/pull/1330)) [@tunetheweb](https://github.com/tunetheweb)
* Improve function support in Postgres dialect ([#1336](https://github.com/sqlfluff/sqlfluff/pull/1336)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Using GitHub star instead of watch in docs ([#1337](https://github.com/sqlfluff/sqlfluff/pull/1337)) [@zhongjiajie](https://github.com/zhongjiajie)
* Add unittest for rules docstring ([#1335](https://github.com/sqlfluff/sqlfluff/pull/1335)) [@zhongjiajie](https://github.com/zhongjiajie)
* Bugfix PR, fixes issue [#1333](https://github.com/sqlfluff/sqlfluff/issues/1333), wherein test___main___help() defaults to your default Python installation ([#1334](https://github.com/sqlfluff/sqlfluff/pull/1334)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve wording of L007 now the before/after is configurable ([#1325](https://github.com/sqlfluff/sqlfluff/pull/1325)) [@tunetheweb](https://github.com/tunetheweb)
* Fix a couple of small issues with CI jobs ([#1326](https://github.com/sqlfluff/sqlfluff/pull/1326)) [@tunetheweb](https://github.com/tunetheweb)
* Add updated sqlfluff graphics and source. ([#1315](https://github.com/sqlfluff/sqlfluff/pull/1315)) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Issue 1277: Enforce that YML test files are computer generated and not edited ([#1279](https://github.com/sqlfluff/sqlfluff/pull/1279)) [@barrywhart](https://github.com/barrywhart)
* Fix typo in README ([#1320](https://github.com/sqlfluff/sqlfluff/pull/1320)) [@tunetheweb](https://github.com/tunetheweb)
* Fix link in README ([#1316](https://github.com/sqlfluff/sqlfluff/pull/1316)) [@jmks](https://github.com/jmks)
* Update documentation to make the project more discoverable ([#1311](https://github.com/sqlfluff/sqlfluff/pull/1311)) [@tunetheweb](https://github.com/tunetheweb)
* Show latest version number on unsupported Python error message ([#1307](https://github.com/sqlfluff/sqlfluff/pull/1307)) [@zhongjiajie](https://github.com/zhongjiajie)
* Fix typo in github PR template ([#1308](https://github.com/sqlfluff/sqlfluff/pull/1308)) [@zhongjiajie](https://github.com/zhongjiajie)

## [0.6.4] - 2021-08-20

### Added
* Added support for empty WINDOW specifications ([#1293](https://github.com/sqlfluff/sqlfluff/pull/1293)) [@matthieucan](https://github.com/matthieucan)
* Added auto release drafter ([#1287](https://github.com/sqlfluff/sqlfluff/pull/1287)) [@tunetheweb](https://github.com/tunetheweb)

### Changed
* Fix typo on the In the Wild page ([#1285](https://github.com/sqlfluff/sqlfluff/pull/1285)) [@tunetheweb](https://github.com/tunetheweb)
* Fix spacing issue for BigQuery UNNEST statement for rules L003 and L025 ([#1303](https://github.com/sqlfluff/sqlfluff/pull/1303)) [@tunetheweb](https://github.com/tunetheweb)
* Update GitHub templates ([#1297](https://github.com/sqlfluff/sqlfluff/pull/1297)) [@tunetheweb](https://github.com/tunetheweb)
* Allow BigQuery UDF with triple quoted bodies to pass rule L048 ([#1300](https://github.com/sqlfluff/sqlfluff/pull/1300)) [@tunetheweb](https://github.com/tunetheweb)
* Add Parameterless Functions and more function names support to BigQuery ([#1299](https://github.com/sqlfluff/sqlfluff/pull/1299)) [@tunetheweb](https://github.com/tunetheweb)
* Add release drafter ([#1295](https://github.com/sqlfluff/sqlfluff/pull/1295)) [@tunetheweb](https://github.com/tunetheweb)
* Support empty OVER() clause in Window Specification ([#1294](https://github.com/sqlfluff/sqlfluff/pull/1294)) [@tunetheweb](https://github.com/tunetheweb)

## [0.6.3] - 2021-08-16

### Added
- Support for primary index name, collect stats improvement, COMMENT statement for Teradata dialect [#1232](https://github.com/sqlfluff/sqlfluff/issues/1232)
- Support config for L007 to prefer end of line operators [#1261](https://github.com/sqlfluff/sqlfluff/issues/1261)
- Support for DETERMINISTIC user defined functions in BigQuery dialect [#1251](https://github.com/sqlfluff/sqlfluff/issues/1251)
- Support more identifiers in BigQuery dialect [#1253](https://github.com/sqlfluff/sqlfluff/issues/1253)
- Support function member field references in BigQuery dialect [#1255](https://github.com/sqlfluff/sqlfluff/issues/1255)
- Support alternative indentation for USING and ON clauses [#1250](https://github.com/sqlfluff/sqlfluff/issues/1250)
- Support COUNT(0) preference over COUNT(*) or COUNT(1) [#1260](https://github.com/sqlfluff/sqlfluff/issues/1260)
- Support for BigQuery "CREATE table OPTIONS ( description = 'desc' )" [#1205](https://github.com/sqlfluff/sqlfluff/issues/1205)
- Support wildcard member field references in BigQuery dialect [#1269](https://github.com/sqlfluff/sqlfluff/issues/1269)
- Support ARRAYS of STRUCTs in BigQuery dialect [#1271](https://github.com/sqlfluff/sqlfluff/issues/1271)
- Support fields of field references in BigQuery dialect [#1276](https://github.com/sqlfluff/sqlfluff/issues/1276)
- Support OFFSET and ORDINAL clauses of Array Functions in BigQuery dialect [#1171](https://github.com/sqlfluff/sqlfluff/issues/1171)
- Added check for generated YML files [#1277](https://github.com/sqlfluff/sqlfluff/issues/1277)
- Support QUALIFY in BigQuery dialect [#1242](https://github.com/sqlfluff/sqlfluff/issues/1242)

### Changed
- Fix comma removed by L019 [#939](https://github.com/sqlfluff/sqlfluff/issues/939)
- Update L019 (leading/trailing comma rule) so it doesn't run on unparsable code.
- The `--nocolor` command-line option should suppress emoji output [#1246](https://github.com/sqlfluff/sqlfluff/issues/1246)
- Added HTTP Archive to the [In The Wild page](https://docs.sqlfluff.com/en/stable/inthewild.html)

## [0.6.2] - 2021-07-22

### Added
- Support for looping statements (loop, while, repeat) and supporting statements to mysql dialect [#1180](https://github.com/sqlfluff/sqlfluff/issues/1180)

### Changed
- Added dbt 0.20.* to the default test suite.
- Updated manifest loading in dbt 0.20.* to use the new `ManifestLoader` [#1220](https://github.com/sqlfluff/sqlfluff/pull/1220)
- Handle newlines in rule list configuration in .sqlfluff [#1215](https://github.com/sqlfluff/sqlfluff/issues/1215)
- Fix looping interaction between L008 and L030 [#1207](https://github.com/sqlfluff/sqlfluff/issues/1207)

## [0.6.1] - 2021-07-16

### Added
- Linting output now supports GitHub Actions [#1190](https://github.com/sqlfluff/sqlfluff/issues/1190)
- Support for QUALIFY syntax specific to teradata dialect [#1184](https://github.com/sqlfluff/sqlfluff/issues/1184)
- Support for TRUNCATE statement [#1194](https://github.com/sqlfluff/sqlfluff/pull/1194)
- Support for prepared statement syntaxes specific to mysql dialect [#1147](https://github.com/sqlfluff/sqlfluff/issues/1147)
- Support for GET DIAGNOSTICS statement syntax specific to mysql dialect [#1148](https://github.com/sqlfluff/sqlfluff/issues/1148)
- Support for cursor syntax specific to mysql dialect [#1145](https://github.com/sqlfluff/sqlfluff/issues/1145)
- Support sequential shorthand casts [#1178](https://github.com/sqlfluff/sqlfluff/pull/1178)
- Support for select statement syntax specific to mysql dialect [#1175](https://github.com/sqlfluff/sqlfluff/issues/1175)
- Support for the CALL statement for the mysql dialect [#1144](https://github.com/sqlfluff/sqlfluff/issues/1144)
- Support for OVERLAPS predicate [#1091](https://github.com/sqlfluff/sqlfluff/issues/1091)
- Support for the CREATE/DROP PROCEDURE statement for the mysql dialect [#901](https://github.com/sqlfluff/sqlfluff/issues/901)
- Specific allowed/required syntaxes for CREATE/DROP FUNCTION within the mysql dialect [#901](https://github.com/sqlfluff/sqlfluff/issues/901)
- Support for DECLARE statement for the mysql dialect [#1140](https://github.com/sqlfluff/sqlfluff/issues/1140)
- Support for the IF-THEN-ELSEIF-ELSE syntax for the mysql dialect [#1140](https://github.com/sqlfluff/sqlfluff/issues/1140)
- Support for the DEFINER syntax for the mysql dialect [#1131](https://github.com/sqlfluff/sqlfluff/issues/1131)
- Preserve existing file encoding in the "fix" command. Partially addresses [#654](https://github.com/sqlfluff/sqlfluff/issues/654)
- Support for DECLARE and SET variable syntax for the BigQuery dialect [#1127](https://github.com/sqlfluff/sqlfluff/issues/1127)
- Support for ALTER TASK statement on Snowflake [#1211](https://github.com/sqlfluff/sqlfluff/pull/1211)

### Changed
- Fix runtime error in diff-cover plugin caused by new diff-cover release 6.1.0 [#1195](https://github.com/sqlfluff/sqlfluff/pull/1195)
- Resolved an issue with the snowflake dialect where backslash escaped single quoted strings led to fatal lexing errors [#1200](https://github.com/sqlfluff/sqlfluff/pull/1200)

### Contributors
- [@dflss](https://github.com/dflss) ([#1154](https://github.com/sqlfluff/sqlfluff/pull/1154))
- [@barrywhart](https://github.com/barrywhart) ([#1177](https://github.com/sqlfluff/sqlfluff/pull/1177), [#1195](https://github.com/sqlfluff/sqlfluff/pull/1195))
- [@niallrees](https://github.com/niallrees) ([#1178](https://github.com/sqlfluff/sqlfluff/pull/1178))
- [@barnabyshearer](https://github.com/barnabyshearer) ([#1194](https://github.com/sqlfluff/sqlfluff/pull/1194))
- [@silverbullettruck2001](https://github.com/silverbullettruck2001) ([#1141](https://github.com/sqlfluff/sqlfluff/pull/1141), [#1159](https://github.com/sqlfluff/sqlfluff/pull/1159), [#1161](https://github.com/sqlfluff/sqlfluff/pull/1161), [#1176](https://github.com/sqlfluff/sqlfluff/pull/1176), [#1179](https://github.com/sqlfluff/sqlfluff/pull/1179), [#1181](https://github.com/sqlfluff/sqlfluff/pull/1181), [#1193](https://github.com/sqlfluff/sqlfluff/pull/1193), [#1203](https://github.com/sqlfluff/sqlfluff/pull/1203))

## [0.6.0] - 2021-06-06

### Added
- Respect XDG base dirs on Mac OS ([#889](https://github.com/sqlfluff/sqlfluff/issues/889)).
- Added support for additional delimiters by creating a new DelimiterSegment in the ANSI dialect which defaults to the semicolon, but allows it to be more intuitive when overridden in a specific child dialect (mysql) ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added support for the DELIMITER statement in the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added support for function as a default column value ([#849](https://github.com/sqlfluff/sqlfluff/issues/849)).
- Add an `--include-metas` option for parse output to show the meta segments in the parse tree.
- Allow CREATE FUNCTION syntax without arguments [@bolajiwahab](https://github.com/bolajiwahab) [#1063](https://github.com/sqlfluff/sqlfluff/pull/1063).
- Added support for the CREATE/DROP PROCEDURE statement for the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added specific allowed/required syntaxes for CREATE/DROP FUNCTION within the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Now possible to run sqlfluff commands outside the project root when using the dbt templater.

### Changed
- Renamed --parallel CLI argument to --processes to be more accurate.
- L034 now ignores select statements which contain macros.
- L034 now ignores select statements which are part of a set expression, most commonly a union.
- Fix bug [#1082](https://github.com/sqlfluff/sqlfluff/issues/1082), adding support for BigQuery `select as struct '1' as bb, 2 as aa` syntax.
- Rationalisation of the placement of meta segments within templated queries to support more reliable indentation. This includes prioritising _longer_ invariant sections first and then dropping any shorter ones which would then be relatively out of place.
- Additional logging within the lexer and templater engines.
- Allow meta segments to parse within `Delimited` grammars which otherwise don't allow gaps. This is facilitated through an optional argument to `trim_non_code_segments`.
- Fix bug [#1079](https://github.com/sqlfluff/sqlfluff/issues/1079), addressing issues with L025 and L026 with BigQuery column references involving `STRUCT`.
- [#1080](https://github.com/sqlfluff/sqlfluff/issues/1080) Add SET SCHEMA and DROP SCHEMA support to ANSI dialect.

### Contributors
- [@bolajiwahab](https://github.com/bolajiwahab) ([#1063](https://github.com/sqlfluff/sqlfluff/pull/1063))
- [@silverbullettruck2001](https://github.com/silverbullettruck2001) ([#1126](https://github.com/sqlfluff/sqlfluff/pull/1126), [#1099](https://github.com/sqlfluff/sqlfluff/pull/1099), [#1141](https://github.com/sqlfluff/sqlfluff/pull/1141))

## [0.6.0a2] - 2021-05-27

### Changed
- Better exception handling for the simple parsing API (`sqlfluff.parse`), which now raises an exception which holds all potential parsing issues and prints nicely with more than one issue.
- Fix bug [#1037](https://github.com/sqlfluff/sqlfluff/issues/1037), in which fix logging had been sent to stdout when reading data from stdin.
- Add a little bit of fun on CLI exit 🎉!
- Disabled models in the dbt templater are now skipped entirely rather than returning an untemplated file.
- Add a changelog check to SQLFluff continuous integration.
- Fix bug [#1083](https://github.com/sqlfluff/sqlfluff/issues/1083), adding support for BigQuery named function arguments, used with functions such as [ST_GEOGFROMGEOJSON()](https://cloud.google.com/bigquery/docs/reference/standard-sql/geography_functions#st_geogfromgeojson)
- Update documentation links to sqlfluff-online.

## [0.6.0a1] - 2021-05-15

### Added
- Lint and fix parallelism using `--parallel` CLI argument
- Fix [#1051](https://github.com/sqlfluff/sqlfluff/issues/1051), adding support for bitwise operators `&`, `|`, `^`, `<<`, `>>`

## [0.5.6] - 2021-05-14
- Bugfix release for an issue in `L016` introduced in `0.5.4`.
- Fix for `L016` issue where `DISTINCT` keywords were mangled during fixing [#1024](https://github.com/sqlfluff/sqlfluff/issues/1024).

## [0.5.5] - 2021-05-13
- Bugfix release for an off-by-one error introduced in L016 as part of `0.5.4`.

## [0.5.4] - 2021-05-12

### Added
- Parsing of Postgres dollar quoted literals.
- Parsing of Postgres filter grammar.
- Parsing of "ALTER DEFAULT PRIVILEGES" Postgres statement.
- Parsing of Postgres non-explicit role granting and function execution.
- Fail early on fatal dbt templater failures.

### Changed
- Big rewrite of the lexer, segments and position markers for simplicity and to support future parallelism work.
- Fix to L036 which previously mangled whitespace.

## [0.5.3] - 2021-05-04

### Added
- [`L009`](https://docs.sqlfluff.com/en/stable/rules.html#sqlfluff.core.rules.Rule_L009) can now be enforced when `templater = dbt`.
- Parsing of `EXPLAIN`, `USE` statements.
- Parsing of `ALTER TABLE x RENAME TO y` syntax.
- Parsing of `ALTER SESSION` in snowflake.
- Parsing of numeric literals with exponents.
- Added rule codes to diff_cover output.

### Changed
- Fix `templater = dbt` L009 bug [#861](https://github.com/sqlfluff/sqlfluff/issues/861) where:
  - `sqlfluff lint` would incorrectly always return `L009 | Files must end with a trailing newline.`
  - `sqlfluff fix` would remove trailing newlines when `exclude_rules = L009`.
- Fix bug with BigQuery comparison operators.
- Fix recursion bug with L045.
- Fix tuple index bug with L016.
- Fix mangled coalesce bug with L043.
- Fix Jinja templating error with _UnboundLocalError_.
- Improve array parsing.
- Simplify bracket parsing.
- Speed up L010 by caching the capitalisation policy.
- Output of `sqlfluff dialects` is now sorted.
- Handle disabled `dbt` models.

## [0.5.2] - 2021-04-11

### Changed
- Fix false positive in L045 when CTE used in WHERE clause ([#944](https://github.com/sqlfluff/sqlfluff/issues/944))
- Logging and readout now includes more detail and a notification of dbt compilation.
- Fix bug in L048 which flagged adjoining commas as failures.
- Fix bug in L019 with inline comments.
- Fix bug in L036 with multiple newlines.
- Skip disabled dbt models. ([#931](https://github.com/sqlfluff/sqlfluff/issues/931)).
- Support "USE" statement in ANSI ([#902](https://github.com/sqlfluff/sqlfluff/issues/902)).
- Parse explain statement ([#893](https://github.com/sqlfluff/sqlfluff/issues/893)).

## [0.5.1] - 2021-04-09

### Changed
- Parsing improvements around optional brackets.
- Better parsing of set operators (like `UNION`) and how they interact with `ORDER BY` clauses.
- Support for comparison operators like `~`.
- Fix parsing of snowflake `SAMPLE` syntax.
- Fix recursion issues in L044.
- `SPACE` keyword now has no special meaning in the postgres dialect.

## [0.5.0] - 2021-04-05

### Added
- `pascal` (PascalCase) `capitalisation_policy` option for L014 (unquoted identifiers)
- `only_aliases` configuration option for L014 (unquoted identifiers)
- Dialects now have more advanced dependency options to allow less repetition between related dialects. The methods `get_segment` and `get_grammar` can be used on unexpanded grammars to access elements of the parent grammars. The `copy` method on grammars can be used to copy with alterations.
- Rule L046 to lint whitespace within jinja tags.
- Enable and Disable syntax for [ignoring violations from ranges of lines](https://docs.sqlfluff.com/en/latest/configuration.html#ignoring-line-ranges).

### Changed
- Renamed the BaseCrawler class to BaseRule. This is the base class for all rules. This is a breaking change for any custom rules that have been added via plugins or by forking the SQLFluff repo.
- Renamed `sqlfluff.rules()` to `sqlfluff.list_rules()` and `sqlfluff.dialects()` to `sqlfluff.list_dialects()` due to naming conflicts with the now separate `sqlfluff.dialects` module.
- Extracted dialect definitions from the `sqlfluff.core` module so that each dialect is better isolated from each other. This also allows more focused testing and the potential for dialect plugins in future. Dialects are now only imported as needed at runtime. All dialects should now be accessed using the selector methods in `sqlfluff.core.dialects` rather than importing from `sqlfluff.dialects` directly.
- Add support for `ALTER USER` commands in Snowflake dialect.
- Added describe statement to ANSI dialect.
- Renamed `capitalisation_policy` to `extended_capitalisation_policy` for L014 to reflect the fact that it now accepts more options (`pascal`) than the regular `capitalisation_policy` still used by L010 and others.
- Replaced `only_aliases` config with `unquoted_identifiers_policy` and added it to rule L014 in addition to L029.
- Parse structure of `FROM` clauses to better represent nested joins and table functions.
- Parse structure of expressions to avoid unnecessary nesting and overly recursive method calls.

## [0.4.1] - 2021-02-25

### Added
- Initial architecture for rule plugins to allow custom rules. This initial release should be considered _beta_ until the release of 0.5.0.
- Add tests for dbt 0.19.0.
- General increased parsing coverage.
- Added some missing Postgres syntax elements.
- Added some basic introspection API elements to output what dialects and rules are available for use within the API.

### Changed
- Fix several Snowflake parsing bugs.
- Refactor `FROM` clause to handle flattens after joins.
- Fix .get_table_references() in Snowflake dialect.
- Macros defined within the .sqlfluff config will take precedence over the macros defined in the path that is defined with config value `sqlfluff:templater:jinja:load_macros_from_path`.
- Fix Snowflake indent parsing.
- Fixed incorrect parsing of syntax-like elements in comments.
- Altered parsing of `NULL` keywords, so they parse as Literals where appropriate.
- Fixed bug in expression parsing leading to recursion errors.

## [0.4.0] - 2021-02-14

### Added
- Public API to enable people to import `sqlfluff` as a python module and call `parse`, `lint` and `fix` within their own projects (see the sketch after this list). See [the docs](https://docs.sqlfluff.com/en/latest/api.html) for more information. ([#501](https://github.com/sqlfluff/sqlfluff/pull/501))
- The ability to use `dbt` as a templating engine directly allowing richer and more accurate linting around `dbt` macros (and packages related to `dbt`). For more info see [the docs](https://docs.sqlfluff.com/en/latest/configuration.html#dbt-project-configuration). ([#508](https://github.com/sqlfluff/sqlfluff/pull/508))
- Support for modulo (`%`) operator. ([#447](https://github.com/sqlfluff/sqlfluff/pull/447))
- A limit in the internal fix routines to catch any infinite loops. ([#494](https://github.com/sqlfluff/sqlfluff/pull/494))
- Added the `.is_type()` method on segments to more intelligently deal with type matching in rules when inheritance is at play.
- Added the ability for the user to add their own rules when interacting with the `Linter` directly using `user_rules`.
- Added L034 'Fields should be stated before aggregates / window functions' per [dbt coding conventions](https://github.com/fishtown-analytics/corp/blob/master/dbt_coding_conventions.md#sql-style-guide). ([#495](https://github.com/sqlfluff/sqlfluff/pull/495))
- Templating tags, such as `{{ variables }}`, `{# comments #}` and `{% loops %}` (in jinja) now have placeholders in the parsed structure. Rule L003 (indentation), also now respects these placeholders so that their indentation is linted accordingly. For loop or block tags, they also generate an `Indent` and `Dedent` tag accordingly (which can be enabled or disabled) with a configuration value so that indentation around these functions can be linted accordingly. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))
- MyPy type linting into a large proportion of the core library. ([#526](https://github.com/sqlfluff/sqlfluff/pull/526), [#580](https://github.com/sqlfluff/sqlfluff/pull/580))
- Config values specific to a file can now be defined using a comment line starting with `-- sqlfluff:`. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))
- Added documentation for `--noqa:` use in rules. ([#552](https://github.com/sqlfluff/sqlfluff/pull/552))
- Added `pre-commit` hooks for `lint` and `fix`. ([#576](https://github.com/sqlfluff/sqlfluff/pull/576))
- Added a fix routine for Rule L019 (comma placement). ([#575](https://github.com/sqlfluff/sqlfluff/pull/575))
- Added Rule L031 to enforce "avoid using alias in the `FROM`/`JOIN` clauses" from the `dbt` coding conventions. ([#473](https://github.com/sqlfluff/sqlfluff/pull/473), [#479](https://github.com/sqlfluff/sqlfluff/pull/479))
- Added Rule L032 to enforce "do not use `USING`" from the `dbt` coding conventions. ([#487](https://github.com/sqlfluff/sqlfluff/pull/487))
- Added Rule L033 to enforce "prefer `UNION ALL` to `UNION *`" from the `dbt` coding conventions. ([#489](https://github.com/sqlfluff/sqlfluff/pull/489))
- Added Rule L034 to enforce "fields should be stated before aggregate/window functions" from the `dbt` coding conventions. ([#495](https://github.com/sqlfluff/sqlfluff/pull/495))
- Added Rule L038 to forbid (or require) trailing commas in select clauses. ([#362](https://github.com/sqlfluff/sqlfluff/pull/752))
- Added Rule L039 to lint unnecessary whitespace between elements. ([#502](https://github.com/sqlfluff/sqlfluff/pull/753))
- Added a fix routine for L015. ([#732](https://github.com/sqlfluff/sqlfluff/pull/732))
- Added a fix routine for L025. ([#404](https://github.com/sqlfluff/sqlfluff/pull/741))
- Adopted the `black` coding style. ([#485](https://github.com/sqlfluff/sqlfluff/pull/485))
- Added validation and documentation for rule configuration options. ([#462](https://github.com/sqlfluff/sqlfluff/pull/462))
- Added documentation for which rules are fixable. ([#594](https://github.com/sqlfluff/sqlfluff/pull/594))
- Added `EPOCH` keyword for postgres dialect. ([#522](https://github.com/sqlfluff/sqlfluff/pull/522))
- Added column index identifier in snowflake dialect. ([#458](https://github.com/sqlfluff/sqlfluff/pull/458))
- Added `USE` statement to the snowflake dialect. ([#537](https://github.com/sqlfluff/sqlfluff/pull/537))
- Added `CODE_OF_CONDUCT` to the project. ([#471](https://github.com/sqlfluff/sqlfluff/pull/471))
- Added `ISNULL` and `NOTNULL` keywords to ansi dialect. ([#441](https://github.com/sqlfluff/sqlfluff/pull/441))
- Added support for python 3.9. ([#482](https://github.com/sqlfluff/sqlfluff/pull/482))
- Added `requirements_dev.txt` for local testing/linting. ([#500](https://github.com/sqlfluff/sqlfluff/pull/500))
- Added CLI option `--disregard-sqlfluffignores` to allow direct linting of files in the `.sqlfluffignore`. ([#486](https://github.com/sqlfluff/sqlfluff/pull/486))
- Added `dbt` `incremental` macro. ([#363](https://github.com/sqlfluff/sqlfluff/pull/363))
- Added links to cockroachlabs expression grammars in ansi dialect. ([#592](https://github.com/sqlfluff/sqlfluff/pull/592))
- Added favicon to the docs website. ([#589](https://github.com/sqlfluff/sqlfluff/pull/589))
- Added `CREATE FUNCTION` syntax for postgres and for bigquery. ([#325](https://github.com/sqlfluff/sqlfluff/pull/325))
- Added `CREATE INDEX` and `DROP INDEX` for mysql. ([#740](https://github.com/sqlfluff/sqlfluff/pull/748))
- Added `IGNORE NULLS`, `RESPECT NULLS`, `GENERATE_DATE_ARRAY` and `GENERATE_TIMESTAMP_ARRAY` for bigquery. ([#667](https://github.com/sqlfluff/sqlfluff/pull/727), [#527](https://github.com/sqlfluff/sqlfluff/pull/726))
- Added `CREATE` and `CREATE ... CLONE` for snowflake. ([#539](https://github.com/sqlfluff/sqlfluff/pull/670))
- Added support for EXASOL. ([#684](https://github.com/sqlfluff/sqlfluff/pull/684))
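As a rough sketch of the public API introduced above — using only the three
documented entry points named in that bullet (`parse`, `lint` and `fix`; exact
signatures and return shapes have evolved between versions, so treat this as
illustrative rather than authoritative):

```python
import sqlfluff

# Lint a SQL string: returns a list of violation records.
violations = sqlfluff.lint("SELECT  1 FROM tbl\n")

# Fix a SQL string: returns the corrected SQL text.
fixed = sqlfluff.fix("SELECT  1 FROM tbl\n")

# Parse a SQL string into a parse tree (raises on fatal parsing errors).
parsed = sqlfluff.parse("SELECT 1 FROM tbl\n")
```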
([#684](https://github.com/sqlfluff/sqlfluff/pull/684)) ### Changed - Fixed parsing of semi-structured objects in the snowflake of dialects with whitespace gaps. [#634](https://github.com/sqlfluff/sqlfluff/issues/635) - Handle internal errors elegantly, reporting the stacktrace and the error-surfacing file. [#632](https://github.com/sqlfluff/sqlfluff/pull/632) - Improve message for when an automatic fix is not available for L004. [#633](https://github.com/sqlfluff/sqlfluff/issues/633) - Linting errors raised on templated sections are now ignored by default and added a configuration value to show them. ([#713](https://github.com/sqlfluff/sqlfluff/pull/745)) - Big refactor of logging internally. `Linter` is now decoupled from logging so that it can be imported directly by subprojects without needing to worry about weird output or without the log handing getting in the way of your project. ([#460](https://github.com/sqlfluff/sqlfluff/pull/460)) - Linting errors in the final file are now reported with their position in the source file rather than in the templated file. This means when using sqlfluff as a plugabble library within an IDE, the references match the file which is being edited. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541)) - Created new Github Organisation (https://github.com/sqlfluff) and migrated from https://github.com/alanmcruickshank/sqlfluff to https://github.com/sqlfluff/sqlfluff. ([#444](https://github.com/sqlfluff/sqlfluff/issues/444)) - Changed the handling of `*` and `a.b.*` expressions to have their own expressions. Any dependencies on this structure downstream will be broken. This also fixes the linting of both kinds of expressions with regard to L013 and L025. ([#454](https://github.com/sqlfluff/sqlfluff/pull/454)) - Refactor of L022 to handle poorly formatted CTEs better. ([#494](https://github.com/sqlfluff/sqlfluff/pull/494)) - Restriction of L017 to only fix when it would delete whitespace or newlines. ([#598](https://github.com/sqlfluff/sqlfluff/pull/756)) - Added a configuration value to L016 to optionally ignore lines containing only comments. ([#299](https://github.com/sqlfluff/sqlfluff/pull/751)) - Internally added an `EphemeralSegment` to aid with parsing efficiency without altering the end structure of the query. ([#491](https://github.com/sqlfluff/sqlfluff/pull/491)) - Split `ObjectReference` into `ColumnReference` and `TableReference` for more useful API access to the underlying structure. ([#504](https://github.com/sqlfluff/sqlfluff/pull/504)) - `KeywordSegment` and the new `SymbolSegment` both now inherit from `_ProtoKeywordSegment` which allows symbols to match in a very similar way to keywords without later appearing with the `type` of `keyword`. ([#504](https://github.com/sqlfluff/sqlfluff/pull/504)) - Introduced the `Parser` class to parse a lexed query rather than relying on users to instantiate a `FileSegment` directly. As a result the `FileSegment` has been moved from the core parser directly into the dialects. Users can refer to it via the `get_root_segment()` method of a dialect. ([#510](https://github.com/sqlfluff/sqlfluff/pull/510)) - Several performance improvements through removing unused functionality, sensible caching and optimising loops within functions. ([#526](https://github.com/sqlfluff/sqlfluff/pull/526)) - Split up rule tests into separate `yml` files. ([#553](https://github.com/sqlfluff/sqlfluff/pull/553)) - Allow escaped quotes in strings. 
([#557](https://github.com/sqlfluff/sqlfluff/pull/557)) - Fixed `ESCAPE` parsing in `LIKE` clause. ([#566](https://github.com/sqlfluff/sqlfluff/pull/566)) - Fixed parsing of complex `BETWEEN` statements. ([#498](https://github.com/sqlfluff/sqlfluff/pull/498)) - Fixed BigQuery `EXCEPT` clause parsing. ([#472](https://github.com/sqlfluff/sqlfluff/pull/472)) - Fixed Rule L022 to respect leading comma configuration. ([#455](https://github.com/sqlfluff/sqlfluff/pull/455)) - Improved instructions on adding a virtual environment in the `README`. ([#457](https://github.com/sqlfluff/sqlfluff/pull/457)) - Improved documentation for passing CLI defaults in `.sqlfluff`. ([#452](https://github.com/sqlfluff/sqlfluff/pull/452)) - Fix bug with templated blocks + `capitalisation_policy = lower`. ([#477](https://github.com/sqlfluff/sqlfluff/pull/477)) - Fix array accessors in snowflake dialect. ([#442](https://github.com/sqlfluff/sqlfluff/pull/442)) - Color `logging` warnings red. ([#497](https://github.com/sqlfluff/sqlfluff/pull/497)) - Allow whitespace before a shorthand cast. ([#544](https://github.com/sqlfluff/sqlfluff/pull/544)) - Silenced warnings when fixing from stdin. ([#522](https://github.com/sqlfluff/sqlfluff/pull/522)) - Allow an underscore as the first char in a semi structured element key. ([#596](https://github.com/sqlfluff/sqlfluff/pull/596)) - Fix PostFunctionGrammar in the Snowflake dialect which was causing strange behaviour in L012. ([#619](https://github.com/sqlfluff/sqlfluff/pull/619/files)) - `Bracketed` segment now obtains its brackets directly from the dialect using a set named `bracket_pairs`. This now enables better configuration of brackets between dialects. ([#325](https://github.com/sqlfluff/sqlfluff/pull/325)) ### Removed - Dropped support for python 3.5. ([#482](https://github.com/sqlfluff/sqlfluff/pull/482)) - From the CLI, the `--no-safety` option has been removed, the default is now that all enabled rules will be fixed. ([#583](https://github.com/sqlfluff/sqlfluff/pull/583)) - Removed `BaseSegment.grammar`, `BaseSegment._match_grammar()` and `BaseSegment._parse_grammar()` instead preferring references directly to `BaseSegment.match_grammar` and `BaseSegment.parse_grammar`. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) - Removed `EmptySegmentGrammar` and replaced with better non-code handling in the `FileSegment` itself. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) - Remove the `ContainsOnly` grammar as it remained only as an anti-pattern. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) - Removed the `expected_string()` functionality from grammars and segments ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) as it was poorly supported. - Removed `BaseSegment.as_optional()` as now this functionality happens mostly in grammars (including `Ref`). ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) - Removed `ColumnExpressionSegment` in favour of `ColumnReference`. ([#512](https://github.com/sqlfluff/sqlfluff/pull/512)) - Removed the `LambdaSegment` feature, instead replacing with an internal to the grammar module called `NonCodeMatcher`. ([#512](https://github.com/sqlfluff/sqlfluff/pull/512)) - Case sensitivity as a feature for segment matching has been removed as not required for existing dialects. ([#517](https://github.com/sqlfluff/sqlfluff/pull/517)) - Dependency on `difflib` or `cdifflib`, by relying on source mapping instead to apply fixes. 
([#541](https://github.com/sqlfluff/sqlfluff/pull/541)) ## [0.3.6] - 2020-09-24 ### Added - `sqlfluff dialects` command to get a readout of available dialects [+ associated docs]. - More helpful error messages when trying to run in Python2. - Window functions now parse with `IGNORE`/`RESPECT` `NULLS`. - Parsing of `current_timestamp` and similar functions. Thanks [@dmateusp](https://github.com/dmateusp). - Snowflake `QUALIFY` clause. ### Changed - Respect user config directories. Thanks [@sethwoodworth](https://github.com/sethwoodworth). - Fix incorrect reporting of L013 with `*`. Thanks [@dmateusp](https://github.com/dmateusp). - Fix incorrect reporting of L027 with column aliases. Thanks [@pwildenhain](https://github.com/pwildenhain). - Simplification of application of fixes and correction of a case where fixes could be depleted. Thanks [@NiallRees](https://github.com/NiallRees). - Fix functions with a similar structure to `SUBSTRING`. - Refactor BigQuery `REPLACE` and `EXCEPT` clauses. - BigQuery date parts corrected. - Snowflake array accessors. - Postgres `NOTNULL` and `ISNULL`. - Bugfix in snowflake for keywords used in semi-structured queries. - Nested `WITH` statements now parse. - Performance improvements in the `fix` command. - Numeric literals starting with a decimal now parse. - Refactor the jinja templater. ## [0.3.5] - 2020-08-03 ### Added - Patterns and Anti-patterns in documentation. Thanks [@flpezet](https://github.com/flpezet). - Functions in `GROUP BY`. Thanks [@flpezet](https://github.com/flpezet). ### Changed - Deep bugfixes in the parser to handle simple matching better for a few edge cases. Also added some logging deeper in the parser. - Added in the `SelectableGrammar` and some related segments to make it easier to refer to _select-like_ things in other grammars. - Fixes to `CASE` statement parsing. Thanks [@azhard](https://github.com/azhard). - Fix to snowflake `SAMPLE` implementation. Thanks [@rkm3](https://github.com/rkm3). - Numerous docs fixes. Thanks [@SimonStJG](https://github.com/SimonStJG), [@flpezet](https://github.com/flpezet), [@s-pace](https://github.com/s-pace), [@nolanbconaway](https://github.com/nolanbconaway). ## [0.3.4] - 2020-05-13 ### Changed - Implementation of the bigquery `CREATE MODEL` syntax. Thanks [@barrywhart](https://github.com/barrywhart). - Bugfixes for: - Edge cases for L006 - False alarms on L025 - `ORDER BY x NULLS FIRST|LAST` - `FOR` keyword in bigquery `SYSTEM_TIME` syntax. ## [0.3.3] - 2020-05-11 ### Added - Added the `--nofail` option to `parse` and `lint` commands to assist rollout. - Added the `--version` option to complement the `version` option already available on the cli. - Parsing for `ALTER TABLE`. - Warning for unset dialects when getting parsing errors. - Configurable line lengths for output. ## [0.3.2] - 2020-05-08 ### Added - Support for the Teradata dialect. Thanks [@Katzmann1983](https://github.com/Katzmann1983)! - A much more detailed getting started guide in the docs. - For the `parse` command, added the `--profiler` and `--bench` options to help debug performance issues. - Support for the `do` command in the jinja templater. - Proper parsing of the concatenate operator (`||`). - Proper indent handling of closing brackets. - Logging and benchmarking of parse performance as part of the CI pipeline. - Parsing of object references with defaults like `my_db..my_table`. - Support for the `INTERVAL '4 days'` style interval expression. - Configurable trailing or leading comma linting. 
- Configurable indentation for `JOIN` clauses. - Rules now have their own logging interface to improve debugging ability. - Snowflake and Postgres dialects. - Support for a `.sqlfluffignore` file to ignore certain paths. - More generic interfaces for managing keywords in dialects, including `set` interfaces for managing and creating keywords and the `Ref.keyword()` method to refer to them, and the ability to refer directly to keyword names in most grammars using strings directly. Includes `SegmentGenerator` objects to bind dialect objects at runtime from sets. Thanks [@Katzmann1983](https://github.com/Katzmann1983)! - Rule `L029` for using unreserved keywords as variable names. - The jinja templater now allows macros loaded from files, and the hydration of variables ending in `_path` in the config files. - JSON operators and the `DISTINCT ON ()` syntax for the postgres dialect. ### Changed - Refactor of whitespace and non-code handling so that segments are less greedy and default to not holding whitespace on ends. This allows more consistent linting rule application. - Change config file reading to _case-sensitive_ to support case sensitivity in jinja templating. - Non-string values (including lists) now function in the python and jinja templating libraries. - Validation of the match results of grammars has been reduced. In production cases the validation will still be done, but only on _parse_ and not on _match_. - At low verbosities, python level logging is also reduced. - Some matcher rules in the parser can now be classified as _simple_ which allows them to shortcut some of the matching routines. - Yaml output now double quotes values with newlines or tab characters. - Better handling of hanging and closing indents when linting rule L003. - More capable handling of multi-line comments so that indentation and line length parsing works. This involves some deep changes to the lexer. - Getting violations from the linter now automatically takes into account ignore rules and filters. - Several bugfixes, including catching potential infinite regress during fixing of files, if one fix would re-introduce a problem with another. - Behaviour of the `Bracketed` grammar has been changed to treat its content as a `Sequence` rather than a `OneOf`. - Move to `SandboxedEnvironment` rather than `Environment` for jinja templating for security. - Improve reporting of templating issues, especially for the jinja templater so that missing variables are rendered as blanks, but still reported as templating violations. ## [0.3.1] - 2020-02-17 ### Added - Support for `a.b.*` on top of `a.*` in select target expressions. ## [0.3.0] - 2020-02-15 ### Changed - Deprecated python 2.7 and python 3.4 which are now both past their maintenance horizon. The 0.2.x branch will remain available for continued development for these versions. - Rule L003 is now significantly smarter in linting indentation with support for hanging indents and comparison to the most recent line which doesn't have an error. The old (more simple) functionality of directly checking whether an indent was a multiple of a preset value has been removed. - Fixed the "inconsistent" bug in L010. Thanks [@nolanbconaway](https://github.com/nolanbconaway). - Updated logging of parsing and lexing errors to have more useful error codes. - Changed parsing of expressions to favour functions over identifiers to [fix the expression bug](https://github.com/sqlfluff/sqlfluff/issues/96). 
- Moved where the `SELECT` keyword is parsed within a select statement, so that it belongs as part of the newly renamed `select_clause` (renamed from previously `select_target_group`). - Clarified handling of the `type` and `name` properties of the BaseSegment class and its children. `name` should be specific to a particular kind of segment, and `type` should express a wider group. Handling of the `newline`, `whitespace` and `comma` segments has been updated so that we use the `type` property for most use cases rather than `name`. ### Added - _Meta segments_ for indicating where things can be present in the parsed tree. This is mostly illustrated using the `Indent` and `Dedent` segments used for indicating the position of theoretical indents in the structure. Several helper functions have been added across the codebase to handle this increase in the kinds of segments which might be encountered by various grammars. - Rule L016 has been added to lint long lines. In the `fix` phase of this rule, there is enough logic to try to reconstruct a sensible place for line breaks and re-flow the query. This will likely need further work and may still encounter places where it doesn't fix all errors but should be able to deal with the majority of simple cases. - BigQuery dialect, initially just for appropriate quoting. - Added parsing of DDL statements such as `COMMIT`, `DROP`, `GRANT`, `REVOKE` and `ROLLBACK`. Thanks [@barrywhart](https://github.com/barrywhart). - `--format` option to the `parse` command that allows a yaml output. This is mostly to make test writing easier in the development process but might also be useful for other things. - Parsing of set operations like `UNION`. - Support for the `diff-cover` tool. Thanks [@barrywhart](https://github.com/barrywhart). - Enabled the `fix` command while using `stdin`. Thanks [@nolanbconaway](https://github.com/nolanbconaway). - Rule to detect incorrect use of `DISTINCT`. Thanks [@barrywhart](https://github.com/barrywhart). - Security fixes from DeepCover. Thanks [@sanketsaurav](https://github.com/sanketsaurav). - Automatic fix testing, to help support the newer more complicated rules. - Interval literals. - Support for the `source` macro from dbt. Thanks [@Dandandan](https://github.com/Dandandan). - Support for functions with spaces between the function name and the brackets and a linting rule `L017` to catch this. - Efficiency cache for faster pruning of the parse tree. - Parsing of array notation as used in BigQuery and Postgres. - Enable the `ignore` parameter on linting and fixing commands to ignore particular kinds of violations. ## [0.2.4] - 2019-12-06 ### Added - A `--code-only` option to the `parse` command to spit out a more simplified output with only the code elements. - Rules can now optionally override the description of the violation and pass that back via the `LintingResult`. ### Changed - Bugfix, correct missing files in `setup.py` `install_requires` section. - Better parsing of the _not equal_ operator. - Added more exclusions to identifier reserved words to fix cross joins. - At verbosity levels 2 or above, the root config is printed and then any diffs to that for specific files are also printed. - Linting and parsing of directories now reports files in alphabetical order. Thanks [@barrywhart](https://github.com/barrywhart). - Better python 2.7 stability. Thanks [@barrywhart](https://github.com/barrywhart). - Fixing parsing of `IN`/`NOT IN` and `IS`/`IS NOT`. 
## [0.2.3] - 2019-12-02 ### Changed - Bugfix, default config not included. ## [0.2.2] - 2019-12-02 ### Changed - Tweak rule L005 to report more sensibly with newlines. - Rework testing of rules to be more modular. - Fix a config file bug if no root config file was present for some values. Thanks [@barrywhart](https://github.com/barrywhart). - Lexing rules are now part of the dialect rather than a global so that they can be overridden by other dialects when we get to that stage. ## [0.2.0] - 2019-12-01 ### Added - Templating support (jinja2, python or raw). - Variables + Macros. - The `fix` command is also sensitive to fixing over templates and will skip certain fixes if it feels that it's conflicted. - Config file support, including specifying context for the templater. - Documentation via Sphinx and readthedocs. - Including a guide on the role of SQL in the real world. Assisted by [@barrywhart](https://github.com/barrywhart). - Documentation LINTING (given we're a linting project) introduced in CI. - Reimplemented L006 & L007 which lint whitespace around operators. - Ability to configure rule behaviour directly from the config file. - Implemented L010 to lint capitalisation of keywords. - Allow casting in the parser using the `::` operator. - Implemented `GROUP BY` and `LIMIT`. - Added `ORDER BY` using indexes and expressions. - Added parsing of `CASE` statements. - Support for window/aggregate functions. - Added linting and parsing of alias expressions. ### Changed - Fixed a bug which could cause potential infinite recursion in configuration. - Changed how negative literals are handled, so that they're now a compound segment rather than being identified at the lexing stage. This is to allow the parser to resolve the potential ambiguity. - Restructure of rule definitions to be more streamlined and also enable autodocumentation. This includes a more complete `RuleSet` class which now holds the filtering code. - Corrected logging in fix mode not to duplicate the reporting of errors. - Now allows insert statements with a nested `with` clause. - Fixed verbose logging during parsing. - Allow the `Bracketed` grammar to optionally match empty brackets using the optional keyword. ## [0.1.5] - 2019-11-11 ### Added - Python 3.8 Support! ### Changed - Moved some of the responsibility for formatted logging into the linter to mean that we can log progressively in large directories. - Fixed a bug in the grammar where one of the return values was messed up. ## [0.1.4] - 2019-11-10 ### Added - Added a `--exclude-rules` argument to most of the commands to allow rule users to exclude a specific subset of rules, by [@sumitkumar1209](https://github.com/sumitkumar1209) - Added lexing for `!=`, `~` and `::`. - Added a new common segment: `LambdaSegment` which allows matching based on arbitrary functions which can be applied to segments. - Recursive Expressions for both arithmetic and functions, based heavily off the grammar provided by the guys at [CockroachDB](https://www.cockroachlabs.com/docs/stable/sql-grammar.html#select_stmt). - An `Anything` grammar, useful in matching rather than in parsing to match anything. ### Changed - Complete rewrite of the bracket counting functions, using some centralised class methods on the `BaseGrammar` class to support common matching features across multiple grammars. In particular this affects the `Delimited` grammar which is now _much simpler_ but does also require _slightly_ more liberal use of terminators to match effectively. 
- Rather than passing around multiple variables during parsing and matching, there is now a `ParseContext` object which contains things like the dialect and various depths. This simplifies the parsing and matching code significantly. - Bracket referencing is now done from the dialect directly, rather than in individual Grammars (except the `Bracketed` grammar, which still implements it directly). This takes out some originally duplicated code. - Corrected the parsing of ordering keywords in an `ORDER BY` clause. ### Removed - Removed the `bracket_sensitive_forward_match` method from the `BaseGrammar`. It was ugly and not flexible enough. It's been replaced by a suite of methods as described above. ## [0.1.3] - 2019-10-30 ### Changed - Tweak to the L001 rule so that it doesn't crash the whole thing. ## [0.1.2] - 2019-10-30 ### Changed - Fixed the errors raised by the lexer. ## [0.1.1] - 2019-10-30 ### Changed - Fixed which modules from sqlfluff are installed in the setup.py. This affects the `version` command. ## [0.1.0] - 2019-10-29 ### Changed - _Big Rewrite - some loss in functionality might be apparent compared to pre-0.1.0. Please submit any major problems as issues on github_ - Changed unicode handling for better escape codes in python 2. Thanks [@mrshu](https://github.com/mrshu) - BIG rewrite of the parser, completely new architecture. This introduces breaking changes and some loss of functionality while we catch up. - In particular, matches now return partial matches to speed up parsing. - The `Delimited` matcher has had a significant re-write with a major speedup and broken the dependency on `Sequence`. - Rewrite of `StartsWith` and `Sequence` to use partial matches properly. - Different treatment of numeric literals. - Both `Bracketed` and `Delimited` respect bracket counting. - MASSIVE rewrite of `Bracketed`. - Grammars now have timers. - Joins properly parsing. - Rewrite of logging to selectively output commands at different levels of verbosity. This uses the `verbosity_logger` method. - Added a command line `sqlfluff parse` option which runs just the parsing step of the process to better understand how a file is being parsed. This also has options to configure how deep we recurse. - Complete Re-write of the rules section, implementing new `crawlers` which implement the linting rules. Now with inbuilt fixers in them. - Old rules removed and re-implemented so we now have parity with the old rule sets. - Moved to using `Ref` mostly within the core grammar so that we can have recursion. - Used recursion to do a first implementation of arithmetic parsing. Including a test for it. - Moved the main grammar into a separate dialect and renamed source and test files accordingly. - Moved to file-based tests for the ansi dialect to make it easier to test using the tool directly. - As part of file tests - expected outcomes are now encoded in yaml to make it easier to write new tests. - Vastly improved readability and debugging potential of the \_match logging. - Added support for windows line endings in the lexer. ## [0.0.7] - 2018-11-19 ### Added - Added a `sqlfluff fix` as a command to implement auto-fixing of linting errors. For now only `L001` is implemented as a rule that can fix things. - Added a `rules` command to introspect the available rules. - Updated the cli table function to use the `textwrap` library and also deal a lot better with longer values. - Added a `--rules` argument to most of the commands to allow rule users to focus their search on a specific subset of rules. 
### Changed - Refactor the cli tests to use the click CliRunner. Much faster ## [0.0.6] - 2018-11-15 ### Added - Number matching ### Changed - Fixed operator parsing and linting (including allowing the exception of `(*)`) ## [0.0.5] - 2018-11-15 ### Added - Much better documentation including the DOCS.md ### Changed - Fixed comma parsing and linting ## [0.0.4] - 2018-11-14 ### Added - Added operator regexes - Added a priority for matchers to resolve some ambiguity - Added tests for operator regexes - Added ability to initialise the memory in rules ## [0.0.3] - 2018-11-14 ### Added - Refactor of rules to allow rules with memory - Adding comma linting rules (correcting the single character matchers) - Adding mixed indentation linting rules - Integration with CircleCI, CodeCov and lots of badges ### Changed - Changed import of version information to fix bug with importing config.ini - Added basic violations/file reporting for some verbosities - Refactor of rules to simplify definition - Refactor of color cli output to make it more reusable ## [0.0.2] - 2018-11-09 ### Added - Longer project description - Proper exit codes - colorama for colored output ### Changed - Significant CLI changes - Much improved output from CLI ## [0.0.1] - 2018-11-07 ### Added - Initial Commit! - VERY ALPHA - Restructure into [package layout](https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure) - Adding Tox and Pytest so that they work sqlfluff-3.4.2/CODE_OF_CONDUCT.md000066400000000000000000000121631503426445100161500ustar00rootroot00000000000000 # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement via email to `admins`@`sqlfluff.com`. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. 
sqlfluff-3.4.2/CONTRIBUTING.md000066400000000000000000000426321503426445100156060ustar00rootroot00000000000000# SQLFluff - Contributing :star2: **First** - thanks for being interested in improving SQLFluff! :smiley: :star2: **Second** - please read and familiarise yourself with both the content of this guide and also our [code of conduct](CODE_OF_CONDUCT.md). :star2: **Third** - the best way to get started contributing is to use the tool in anger and then to submit bugs and features through GitHub. In particular, in helping to develop the parser, examples of queries that do not parse as expected are especially helpful. :star2: **Fourth** - making sure that our documentation is up-to-date and useful for new users is really important. If you are a new user, you are in precisely the best position to do this. Familiarise yourself with the tool (as per step 3 above) and familiarise yourself with the current documentation (live version at [docs.sqlfluff.com](https://docs.sqlfluff.com) and the source can be found in the [docs](./docs/) folder of the repo). Pull requests are always welcome with documentation improvements. Keep in mind that there are linting checks in place for good formatting so keep an eye on the tests whenever submitting a PR. We also have a [GitHub wiki](https://github.com/sqlfluff/sqlfluff/wiki) for longer tutorials. We welcome [contributions, suggestions or requests](https://github.com/sqlfluff/sqlfluff/issues/2104) for the wiki. :star2: **Fifth** - if you are so inclined - pull requests on the core codebase are always welcome. Dialect additions are often a good entry point for new contributors, and we have [a set of guides](https://docs.sqlfluff.com/en/stable/perma/guides.html) to help you through your first contribution. Bear in mind that all the tests should pass, and test coverage should not decrease unduly as part of the changes which you make. You may find it useful to familiarise yourself with the [architectural principles here](https://docs.sqlfluff.com/en/stable/perma/architecture.html) and with the [current documentation here](https://docs.sqlfluff.com). ## How The Community Works SQLFluff is maintained by a community of volunteers, which means we have a few processes in place to allow everyone to contribute at a level that suits them and at a time that suits them. These are not meant to be a way of restricting development, but a way of allowing the community to agree what to focus on and then effectively direct its focus toward that. Anyone can pipe up in these discussions, and the more we hear from users the more we can build a tool that is useful for the community. - Large features for consideration will be organised into _Major Releases_. These will usually include significant changes in functionality or backwards-incompatible changes. As some of these features may require significant coordination, discussion or development work, there is a process for each major release to work out what features will fit into that release. - Each major release will have its own GitHub issue. For example, the link to the issue for [0.6.0 is here](https://github.com/sqlfluff/sqlfluff/issues/922). - Features or issues are organised into a _shortlist_. During the initial discussion for the release, each feature is vetted for enough clarity that someone in the community can pick it up. Issues where we cannot reach clarity will be pushed to the next release. 
Getting this clarity is important before development work progresses so that we know that larger changes are a) in line with the aims of the project and b) effectively pre-approved, so that there are no surprises when it comes to merging. - Once we reach the deadline for closing the roadmap for a release the focus on development work should be on those features. - Small features and bug fixes (assuming no backward compatibility issues) do not need to go through the same process and vetting and can be picked up and merged at any time. ### Maintainers A small group of people volunteer their time to maintain the project and share the responsibility for responding to issues and reviewing any proposed changes via pull requests. Each one of them will be trying to follow the process above and keep development work on the project moving. That means for smaller changes and improvements they may review changes as individuals and merge them into the project in a very lightweight way. For larger changes, especially if not already part of the current major release process, the expectation is that they will involve other members of the maintainer community or the project admins before merging in larger changes or giving the green light to major structural project changes. ## Nerdy Details ### Developing and Running SQLFluff Locally #### Requirements If you plan on working with a particular dbt plugin, you will need to ensure your python version is high enough to support it. For example, the instructions below use `python3.12`, and we support as low as `python3.9`. The simplest way to set up a development environment is to use `tox`. First ensure that you have tox installed: ```shell python3.12 -m pip install -U tox ``` **IMPORTANT:** Python 3.9 is the minimum version we support. Feel free to test on anything between `python3.9` and `python3.13`. Note: Unfortunately tox does not currently support setting just a minimum Python version (though this may be coming in tox 4!). #### Creating a virtual environment A virtual environment can then be created and activated. For the various versions currently available you can check the `tox.ini` file. The numbers correspond to the dbt core version; dbt180 will install dbt 1.8.0. To build and activate the virtual environment: ```shell tox -e dbt180 --devenv .venv source .venv/bin/activate ``` (The `dbt180` environment is a good default choice. However, any version can be installed by replacing `dbt180` with `py`, `py39` through `py313`, `dbt140` through `dbt190`, etc. `py` defaults to the python version that was used to install tox. To be able to run all tests including the dbt templater, choose one of the dbt environments.) Windows users should call `.venv\Scripts\activate` rather than `source .venv/bin/activate`. They may also want to substitute `winpy` for `py` in the commands above. This virtual environment will already have the package installed in editable mode for you, as well as `requirements_dev.txt` and `plugins/sqlfluff-plugin-example`. Additionally, if a dbt virtual environment was specified, you will also have `dbt-core`, `dbt-postgres`, and `plugins/sqlfluff-templater-dbt` available. A different dbt plugin can be selected by changing the appropriate file under `constraints` for the desired package and version. 
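Putting the Windows notes above together, the equivalent setup on Windows might look like the following sketch (a sketch only, assuming the `winpy` substitution and activation script described above; the rest of the workflow is the same once the environment is activated): ```shell python -m pip install -U tox tox -e winpy --devenv .venv .venv\Scripts\activate ``` 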
#### Developing in Docker To build a simple interactive Docker container, run the following commands: ```shell make build make shell ``` This container installs all Python dependencies, and mounts the project directory into the container. It installs SQLFluff in editable mode. The nuts and bolts are in place such that git should work seamlessly inside the container. It'll also install the dbt templater plugin. ### Wiki We have a [GitHub wiki](https://github.com/sqlfluff/sqlfluff/wiki) with some more long form tutorials for contributors, particularly those new to SQLFluff or contributing to open source. We welcome [contributions, suggestions or requests](https://github.com/sqlfluff/sqlfluff/issues/2104) for the wiki. ### Developing plugins If you're working on plugins (like the dbt templater), you'll also need to install those plugins in editable mode. This works the same way as the main project but you'll need to do each one explicitly. e.g. ```shell pip install -e plugins/sqlfluff-templater-dbt/. ``` > NOTE: For packages intended to be installed like this, the source code must be directly > within a subdirectory with the name of the package and not in a subdirectory such as > src. This is due to a restriction in the implementation of setup.py in editable mode. ### Testing To test locally, SQLFluff uses `tox` (check the [requirements](#requirements)!). The test suite can be run via: ```shell tox ``` This will build and test for several Python versions, and also lint the project. Practically on a day-to-day basis, you might only want to lint and test for one Python version, so you can always specify a particular environment. For example, if you are developing in Python 3.9 you might call... ```shell tox -e generate-fixture-yml,py39,linting,mypy ``` ...or if you also want to see the coverage reporting... ```shell tox -e generate-fixture-yml,cov-init,py39,cov-report,linting,mypy ``` > NB: The `cov-init` task clears the previous test results, the `py39` environment > generates the results for tests in that Python version and the `cov-report` > environment reports those results out to you (excluding dbt). `tox` accepts `posargs` to allow you to refine your test run, which is much faster while working on an issue, before running full tests at the end. For example, you can run specific tests by making use of the `-k` option in `pytest`: ``` tox -e py39 -- -k AL02 test ``` Alternatively, you can also run tests from a specific directory or file only: ``` tox -e py39 -- test/cli tox -e py39 -- test/cli/commands_test.py ``` You can also manually test your updated code against a SQL file via: ```shell sqlfluff parse test.sql ``` (ensure your virtual environment is activated first). #### How to use and understand the test suite When developing for SQLFluff, you may not need (or wish) to run the whole test suite, depending on what you are working on. Here are a couple of scenarios for development, and which parts of the test suite you may find most useful. 1. For dialect improvements (i.e. changes to anything in [src/sqlfluff/dialects](./src/sqlfluff/dialects)) you should not need to continuously run the full core test suite. Running either `tox -e generate-fixture-yml` (if using tox), or setting up a python virtualenv and running `test/generate_parse_fixture_yml.py` directly will usually be sufficient. Both of these options accept arguments to restrict runs to specific dialects to further improve iteration speed. e.g. - `tox -e generate-fixture-yml -- -d mysql` will run just the mysql tests. 
- `python test/generate_parse_fixture_yml.py -d mysql` will do the same. 2. Developing for the dbt templater should only require running the dbt test suite (see below). 3. When developing rules and rule plugins, there are a couple of scenarios. - When developing a new rule or working with a more isolated rule, you should only need to run the tests for that rule. These are usually what are called the _yaml tests_. This refers to a body of example sql statements and potential fixes defined in a large set of yaml files found in [test/fixtures/rules/std_rule_cases](./test/fixtures/rules/std_rule_cases). The easiest way to run these is by calling that part of the suite directly and filtering to just that rule. For example: - `tox -e py39 -- test/rules/yaml_test_cases_test.py -k AL01` - `pytest test/rules/yaml_test_cases_test.py -k AL01` - When developing on some more complicated rules, or ones known to have interactions with other rules, there are a set of rule fixing tests which apply a set combination of those rules. These are best run via the `autofix` tests. For example: - `tox -e py39 -- test/rules/std_fix_auto_test.py` - `pytest test/rules/std_fix_auto_test.py` - Potentially even the full rules suite `tox -e py39 -- test/rules` - A small number of core rules are also used in making sure that inner parts of SQLFluff are also functioning. This isn't great isolation but does mean that occasionally you may find side effects of your changes in the wider test suite. These can usually be caught by running the full `tox -e py39` suite as a final check (or using the test suite on GitHub when posting your PR). 4. When developing the internals of SQLFluff (i.e. anything not already mentioned above), the test suite typically mirrors the structure of the internal submodules of sqlfluff: - When working with the CLI, the `sqlfluff.cli` module has a test suite called via `tox -e py39 -- test/cli`. - When working with the templaters (i.e. `sqlfluff.core.templaters`), the corresponding test suite is found via `tox -e py39 -- test/core/templaters`. - This rough guidance may however not apply for all of the internals. For example, changes to the internals of the parsing module (`sqlfluff.core.parser`) are very likely to have knock-on implications across the rest of the test suite and it may be necessary to run the whole thing. In these situations however you can usually work slowly outward, for example: 1. If your change is to the `AnyOf()` grammar, first running `tox -e py39 -- test/core/parser/grammar_test.py` would be wise. 2. ...followed by `tox -e py39 -- test/core/parser` once the above is passing. 3. ...and then `tox -e py39 -- test/core`. 4. ...and finally the full suite `tox -e py39`. #### dbt templater tests The dbt templater tests require a locally running Postgres instance. See the required connection parameters in `plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles.yml`. We recommend using https://postgresapp.com/. To run the dbt-related tests you will have to explicitly include these tests: ```shell tox -e cov-init,dbt180-py39,cov-report-dbt -- plugins/sqlfluff-templater-dbt ``` For more information on adding and running test cases see the [Parser Test README](test/fixtures/dialects/README.md) and the [Rules Test README](test/fixtures/rules/std_rule_cases/README.md). #### Running dbt templater tests in Docker The development Docker container has the dbt templater plugin installed, so you can run the tests inside the container. 
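If you have not already built and started the development container, a typical host-machine sequence might look like this sketch (based on the Makefile targets described in the Docker section above; the `postgres` service defined in `docker-compose.yml` is started automatically as a dependency of the `development` service): ```shell make build # build the development container make start # start it (docker-compose also starts the Postgres service) make shell # open a bash session inside the container ``` 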
Inside the container, run: ``` pytest -v plugins/sqlfluff-templater-dbt/test/ ``` ### Pre-Commit Config For development convenience we also provide a `.pre-commit-config.yaml` file to allow the user to install a selection of pre-commit hooks by running (check the [requirements](#requirements) before running this): ``` tox -e pre-commit -- install ``` These hooks can help the user identify and fix potential linting/typing violations prior to committing their code and therefore reduce having to deal with these sorts of issues during code review. ### Documentation Website Documentation is built using Sphinx with some pages being built based on the source code. See the [Documentation Website README.md](./docs/README.md) file for more information on how to build and test this. ### Building Package New versions of SQLFluff will be published to PyPI automatically via [GitHub Actions](.github/workflows/publish-release-to-pypi.yaml) whenever a new release is published to GitHub. #### Release checklist: The [release page](https://github.com/sqlfluff/sqlfluff/releases) shows maintainers all merges since the last release. Once we have a long enough list, we should prepare a release. A release PR can be created by maintainers via the ["Create release pull request" GitHub Action](https://github.com/sqlfluff/sqlfluff/actions/workflows/create-release-pull-request.yaml). Once this is done, it's a good idea to put a short post on the #contributing channel on Slack so that people know there will be a release soon. As further PRs are merged, we may need to rerun the release script (or alternatively just manually update the branch). This can only be rerun locally (the GitHub Action will exit with an error if the branch already exists, to prevent overwriting it). Check out the release branch created by the GitHub Action locally and run the script. It will preserve any `Highlights` you have added and update the other sections with new contributions. It can be run as follows (you will need a [GitHub Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) with read & write permissions on the "Content" scope, and read permissions on the "Metadata" scope. The reason we need both read & write access on the "Content" scope is that only tokens with write access can see _draft_ releases, which is what we need access to). All maintainers should have sufficient access to generate such a token: ```shell source .venv/bin/activate export GITHUB_REPOSITORY_OWNER=sqlfluff export GITHUB_TOKEN=gho_xxxxxxxx # Change to your token with "repo" permissions. python util.py release 2.0.3 # Change to your release number ``` When all of the changes planned for the release have been merged and the release PR is up to date, the maintainer running the release should write a short commentary in the `CHANGELOG.md` file, describing any features of note, and also celebrating new contributors. Check out old releases for inspiration! When the PR is ready, merge it. Once merged, follow these steps to actually publish the final release. 1. Edit the draft release on GitHub. Copy across the summary from the `CHANGELOG.md`. 2. Update the title of the release to include the current date. 3. Check that the tag for the release is set to the release version. The tag will not have been created yet, so click the option to create the tag on release. 4. If this is an alpha release, make sure to set the "this is a pre-release" option on the release. Otherwise leave it blank. 5. 
When you're ready, click "Publish Release" and that will set off the automated processes to publish the release to PyPI and Docker Hub. 6. Finally, drop a note on #general on Slack to let the community know there's a new release out. Then you're done! sqlfluff-3.4.2/Dockerfile000066400000000000000000000023351503426445100153430ustar00rootroot00000000000000FROM python:3.12-slim-bullseye # Set separate working directory for easier debugging. WORKDIR /app # Create virtual environment. ENV VIRTUAL_ENV=/app/.venv RUN python -m venv $VIRTUAL_ENV ENV PATH=$VIRTUAL_ENV/bin:$PATH RUN pip install --no-cache-dir --upgrade pip setuptools wheel pip-tools # Install requirements separately # to take advantage of layer caching. # N.B. we extract the requirements from pyproject.toml COPY pyproject.toml . # Use piptools to extract requirements from pyproject.toml as described in # https://github.com/pypa/pip/issues/11584 RUN pip-compile -o requirements.txt pyproject.toml -v --strip-extras RUN pip install --no-cache-dir --upgrade -r requirements.txt # Copy minimal set of SQLFluff package files. COPY MANIFEST.in . COPY README.md . COPY src ./src # Install sqlfluff package. RUN pip install --no-cache-dir --no-dependencies . # Switch to non-root user. USER 5000 # Switch to new working directory as default bind mount location. # User can bind mount to /sql and not have to specify the full file path in the command: # i.e. docker run --rm -it -v $PWD:/sql sqlfluff/sqlfluff:latest lint test.sql WORKDIR /sql # Set SQLFluff command as entry point for image. ENTRYPOINT ["sqlfluff"] CMD ["--help"] sqlfluff-3.4.2/LICENSE.md000066400000000000000000000020611503426445100147510ustar00rootroot00000000000000MIT License Copyright (c) 2023 Alan Cruickshank Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
sqlfluff-3.4.2/MANIFEST.in000066400000000000000000000000521503426445100151010ustar00rootroot00000000000000include README.md LICENSE.md CHANGELOG.md sqlfluff-3.4.2/Makefile000066400000000000000000000014121503426445100150040ustar00rootroot00000000000000.PHONY: help build clean fresh shell start stop .DEFAULT_GOAL := help help: ## Show the available targets @grep -E '^[/a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' build: ## Build the development container docker-compose build development clean: ## Clean up all containers and images docker system prune -f docker-compose stop docker rmi `docker images -a -q` fresh: ## Build the development container from scratch docker-compose build --no-cache development shell: ## Start a bash session in the development container docker-compose exec development bash start: ## Start the development container docker-compose up -d stop: ## Stop the development container docker-compose stop sqlfluff-3.4.2/README.md000066400000000000000000000217301503426445100146300ustar00rootroot00000000000000![SQLFluff](https://raw.githubusercontent.com/sqlfluff/sqlfluff/main/images/sqlfluff-wide.png) # The SQL Linter for Humans [![PyPi Version](https://img.shields.io/pypi/v/sqlfluff.svg?style=flat-square&logo=PyPi)](https://pypi.org/project/sqlfluff/) [![PyPi License](https://img.shields.io/pypi/l/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Python Versions](https://img.shields.io/pypi/pyversions/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Status](https://img.shields.io/pypi/status/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Downloads](https://img.shields.io/pypi/dm/sqlfluff?style=flat-square)](https://pypi.org/project/sqlfluff/) [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sqlfluff/sqlfluff/.github/workflows/ci-tests.yml?logo=github&style=flat-square)](https://github.com/sqlfluff/sqlfluff/actions/workflows/ci-tests.yml?query=branch%3Amain) [![ReadTheDocs](https://img.shields.io/readthedocs/sqlfluff?style=flat-square&logo=Read%20the%20Docs)](https://sqlfluff.readthedocs.io) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat-square)](https://github.com/psf/black) [![Docker Pulls](https://img.shields.io/docker/pulls/sqlfluff/sqlfluff?logo=docker&style=flat-square)](https://hub.docker.com/r/sqlfluff/sqlfluff) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20SQLFluff%20Guru-006BFF?style=flat-square)](https://gurubase.io/g/sqlfluff) **SQLFluff** is a dialect-flexible and configurable SQL linter. Designed with [ELT](https://www.techtarget.com/searchdatamanagement/definition/Extract-Load-Transform-ELT) applications in mind, **SQLFluff** also works with Jinja templating and dbt. **SQLFluff** will auto-fix most linting errors, allowing you to focus your time on what matters. ## Table of Contents 1. [Dialects Supported](#dialects-supported) 2. [Templates Supported](#templates-supported) 3. [VS Code Extension](#vs-code-extension) 4. [Getting Started](#getting-started) 5. [Documentation](#documentation) 6. [Releases](#releases) 7. [SQLFluff on Slack](#sqlfluff-on-slack) 8. [SQLFluff on Twitter](#sqlfluff-on-twitter) 9. [Contributing](#contributing) 10. 
[Sponsors](#sponsors) ## Dialects Supported Although SQL is reasonably consistent in its implementations, there are several different dialects available with variations of syntax and grammar. **SQLFluff** currently supports the following SQL dialects (though perhaps not in full): - ANSI SQL - this is the base version and on occasion may not strictly follow the ANSI/ISO SQL definition - [Athena](https://aws.amazon.com/athena/) - [BigQuery](https://cloud.google.com/bigquery/) - [ClickHouse](https://clickhouse.com/) - [Databricks](https://databricks.com/) (note: this extends the `sparksql` dialect with [Unity Catalog](https://docs.databricks.com/data-governance/unity-catalog/index.html) syntax). - [Db2](https://www.ibm.com/analytics/db2) - [Doris](https://doris.apache.org/) - [DuckDB](https://duckdb.org/) - [Exasol](https://www.exasol.com/) - [Greenplum](https://greenplum.org/) - [Hive](https://hive.apache.org/) - [Impala](https://impala.apache.org/) - [MariaDB](https://www.mariadb.com/) - [Materialize](https://materialize.com/) - [MySQL](https://www.mysql.com/) - [Oracle](https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/index.html) - [PostgreSQL](https://www.postgresql.org/) (aka Postgres) - [Redshift](https://docs.aws.amazon.com/redshift/index.html) - [Snowflake](https://www.snowflake.com/) - [SOQL](https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm) - [SparkSQL](https://spark.apache.org/docs/latest/) - [SQLite](https://www.sqlite.org/) - [StarRocks](https://www.starrocks.io) - [Teradata](https://www.teradata.com/) - [Transact-SQL](https://docs.microsoft.com/en-us/sql/t-sql/language-reference) (aka T-SQL) - [Trino](https://trino.io/) - [Vertica](https://www.vertica.com/) We aim to make it easy to expand on the support of these dialects and also add other, currently unsupported, dialects. Please [raise issues](https://github.com/sqlfluff/sqlfluff/issues) (or upvote any existing issues) to let us know of demand for missing support. Pull requests from those that know the missing syntax or dialects are especially welcomed and are the quickest way for you to get support added. We are happy to work with any potential contributors on this to help them add this support. Please raise an issue first for any large feature change to ensure it is a good fit for this project before spending time on this work. ## Templates Supported SQL itself does not lend itself well to [modularity](https://docs.getdbt.com/docs/viewpoint#section-modularity), so to introduce some flexibility and reusability it is often [templated](https://en.wikipedia.org/wiki/Template_processor) as discussed more in [our modularity documentation](https://docs.sqlfluff.com/en/stable/perma/modularity.html). **SQLFluff** supports the following templates: - [Jinja](https://jinja.palletsprojects.com/) (aka Jinja2) - SQL placeholders (e.g. SQLAlchemy parameters) - [Python format strings](https://docs.python.org/3/library/string.html#format-string-syntax) - [dbt](https://www.getdbt.com/) (requires plugin) Again, please raise issues if you wish to support more templating languages/syntaxes. ## VS Code Extension We also have a VS Code extension: - [Github Repository](https://github.com/sqlfluff/vscode-sqlfluff) - [Extension in VS Code marketplace](https://marketplace.visualstudio.com/items?itemName=dorzey.vscode-sqlfluff) # Getting Started To get started, install the package and run `sqlfluff lint` or `sqlfluff fix`. 
```shell $ pip install sqlfluff $ echo " SELECT a + b FROM tbl; " > test.sql $ sqlfluff lint test.sql --dialect ansi == [test.sql] FAIL L: 1 | P: 1 | LT01 | Expected only single space before 'SELECT' keyword. | Found ' '. [layout.spacing] L: 1 | P: 1 | LT02 | First line should not be indented. | [layout.indent] L: 1 | P: 1 | LT13 | Files must not begin with newlines or whitespace. | [layout.start_of_file] L: 1 | P: 11 | LT01 | Expected only single space before binary operator '+'. | Found ' '. [layout.spacing] L: 1 | P: 14 | LT01 | Expected only single space before naked identifier. | Found ' '. [layout.spacing] L: 1 | P: 27 | LT01 | Unnecessary trailing whitespace at end of file. | [layout.spacing] L: 1 | P: 27 | LT12 | Files must end with a single trailing newline. | [layout.end_of_file] All Finished 📜 🎉! ``` Alternatively, you can use the [**Official SQLFluff Docker Image**](https://hub.docker.com/r/sqlfluff/sqlfluff) or have a play using [**SQLFluff online**](https://online.sqlfluff.com/). For full [CLI usage](https://docs.sqlfluff.com/en/stable/perma/cli.html) and [rules reference](https://docs.sqlfluff.com/en/stable/perma/rules.html), see [the SQLFluff docs](https://docs.sqlfluff.com/en/stable/). # Documentation For full documentation visit [docs.sqlfluff.com](https://docs.sqlfluff.com/en/stable/). This documentation is generated from this repository so please raise [issues](https://github.com/sqlfluff/sqlfluff/issues) or pull requests for any additions, corrections, or clarifications. # Releases **SQLFluff** adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), so breaking changes should be restricted to major version releases. Some elements (such as the python API) are in a less stable state and may see more significant changes more often. For details on breaking changes and how to migrate between versions, see our [release notes](https://docs.sqlfluff.com/en/latest/perma/releasenotes.html). See the [changelog](CHANGELOG.md) for more details. If you would like to join in, please consider [contributing](CONTRIBUTING.md). New releases are made monthly. For more information, visit [Releases](https://github.com/sqlfluff/sqlfluff/releases). # SQLFluff on Slack We have a fast-growing community [on Slack](https://join.slack.com/t/sqlfluff/shared_invite/zt-2qtu36kdt-OS4iONPbQ3aCz2DIbYJdWg), come and join us! # SQLFluff on Twitter Follow us [on Twitter @SQLFluff](https://twitter.com/SQLFluff) for announcements and other related posts. # Contributing We are grateful to all our [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors). There is a lot to do in this project, and we are just getting started. If you want to understand more about the architecture of **SQLFluff**, you can find [more here](https://docs.sqlfluff.com/en/latest/perma/architecture.html). If you would like to contribute, check out the [open issues on GitHub](https://github.com/sqlfluff/sqlfluff/issues). You can also see the guide to [contributing](CONTRIBUTING.md). # Sponsors Datacoves
The turnkey analytics stack. Find out more at [Datacoves.com](https://datacoves.com/). sqlfluff-3.4.2/constraints/000077500000000000000000000000001503426445100157155ustar00rootroot00000000000000sqlfluff-3.4.2/constraints/dbt140.txt000066400000000000000000000000441503426445100174520ustar00rootroot00000000000000dbt-core~=1.4.1 dbt-postgres~=1.4.1 sqlfluff-3.4.2/constraints/dbt150-winpy.txt000066400000000000000000000000441503426445100206170ustar00rootroot00000000000000dbt-core~=1.5.0 dbt-postgres~=1.5.0 sqlfluff-3.4.2/constraints/dbt150.txt000066400000000000000000000000441503426445100174530ustar00rootroot00000000000000dbt-core~=1.5.0 dbt-postgres~=1.5.0 sqlfluff-3.4.2/constraints/dbt160.txt000066400000000000000000000000441503426445100174540ustar00rootroot00000000000000dbt-core~=1.6.0 dbt-postgres~=1.6.0 sqlfluff-3.4.2/constraints/dbt170.txt000066400000000000000000000000441503426445100174550ustar00rootroot00000000000000dbt-core~=1.7.0 dbt-postgres~=1.7.0 sqlfluff-3.4.2/constraints/dbt180-winpy.txt000066400000000000000000000000441503426445100206220ustar00rootroot00000000000000dbt-core~=1.8.0 dbt-postgres~=1.8.0 sqlfluff-3.4.2/constraints/dbt180.txt000066400000000000000000000000441503426445100174560ustar00rootroot00000000000000dbt-core~=1.8.0 dbt-postgres~=1.8.0 sqlfluff-3.4.2/constraints/dbt190-winpy.txt000066400000000000000000000000501503426445100206200ustar00rootroot00000000000000dbt-core~=1.9.0b1 dbt-postgres~=1.9.0b1 sqlfluff-3.4.2/constraints/dbt190.txt000066400000000000000000000000501503426445100174540ustar00rootroot00000000000000dbt-core~=1.9.0b1 dbt-postgres~=1.9.0b1 sqlfluff-3.4.2/docker-compose.yml000066400000000000000000000015001503426445100167770ustar00rootroot00000000000000services: development: build: context: . dockerfile: ./docker/development/Dockerfile environment: - SSH_AUTH_SOCK=/ssh-agent - POSTGRES_HOST=postgres volumes: - .:/app - ./test/fixtures/dbt/profiles_yml:/root/.dbt - ~/.gitconfig:/etc/gitconfig - ~/.ssh:/root/.ssh - /run/host-services/ssh-auth.sock:/ssh-agent stdin_open: true tty: true depends_on: - postgres entrypoint: /bin/bash postgres: image: postgres:14-bullseye environment: - POSTGRES_PASSWORD=password ports: # NOTE: "5432:5432" makes the Postgres server accessible to both the host # developer machine *and* the "app" container in Docker. If you don't want # it available on the host machine, change this to simply "5432". - "5432:5432" sqlfluff-3.4.2/docker/000077500000000000000000000000001503426445100146155ustar00rootroot00000000000000sqlfluff-3.4.2/docker/development/000077500000000000000000000000001503426445100171375ustar00rootroot00000000000000sqlfluff-3.4.2/docker/development/Dockerfile000066400000000000000000000014311503426445100211300ustar00rootroot00000000000000FROM python:3.12 # Set separate working directory for easier debugging. WORKDIR /app ENV PYTHONPATH=/app:/app/src # Install Git RUN apt-get -y update && apt-get -y install git && apt-get clean COPY requirements_dev.txt /requirements_dev.txt # Install dependencies RUN --mount=type=cache,target=/root/.cache/pip pip install --upgrade pip setuptools wheel RUN --mount=type=cache,target=/root/.cache/pip pip install -r /requirements_dev.txt --upgrade --default-timeout=100 # Set up dbt-related dependencies. RUN --mount=type=cache,target=/root/.cache/pip pip install dbt-postgres # Copy everything. (Note: If needed, we can use .dockerignore to limit what's copied.) COPY . . # Install sqlfluff and the dbt templater in editable mode. RUN pip install -e . 
sqlfluff-3.4.2/docker/000077500000000000000000000000001503426445100146155ustar00rootroot00000000000000
sqlfluff-3.4.2/docker/development/000077500000000000000000000000001503426445100171375ustar00rootroot00000000000000
sqlfluff-3.4.2/docker/development/Dockerfile000066400000000000000000000014311503426445100211300ustar00rootroot00000000000000
FROM python:3.12

# Set separate working directory for easier debugging.
WORKDIR /app
ENV PYTHONPATH=/app;/app/src

# Install Git
RUN apt-get -y update && apt-get -y install git && apt-get clean

COPY requirements_dev.txt /requirements_dev.txt

# Install Dependencies
RUN --mount=type=cache,target=/root/.cache/pip pip install --upgrade pip setuptools wheel
RUN --mount=type=cache,target=/root/.cache/pip pip install -r /requirements_dev.txt --upgrade --default-timeout=100

# Set up dbt-related dependencies.
RUN --mount=type=cache,target=/root/.cache/pip pip install dbt-postgres

# Copy everything. (Note: If needed, we can use .dockerignore to limit what's copied.)
COPY . .

# Install sqlfluff and the dbt templater in editable mode.
RUN pip install -e . -e plugins/sqlfluff-templater-dbt
sqlfluff-3.4.2/docs/000077500000000000000000000000001503426445100142765ustar00rootroot00000000000000
sqlfluff-3.4.2/docs/Makefile000066400000000000000000000015421503426445100157400ustar00rootroot00000000000000
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
# We want SPHINX to warn on error (-W) for example if we don't force SQL
# formatting, but we also want to continue "--keep-going" and check the
# other docs before exiting non-zero
SPHINXOPTS    ?= -W --keep-going
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	python generate-auto-docs.py
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
sqlfluff-3.4.2/docs/README.md000066400000000000000000000027151503426445100155620ustar00rootroot00000000000000
# SQLFluff - Generating the document website

You can run the following steps to generate the documentation website:

```
tox -e docbuild,doclinting
```

The `docbuild` job will recognise when source files have changed and only generate the changed files. To force a clean build (for example when changing config) rather than the source files, use the following command from the project root directory (drop the `-C docs` if running from within the `docs` directory).

```
make -C docs clean
```

The built HTML should be placed in `docs/build/html` and can be opened directly in the browser, or you can launch a simple webserver with the below command and then navigate to http://127.0.0.1:8000/ to view the site locally:

```
python -m http.server --directory docs/build/html
```

Again, this command is run from the root folder, not the `docs` subfolder, but you can alter the path as appropriate if needs be.

If you don't want to use `tox`, then you can complete the steps manually with the following commands after setting up your Python environment as detailed in the [CONTRIBUTING.md](../CONTRIBUTING.md) file.

```
cd docs
pip install -r requirements.txt
make html
python -m http.server --directory build/html
```

Or alternatively from the root folder:

```
pip install -r docs/requirements.txt
make -C docs html
python -m http.server --directory docs/build/html
```

The docs use Sphinx and are generated from the source code. The config is available in `docs/source/conf.py`.
sqlfluff-3.4.2/docs/generate-auto-docs.py000066400000000000000000000131311503426445100203350ustar00rootroot00000000000000
"""Generate some documentation automatically.

This script generates partial documentation sections (i.e. the content of
`/docs/source/_partials/`) by importing SQLFluff and extracting data about
rules and dialects. It should run before every docs generation so that those
partial .rst files can then be correctly referenced by other sections of
the docs.

For example this file builds the file `/docs/source/_partials/rule_summaries.rst`,
which is then inserted into `/docs/source/reference/rules.rst` using the
directive `.. include:: ../_partials/rule_summaries.rst`.

This script is referenced in the `Makefile` and the `make.bat` file to
ensure it is run at the appropriate moment.
""" import json from collections import defaultdict from pathlib import Path import sqlfluff from sqlfluff.core.plugin.host import get_plugin_manager base_path = Path(__file__).parent.absolute() ########################################## # Generate rule documentation dynamically. ########################################## autogen_header = """.. NOTE: This file is generated by the conf.py script. Don't edit this by hand """ table_header = f""" +{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+ |{'Bundle' : <42}|{'Rule Name' : <50}|{'Code' : <30}|{'Aliases' : <20}| +{'=' * 42}+{'=' * 50}+{'=' * 30}+{'=' * 20}+ """ # Extract all the rules. print("Rule Docs Generation: Reading Rules...") rule_bundles = defaultdict(list) rule_list = [] for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: _bundle_name = rule.name.split(".")[0] rule_bundles[_bundle_name].append(rule) rule_list.append((rule.code, rule.name)) # Write them into a json file for use by redirects. print("Rule Docs Generation: Writing Rule JSON...") with open(base_path / "source/_partials/rule_list.json", "w", encoding="utf8") as f: json.dump(rule_list, f) # Write them into the table. Bundle by bundle. print("Rule Docs Generation: Writing Rule Table...") with open(base_path / "source/_partials/rule_table.rst", "w", encoding="utf8") as f: f.write(autogen_header) f.write(table_header) for bundle in sorted(rule_bundles.keys()): # Set the bundle name to the ref. _bundle_name = f":ref:`bundle_{bundle}`" for idx, rule in enumerate(rule_bundles[bundle]): step = 1 # The number of aliases per line. aliases = ", ".join(rule.aliases[:step]) + ( "," if len(rule.aliases) > step else "" ) name_ref = f":sqlfluff:ref:`{rule.name}`" code_ref = f":sqlfluff:ref:`{rule.code}`" f.write( f"| {_bundle_name : <40} | {name_ref : <48} " f"| {code_ref : <28} | {aliases : <18} |\n" ) j = 1 while True: if not rule.aliases[j:]: break aliases = ", ".join(rule.aliases[j : j + step]) + ( "," if len(rule.aliases[j:]) > step else "" ) f.write(f"|{' ' * 42}|{' ' * 50}|{' ' * 30}| {aliases : <18} |\n") j += step if idx + 1 < len(rule_bundles[bundle]): f.write(f"|{' ' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n") else: f.write(f"+{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n") # Unset the bundle name so we don't repeat it. _bundle_name = "" f.write("\n\n") # Write each of the summary files. print("Rule Docs Generation: Writing Rule Summaries...") with open(base_path / "source/_partials/rule_summaries.rst", "w", encoding="utf8") as f: f.write(autogen_header) for bundle in sorted(rule_bundles.keys()): if "sql" in bundle: # This accounts for things like "TSQL" header_name = bundle.upper() else: header_name = bundle.capitalize() # Write the bundle header. f.write( f".. _bundle_{bundle}:\n\n" f"{header_name} bundle\n" f"{'-' * (len(bundle) + 7)}\n\n" ) for rule in rule_bundles[bundle]: f.write( f".. sqlfluff:rule:: {rule.code}\n" f" {rule.name}\n\n" ) # Separate off the heading so we can bold it. heading, _, doc_body = rule.__doc__.partition("\n") underline_char = '"' f.write(f" {heading}\n") f.write(f" {underline_char * len(heading)}\n\n") f.write(" " + doc_body) f.write("\n\n") print("Rule Docs Generation: Done") # Extract all the dialects. print("Dialect Docs Generation: Reading Dialects...") # We make a dictionary of all of them first, because we want to force the ANSI # one to be first. 
# Extract all the dialects.
print("Dialect Docs Generation: Reading Dialects...")
# We make a dictionary of all of them first, because we want to force the ANSI
# one to be first.
dialect_dict = {dialect.label: dialect for dialect in sqlfluff.list_dialects()}
dialect_list = [dialect_dict["ansi"]] + [
    dialect
    for dialect_name, dialect in dialect_dict.items()
    if dialect_name != "ansi"
]

# Write each of the summary files.
print("Dialect Docs Generation: Writing Dialect Summaries...")
with open(
    base_path / "source/_partials/dialect_summaries.rst", "w", encoding="utf8"
) as f:
    f.write(autogen_header)
    for dialect in dialect_list:
        f.write(
            f".. _{dialect.label}_dialect_ref:\n\n"
            f"{dialect.name}\n{'-' * len(dialect.name)}\n\n"
            f"**Label**: ``{dialect.label}``\n\n"
        )
        if dialect.label != "ansi":
            f.write(
                f"**Inherits from**: :ref:`{dialect.inherits_from}_dialect_ref`\n\n"
            )
        if dialect.docstring:
            f.write(dialect.docstring + "\n\n")
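# For reference (illustrative, not executed): sqlfluff.list_dialects(),
# consumed above, returns a sequence of named tuples. The field names used
# in this script are label, name, inherits_from and docstring, so a minimal
# sketch of inspecting them would be:
#
#   import sqlfluff
#   for dialect in sqlfluff.list_dialects():
#       print(f"{dialect.label}: {dialect.name} (from {dialect.inherits_from})")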
""" raw_obj_type = "code" if len(sig) == 4 else "rule" obj_type = raw_obj_type.capitalize() + " " signode += addnodes.desc_type(obj_type, obj_type) signode += addnodes.desc_name(sig, sig) fullname = obj_type + sig signode["type"] = raw_obj_type signode["sig"] = sig signode["fullname"] = fullname return (fullname, raw_obj_type, sig) def add_target_and_index(self, name_cls, sig, signode): """Hook to add the permalink and index entries.""" # Add an ID for permalinks node_id = "rule" + "-" + sig signode["ids"].append(node_id) if len(sig) == 4: # If it's a code, add support for legacy links too. # Both of these formats have been used in the past. signode["ids"].append(f"sqlfluff.rules.Rule_{sig}") signode["ids"].append(f"sqlfluff.rules.sphinx.Rule_{sig}") # Add to domain for xref resolution fluff = self.env.get_domain("sqlfluff") fluff.add_rule(sig) # Add to index self.indexnode["entries"].append(("single", sig, node_id, "", None)) def _object_hierarchy_parts(self, sig_node): return ("bundle", "name") def _toc_entry_name(self, sig_node) -> str: # NOTE: toctree unpacking issues are due to incorrectly # setting _toc_parts. sig_node["_toc_parts"] = ( "bundle", sig_node["sig"], ) if len(sig_node["sig"]) == 4: # It's a code - don't return TOC entry. return "" else: # It's a name return sig_node["sig"] class SQLFluffDomain(Domain): """SQLFluff domain.""" name = "sqlfluff" label = "sqlfluff" object_types = { "rule": ObjType("rule", "rule", "obj"), } roles = { "ref": XRefRole(), } directives = { "rule": SQLFluffRule, } initial_data = { "rules": [], # object list } def get_full_qualified_name(self, node): """Get the fully qualified name of the rule.""" return f"rule.{node.arguments[0]}" def get_objects(self): """Hook to get all the rules.""" yield from self.data["rules"] def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): """Hook to resolve xrefs. References can be made by code or by name, e.g. - :sqlfluff:ref:`LT01` - :sqlfluff:ref:`layout.spacing` """ match = [ (docname, anchor) for _, sig, _, docname, anchor, _ in self.get_objects() if sig == target ] if len(match) > 0: todocname = match[0][0] targ = match[0][1] return make_refnode(builder, fromdocname, todocname, targ, contnode, targ) else: print(f"Failed to match xref: {target!r}") return None def add_rule(self, signature): """Add a new recipe to the domain.""" name = f"rule.{signature}" anchor = f"rule-{signature}" # name, dispname, type, docname, anchor, priority self.data["rules"].append( (name, signature, "Rule", self.env.docname, anchor, 0) ) def setup(app): """Setup the domain.""" app.add_domain(SQLFluffDomain) sqlfluff-3.4.2/docs/source/_partials/000077500000000000000000000000001503426445100175545ustar00rootroot00000000000000sqlfluff-3.4.2/docs/source/_partials/.gitignore000066400000000000000000000001071503426445100215420ustar00rootroot00000000000000rule_table.rst rule_summaries.rst rule_list.json dialect_summaries.rst sqlfluff-3.4.2/docs/source/_partials/README.md000066400000000000000000000005761503426445100210430ustar00rootroot00000000000000This folder is ignored from the main build and intended only for files included in others via the `.. include::` directive. Some of those files are also auto-generated by scripts, in which case they should be included in the `.gitignore` and not edited by hand. See [generate-auto-docs.py](https://github.com/sqlfluff/sqlfluff/blob/main/docs/generate-auto-docs.py) for more info. 
sqlfluff-3.4.2/docs/source/_partials/starter_config.cfg000066400000000000000000000053721503426445100232540ustar00rootroot00000000000000
[sqlfluff]
# Supported dialects https://docs.sqlfluff.com/en/stable/perma/dialects.html
# Or run 'sqlfluff dialects'
dialect = snowflake

# One of [raw|jinja|python|placeholder]
templater = jinja

# Comma separated list of rules to exclude, or None
# See https://docs.sqlfluff.com/en/stable/perma/rule_disabling.html
# AM04 (ambiguous.column_count) and ST06 (structure.column_order) are
# two of the more controversial rules included to illustrate usage.
exclude_rules = ambiguous.column_count, structure.column_order

# The standard max_line_length is 80 in line with the convention of
# other tools and several style guides. Many projects however prefer
# something a little longer.
# Set to zero or negative to disable checks.
max_line_length = 120

# CPU processes to use while linting.
# The default is "single threaded" to allow easy debugging, but this
# is often undesirable at scale.
# If positive, just implies number of processes.
# If negative or zero, implies number_of_cpus - specified_number.
# e.g. -1 means use all processors but one. 0 means all cpus.
processes = -1

# If using the dbt templater, we recommend setting the project dir.
[sqlfluff:templater:dbt]
project_dir = ./

[sqlfluff:indentation]
# While implicit indents are not enabled by default, many of the
# SQLFluff maintainers do use them in their projects.
allow_implicit_indents = True

[sqlfluff:rules:aliasing.length]
min_alias_length = 3

# The default configuration for capitalisation rules is "consistent"
# which will auto-detect the setting from the rest of the file. This
# is less desirable in a new project and you may find this (slightly
# more strict) setting more useful.
# Typically we find users rely on syntax highlighting rather than
# capitalisation to distinguish between keywords and identifiers.
# Clearly, if your organisation has already settled on uppercase
# formatting for any of these syntax elements then set them to "upper".
# See https://stackoverflow.com/questions/608196/why-should-i-capitalize-my-sql-keywords-is-there-a-good-reason
[sqlfluff:rules:capitalisation.keywords]
capitalisation_policy = lower

[sqlfluff:rules:capitalisation.identifiers]
extended_capitalisation_policy = lower

[sqlfluff:rules:capitalisation.functions]
extended_capitalisation_policy = lower

[sqlfluff:rules:capitalisation.literals]
capitalisation_policy = lower

[sqlfluff:rules:capitalisation.types]
extended_capitalisation_policy = lower
# The default configuration for the not equal convention rule is "consistent"
# which will auto-detect the setting from the rest of the file. This
# is less desirable in a new project and you may find this (slightly
# more strict) setting more useful.
[sqlfluff:rules:convention.not_equal]
# Default to preferring the "c_style" (i.e. `!=`)
preferred_not_equal_style = c_style
sqlfluff-3.4.2/docs/source/_static/000077500000000000000000000000001503426445100172245ustar00rootroot00000000000000
sqlfluff-3.4.2/docs/source/_static/custom.css000066400000000000000000000004461503426445100212540ustar00rootroot00000000000000
/* Overrides for spacing within autodoc */
dl.py {
    /* Spacing is normally far too tight, this makes a bit more space between elements */
    padding-bottom: 30px;
}

/* Override unnecessary underline in alabaster's default styling - see #2266 */
.highlight .w {
    text-decoration: none;
}
sqlfluff-3.4.2/docs/source/_static/images/000077500000000000000000000000001503426445100204715ustar00rootroot00000000000000
sqlfluff-3.4.2/docs/source/_static/images/sqlfluff-lrg.png000066400000000000000000002547621503426445100236130ustar00rootroot00000000000000
[binary PNG image data omitted: SQLFluff large logo]
sqlfluff-3.4.2/docs/source/_static/images/sqlfluff-sm2-sq.png000066400000000000000000000307311503426445100241450ustar00rootroot00000000000000
[binary PNG image data omitted: SQLFluff small square logo]
sqlfluff-3.4.2/docs/source/conf.py000066400000000000000000000160041503426445100170760ustar00rootroot00000000000000
"""Configuration file for the Sphinx documentation builder.

This file only contains a selection of the most common options.
For a full list see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""

import json
import os
import sys

# tomllib is only in the stdlib from 3.11+
if sys.version_info >= (3, 11):
    import tomllib
else:  # pragma: no cover
    import toml as tomllib

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("./_ext"))

# Get the global config info as currently stated
# (we use the config file to avoid actually loading any python here)
with open("../../pyproject.toml", "rb") as config_file:
    config = tomllib.load(config_file)
# NOTE: tomllib returns nested dicts, so index down to the value rather than
# using a dotted key with .get() (which would always return the default).
stable_version = config["tool"]["sqlfluff_docs"]["stable_version"]
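# For illustration: a [tool.sqlfluff_docs] table in pyproject.toml parses
# into nested dicts, e.g. (values hypothetical):
#   {"tool": {"sqlfluff_docs": {"stable_version": "3.4.2"}}}
# which is why the value is read by indexing rather than a flat dotted key.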
html_static_path = ["_static"] # -- Options for Alabaster Theme --------------------------------------------- html_theme_options = { # Set branch to main (used by Codecov button) "badge_branch": "main", "logo": "images/sqlfluff-lrg.png", # Icon for iOS shortcuts "touch_icon": "images/sqlfluff-sm2-sq.png", "github_user": "sqlfluff", "github_repo": "sqlfluff", # GitHub Fork button (points at a broken link, so disabling it) "github_banner": False, # GitHub star button "github_type": "star", # Use `"true"` instead of `True` for counting GitHub star, see https://ghbtns.com "github_count": "true", } # -- Options for redirects --------------------------------------------- # https://documatt.gitlab.io/sphinx-reredirects/usage.html # Load the rule lists to generate rule permalinks with open("_partials/rule_list.json", "r") as rule_file: rule_list = json.load(rule_file) redirects = { # Where there are references to the docs in any of the codebase (whether in # places like the README or in error messages), they should all reference # a perma link (to redirect). This ensures we can support a consistent # link location even if the docs move around. "perma/layout": "../configuration/layout.html", "perma/indent_locations": ( "../configuration/layout.html#configuring-indent-locations" ), "perma/hanging_indents": "../configuration/layout.html#hanging-indents", "perma/layout_spacing": ( "../configuration/layout.html#configuring-layout-and-spacing" ), "perma/configuration": "../configuration/index.html", "perma/dbt": "../configuration/templating/dbt.html", "perma/cli": "../reference/cli.html", "perma/rules": "../reference/rules.html", "perma/dialects": "../reference/dialects.html", "perma/architecture": "../guides/contributing/architecture.html", "perma/rule_disabling": ( "../configuration/rule_configuration.html#enabling-and-disabling-rules" ), "perma/internals": "../reference/internals/index.html", "perma/modularity": "../why_sqlfluff.html#modularity", "perma/indentation": "../configuration/layout.html#configuring-indent-locations", "perma/releasenotes": "../reference/releasenotes.html", "perma/why": "../why_sqlfluff.html", "perma/plugin_dev": "../guides/contributing/plugins.html", "perma/plugin_guide": "../guides/setup/developing_custom_rules.html", "perma/variables": "../configuration/templating/index.html", "perma/python_templating": "../configuration/templating/python.html", "perma/guides": "../guides/index.html", "perma/contribute_dialect_keywords": ( "../guides/contributing/dialect.html#dialect_keywords" ), # Add permalinks for rule codes **{ f"perma/rule/{code}": ( f"../../reference/rules.html#sqlfluff.rules.sphinx.Rule_{code}" ) for code, _ in rule_list }, # These are legacy links which used to exist in different parts of the # SQLFluff code base, and which we continue to support so those links # aren't dead ends. They should redirect to permalinks. "indentation": "perma/indentation.html", "architecture": "perma/architecture.html", "dialects": "perma/dialects.html", "internals": "perma/internals.html", "layout": "perma/layout.html", "releasenotes": "perma/releasenotes.html", "realworld": "perma/why.html", # This is a legacy link to support older versions of the VSCode plugin. # https://github.com/sqlfluff/vscode-sqlfluff/blob/master/src/features/providers/linter/actions/hover.ts "rules": "perma/rules.html", } def ultimate_replace(app, docname, source): """Replaces variables in docs, including code blocks. 
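# For illustration only: with a rule_list entry of ["LT01", "layout.spacing"]
# (the shape written out by generate-auto-docs.py), the comprehension above
# adds an entry equivalent to:
#
#   "perma/rule/LT01": "../../reference/rules.html#sqlfluff.rules.sphinx.Rule_LT01"
#
# i.e. one stable "perma" redirect per rule code.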
def ultimate_replace(app, docname, source):
    """Replaces variables in docs, including code blocks.

    From:
    https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229
    """
    result = source[0]
    for key in app.config.ultimate_replacements:
        result = result.replace(key, app.config.ultimate_replacements[key])
    source[0] = result


ultimate_replacements = {"|release|": release}


def setup(app):
    """Configures the documentation app."""
    app.add_config_value("ultimate_replacements", {}, True)
    app.connect("source-read", ultimate_replace)
sqlfluff-3.4.2/docs/source/configuration/000077500000000000000000000000001503426445100204455ustar00rootroot00000000000000
sqlfluff-3.4.2/docs/source/configuration/default_configuration.rst000066400000000000000000000030431503426445100255520ustar00rootroot00000000000000
.. _defaultconfig:

Default Configuration
---------------------

The default configuration is as follows, note the :ref:`builtin_jinja_blocks`
in section :code:`[sqlfluff:templater:jinja:macros]` as referred to above.

.. note::

    This shows the *entire* default config. **We do not recommend that users**
    **copy this whole config as the starter config file for their project**.

    This is for two reasons:

    #. The config file should act as a form of *documentation* for your team.
       A record of what decisions you've made which govern how you format
       your sql. By having a more concise config file, and only defining
       config settings where they differ from the defaults - you are more
       clearly stating to your team what choices you've made.

    #. As the project evolves, the structure of the config file may change
       and we will attempt to make changes as backward compatible as
       possible. If you have not overridden a config setting in your project,
       we can easily update the default config to match your expected
       behaviour over time. We may also find issues with the default config
       which we can also fix in the background. *However*, the longer your
       local config file, the more work it will be to update and migrate
       your config file between major versions.

    If you are starting a fresh project and are looking for a good *starter
    config*, check out the :ref:`starter_config` section above.

.. literalinclude:: ../../../src/sqlfluff/core/default_config.cfg
   :language: cfg
sqlfluff-3.4.2/docs/source/configuration/ignoring_configuration.rst000066400000000000000000000057351503426445100257520ustar00rootroot00000000000000
.. _ignoreconfig:

Ignoring Errors & Files
-----------------------

.. _inline_ignoring_errors:

Ignoring individual lines
^^^^^^^^^^^^^^^^^^^^^^^^^

Similar to `flake8's ignore`_, individual lines can be ignored by adding
:code:`-- noqa` to the end of the line. Additionally, specific rules can
be ignored by quoting their code or the category.

.. code-block:: sql

    -- Ignore all errors
    SeLeCt  1 from tBl ;    -- noqa

    -- Ignore rule CP02 & rule CP03
    SeLeCt  1 from tBl ;    -- noqa: CP02,CP03

    -- Ignore all parsing errors
    SeLeCt from tBl ;       -- noqa: PRS

.. note:: It should be noted that ignoring ``TMP`` and ``PRS`` errors can
    lead to incorrect ``sqlfluff lint`` and ``sqlfluff fix`` results as
    `SQLFluff` can misinterpret the SQL being analysed.

.. _`flake8's ignore`: https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html#in-line-ignoring-errors
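As a quick illustration of this behaviour, the following minimal sketch uses
the simple Python API (treat the exact violation counts as indicative, since
they depend on the rules enabled in your configuration):

.. code-block:: python

    import sqlfluff

    # Without a comment, capitalisation (and other) rules fire here.
    print(len(sqlfluff.lint("SeLeCt 1 from tbl;\n", dialect="ansi")))

    # With an inline noqa comment, those violations are suppressed.
    print(len(sqlfluff.lint("SeLeCt 1 from tbl;  -- noqa\n", dialect="ansi")))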
code-block:: sql -- Ignore rule AL02 from this line forward SELECT col_a a FROM foo -- noqa: disable=AL02 -- Ignore all rules from this line forward SELECT col_a a FROM foo -- noqa: disable=all -- Enforce all rules from this line forward SELECT col_a a FROM foo -- noqa: enable=all .. _`pylint's "pylint" directive"`: http://pylint.pycqa.org/en/latest/user_guide/message-control.html .. _sqlfluffignore: :code:`.sqlfluffignore` ^^^^^^^^^^^^^^^^^^^^^^^ Similar to `Git's`_ :code:`.gitignore` and `Docker's`_ :code:`.dockerignore`, SQLFluff supports a :ref:`sqlfluffignore` file to control which files are and aren't linted. Under the hood we use the python `pathspec library`_ which also has a brief tutorial in their documentation. An example of a potential :ref:`sqlfluffignore` placed in the root of your project would be: .. code-block:: cfg # Comments start with a hash. # Ignore anything in the "temp" path /temp/ # Ignore anything called "testing.sql" testing.sql # Ignore any ".tsql" files *.tsql Ignore files can also be placed in subdirectories of a path which is being linted, and those ignore files will also be applied within that subdirectory. .. _`Git's`: https://git-scm.com/docs/gitignore#_pattern_format .. _`Docker's`: https://docs.docker.com/engine/reference/builder/#dockerignore-file .. _`pathspec library`: https://python-path-specification.readthedocs.io/ Ignoring types of errors ^^^^^^^^^^^^^^^^^^^^^^^^ General *categories* of errors can be ignored using the ``--ignore`` command line option or the ``ignore`` setting in your :code:`.sqlfluff` config file, as shown in the example following this list. Types of errors that can be ignored include: * :code:`lexing` * :code:`linting` * :code:`parsing` * :code:`templating`
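As a minimal command line illustration (the file name here is hypothetical), a single run could silence templating errors like so:

.. code-block:: text

    sqlfluff lint my_file.sql --ignore=templating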
sqlfluff-3.4.2/docs/source/configuration/index.rst .. _config: Configuration ============= .. toctree:: :maxdepth: 2 setting_configuration rule_configuration layout templating/index ignoring_configuration default_configuration sqlfluff-3.4.2/docs/source/configuration/layout.rst .. _layoutref: Layout & Whitespace Configuration ================================= If there is one part of building a linter that is going to be controversial it's going to be **whitespace** (closely followed by **cApiTaLiSaTiOn** 😁). More specifically, **whitespace** divides into three key themes: #. **Spacing**: The amount of whitespace between elements on the same line. #. **Line Breaks**: The choice of where within the code it is inappropriate, appropriate or even compulsory to have a line break. #. **Indentation**: Given a line break, how much whitespace should precede the first code element on that line. *SQLFluff* aims to be *opinionated* on this theme, but also *configurable* (see :ref:`layoutconfig`). The tool will have a default viewpoint and will aim to have views on all of the important aspects of SQL layout, but if you (or your organisation) don't like those views then we aim to allow enough configuration that you can lint in line with your views, and still use *SQLFluff*. For more information on how to configure rules to your own viewpoint see :ref:`config`. .. note:: This section of the docs handles the intent and reasoning behind how layout is handled by SQLFluff. For a deeper look at how this is achieved internally see :ref:`reflowinternals`. Spacing ------- Of the different elements of whitespace, spacing is likely the least controversial. By default, all elements are separated by a single space character. Except for very specific circumstances (see section on :ref:`alignedelements`), any additional space between elements is usually unwanted and a distraction for the reader. There are however several common cases where *no whitespace* is more appropriate, which fall into two cases (for more details on where to configure these see :ref:`layoutspacingconfig`). #. *No whitespace but a newline is allowed.* This option is configured using the :code:`touch` option in the :code:`spacing_*` configuration settings. The most common example of this is the spacing around commas. For example :code:`SELECT a , b` would be unusual and more normally be written :code:`SELECT a, b`. Inserting a newline between the :code:`a` and comma would not cause issues and may even be desired, for example: .. code-block:: sql SELECT col_a , col_b -- Newline present before column , col_c -- When inline, comma should still touch element before. , GREATEST(col_d, col_e) as col_f FROM tbl_a #. *No whitespace and a newline is not allowed.* This option is configured using the :code:`inline` option in the :code:`spacing_*` configuration settings. The most common example of this is spacing within the parts of a qualified identifier e.g. :code:`my_schema.my_table`. If a newline were present between the :code:`.` and either :code:`my_schema` or :code:`my_table`, then the expression would not parse and so no newlines should be allowed. .. _alignedelements: Aligned elements ^^^^^^^^^^^^^^^^ A special case of spacing is where elements are set to be aligned within some limits. This is not enabled by default, but can be configured to achieve layouts like: .. code-block:: sql SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar In this example, the alias expressions are all aligned with each other. To configure this, SQLFluff needs to know what elements to align and how far to search to find elements which should be aligned with each other. The configuration to achieve this layout is: .. code-block:: ini [sqlfluff:layout:type:alias_expression] # We want non-default spacing _before_ the alias expressions. spacing_before = align # We want to align them within the next outer select clause. # This means for example that alias expressions within the FROM # or JOIN clause would _not_ be aligned with them. align_within = select_clause # The point at which to stop searching outward for siblings, which # in this example would likely be the boundary of a CTE. Stopping # when we hit brackets is usually a good rule of thumb for this # configuration. align_scope = bracketed Of these configuration values, the :code:`align_scope` is potentially the least obvious. The following example illustrates the impact it has. .. code-block:: sql -- With -- align_scope = bracketed -- align_within = select_clause WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar; -- With -- align_scope = bracketed -- align_within = statement WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar -- Now the FROM alias is also aligned. -- With -- align_scope = file -- align_within = select_clause WITH foo as ( SELECT a, b, c AS first_column -- Now the aliases here are aligned d + e AS second_column -- with the outer query. 
) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar -- With -- align_scope = file -- align_within = statement WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar Line Breaks ----------- When controlling line breaks, we are trying to achieve a few different things: #. Do we have *enough* line breaks that *line length* doesn't become excessive. Long lines are hard to read, especially given that readers may be on varying screen sizes or have multiple windows open. This is (of course) configurable, but the default is 80 characters (in line with the `dbt Labs SQL style guide`_.) #. Is the positioning of *blank lines* (i.e. lines with nothing other than whitespace on them) appropriate. There are some circumstances where a blank line is *desired* (e.g. between CTEs). There are others where they are not, in particular *multiple blank lines*, for example at the beginning of a file. #. Where we do have line breaks, are they positioned appropriately and consistently with regards to other elements around them. This is most common when it comes to *commas*, and whether they should be *leading* (e.g. :code:`, my_column`) or *trailing* (e.g. :code:`my_column,`). In less common cases, it may also be desirable for some elements to have both a line break *before and after* (e.g. a set operator such as `UNION`). Indentation ----------- Lastly, given we have multiple lines of SQL, to what extent should we indent some lines to provide visual cues to the structure of that SQL. It's important to note that SQL is *not* whitespace sensitive in its interpretation and that means that any principles we apply here are entirely for the benefit of humans. *Your database doesn't care*. The indentation therefore should be treated as a *hint* to the reader of the structure of the code. This explains the common practice within most languages that nested elements (for example the contents of a set of brackets in a function call) should be indented one step from the outer elements. It's also convention that elements *with the same level* in a nested structure should have *the same indentation*, at least with regards to their local surroundings. As an example: .. code-block:: sql SELECT nested_within_select AS first_column, some_function( nested_within_function, also_nested_within_function ) AS indented_the_same_as_opening_bracket FROM indented_the_same_as_select Comment Indents ^^^^^^^^^^^^^^^ .. note:: The notes here about block comments are not implemented prior to 2.0.x. They should be coming in that release or soon after. **Comments** are dealt with differently, depending on whether they're *block* comments (:code:`/* like this */`), which might optionally include newlines, or *inline* comments (:code:`-- like this`) which are necessarily only on one line. * *Block comments* cannot share a line with any code elements (so in effect they must start on their own new line), they cannot be followed by any code elements on the same line (and so in effect must be followed by a newline, if we are to avoid trailing whitespace). None of the lines within the block comment may have an indent less than the first line of the block comment (although additional indentation within a comment is allowed), and that first line should be aligned with the first code element *following* the block comment. .. 
code-block:: sql SELECT /* This is a block comment starting on a new line which contains a newline (continuing with at least the same indent. - potentially containing greater indents - having no other code following it in the same line - and aligned with the line of code following it */ this_column as what_we_align_the_column_to FROM my_table * *Inline comments* can be on the same line as other code, but are subject to the same line-length restrictions. If they don't fit on the same line (or if it just looks nicer) they can also be the only element on a line. In this latter case, they should be aligned with the first code element *following* the comment. .. code-block:: sql SELECT -- This is fine this_column as what_we_align_to, another_column as something_short, -- Is ok case -- This is aligned correctly with below when indented then take_care else try_harder end as the_general_guidance -- Even here we align with the line below FROM my_table .. note:: When fixing issues with comment indentation, SQLFluff will attempt to keep comments in their original position but if line length concerns make this difficult, it will either abandon the fix, or move *same line* comments up and *before* the line they are currently on. This is in line with the assumption that comments on their own line refer to the elements of code which they come *before*, not *after*. .. _hangingindents: Hanging Indents ^^^^^^^^^^^^^^^ One approach to indenting nested elements is a layout called a *hanging indent*. In this layout, there is no line break before the first nested element, but subsequent elements are indented to match the line position of that first element. Two examples might be: .. code-block:: sql -- A select statement with two hanging indents: SELECT no_line_break_before_me, indented_to_match_the_first, 1 + (a + b) AS another_more_complex_example FROM my_table; -- This TSQL example is also in essence a hanging indent: DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; In some circumstances this layout can be quite neat (the :code:`DECLARE` statement is a good example of this), however once indents are nested or indentation styles are mixed it can rapidly become confusing (as partially shown in the first example). Additionally, unless the leading element of the first line is very short, hanging indents use much *larger indents* than a traditional simple indent where a line break is used before the first element. Hanging indents have been supported in SQLFluff up to the 1.x versions, however **they will no longer be supported from 2.0.0** onwards. This is due to the ambiguity which they bring to fixing poorly formatted SQL. Take the following code: .. code-block:: sql SELECT this_is, badly_formatted, code_and, not_obvious, what_was, intended FROM my_table Given the lack of line break between :code:`SELECT` and :code:`this_is`, it would appear that the user is intending a hanging indent, however it is also plausible that they did not and they just forgot to add a line break between them. This ambiguity is unhelpful, both for SQLFluff as a tool and for people who write SQL, as it means there are two ways of indenting their SQL. Given SQLFluff aims to provide consistency in SQL layout and remove some of the burden of needing to make choices like this - and that it would be very unusual to keep *only hanging indents and disable traditional ones* - the only route left to consistency is to **not allow hanging indents**. Starting in 2.0.0, any hanging indents detected will be converted to traditional indents. 
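To illustrate that conversion (a sketch of the intended layout rather than guaranteed byte-for-byte tool output), the badly formatted example above would become a traditional indent along these lines:

.. code-block:: sql

    SELECT
        this_is,
        badly_formatted,
        code_and,
        not_obvious,
        what_was,
        intended
    FROM my_table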
.. _implicitindents: Implicit Indents ^^^^^^^^^^^^^^^^ A close cousin of the hanging indent is the *implicit indent*. While it does look a little like a hanging indent, it's much more consistent in its behaviour and is supported from SQLFluff 2.0.0 onwards. An implicit indent is exactly like a normal indent, but doesn't have to be actually *taken* to influence the indentation of lines after it - it just needs to be left un-closed before the end of the line. These are normally available in clauses which take the form of :code:`KEYWORD <expression>`, like :code:`WHERE` clauses or :code:`CASE` expressions. .. code-block:: sql -- This WHERE clause here takes advantage of an implicit indent. SELECT * FROM my_table WHERE condition_a AND condition_b; -- With implicit indents disabled (which is currently the -- default), the above formulation is not allowed, and instead -- there should be a newline immediately after `WHERE` (which -- is the location of the _implicit_ indent). SELECT * FROM my_table WHERE condition_a AND condition_b; When addressing both indentation and line-length, implicit indents allow a slightly more compact layout, without significant drawbacks in legibility. They also enable a style much closer to some established style guides. They are however not recommended by many of the major style guides at time of writing (including the `dbt Labs SQL style guide`_ and the `Mozilla SQL style guide`_), and so are disabled by default. To enable them, set the :code:`allow_implicit_indents` flag in :code:`sqlfluff.indentation` to :code:`True`. .. _templatedindents: Templated Indents ^^^^^^^^^^^^^^^^^ SQLFluff supports templated elements in code, such as those offered by jinja2 (or dbt which relies on it). For simple cases, templated elements are handled as you would expect by introducing additional indents into the layout. .. code-block:: SQL+Jinja SELECT a, {% for n in ['b', 'c', 'd'] %} -- This section is indented relative to 'a' because -- it is inside a jinja for loop. {{ n }}, {% endfor %} e FROM my_table This functionality can be turned off if you wish using the :code:`template_blocks_indent` option in your :ref:`config`. It's important to note here that SQLFluff lints the code after it has been rendered, and so only has access to code which is still present after that process. .. code-block:: SQL+Jinja SELECT a, {% if False %} -- This section of the code cannot be linted because -- it is never rendered due to the `if False` condition. my + poorly + spaced - and/indented AS section_of_code {% endif %} e FROM my_table More complex templated cases are usually characterised by templated tags *cutting across the parse tree*. This more formally is where the opening and closing tags of a templated section exist at different levels in the parsed structure. Starting in version 2.x, these will be treated differently (prior to version 2.x, situations like this were sometimes handled inconsistently or incorrectly). Indentation should act as a visual cue to the structure of the written SQL, and as such, the most important thing is that template tags belonging to the same block structure use the same indentation. In the example below, this is the opening and closing elements of the second :code:`if` statement. If treated as a simple case, these tags would have different indents, because they are at different levels of the parse tree and so clearly there is a conflict to be resolved. 
The view SQLFluff takes on how to resolve this conflict is to pull all of the tags in this section down to the indent of the *least indented* (in the example below that would be the closing :code:`endif` tag). This is similar to the treatment of `C Preprocessor Directives`_, which are treated somewhat as being outside the structure of the rest of the file. In these cases, the content is also *not further indented* as in the simple case because it makes it harder to line up elements within the affected section and outside (in the example below the :code:`SELECT` and :code:`FROM` are a good illustration). .. code-block:: SQL+Jinja SELECT a, {% if True %} -- This is a simple case. The opening and closing tag are -- both at the same level within the SELECT clause. simple_case AS example, {% endif %} b, {% if True %} -- This is a complex case. The opening tag is within the SELECT -- clause, but the closing tag is outside the statement -- entirely. complex_case AS example FROM table_option_one {% else %} complex_case_two AS example FROM table_option_two {% endif %} .. _layoutconfig: Configuring Layout ------------------ Configuration for layout is spread across three places: #. Indent behavior for particular dialect elements is controlled by the parser. This is because in the background SQLFluff inserts :code:`Indent` and :code:`Dedent` tokens into the parse tree where those things are expected. For more detail see :ref:`layoutindentconfig`. #. Configuration for the spacing and line position of particular types of element (such as commas or operators) is set in the :code:`layout` section of the config file. For more detail see :ref:`layoutspacingconfig`. #. Some elements of layout are still controlled by rules directly. These are usually very specific cases, see :ref:`ruleref` for more details. .. _layoutindentconfig: Configuring indent locations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ One of the key areas for this is the indentation of the :code:`JOIN` expression, which we'll use as an example. Semantically, a :code:`JOIN` expression is part of the :code:`FROM` expression and therefore would be expected to be indented. However according to many of the most common SQL style guides (including the `dbt Labs SQL style guide`_ and the `Mozilla SQL style guide`_) the :code:`JOIN` keyword is expected to be at the same indent as the :code:`FROM` keyword. By default, *SQLFluff* sides with the current consensus, which is to *not* indent the :code:`JOIN` keyword, however this is one element which is configurable. By setting values in the :code:`sqlfluff:indentation` section of your config file you can control how this is parsed. For example, the default indentation would be as follows: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 By setting your config file to: .. code-block:: cfg [sqlfluff:indentation] indented_joins = True Then the expected indentation will be: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is a similar :code:`indented_using_on` config (defaulted to :code:`True`) which can be set to :code:`False` to prevent the :code:`USING` or :code:`ON` clause from being indented, in which case the original SQL would become: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 It's worth noting at this point, that for some users, the additional line break after :code:`ON` is unexpected, and this is a good example of an :ref:`implicit indent <implicitindents>`. By setting your config to: .. 
code-block:: cfg [sqlfluff:indentation] indented_using_on = False allow_implicit_indents = True Then the expected indentation will be: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is also a similar :code:`indented_on_contents` config (defaulted to :code:`True`) which can be set to :code:`False` to align any :code:`AND` subsections of an :code:`ON` block with each other. If set to :code:`False` (assuming implicit indents are still enabled) the original SQL would become: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 These can also be combined, so if the :code:`indented_using_on` config is set to :code:`False`, :code:`indented_on_contents` is also set to :code:`False`, and :code:`allow_implicit_indents` is set to :code:`True` then the SQL would become: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is also a similar :code:`indented_ctes` config (defaulted to :code:`False`) which can be set to :code:`True` to enforce CTEs to be indented within the :code:`WITH` clause: .. code-block:: sql WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM some_cte There is also a similar :code:`indented_then` config (defaulted to :code:`True`) which can be set to :code:`False` to allow :code:`THEN` without an indent after :code:`WHEN`: .. code-block:: sql SELECT a, CASE WHEN b >= 42 THEN 1 ELSE 0 END AS c FROM some_table By default, *SQLFluff* aims to follow the most common approach to indentation. However, if you have other versions of indentation which are supported by published style guides, then please submit an issue on GitHub to have that variation supported by *SQLFluff*. .. _layoutspacingconfig: Configuring layout and spacing ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :code:`[sqlfluff:layout]` section of the config controls the treatment of spacing and line breaks across all rules. The syntax of this section is very expressive; however in normal use, only very small alterations should be necessary from the :ref:`defaultconfig`. The section headings here select by *type*, which corresponds to the :code:`type` defined in the dialect. For example the following section applies to elements of the *type* :code:`comma`, i.e. :code:`,`. .. code-block:: cfg [sqlfluff:layout:type:comma] spacing_before = touch line_position = trailing Within these configurable sections there are a few key elements which are available: * **Spacing Elements**: :code:`spacing_before`, :code:`spacing_after` and :code:`spacing_within`. For each of these options, there are a few possible settings: * The default spacing for all elements is :code:`single` unless otherwise specified. In this state, elements will be spaced with a single space character unless there is a line break between them. * The value of :code:`touch` allows line breaks, but if no line break is present, then no space should be present. A great example of this is the spacing before commas (as shown in the config above), where line breaks may be allowed, but if not they should *touch* the element before. * Both of the above can be qualified with the :code:`:inline` modifier - which prevents newlines within the segment. This is best illustrated by the spacing found in a qualified identifier like :code:`my_schema.my_table` which uses `touch:inline`, or in other clauses where we want to force some elements to be on the same line. 
* **Line Position**: set using the :code:`line_position` option. By default this is unset, which implies no particular line position requirements. The available options are: * :code:`trailing` and :code:`leading`, which are most common in the placement of commas. Both of these settings *also* allow the option of a comma on its own on a line, or in the middle of a line, *but* if there is a line break on *either side* then they make sure it's on the *correct side*. By default we assume *trailing* commas, but if you (or your organisation) have settled on *leading* commas then you should add the following section to your config: .. code-block:: cfg [sqlfluff:layout:type:comma] line_position = leading * :code:`alone`, which means if there is a line break on either side, then there must be a line break on *both sides* (i.e. that it should be the only thing on that line). * All of the above options can be qualified with the :code:`:strict` modifier - which prevents the *inline* case. For example: .. code-block:: sql -- Setting line_position to just `alone` -- within [sqlfluff:layout:type:set_operator] -- would not allow: SELECT a UNION SELECT b; -- ...or... SELECT a UNION SELECT b; -- but *would* allow both of the following: SELECT a UNION SELECT b; SELECT a UNION SELECT b; -- However, if this is set to `alone:strict` -- then the *only* acceptable configuration is: SELECT a UNION SELECT b; * **Keyword Line Position**: set using the :code:`keyword_line_position` option. By default for most clauses this is unset, which implies no particular keyword line position requirements. The available options are: * :code:`leading` and :code:`alone`, which are most common in the placement of keywords. Both of these settings *also* allow the option of a keyword to end on a line. By default we assume *leading* :code:`WHERE` keywords, but if you (or your organisation) have settled on *alone* :code:`WHERE` keywords then you should add the following section to your config: .. code-block:: cfg [sqlfluff:layout:type:where_clause] keyword_line_position = alone * :code:`trailing`, which means there should be a line break after the keyword. This is fairly uncommon but may apply to the :code:`ON` keyword after a join. If you (or your organisation) have settled on *trailing* :code:`ON` keywords then you should add the following section to your config: .. code-block:: cfg [sqlfluff:layout:type:join_on_condition] keyword_line_position = trailing * The keyword positioning is valid across a number of different clauses. For example, to apply the :code:`leading` directive to the :code:`PARTITION BY` clause you would add the following configuration: .. code-block:: cfg [sqlfluff:layout:type:partitionby_clause] keyword_line_position = leading * If you (or your organisation) would prefer to unset the :ref:`defaultconfig` of some options, you may clear it by setting the configuration to :code:`none`. .. code-block:: cfg [sqlfluff:layout:type:where_clause] keyword_line_position = none [sqlfluff:layout:type:orderby_clause] keyword_line_position = none [sqlfluff:layout:type:groupby_clause] keyword_line_position = none [sqlfluff:layout:type:having_clause] keyword_line_position = none * **Exclusions**: The :code:`keyword_line_position_exclusions` option allows you to exclude specific types of segments from the :code:`keyword_line_position` rule. This is useful when certain segments, such as window specifications or aggregate functions, should not follow the same keyword line position rules as other segments in the same clause. 
For example, to exclude window specifications from the :code:`ORDER BY` clause's keyword line position rule, you can configure it as follows: .. code-block:: cfg [sqlfluff:layout:type:orderby_clause] keyword_line_position = leading keyword_line_position_exclusions = window_specification This configuration ensures that the `ORDER BY` clause follows the `leading` rule, except for window specifications, which are allowed to remain inline. You can also specify multiple exclusions by separating them with commas: .. code-block:: cfg [sqlfluff:layout:type:orderby_clause] keyword_line_position = leading keyword_line_position_exclusions = window_specification, aggregate_order_by In this case, both window specifications and aggregate functions with `ORDER BY` clauses are excluded from the `leading` rule. **Example Usage**: With the above configuration, the following SQL would pass: .. code-block:: sql SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h However, the following SQL would fail because the outer `ORDER BY` clause does not follow the `leading` rule: .. code-block:: sql SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h ORDER BY a The corrected version would be: .. code-block:: sql SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h ORDER BY a .. _`C Preprocessor Directives`: https://www.cprogramming.com/reference/preprocessor/ .. _`dbt Labs SQL style guide`: https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md .. _`Mozilla SQL style guide`: https://docs.telemetry.mozilla.org/concepts/sql_style.html#joins sqlfluff-3.4.2/docs/source/configuration/rule_configuration.rst .. _ruleconfig: Rule Configuration ------------------ Rules can be configured with the :code:`.sqlfluff` config files. Common rule configurations can be set in the :code:`[sqlfluff:rules]` section. For example: .. code-block:: cfg [sqlfluff:rules] allow_scalar = True single_table_references = consistent unquoted_identifiers_policy = all Rule specific configurations are set in rule specific subsections. For example, enforce that keywords are upper case by configuring the rule :sqlfluff:ref:`CP01`: .. code-block:: cfg [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = upper All possible options for rule sections are documented in :ref:`ruleref`. For an overview of the most common rule configurations that you may want to tweak, see :ref:`defaultconfig` (and use :ref:`ruleref` to find the available alternatives). .. _ruleselection: Enabling and Disabling Rules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The decision as to which rules are applied to a given file is made on a file-by-file basis, by the effective configuration for that file. There are two configuration values which you can use to set this: * :code:`rules`, which explicitly *enables* the specified rules. If this parameter is unset or empty for a file, this implies "no selection" and so "all rules" is taken to be the meaning. * :code:`exclude_rules`, which explicitly *disables* the specified rules. This parameter is applied *after* the :code:`rules` parameter so can be used to *subtract* from the otherwise enabled set. Each of these two configuration values accept a comma separated list of *references*. Each of those references can be: * a rule *code* e.g. 
:code:`LN01` * a rule *name* e.g. :code:`layout.indent` * a rule *alias*, which is often a deprecated *code* e.g. :code:`L003` * a rule *group* e.g. :code:`layout` or :code:`capitalisation` These different references can be mixed within a given expression, which results in a very powerful syntax for selecting exactly which rules are active for a given file.
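As an illustrative sketch (the specific references chosen here are arbitrary examples, not recommendations), a configuration mixing a *group*, a *name* and a *code* might look like:

.. code-block:: cfg

    [sqlfluff]
    # Enable the 'core' group, plus one additional rule selected by name.
    rules = core,layout.indent
    # ...then subtract a single rule from that set by its code.
    exclude_rules = LT08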
.. note:: It's worth mentioning here that the application of :code:`rules` and :code:`exclude_rules`, with *groups*, *aliases* and *names*, in projects with potentially multiple nested configuration files defining different rules for different areas of a project can get very confusing very fast. While this flexibility is intended for users to take advantage of, we do have some recommendations about how to do this in a way that remains manageable. When considering configuration inheritance, each of :code:`rules` and :code:`exclude_rules` will totally overwrite any values in parent config files if they are set in a child file. While the subtraction operation between both of them is calculated *"per file"*, there is no combination operation between two definitions of :code:`rules` (just one overwrites the other). The effect of this is that we recommend one of two approaches: #. Simply only use :code:`rules`. This has the upshot of each area of your project being very explicit in which rules are enabled. When that changes for part of your project you just reset the whole list of applicable rules for that part of the project. #. Set a single :code:`rules` value in your master project config file and then only use :code:`exclude_rules` in sub-configuration files to *turn off* specific rules for parts of the project where those rules are inappropriate. This keeps the simplicity of only having one value which is inherited, but allows slightly easier and simpler rollout of new rules because we manage by exception. For example, to disable the rules :sqlfluff:ref:`LT08` and :sqlfluff:ref:`RF02`: .. code-block:: cfg [sqlfluff] exclude_rules = LT08, RF02 To enable only specific rules, configure :code:`rules` instead. For example, to enable :sqlfluff:ref:`RF02`: .. code-block:: cfg [sqlfluff] rules = RF02 Rules can also be enabled/disabled by their grouping. Right now, the only rule grouping is :code:`core`. This will enable (or disable) a select group of rules that have been deemed 'core rules'. .. code-block:: cfg [sqlfluff] rules = core More information about 'core rules' can be found in the :ref:`ruleref`. Additionally, some rules have a special :code:`force_enable` configuration option, which allows you to enable the given rule even for dialects where it is disabled by default. The rules that support this can be found in the :ref:`ruleref`. The default values can be seen in :ref:`defaultconfig`. See :ref:`ignoreconfig` for more information on how to ignore particular rules for specific lines, sections or files. Downgrading rules to warnings ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To keep displaying violations for specific rules, but not have those issues lead to a failed run, rules can be downgraded to *warnings*. Rules set as *warnings* won't cause a file to fail, but will still be shown in the CLI to warn users of their presence. The configuration of this behaves very much like :code:`exclude_rules` above: .. code-block:: cfg [sqlfluff] warnings = LT01, LT04 With this configuration, files with no other issues (other than those set to warn) will pass. If there are still other issues, then the file will still fail, but will show both warnings and failures. .. code-block:: == [test.sql] PASS L: 2 | P: 9 | LT01 | WARNING: Missing whitespace before + == [test2.sql] FAIL L: 2 | P: 8 | CP02 | Unquoted identifiers must be consistently upper case. L: 2 | P: 11 | LT01 | WARNING: Missing whitespace before + This is particularly useful as a transitional tool when considering the introduction of new rules on a project where you might want to make users aware of issues without blocking their workflow (yet). You can use either rule code or rule name for this setting. Layout & Spacing Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :code:`[sqlfluff:layout]` section of the config controls the treatment of spacing and line breaks across all rules. To understand more about this section, see the section of the docs dedicated to layout: :ref:`layoutconfig`. sqlfluff-3.4.2/docs/source/configuration/setting_configuration.rst .. _setting_config: Setting Configuration ===================== SQLFluff accepts configuration either through the command line or through configuration files. There is *rough* parity between the two approaches with the exception that *templating* configuration must be done via a file, because it otherwise gets slightly complicated. For details of what's available on the command line check out the :ref:`cliref`. .. _`config-files`: Configuration Files ------------------- For file based configuration *SQLFluff* will look for the following files in order. Later files (if found) will be used to overwrite any values read from earlier files. - :code:`setup.cfg` - :code:`tox.ini` - :code:`pep8.ini` - :code:`.sqlfluff` - :code:`pyproject.toml` Within these files, the first four will be read like a `cfg file`_, and *SQLFluff* will look for sections which start with :code:`sqlfluff`, and where subsections are delimited by a colon. For example the *jinjacontext* section will be indicated in the section started with :code:`[sqlfluff:jinjacontext]`. For example, a snippet from a :code:`.sqlfluff` file (as well as any of the supported cfg file types): .. code-block:: cfg [sqlfluff] templater = jinja sql_file_exts = .sql,.sql.j2,.dml,.ddl [sqlfluff:indentation] indented_joins = False indented_using_on = True template_blocks_indent = False [sqlfluff:templater] unwrap_wrapped_queries = True [sqlfluff:templater:jinja] apply_dbt_builtins = True For the `pyproject.toml file`_, all valid sections start with :code:`tool.sqlfluff` and subsections are delimited by a dot. For example the *jinjacontext* section will be indicated in the section started with :code:`[tool.sqlfluff.jinjacontext]`. For example, a snippet from a :code:`pyproject.toml` file: .. code-block:: toml [tool.sqlfluff.core] templater = "jinja" sql_file_exts = ".sql,.sql.j2,.dml,.ddl" [tool.sqlfluff.indentation] indented_joins = false indented_using_on = true template_blocks_indent = false [tool.sqlfluff.templater] unwrap_wrapped_queries = true [tool.sqlfluff.templater.jinja] apply_dbt_builtins = true # For rule specific configuration, use dots between the names exactly # as you would in .sqlfluff. In the background, SQLFluff will unpack the # configuration paths accordingly. [tool.sqlfluff.rules.capitalisation.keywords] capitalisation_policy = "upper" .. _`cfg file`: https://docs.python.org/3/library/configparser.html .. _`pyproject.toml file`: https://www.python.org/dev/peps/pep-0518/
.. _starter_config: New Project Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^ When setting up a new project with SQLFluff, we recommend keeping your configuration file fairly minimal. The config file should act as a form of *documentation* for your team i.e. a record of what decisions you've made which govern how you format your SQL. By having a more concise config file, and only defining config settings where they differ from the defaults - you are more clearly stating to your team what choices you've made. *However*, there are also a few places where the *default* configuration is designed more for *existing projects*, rather than *fresh projects*, and so there is an opportunity to be a little stricter than you might otherwise be with an existing codebase. Here is a simple configuration file which would be suitable for a starter project: .. literalinclude:: /_partials/starter_config.cfg :language: cfg .. _nesting: Nesting ^^^^^^^ **SQLFluff** uses **nesting** in its configuration files, with config files closer to the file being linted *overriding* (or *patching*, if you will) values from other files. That means you'll end up with a final config which will be a patchwork of all the values from the config files loaded up to that path. The exception to this is the value for `templater`, which cannot be set in config files in subdirectories of the working directory. You don't **need** any config files to be present to make *SQLFluff* work. If you do want to override any values though SQLFluff will use files in the following locations in order, with values from later steps overriding those from earlier: 0. *[...and this one doesn't really count]* There's a default config as part of the SQLFluff package. You can find this below, in the :ref:`defaultconfig` section. 1. It will look in the user's os-specific app config directory. On macOS and Unix this is `~/.config/sqlfluff`, on Windows it is `<home>\\AppData\\Local\\sqlfluff\\sqlfluff`, for any of the filenames above in the main :ref:`setting_config` section. If multiple are present, they will *patch*/*override* each other in the order above. 2. It will look for the same files in the user's home directory (~). 3. *[if the current working directory is a subdirectory of the user's home directory (~)]* It will look for the same files in all directories between the user's home directory (~), and the current working directory. 4. It will look for the same files in the current working directory. 5. *[if parsing a file in a subdirectory of the current working directory]* It will look for the same files in every subdirectory between the current working dir and the file directory. 6. It will look for the same files in the directory containing the file being linted. This whole structure leads to efficient configuration, in particular in projects which utilise a lot of complicated templating.
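As a sketch of that patchwork behaviour (the paths and rule selections here are hypothetical), a project root might disable one rule, which a subdirectory config then extends:

.. code-block:: cfg

    # my_project/.sqlfluff
    [sqlfluff]
    exclude_rules = RF02

    # my_project/reports/.sqlfluff -- for files under this folder, this
    # value *overwrites* the parent value rather than combining with it.
    [sqlfluff]
    exclude_rules = RF02, LT08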
.. _in_file_config: In-File Configuration Directives -------------------------------- In addition to configuration files mentioned above, SQLFluff also supports comment based configuration switching in files. This allows a specific SQL file to modify a default configuration if it has specific needs. When used, these apply to the whole file, and are parsed from the file in an initial step before the rest of the file is properly parsed. This means they can be used for both rule configuration and also for parsing configuration. To use these, the syntax must start as an *inline sql comment* beginning with :code:`sqlfluff` (i.e. :code:`-- sqlfluff`). The line is then interpreted as a colon-separated address of the configuration value you wish to set. A few common examples are shown below: .. code-block:: sql -- Set Indented Joins -- sqlfluff:indentation:indented_joins:True -- Set a smaller indent for this file -- sqlfluff:indentation:tab_space_size:2 -- Set keywords to be capitalised -- sqlfluff:rules:capitalisation.keywords:capitalisation_policy:upper SELECT * FROM a JOIN b USING(c) We recommend only using this configuration approach for configuration that applies to one file in isolation. For configuration changes for areas of a project or for whole projects we recommend :ref:`nesting` of configuration files. This syntax is very similar to the method for :ref:`inline_ignoring_errors`. sqlfluff-3.4.2/docs/source/configuration/templating/dbt.rst .. _dbt_templater: :code:`dbt` templater ^^^^^^^^^^^^^^^^^^^^^ .. note:: From sqlfluff version 0.7.0 onwards, the dbt templater has been moved to a separate plugin and python package. Projects that were already using the dbt templater may initially fail after an upgrade to 0.7.0+. See the installation instructions below to install the dbt templater. dbt templating is a relatively new feature added in 0.4.0 and is still in very active development! If you encounter an issue, please let us know in a GitHub issue or on the SQLFluff slack workspace. :code:`dbt` is not the default templater for *SQLFluff* (it is :code:`jinja`). :code:`dbt` is a complex tool, so using the default :code:`jinja` templater will be simpler. You should be aware when using the :code:`dbt` templater that you will be exposed to some of the complexity of :code:`dbt`. Users may wish to try both templaters and choose according to how they intend to use *SQLFluff*. A simple rule of thumb might be: - If you are using *SQLFluff* in a CI/CD context, where speed is not critical but accuracy in rendering sql is, then the `dbt` templater may be more appropriate. - If you are using *SQLFluff* in an IDE or on a git hook, where speed of response may be more important, then the `jinja` templater may be more appropriate. Pros: * Most (potentially all) macros will work Cons: * More complex, e.g. using it successfully may require deeper understanding of your models and/or macros (including third-party macros) * More configuration decisions to make * Best practices are not yet established or documented * If your :code:`dbt` model files access a database at compile time, using SQLFluff with the :code:`dbt` templater will **also** require access to a database. * Note that you can often point SQLFluff and the :code:`dbt` templater at a test database (i.e. it doesn't have to be the production database). * Runs slower Installation & Configuration """""""""""""""""""""""""""" In order to get started using *SQLFluff* with a dbt project you will first need to install the relevant `dbt adapter`_ for your dialect and the :code:`sqlfluff-templater-dbt` package using your package manager of choice (e.g. :code:`pip install dbt-postgres sqlfluff-templater-dbt`) and then will need the following configuration: .. _`dbt adapter`: https://docs.getdbt.com/docs/available-adapters In *.sqlfluff*: .. code-block:: cfg [sqlfluff] templater = dbt In *.sqlfluffignore*: .. 
code-block:: text target/ # dbt <1.0.0 dbt_modules/ # dbt >=1.0.0 dbt_packages/ macros/ You can set the dbt project directory, profiles directory and profile with: .. code-block:: cfg [sqlfluff:templater:dbt] project_dir = <relative or absolute path to dbt_project directory> profiles_dir = <relative or absolute path to the directory containing profiles.yml> profile = <dbt profile name> target = <dbt target name> dbt_skip_compilation_error = <True|False> .. note:: If the `profiles_dir` setting is omitted, SQLFluff will look for the profile in the default location, which varies by operating system. On Unix-like operating systems (e.g. Linux or macOS), the default profile directory is `~/.dbt/`. On Windows, you can determine your default profile directory by running `dbt debug --config-dir`. .. note:: A fatal error can be raised at compile time. That can sometimes happen for SQLFluff related reasons (it used to happen if we tried to compile ephemeral models in the wrong order), but more often because a macro tries to query a table at compile time which doesn't exist. By default, the `dbt_skip_compilation_error` parameter is set to `True`, which is why such errors will be ignored. However if you want to see them, you can set it to `False` and SQLFluff will raise a fatal error. To use builtin dbt Jinja functions, SQLFluff provides a configuration option that enables usage within templates. .. code-block:: cfg [sqlfluff:templater:jinja] apply_dbt_builtins = True This will provide dbt macros like `ref`, `var`, `is_incremental()`. If the need arises, builtin dbt macros can be customised via Jinja macros in the `.sqlfluff` configuration file. .. code-block:: cfg [sqlfluff:templater:jinja:macros] # Macros provided as builtins for dbt projects dbt_ref = {% macro ref(model_ref) %}{{model_ref}}{% endmacro %} dbt_source = {% macro source(source_name, table) %}{{source_name}}_{{table}}{% endmacro %} dbt_config = {% macro config() %}{% for k in kwargs %}{% endfor %}{% endmacro %} dbt_var = {% macro var(variable, default='') %}item{% endmacro %} dbt_is_incremental = {% macro is_incremental() %}True{% endmacro %} If your project requires that you pass variables to dbt through the command line, you can specify them in the `templater:dbt:context` section of `.sqlfluff`. See below configuration and its equivalent dbt command: .. code-block:: cfg [sqlfluff:templater:dbt:context] my_variable = 1 .. code-block:: text dbt run --vars '{"my_variable": 1}' Known Caveats """"""""""""" - To use the dbt templater, you must set `templater = dbt` in the `.sqlfluff` config file in the directory where sqlfluff is run. The templater cannot be changed in `.sqlfluff` files in subdirectories. - In SQLFluff 0.4.0 using the dbt templater requires that all files within the root and child directories of the dbt project must be part of the project. If there are deployment scripts which refer to SQL files not part of the project for instance, this will result in an error. You can overcome this by adding any non-dbt project SQL files to .sqlfluffignore. sqlfluff-3.4.2/docs/source/configuration/templating/index.rst .. _templateconfig: Templating Configuration ------------------------ This section explains how to configure templating for SQL files. When writing SQL files, users might utilise some kind of templating. The SQL file itself is written with placeholders which get rendered to proper SQL at run time. This can range from very simple placeholder templating to complex Jinja templating. SQLFluff supports templated sections in SQL, see :ref:`templater`. This is achieved by the following set of operations: 1. 
SQLFluff pre-renders the templated SQL 2. SQLFluff applies the lint and fix operations to the rendered file 3. SQLFluff backports the rule violations to the templated section of the SQL. SQLFluff does not automatically have access to the same environment used in production template setup. This means it is necessary to either provide that environment or provide dummy values to effectively render the template and generate valid SQL. Refer to the templater sections below for details. SQLFluff natively supports the following templating engines: - :ref:`jinja_templater` - :ref:`placeholder_templater` - :ref:`python_templater` Also, SQLFluff has an integration to use :code:`dbt` as a templater. - :ref:`dbt_templater` (via plugin which is covered in a different section). .. note:: Templaters may not be able to generate a rendered SQL file that covers the entire raw file. For example, if the raw SQL uses a :code:`{% if condition %}` block, the rendered version of the template will only include either the :code:`{% if %}` branch or the :code:`{% else %}` branch (depending on the provided configuration for the templater), but not both. In this case, because SQLFluff linting can only operate on the output of the templater, some areas of the raw SQL will never be seen by the linter and will not be covered by lint rules. This is functionality we hope to support in future. .. toctree:: :maxdepth: 2 :caption: Templater Specific Configuration: jinja placeholder python dbt .. _generic_variable_templating: Generic Variable Templating ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Variables are available in all the templaters. By default the templating engine will expect variables for templating to be available in the config, and the templater will look in the section corresponding to the context for that templater. By convention, the config for the ``jinja`` templater is found in the ``sqlfluff:templater:jinja:context`` section, the config for the ``python`` templater is found in the ``sqlfluff:templater:python:context`` section, the one for the ``placeholder`` templater is found in the ``sqlfluff:templater:placeholder:context`` section. For example, if passed the following *.sql* file: .. code-block:: SQL+Jinja SELECT {{ num_things }} FROM {{ tbl_name }} WHERE id > 10 LIMIT 5 ...and the following configuration in *.sqlfluff* in the same directory: .. code-block:: cfg [sqlfluff:templater:jinja:context] num_things=456 tbl_name=my_table ...then before parsing, the sql will be transformed to: .. code-block:: sql SELECT 456 FROM my_table WHERE id > 10 LIMIT 5 .. note:: If there are variables in the template which cannot be found in the current configuration context, then this will raise a `SQLTemplatingError` and this will appear as a violation without a line number, quoting the name of the variable that couldn't be found. sqlfluff-3.4.2/docs/source/configuration/templating/jinja.rst .. _jinja_templater: Jinja templater ^^^^^^^^^^^^^^^ The Jinja templater uses Jinja2_ to render templates. .. _Jinja2: https://jinja.palletsprojects.com/ There are multiple, complementary ways of configuring the Jinja templater. - Reading variables and Jinja macros directly from the SQLFluff config file - Loading macros from a path - Using a library
.. list-table:: Overview of Jinja templater's configuration options :header-rows: 1 * - Configuration - Variables - Macros - Filters - Documentation * - Config file - ✅ - ✅ - ❌ - `Complex Jinja Variable Templating`_ and `Jinja Macro Templating (from config)`_ * - Macro Path - ❌ - ✅ - ❌ - `Jinja Macro Templating (from file)`_ * - Library - ✅ - ✅ - ✅ - `Library Templating`_ For example, a snippet from a :code:`.sqlfluff` file that uses all config options: .. code-block:: cfg [sqlfluff] templater = jinja [sqlfluff:templater:jinja] apply_dbt_builtins = True load_macros_from_path = my_macros loader_search_path = included_templates library_path = sqlfluff_libs exclude_macros_from_path = my_macros_exclude [sqlfluff:templater:jinja:context] my_list = ['a', 'b', 'c'] MY_LIST = ("d", "e", "f") my_where_dict = {"field_1": 1, "field_2": 2} [sqlfluff:templater:jinja:macros] a_macro_def = {% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %} Complex Jinja Variable Templating """"""""""""""""""""""""""""""""" Apart from the Generic variable templating that is supported for all templaters, two more advanced features of variable templating are available for Jinja: *case sensitivity* and *native python types*. Both are illustrated in the following example: .. code-block:: cfg [sqlfluff:templater:jinja:context] my_list = ['a', 'b', 'c'] MY_LIST = ("d", "e", "f") my_where_dict = {"field_1": 1, "field_2": 2} .. code-block:: SQL+Jinja SELECT {% for elem in MY_LIST %} '{{elem}}' {% if not loop.last %}||{% endif %} {% endfor %} as concatenated_list FROM tbl WHERE {% for field, value in my_where_dict.items() %} {{field}} = {{value}} {% if not loop.last %}and{% endif %} {% endfor %} ...will render as... .. code-block:: sql SELECT 'd' || 'e' || 'f' as concatenated_list FROM tbl WHERE field_1 = 1 and field_2 = 2 Note that the variable was replaced in a case sensitive way and that the settings in the config file were interpreted as native python types. Jinja Macro Templating (from config) """""""""""""""""""""""""""""""""""" Macros (which also look and feel like *functions*) are available only in the *jinja* templater. Similar to :ref:`generic_variable_templating`, these are specified in config files; what's different in this case is how they are named. Similar to the *context* section above, macros are configured separately in the *macros* section of the config. Consider the following example. If passed the following *.sql* file: .. code-block:: SQL+Jinja SELECT {{ my_macro(6) }} FROM some_table ...and the following configuration in *.sqlfluff* in the same directory (note the tight control of whitespace): .. code-block:: cfg [sqlfluff:templater:jinja:macros] a_macro_def = {% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %} ...then before parsing, the sql will be transformed to: .. code-block:: sql SELECT 6 + 12 FROM some_table Note that in the code block above, the variable name in the config is *a_macro_def*, and this isn't apparently otherwise used anywhere else. Broadly this is accurate, however within the configuration loader this will still be used to overwrite previous *values* in other config files. As such this introduces the idea of config *blocks* which could be selectively overwritten by other configuration files downstream as required. Jinja Macro Templating (from file) """""""""""""""""""""""""""""""""" In addition to macros specified in the config file, macros can also be loaded from files or folders. This is specified in the config file: .. code-block:: cfg [sqlfluff:templater:jinja] load_macros_from_path = my_macros,other_macros ``load_macros_from_path`` is a comma-separated list of :code:`.sql` files or folders. Locations are *relative to the config file*. For example, if the config file above was found at :code:`/home/my_project/.sqlfluff`, then SQLFluff will look for macros in the folders :code:`/home/my_project/my_macros/` and :code:`/home/my_project/other_macros/`, including any of their subfolders. Any macros defined in the config will always take precedence over a macro defined in the path. ``exclude_macros_from_path`` works in the same manner as ``load_macros_from_path`` but allows you to have sqlfluff ignore certain macros. This can be useful if you have custom jinja tags. Macros loaded from these files are available in every :code:`.sql` file without requiring a Jinja :code:`include` or :code:`import`. They are loaded into the `Jinja Global Namespace `_.
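For example (the folder, file and macro names here are hypothetical), a file at :code:`my_macros/my_macro.sql` containing:

.. code-block:: jinja

    {% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %}

...could then be called as :code:`{{ my_macro(6) }}` from any :code:`.sql` file in the project, without an explicit :code:`import`.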
**Note:** The :code:`load_macros_from_path` setting also defines the search path for Jinja `include `_ or `import `_. As with loaded macros, subdirectories are also supported. For example, if :code:`load_macros_from_path` is set to :code:`my_macros`, and there is a file :code:`my_macros/subdir/my_file.sql`, you can do: .. code-block:: jinja {% include 'subdir/my_file.sql' %} If you would like to define the Jinja search path without also loading the macros into the global namespace, use the :code:`loader_search_path` setting instead. .. note:: Throughout the templating process **whitespace** will still be treated rigorously, and this includes **newlines**. In particular you may choose to provide *dummy* macros in your configuration different from the actual macros used in production. **REMEMBER:** The reason SQLFluff supports macros is to *enable* it to parse templated sql without it being a blocker. It shouldn't be a requirement that the *templating* is accurate - it only needs to work well enough that *parsing* and *linting* are helpful. .. _builtin_jinja_blocks: Builtin Jinja Macro Blocks """""""""""""""""""""""""" One of the main use cases which inspired *SQLFluff* as a project was `dbt`_. It uses jinja templating extensively and leads to some users maintaining large repositories of sql files which could potentially benefit from some linting. .. note:: *SQLFluff* now has a tighter integration with dbt through the "dbt" templater. It is the recommended templater for dbt projects. If used, it eliminates the need for the overrides described in this section. *SQLFluff* anticipates this use case and provides some built in macro blocks in the :ref:`defaultconfig` which assist in getting started with `dbt`_ projects. In particular it provides mock objects for: * *ref*: The mock version provided simply returns the model reference as the name of the table. In most cases this is sufficient. * *config*: A regularly used macro in `dbt`_ to set configuration values. For linting purposes, this makes no difference and so the provided macro simply returns nothing. .. note:: If there are other builtin macros which would make your life easier, consider submitting the idea (or even better a pull request) on `github`_. .. _`dbt`: https://www.getdbt.com/ .. _`github`: https://www.github.com/sqlfluff/sqlfluff
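As an illustration of those mock objects (the model name here is hypothetical), with :code:`apply_dbt_builtins` enabled a model such as:

.. code-block:: SQL+Jinja

    SELECT col_a FROM {{ ref('my_model') }}

...would be rendered for linting purposes as:

.. code-block:: sql

    SELECT col_a FROM my_model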
_jinja_library_templating:

Library Templating
""""""""""""""""""

If using *SQLFluff* with jinja as your templater, you may have library
function calls within your sql files that cannot be templated via the
normal macro templating mechanisms:

.. code-block:: SQL+Jinja

    SELECT foo, bar
    FROM baz
    {{ dbt_utils.group_by(2) }}

To template these libraries, you can use the
`sqlfluff:jinja:library_path` config option:

.. code-block:: cfg

    [sqlfluff:templater:jinja]
    library_path = sqlfluff_libs

This will pull in any python modules from that directory and allow
sqlfluff to use them in templates. In the above example, you might
define a file at `sqlfluff_libs/dbt_utils.py` as:

.. code-block:: python

    def group_by(n):
        return "GROUP BY 1,2"

If an `__init__.py` is detected, it will be loaded alongside any modules
and submodules found within the library path.

.. code-block:: SQL+Jinja

    SELECT
        {{ custom_sum('foo', 'bar') }},
        {{ foo.bar.another_sum('foo', 'bar') }}
    FROM baz

`sqlfluff_libs/__init__.py`:

.. code-block:: python

    def custom_sum(a: str, b: str) -> str:
        return a + b

`sqlfluff_libs/foo/__init__.py`:

.. code-block:: python

    # empty file

`sqlfluff_libs/foo/bar.py`:

.. code-block:: python

    def another_sum(a: str, b: str) -> str:
        return a + b

Additionally, the library can be used to expose `Jinja Filters `_ to the
Jinja environment used by SQLFluff. This is achieved by setting a global
variable named ``SQLFLUFF_JINJA_FILTERS``. ``SQLFLUFF_JINJA_FILTERS`` is
a dictionary where:

* dictionary keys map to the Jinja filter name
* dictionary values map to the Python callable

For example, to make the Airflow filter ``ds`` available to SQLFluff,
add the following to the `__init__.py` of the library:

.. code-block:: python

    # https://github.com/apache/airflow/blob/main/airflow/templates.py#L53
    def ds_filter(value: datetime.date | datetime.time | None) -> str | None:
        """Date filter."""
        if value is None:
            return None
        return value.strftime("%Y-%m-%d")


    SQLFLUFF_JINJA_FILTERS = {"ds": ds_filter}

Now, ``ds`` can be used in SQL:

.. code-block:: SQL+Jinja

    SELECT "{{ "2000-01-01" | ds }}";

Jinja loader search path
""""""""""""""""""""""""

The Jinja environment can be configured to search for files included
with `include `_ or `import `_ in a list of folders. This is specified
in the config file:

.. code-block:: cfg

    [sqlfluff:templater:jinja]
    loader_search_path = included_templates,other_templates

``loader_search_path`` is a comma-separated list of folders. Locations
are *relative to the config file*. For example, if the config file above
was found at :code:`/home/my_project/.sqlfluff`, then SQLFluff will look
for included files in the folders
:code:`/home/my_project/included_templates/` and
:code:`/home/my_project/other_templates/`, including any of their
subfolders. For example, this will read from
:code:`/home/my_project/included_templates/my_template.sql`:

.. code-block:: jinja

    {% include 'included_templates/my_template.sql' %}

Any folders specified in the :code:`load_macros_from_path` setting are
automatically appended to the ``loader_search_path``. It is not
necessary to specify a given directory in both settings.

Unlike the :code:`load_macros_from_path` setting, any macros within
these folders are *not* automatically loaded into the global namespace.
They must be explicitly imported using the `import `_ Jinja directive.
If you would like macros to be automatically included in the global
Jinja namespace, use the :code:`load_macros_from_path` setting instead.
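For example, assuming ``loader_search_path`` points at a folder
containing a file ``common_macros.sql`` (a hypothetical name), the
macros in it must be imported before they can be used:

.. code-block:: jinja

    {% import 'common_macros.sql' as common %}

    SELECT {{ common.my_macro(6) }} FROM some_table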
Interaction with ``--ignore=templating``
""""""""""""""""""""""""""""""""""""""""

Ignoring Jinja templating errors provides a way for users to use
SQLFluff while reducing or avoiding the need to spend a lot of time
adding variables to ``[sqlfluff:templater:jinja:context]``.

When ``--ignore=templating`` is enabled, the Jinja templater behaves a
bit differently. This additional behavior is *usually* but not *always*
helpful for making the file at least partially parsable and fixable. It
definitely doesn’t **guarantee** that every file can be fixed, but it’s
proven useful for some users.

Here's how it works:

* Within the expanded SQL, undefined variables are automatically
  *replaced* with the corresponding string value.
* If you do: ``{% include query %}``, and the variable ``query`` is not
  defined, it returns a “file” containing the string ``query``.
* If you do: ``{% include "query_file.sql" %}``, and that file does not
  exist or you haven’t configured a setting for
  ``load_macros_from_path`` or ``loader_search_path``, it returns a
  “file” containing the text ``query_file``.

For example:

.. code-block:: SQL+Jinja

    select {{ my_variable }}
    from {% include "my_table.sql" %}

is interpreted as:

.. code-block:: sql

    select my_variable
    from my_table

The values provided by the Jinja templater act *a bit* (not exactly)
like a mixture of several types:

* ``str``
* ``int``
* ``list``
* Jinja's ``Undefined`` `class `_

Because the values behave like ``Undefined``, it's possible to replace
them using Jinja's ``default()`` `filter `_. For example:

.. code-block:: SQL+Jinja

    select {{ my_variable | default("col_a") }}
    from my_table

is interpreted as:

.. code-block:: sql

    select col_a
    from my_table

sqlfluff-3.4.2/docs/source/configuration/templating/placeholder.rst

.. _placeholder_templater:

Placeholder templater
^^^^^^^^^^^^^^^^^^^^^

Libraries such as SQLAlchemy or Psycopg use different parameter
placeholder styles to mark where a parameter has to be inserted in the
query. For example a query in SQLAlchemy can look like this:

.. code-block:: sql

    SELECT * FROM table WHERE id = :myid

At runtime `:myid` will be replaced by a value provided by the
application and escaped as needed, but this is not standard SQL and
cannot be parsed as is. In order to parse these queries it is then
necessary to replace these placeholders with sample values, and this is
done with the placeholder templater.

Placeholder templating can be enabled in the config using:

.. code-block:: cfg

    [sqlfluff]
    templater = placeholder

A few common styles are supported:

.. code-block:: sql
   :force:

    -- colon
    WHERE bla = :my_name

    -- colon_nospaces
    -- (use with caution as more prone to false positives)
    WHERE bla = table:my_name

    -- colon_optional_quotes
    SELECT :"column" FROM :table WHERE bla = :'my_name'

    -- numeric_colon
    WHERE bla = :2

    -- pyformat
    WHERE bla = %(my_name)s

    -- dollar
    WHERE bla = $my_name or WHERE bla = ${my_name}

    -- dollar_surround (DbUp compatible variable)
    WHERE bla = $my_name$

    -- question_mark
    WHERE bla = ?

    -- numeric_dollar
    WHERE bla = $3 or WHERE bla = ${3}

    -- percent
    WHERE bla = %s

    -- ampersand
    WHERE bla = &s or WHERE bla = &{s} or USE DATABASE MARK_{ENV}

These can be configured by setting `param_style` to the names above:

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_style = colon
    my_name = 'john'

then you can set sample values for each parameter, like `my_name`
above.
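For example, with the configuration above, a (hypothetical) query such
as:

.. code-block:: sql

    SELECT * FROM table WHERE id = :my_name

...is templated for parsing as:

.. code-block:: sql

    SELECT * FROM table WHERE id = 'john'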
Notice that the value needs to be escaped as it will be replaced as a
string during parsing. When the sample values aren't provided, the
templater will use the parameter names themselves by default.

When parameters are positional, like `question_mark`, then their name
is simply the order in which they appear, starting with `1`.

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_style = question_mark
    1 = 'john'

In case you need a parameter style different from the ones above, you
can pass a custom regex.

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_regex = __(?P<param_name>[\w_]+)__
    my_name = 'john'

N.B. quotes around `param_regex` in the config are interpreted
literally by the templater, e.g.
`param_regex='__(?P<param_name>[\w_]+)__'` matches `'__some_param__'`
not `__some_param__`.

The named parameter `param_name` will be used as the key to replace;
if it is missing, the parameter is assumed to be positional and numbers
are used instead.

Also consider making a pull request to the project to have your style
added; it may be useful to other people and simplify your
configuration.

sqlfluff-3.4.2/docs/source/configuration/templating/python.rst

.. _python_templater:

Python templater
^^^^^^^^^^^^^^^^

Uses native Python f-strings. As described in
:ref:`generic_variable_templating`, an example usage would be
configured as follows:

If passed the following *.sql* file:

.. code-block::

    SELECT * FROM {tbl_name}

...and the following configuration in *.sqlfluff* in the same
directory:

.. code-block:: cfg

    [sqlfluff]
    templater = python

    [sqlfluff:templater:python:context]
    tbl_name = my_table

...then before parsing, the sql will be transformed to:

.. code-block:: sql

    SELECT * FROM my_table

Complex Python Variable Templating
""""""""""""""""""""""""""""""""""

`Python string formatting`_ supports accessing object attributes via
dot notation (e.g. :code:`{foo.bar}`). However, since we cannot create
Python objects within configuration files, we need a workaround in
order to provide dummy values to render templates containing these
values. The SQLFluff python templater will interpret any variable
containing a "." as a dictionary lookup on the *magic* fixed context
key :code:`sqlfluff`.

.. code-block::

    -- this SQL
    SELECT * FROM {foo.bar}

    -- becomes this
    SELECT * FROM {sqlfluff["foo.bar"]}

...which can be populated using the following configuration:

.. code-block:: cfg

    [sqlfluff:templater:python:context]
    sqlfluff = {"foo.bar": "abc"}

.. _`Python string formatting`: https://docs.python.org/3/library/string.html#format-string-syntax
sqlfluff-3.4.2/docs/source/gettingstarted.rst

.. _gettingstartedref:

Getting Started
===============

To get started with *SQLFluff* you'll need python and pip installed on
your machine; if you're already set up, you can skip straight to
`Installing sqlfluff`_.

Installing Python
-----------------

How to install *python* and *pip* depends on what operating system
you're using. In any case, the python wiki provides up to date
`instructions for all platforms here`_.

There's a chance that you'll be offered the choice between python
versions. Support for python 2 was dropped in early 2020, so you should
always opt for a version number starting with a 3. As for more specific
options beyond that, *SQLFluff* aims to be compatible with all current
python versions, and so it's best to pick the most recent.

You can confirm that python is working as expected by heading to your
terminal or console of choice and typing :code:`python --version` which
should give you a sensible read out and not an error.

.. code-block:: text

    $ python --version
    Python 3.9.1

For most people, their installation of python will come with
:code:`pip` (the python package manager) preinstalled. To confirm this
you can type :code:`pip --version` similar to python above.

.. code-block:: text

    $ pip --version
    pip 21.3.1 from ...

If however, you do have python installed but not :code:`pip`, then the
best instructions for what to do next are `on the python website`_.

.. _`instructions for all platforms here`: https://wiki.python.org/moin/BeginnersGuide/Download
.. _`on the python website`: https://pip.pypa.io/en/stable/installation/

.. _installingsqlfluff:

Installing SQLFluff
-------------------

Assuming that python and pip are already installed, then installing
*SQLFluff* is straightforward.

.. code-block:: text

    $ pip install sqlfluff

You can confirm its installation by getting *SQLFluff* to show its
version number.

.. code-block:: text

    $ sqlfluff version
    3.4.2

Basic Usage
-----------

To get a feel for how to use *SQLFluff* it helps to have a small
:code:`.sql` file which has a simple structure and some known issues
for testing. Create a file called :code:`test.sql` in the same folder
that you're currently in with the following content:

.. code-block:: sql

    SELECT a+b  AS foo,
    c AS bar from my_table

You can then run :code:`sqlfluff lint test.sql --dialect ansi` to lint
this file.

.. code-block:: text

    $ sqlfluff lint test.sql --dialect ansi
    == [test.sql] FAIL
    L: 1 | P: 1 | LT09 | Select targets should be on a new line unless there is
                       | only one select target.
                       | [layout.select_targets]
    L: 1 | P: 1 | ST06 | Select wildcards then simple targets before calculations
                       | and aggregates. [structure.column_order]
    L: 1 | P: 7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
                       | [layout.indent]
    L: 1 | P: 9 | LT01 | Expected single whitespace between naked identifier and
                       | binary operator '+'. [layout.spacing]
    L: 1 | P: 10 | LT01 | Expected single whitespace between binary operator '+'
                        | and naked identifier. [layout.spacing]
    L: 1 | P: 11 | LT01 | Expected only single space before 'AS' keyword. Found '
                        | '. [layout.spacing]
    L: 2 | P: 1 | LT02 | Expected indent of 4 spaces.
                       | [layout.indent]
    L: 2 | P: 9 | LT02 | Expected line break and no indent before 'from'.
                       | [layout.indent]
    L: 2 | P: 10 | CP01 | Keywords must be consistently upper case.
                        | [capitalisation.keywords]
    All Finished 📜 🎉!

You'll see that *SQLFluff* has failed the linting check for this file.
On each of the following lines you can see each of the problems it has
found, with some information about the location and what kind of
problem there is.

One of the errors has been found on *line 1*, *position 9* (as shown by
:code:`L: 1 | P: 9`) and it's a problem with rule *LT01* (for a full
list of rules, see :ref:`ruleref`). From this (and the following error)
we can see that the problem is that there is no space either side of
the :code:`+` symbol in :code:`a+b`. Head into the file, and correct
this issue so that the file now looks like this:

.. code-block:: sql

    SELECT a + b  AS foo,
    c AS bar from my_table

Rerun the same command as before, and you'll see that the original
error (violation of *LT01*) no longer shows up.

.. code-block:: text

    $ sqlfluff lint test.sql --dialect ansi
    == [test.sql] FAIL
    L: 1 | P: 1 | LT09 | Select targets should be on a new line unless there is
                       | only one select target.
                       | [layout.select_targets]
    L: 1 | P: 1 | ST06 | Select wildcards then simple targets before calculations
                       | and aggregates. [structure.column_order]
    L: 1 | P: 7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
                       | [layout.indent]
    L: 1 | P: 13 | LT01 | Expected only single space before 'AS' keyword. Found '
                        | '. [layout.spacing]
    L: 2 | P: 1 | LT02 | Expected indent of 4 spaces.
                       | [layout.indent]
    L: 2 | P: 9 | LT02 | Expected line break and no indent before 'from'.
                       | [layout.indent]
    L: 2 | P: 10 | CP01 | Keywords must be consistently upper case.
                        | [capitalisation.keywords]

To fix the remaining issues, we're going to use one of the more
advanced features of *SQLFluff*, which is the *fix* command.
This allows more automated fixing of some errors, to save you time in sorting out your sql files. Not all rules can be fixed in this way and there may be some situations where a fix may not be able to be applied because of the context of the query, but in many simple cases it's a good place to start. For now, we only want to fix the following rules: *LT02*, *LT12*, *CP01* .. code-block:: text $ sqlfluff fix test.sql --rules LT02,LT12,CP01 --dialect ansi ==== finding violations ==== == [test.sql] FAIL L: 1 | P: 7 | LT02 | Expected line break and indent of 4 spaces before 'a'. | [layout.indent] L: 2 | P: 1 | LT02 | Expected indent of 4 spaces. | [layout.indent] L: 2 | P: 9 | LT02 | Expected line break and no indent before 'FROM'. | [layout.indent] L: 2 | P: 10 | CP01 | Keywords must be consistently upper case. | [capitalisation.keywords] ==== fixing violations ==== 4 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ...at this point you'll have to confirm that you want to make the changes by pressing :code:`y` on your keyboard... .. code-block:: text Are you sure you wish to attempt to fix these? [Y/n] ... Attempting fixes... Persisting Changes... == [test.sql] PASS Done. Please check your files to confirm. If we now open up :code:`test.sql`, we'll see the content is now different. .. code-block:: sql SELECT a + b AS foo, c AS bar FROM my_table In particular: * The two columns have been indented to reflect being inside the :code:`SELECT` statement. * The :code:`FROM` keyword has been capitalised to match the other keywords. We could also fix *all* of the fixable errors by not specifying :code:`--rules`. .. code-block:: text $ sqlfluff fix test.sql --dialect ansi ==== finding violations ==== == [test.sql] FAIL L: 1 | P: 1 | ST06 | Select wildcards then simple targets before calculations | and aggregates. [structure.column_order] L: 2 | P: 10 | LT01 | Expected only single space before 'AS' keyword. Found ' | '. [layout.spacing] ==== fixing violations ==== 2 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ... Attempting fixes... Persisting Changes... == [test.sql] PASS Done. Please check your files to confirm. If we now open up :code:`test.sql`, we'll see the content has been updated again. .. code-block:: sql SELECT c AS bar, a + b AS foo FROM my_table The SQL statement is now well formatted according to all the rules defined in SQLFluff. The :code:`--rules` argument is optional, and could be useful when you or your organisation follows a slightly different convention than what we have defined. Custom Usage ------------ So far we've covered the stock settings of *SQLFluff*, but there are many different ways that people style their sql, and if you or your organisation have different conventions, then many of these behaviours can be configured. For example, given the example above, what if we actually think that indents should only be two spaces, and rather than uppercase keywords, they should all be lowercase? To achieve this we create a configuration file named :code:`.sqlfluff` and place it in the same directory as the current file. In that file put the following content: .. code-block:: cfg [sqlfluff] dialect = ansi [sqlfluff:indentation] tab_space_size = 2 [sqlfluff:rules:capitalisation.keywords] capitalisation_policy = lower Then rerun the same command as before. .. 
code-block:: text $ sqlfluff fix test.sql --rules LT02,LT12,CP01,ST06,LT09,LT01 Then examine the file again, and you'll notice that the file has been fixed accordingly. .. code-block:: sql select c as bar, a + b as foo from my_table For a full list of configuration options check out :ref:`defaultconfig`. Note that in our example here we've only set a few configuration values and any other configuration settings remain as per the default config. To see how these options apply to specific rules check out the "Configuration" section within each rule's documentation in :ref:`ruleref`. Going further ------------- From here, there are several more things to explore. * To understand how *SQLFluff* is interpreting your file explore the :code:`parse` command. You can learn more about that command and more by running :code:`sqlfluff --help` or :code:`sqlfluff parse --help`. * To start linting more than just one file at a time, experiment with passing SQLFluff directories rather than just single files. Try running :code:`sqlfluff lint .` (to lint every sql file in the current folder) or :code:`sqlfluff lint path/to/my/sqlfiles`. * To find out more about which rules are available, see :ref:`ruleref`. * To find out more about configuring *SQLFluff* and what other options are available, see :ref:`config`. * Once you're ready to start using *SQLFluff* on a project or with the rest of your team, check out :ref:`production-use`. One last thing to note is that *SQLFluff* is a relatively new project and you may find bugs or strange things while using it. If you do find anything, the most useful thing you can do is to `post the issue on GitHub`_ where the maintainers of the project can work out what to do with it. The project is in active development and so updates and fixes may come out regularly. .. _`post the issue on GitHub`: https://github.com/sqlfluff/sqlfluff/issues sqlfluff-3.4.2/docs/source/guides/000077500000000000000000000000001503426445100170565ustar00rootroot00000000000000sqlfluff-3.4.2/docs/source/guides/contributing/000077500000000000000000000000001503426445100215655ustar00rootroot00000000000000sqlfluff-3.4.2/docs/source/guides/contributing/architecture.rst000066400000000000000000000132701503426445100250040ustar00rootroot00000000000000.. _architecture: Architecture ------------ At a high level, the behaviour of SQLFluff is divided into a few key stages. Whether calling `sqlfluff lint`, `sqlfluff fix` or `sqlfluff parse`, the internal flow is largely the same. .. _templater: Stage 1, the templater ^^^^^^^^^^^^^^^^^^^^^^ This stage only applies to templated SQL. Vanilla SQL is sent straight to stage 2, the lexer. In order to lint templated SQL, SQLFluff must first convert the 'raw' or pre-templated code into valid SQL, which can then be parsed. The templater returns both the raw and post-templated SQL so that any rule violations which occur in templated sections can be ignored and the rest mapped to their original line location for user feedback. .. _Jinja: https://jinja.palletsprojects.com/ .. _dbt: https://docs.getdbt.com/ .. _`Python format strings`: https://docs.python.org/3/library/string.html#format-string-syntax *SQLFluff* supports multiple templating engines: * Jinja_ * SQL placeholders (e.g. SQLAlchemy parameters) * `Python format strings`_ * dbt_ (via plugin) Under the hood dbt also uses Jinja, but in *SQLFluff* uses a separate mechanism which interfaces directly with the dbt python package. For more details on how to configure the templater see :ref:`templateconfig`. 
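As a concrete illustration of this stage, consider a raw file like the
following (the variable name and its configured value are invented for
this sketch):

.. code-block:: SQL+Jinja

    SELECT id
    FROM {{ schema_name }}.orders

If ``schema_name`` is configured as ``analytics``, the templater hands
the later stages this valid SQL, while keeping track of how it maps
back to the raw file:

.. code-block:: sql

    SELECT id
    FROM analytics.orders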
Stage 2, the lexer ^^^^^^^^^^^^^^^^^^ The lexer takes SQL and separates it into segments of whitespace and code. Where we can impart some high level meaning to segments, we do, but the result of this operation is still a flat sequence of typed segments (all subclasses of :code:`RawSegment`). Stage 3, the parser ^^^^^^^^^^^^^^^^^^^ The parser is arguably the most complicated element of SQLFluff, and is relied on by all the other elements of the tool to do most of the heavy lifting. #. The lexed segments are parsed using the specified dialect's grammars. In SQLFluff, grammars describe the shape of SQL statements (or their components). The parser attempts to apply each potential grammar to the lexed segments until all the segments have been matched. #. In SQLFluff, segments form a tree-like structure. The top-level segment is a :code:`FileSegment`, which contains zero or more :code:`StatementSegment`\ s, and so on. Before the segments have been parsed and named according to their type, they are 'raw', meaning they have no classification other than their literal value. #. A segment's :code:`.match()` method uses the :code:`match_grammar`, on which :code:`.match()` is called. SQLFluff parses in a single pass through the file, so segments will recursively match the file based on their respective grammars. In the example of a :code:`FileSegment`, it first divides up the query into statements, and then the :code:`.match()` method of those segments works out the structure within them. * *Segments* must implement a :code:`match_grammar`. When :code:`.match()` is called on a segment, this is the grammar which is used to decide whether there is a match. * *Grammars* combine *segments* or other *grammars* together in a pre-defined way. For example the :code:`OneOf` grammar will match if any one of its child elements match. #. During the recursion, the parser eventually reaches segments which have no children (raw segments containing a single token), and so the recursion naturally finishes. #. If no match is found for a segment, the contents will be wrapped in an :code:`UnparsableSegment` which is picked up as a *parsing* error later. This is usually facilitated by the :code:`ParseMode` on some grammars which can be set to :code:`GREEDY`, allowing the grammar to capture additional segments as unparsable. As an example, bracketed sections are often configured to capture anything unexpected as unparsable rather than simply failing to match if there is more than expected (which would be the default, :code:`STRICT`, behaviour). #. The result of the :code:`.match()` method is a :code:`MatchResult` which contains the instructions on how to turn the flat sequence of raw segments into a nested tree of segments. Calling :code:`.apply()` on this result at the end of the matching process is what finally creates the nested structure. When working on the parser there are a couple of design principles to keep in mind. - Grammars are contained in *dialects*, the root dialect being the *ansi* dialect. The ansi dialect is used to host logic common to all dialects, and so does not necessarily adhere to the formal ansi specification. Other SQL dialects inherit from the ansi dialect, replacing or patching any segments they need to. 
One reason for the *Ref* grammar is that it allows name resolution of grammar elements at runtime and so a *patched* grammar with some elements overridden can still rely on lower-level elements which haven't been redeclared within the dialect - All grammars and segments attempt to match as much as they can and will return partial matches where possible. It is up to the calling grammar or segment to decide whether a partial or complete match is required based on the context it is matching in. Stage 4, the linter ^^^^^^^^^^^^^^^^^^^ Given the complete parse tree, rule classes check for linting errors by traversing the tree, looking for segments and patterns of concern. If the rule discovers a violation, it returns a :py:class:`~sqlfluff.core.rules.base.LintResult` pointing to the segment which caused the violation. Some rules are able to *fix* the problems they find. If this is the case, the rule will return a list of fixes, which describe changes to be made to the tree. This can include edits, inserts, or deletions. Once the fixes have been applied, the updated tree is written to the original file. sqlfluff-3.4.2/docs/source/guides/contributing/dialect.rst000066400000000000000000001333371503426445100237360ustar00rootroot00000000000000.. _contributing_dialect_changes: Contributing dialect changes ============================ One of the best ways that SQLFluff users can improve SQLFluff for themselves and others is in contributing dialect changes. Users will likely know their syntax much better than the regular maintainers and will have access to an instance of that SQL dialect to confirm changes are valid SQL in that dialect. If you can fix your own issues then that's often the quickest way of unblocking any issues preventing you from using SQLFluff! The maintainers are all volunteers doing this in our spare time and (like you all I'm sure!), we only have so much time to work on this. How SQLFluff reads (or parses) SQL ---------------------------------- SQLFluff has a lexer and parser which is built in a very modular fashion that is easy to read, understand, and expand on without any core programming skills or deep knowledge of Python or how SQLFluff operates. For more information see the :ref:`Architecture Documentation `, but will cover that briefly here to give you enough to start contributing. We also have a robust Continuous Integration pipeline in GitHub where you can gain confidence your changes are correct and will not break other SQLFluff users, even before a regular maintainer reviews the code. SQLFluff defines the syntax it will used in dialect files (more on this later). If you look at the `dialect_ansi.py`_ file you will see it has syntax like this: .. _`dialect_ansi.py`: https://github.com/sqlfluff/sqlfluff/blob/main/src/sqlfluff/dialects/dialect_ansi.py .. code-block:: python class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement.""" type = "select_clause" match_grammar = StartsWith( Sequence("SELECT", Ref("WildcardExpressionSegment", optional=True)), terminator=OneOf( "FROM", "WHERE", "ORDER", "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), ), enforce_whitespace_preceding_terminator=True, ) parse_grammar = Ref("SelectClauseSegmentGrammar") This says the :code:`SelectClauseSegment` starts with :code:`SELECT` or :code:`SELECT *` and ends when it encounters a :code:`FROM`, :code:`WHERE`, :code:`ORDER`...etc. line. The :code:`match_grammar` is what is used primarily to try to match and parse the statement. 
It can be relatively simple (as in this case) to quickly match just the
start and terminating clauses. If that is the case, then a
:code:`parse_grammar` is needed to actually delve into the statement
itself with all the clauses and parts it is made up of. The
:code:`parse_grammar` can be fully defined in the class or, like the
above example, reference another class with the definition.

The :code:`match_grammar` is used to quickly identify the start and end
of this block, as parsing can be quite intensive and complicated as the
parser tries various combinations of classes and segments to match the
SQL (particularly optional ones like the
:code:`WildcardExpressionSegment` above, or when there is a choice of
statements that could be used).

For some statements a quick match is not needed, and so we can delve
straight into the full grammar definition. In that case the
:code:`match_grammar` will be sufficient and we don't need the optional
:code:`parse_grammar`.

Here's another statement, which only uses the :code:`match_grammar` and
doesn't have (or need!) an optional :code:`parse_grammar`:

.. code-block:: python

    class JoinOnConditionSegment(BaseSegment):
        """The `ON` condition within a `JOIN` clause."""

        type = "join_on_condition"
        match_grammar = Sequence(
            "ON",
            Indent,
            OptionallyBracketed(Ref("ExpressionSegment")),
            Dedent,
        )

You may have noticed that a segment can refer to another segment, and
that is a good way of splitting up a complex SQL expression into its
component parts to manage and handle them separately.

Segment grammar options
^^^^^^^^^^^^^^^^^^^^^^^

There are a number of options when creating SQL grammar including:

.. list-table::
   :header-rows: 1

   * - Grammar
     - Used For
     - Example
   * - :code:`"KEYWORD"`
     - Having a raw SQL keyword
     - :code:`"SELECT"`
   * - :code:`Sequence()`
     - Having a known sequence of Keywords or Segments
     - :code:`Sequence("SELECT", Ref("SelectClauseElementSegment"), "FROM"...)`
   * - :code:`AnyNumberOf()`
     - Choose from a set of options which may be repeated
     - :code:`"SELECT", AnyNumberOf(Ref("WildcardExpressionSegment"), Ref("ColumnReferenceSegment")...)...`
   * - :code:`OneOf()`
     - A more restrictive form of :code:`AnyNumberOf`, limited to just one option
     - :code:`OneOf("INNER", "OUTER", "FULL"), "JOIN"`
   * - :code:`Delimited()`
     - Used for lists (e.g. comma-delimited - which is the default)
     - :code:`"SELECT", Delimited(Ref("SelectClauseElementSegment")), "FROM"...`
   * - :code:`Bracketed()`
     - Used for bracketed options - like function parameters
     - :code:`Ref("FunctionNameSegment"), Bracketed(Ref("FunctionContentsGrammar"))`

Some of the keywords have extra params you can give them, the most
commonly used being :code:`optional=True`. This allows you to further
define the make up of a SQL statement. Here's the
:code:`DeleteStatementSegment` definition:

.. code-block:: python

    parse_grammar = Sequence(
        "DELETE",
        Ref("FromClauseSegment"),
        Ref("WhereClauseSegment", optional=True),
    )

You can see the :code:`WHERE` clause is optional (many's a head has
been shaken because of deletes without :code:`WHERE` clauses I'm sure,
but that's what SQL syntax allows!).

Using these Grammar options, it's possible to build up complex
structures to define SQL syntax.

Segments and Grammars
^^^^^^^^^^^^^^^^^^^^^

A Segment is a piece of the syntax which defines a :code:`type` (which
can be useful to reference later in rules or parse trees). This can be
through one of the functions that creates a Segment (e.g.
:code:`NamedParser`, :code:`SegmentGenerator`...etc.) or through a
class.
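For example, here is a minimal, illustrative sketch of both approaches
(the names are hypothetical, and the exact helper functions available
vary between SQLFluff versions):

.. code-block:: python

    # Via a parser function: matches the literal TRUE or FALSE and
    # produces a keyword segment directly.
    BooleanLiteralGrammar = OneOf(
        StringParser("TRUE", KeywordSegment),
        StringParser("FALSE", KeywordSegment),
    )


    # Via a class: a named segment with its own type, built from a grammar.
    class MyExampleSegment(BaseSegment):
        """A hypothetical segment, for illustration only."""

        type = "my_example"
        match_grammar = Sequence(
            "FOO",
            Ref("ObjectReferenceSegment"),
        )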
A Grammar is a section of syntax that can be used in a Segment.
Typically these are created to avoid repeating the same code in
multiple places. Think of a Grammar as an alias for a piece of syntax
to avoid you having to type out the same code again and again and
again.

The other good thing about Grammars is that they allow other dialects
to override a specific part of a Segment without having to redefine the
whole thing just to tweak one small part. For example ansi defines
this:

.. code-block:: python

    NotOperatorGrammar=StringParser("NOT", KeywordSegment, type="keyword")

whereas mysql overrides this to:

.. code-block:: python

    NotOperatorGrammar=OneOf(
        StringParser("NOT", KeywordSegment, type="keyword"),
        StringParser("!", CodeSegment, name="not_operator", type="not_operator"),
    ),

This allows MySQL to use :code:`!` in all the places that :code:`NOT`
was used (providing they use :code:`NotOperatorGrammar` rather than
hardcode the :code:`NOT` keyword of course). This makes it much easier
to customise syntax to a particular dialect without having to copy and
paste (and maintain) nearly identical code multiple times just to add
the extra :code:`!` syntax that MySQL supports to mean :code:`NOT`.

Dialects
^^^^^^^^

A lot of SQL is the same no matter which particular type of SQL you are
using. The basic :code:`SELECT.. FROM... WHERE` statement is common to
them all. However lots of different SQL dialects (Postgres, Snowflake,
Oracle... etc.) have sprung up as different companies have implemented
SQL, or expanded it, for their own needs. For this reason, SQLFluff
allows creating *dialects*, which can have different grammars from each
other.

SQLFluff has all the dialects in the `src/sqlfluff/dialects`_ folder.
The main dialect file (that every other dialect ultimately inherits
from) is the `dialect_ansi.py`_ file.

In SQLFluff, a dialect is basically a file which inherits everything
from the original ANSI dialect, and then adds or overrides parsing
segments. If a dialect has the exact same :code:`SELECT`, :code:`FROM`
and :code:`WHERE` clauses as ANSI but a different :code:`ORDER BY`
syntax, then only the :code:`ORDER BY` clause needs to be overridden,
so the dialect file will be very small. For some of the other dialects
where there's lots of differences (:ref:`tsql_dialect_ref`!) you may be
overriding a lot more.

.. _`src/sqlfluff/dialects`: https://github.com/sqlfluff/sqlfluff/tree/main/src/sqlfluff/dialects

Lexing
^^^^^^

I kind of skipped this part, but before a piece of SQL can be *parsed*,
it is *lexed* - that is, split up into symbols and logical groupings.
An inline comment, for example, is defined as this:

.. code-block:: python

    RegexLexer(
        "inline_comment",
        r"(--|#)[^\n]*",
        CommentSegment,
        segment_kwargs={"trim_start": ("--", "#")},
    ),

That is, anything after :code:`--` or :code:`#` to the newline. This
allows us to deal with that whole comment as one lexed block and so we
don't need to define how to parse it (we even give that a parsing
segment name here - :code:`CommentSegment`).

For simple grammar additions, you won't need to touch the lexing
definitions as they usually cover most common ones already. But for
slightly more complicated ones, you may have to add to this. So if you
see lexing errors then you may have to add something here.

Lexing happens in order. So it starts reading the SQL from the start,
until it has the longest lexing match, then it chomps that up, files it
away as a symbol to deal with later in the parsing, and starts again
with the remaining text.
So if you have :code:`SELECT * FROM table WHERE col1 = 12345` it will
not break that up into :code:`S`, :code:`E`, :code:`L`...etc., but
instead into :code:`SELECT`, :code:`*`, :code:`FROM`,
:code:`table`...etc.

An example of where we had to override lexing is in BigQuery, which has
parameterised variables of the form :code:`@variable_name`. The ANSI
lexer doesn't recognise the :code:`@` sign, so you could add a grammar
or segment for that. But a better solution, since you don't need to
know the two parts (:code:`@` and :code:`variable_name`) separately, is
to just tell the lexer to go ahead and parse the whole thing into one
big symbol, that we will then use later in the parser:

.. code-block:: python

    bigquery_dialect.insert_lexer_matchers(
        [
            RegexLexer("atsign_literal", r"@[a-zA-Z_][\w]*", CodeSegment),
        ],
        before="equals",
    )

Note the :code:`before="equals"` which means we tell the lexer the
order of preference to try to match this symbol. For example if we'd
defined an :code:`at_sign` lexing rule for other, standalone :code:`@`
usage, then we'd want this to be considered first, and only fall back
to that if we couldn't match this.

.. _dialect_keywords:

Keywords
^^^^^^^^

Most dialects have a keywords file, listing all the keywords. Some
dialects just inherit the ANSI keywords and then add or remove keywords
from that. Not quite as accurate as managing the actual keywords, but a
lot quicker and easier to manage usually!

Keywords are separated into RESERVED and UNRESERVED lists. RESERVED
keywords have extra restrictions meaning they cannot be used as
identifiers. If using a keyword in grammar (e.g. :code:`"SELECT"`),
then it needs to be in one of the Keywords lists, so you may have to
add it, or you might see errors like this (showing :code:`"NAN"` has
not been added as a Keyword in this dialect)::

    RuntimeError: Grammar refers to 'NanKeywordSegment' which was not found in the redshift dialect

Also, if editing the main ANSI dialect and adding to the ANSI keyword
list, then take care to consider whether the keyword also needs to be
added to the other dialects that will inherit this syntax - usually
yes, unless it is explicitly overridden in those dialects.

Where to find the grammar for your database
-------------------------------------------

Now that you know about some of the tools SQLFluff provides for lexing
and parsing a SQL statement, what changes will you make to it? While
devising ad-hoc changes to the grammar to fix particular issues can be
better than nothing, the best and most robust contributions will be
created by consulting the source of truth for the grammar of your
dialect when mapping it to SQLFluff segments and grammars. This will
help you exhaustively find all possible statements that would be
accepted by the dialect.

Many computer languages are written using venerable tools like `Flex`_
and `Bison`_, or similar parser generators, and SQL database engines
are no exception. You can refer to the parser specification in the
source code of your database engine for the ultimate source of truth of
how a SQL statement will be parsed: you might be surprised at what your
SQL engine will parse due to gaps in the documentation!

You should also refer to the reference documentation for your SQL
dialect to get a concise high-level overview of what the statement
grammar looks like, as well as read of any further restrictions and
intended use of the grammar that you find.

If your SQL engine is closed-source, then you'll likely have only the
reference documentation to work with.
However, this will always be a less-accurate resource than the bison grammar that's actually used for code generation inside the database engine itself. It is also extremely helpful to try parsing the queries that you put into the test fixtures to make sure that they are actually parsable by the database engine. They don't have to be *valid* queries per se (can refer to non-existing table names, etc), but you should confirm that they are *parsable*. We do not want to *require* that SQLFluff be able to parse a statement that the actual database engine would reject: overeager matching logic can create parsing issues elsewhere. Here is a list of grammars and parsing techniques for some of the dialects implemented by SQLFluff: .. _`Flex`: https://en.wikipedia.org/wiki/Flex_(lexical_analyser_generator) .. _`Bison`: https://en.wikipedia.org/wiki/GNU_Bison ANSI SQL ^^^^^^^^ Unfortunately, the ANSI SQL standard is not free. If you want a licensed copy of the latest standard, it must be purchased: `Part 2`_ is the most useful section for SQLFluff since it contains the grammar. There are, however, other resources you can find on the Internet related to this standard: * `modern-sql.com/standard`_: has a discussion on the various parts of the standard, and links to some older/draft versions of it that are out there. * `jakewheat.github.io/sql-overview`_: has a nice browsable view of (only) the BNF grammar. * `web.cecs.pdx.edu/~len/sql1999.pdf`_: a copy of the (much older) SQL:1999 standard. * `developer.mimer.com/services/mimer-sql-validator/`_: the SQL-2016 validator can be used to verify if a query can be parsed using the ANSI standard. .. _`Part 2`: https://webstore.ansi.org/standards/iso/isoiec90752016-1646101 .. _`modern-sql.com/standard`: https://modern-sql.com/standard .. _`jakewheat.github.io/sql-overview`: https://jakewheat.github.io/sql-overview/ .. _`web.cecs.pdx.edu/~len/sql1999.pdf`: http://web.cecs.pdx.edu/~len/sql1999.pdf .. _`developer.mimer.com/services/mimer-sql-validator/`: https://developer.mimer.com/services/mimer-sql-validator/ PostgreSQL ^^^^^^^^^^ Simply Googling for :code:`pg ` will often bring up the documentation for an older PG version. Please be sure you're referring to the latest version of the documentation, as well as refer to the bison grammar. * `PostgreSQL Bison grammar `_ * `PostgreSQL Flex scanner `_ * `More information about the parsing stage `_ * `Reference documentation for Postgres SQL statements `_ * To check if a statement is parseable, simply paste it into :code:`psql`. If you get a :code:`ERROR: syntax error` then it means that it can't be parsed. These queries do not need to be parsed by SQLFluff; please do not include them in the main test fixtures. If you get a different error, then it means the query was parsed successfully, and might have failed for a different reason (e.g. non-existing column name, etc). In that case, it's probably best if SQLFluff can also parse it. * The `pgsql-parser `_ tool wraps the official PostgreSQL source code & bison grammar linked above into a simple CLI tool. You can use it if you want to view the exact parse tree that PG can see. MySQL ^^^^^ * `Reference documentation for MySQL SQL statements `_ * `MySQL Bison grammar `_ * To check if a statement is parsable, simply paste it into :code:`mysql`. Look for :code:`ERROR 1064 (42000): You have an error in your SQL syntax` to indicate a parse error. 
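For instance, here is a rough sketch of what such a check looks like in
:code:`psql` (the table name is invented; real sessions will also print
a ``LINE`` marker pointing at the offending token):

.. code-block:: text

    -- A syntax error: the statement never parsed, so don't use it as
    -- a SQLFluff test fixture.
    mydb=# SELECT 1 FRM some_table;
    ERROR:  syntax error at or near "FRM"

    -- A semantic error: the statement *did* parse, so it is fine as a
    -- parsing test fixture even though it won't run.
    mydb=# SELECT 1 FROM some_table;
    ERROR:  relation "some_table" does not exist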
Example of contributing a syntax fix ------------------------------------ So that's a bit of theory but let's go through some actual examples of how to add to the SQLFluff code to address any issues you are seeing. In this I'm not going to explain about how to set up your Python development environment (see the :ref:`development` and the `CONTRIBUTING.md`_ file for that), nor how to manage Git (see our :ref:`using_git` guide if new to that, and we use the standard “Fork, and then open a PR” workflow common to GitHub projects). .. _`CONTRIBUTING.md`: https://github.com/sqlfluff/sqlfluff/blob/main/CONTRIBUTING.md So assuming you know (or are willing to follow above guides to find out!) how to set up Python environment, and commit via Git, how do you contribute a simple fix to a dialect for syntax you want SQLFluff to support? Example 1 ^^^^^^^^^ If we look at issue `#1520 `_ it was raised to say we couldn't parse this: .. code-block:: sql CREATE OR REPLACE FUNCTION public.postgres_setof_test() RETURNS SETOF text and instead returned this message:: Found unparsable section: 'CREATE OR REPLACE FUNCTION crw_public.po...' This was in the :code:`postgres` dialect, so I had a look at `dialect_postgres.py`_ and found the code in :code:`CreateFunctionStatementSegment` which had the following: .. _`dialect_postgres.py`: https://github.com/sqlfluff/sqlfluff/blob/main/src/sqlfluff/dialects/dialect_postgres.py .. code-block:: python parse_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment") ), ), delimiter=Ref("CommaSegment"), ) ), optional=True, ), Ref("DatatypeSegment"), ), optional=True, ), Ref("FunctionDefinitionGrammar"), ) So it allowed returning a table, or a datatype. Fixing the issue was as simple as adding the :code:`SETOF` structure as another return option: .. code-block:: python parse_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment") ), ), delimiter=Ref("CommaSegment"), ) ), optional=True, ), Sequence( "SETOF", Ref("DatatypeSegment"), ), Ref("DatatypeSegment"), ), optional=True, ), Ref("FunctionDefinitionGrammar"), ) With that code the above item could parse. I added a test case (covered below) and submitted `pull request #1522 `_ to fix this. Example 2 ^^^^^^^^^ If we look at issue `#1537 `_ it was raised to say we couldn't parse this: .. 
code-block:: sql

    select 1 from group

And threw this error::

    ==== parsing violations ====
    L: 1 | P: 10 | PRS | Line 1, Position 10: Found unparsable section: 'from'
    L: 1 | P: 14 | PRS | Line 1, Position 14: Found unparsable section: ' group'

The reporter had also helpfully included the parse tree (produced by
:code:`sqlfluff parse`)::

    [L: 1, P: 1]  |file:
    [L: 1, P: 1]  |    statement:
    [L: 1, P: 1]  |        select_statement:
    [L: 1, P: 1]  |            select_clause:
    [L: 1, P: 1]  |                keyword:        'select'
    [L: 1, P: 7]  |                [META] indent:
    [L: 1, P: 7]  |                whitespace:     ' '
    [L: 1, P: 8]  |                select_clause_element:
    [L: 1, P: 8]  |                    literal:    '1'
    [L: 1, P: 9]  |            whitespace:         ' '
    [L: 1, P: 10] |            [META] dedent:
    [L: 1, P: 10] |            from_clause:
    [L: 1, P: 10] |                unparsable:     !! Expected: 'FromClauseSegment'
    [L: 1, P: 10] |                    keyword:    'from'
    [L: 1, P: 14] |                unparsable:     !! Expected: 'Nothing...'
    [L: 1, P: 14] |                    whitespace: ' '
    [L: 1, P: 15] |                    raw:        'group'
    [L: 1, P: 20] |                newline:        '\n'

So the problem was it couldn't parse the :code:`FromClauseSegment`.
Looking at that definition showed this:

.. code-block:: python

    FromClauseTerminatorGrammar=OneOf(
        "WHERE",
        "LIMIT",
        "GROUP",
        "ORDER",
        "HAVING",
        "QUALIFY",
        "WINDOW",
        Ref("SetOperatorSegment"),
        Ref("WithNoSchemaBindingClauseSegment"),
    ),

So the parser was terminating as soon as it saw the :code:`GROUP` and
saying *"hey we must have reached the end of the :code:`FROM` clause"*.
This was a little restrictive, so changing that to this solved the
problem:

.. code-block:: python

    FromClauseTerminatorGrammar=OneOf(
        "WHERE",
        "LIMIT",
        Sequence("GROUP", "BY"),
        Sequence("ORDER", "BY"),
        "HAVING",
        "QUALIFY",
        "WINDOW",
        Ref("SetOperatorSegment"),
        Ref("WithNoSchemaBindingClauseSegment"),
    ),

You can see we simply replaced the :code:`"GROUP"` with a
:code:`Sequence("GROUP", "BY")` so it would *only* match if both words
were given. Rechecking the example with this changed code showed it now
parsed. We did the same for :code:`"ORDER"`, also changed a few other
places in the code with similar clauses, added a test case (covered
below), and submitted `pull request #1546 `_ to fix this.

Example 3
^^^^^^^^^

As an example of using the reference grammar to fix an existing
SQLFluff grammar, `pull request #4744 `_ contributed the
:code:`CREATE CAST` / :code:`DROP CAST` statements to SQLFluff from
scratch for both ANSI and PostgreSQL dialects.

The first step when contributing a new statement is to check whether
the statement is part of the ANSI standard. If it is, then you very
likely should first start by adding a generally vendor-neutral version
to the SQLFluff ANSI dialect so that other dialects can inherit from
it. Every database engine deviates from the ANSI standard in practice,
but by adding a reasonably standard segment to the ANSI dialect, you'll
probably do a reasonable thing for most other database dialects.

In this case, `CREATE and DROP CAST were indeed defined in the ANSI
standard `, as a quick search of the document revealed::

    <user-defined cast definition> ::=
        CREATE CAST <left paren> <source data type> AS <target data type> <right paren>
            WITH <cast function> [ AS ASSIGNMENT ]

So the first step was to read this ANSI BNF grammar and use it to build
a corresponding vendor-neutral :code:`CreateCastSegment` in
`dialect_ansi.py`_.

.. code-block:: python

    class CreateCastStatementSegment(BaseSegment):
        """A `CREATE CAST` statement.
https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_63_user_defined_cast_definition """ type = "create_cast_statement" match_grammar: Matchable = Sequence( "CREATE", "CAST", Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), "WITH", Ref.keyword("SPECIFIC", optional=True), OneOf( "ROUTINE", "FUNCTION", "PROCEDURE", Sequence( OneOf("INSTANCE", "STATIC", "CONSTRUCTOR", optional=True), "METHOD", ), ), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), Sequence("FOR", Ref("ObjectReferenceSegment"), optional=True), Sequence("AS", "ASSIGNMENT", optional=True), ) # Not shown: register the CreateCastStatementSegment in StatementSegment As you work your way through the grammar, think about whether other parts of the SQL language might contain similar elements. For example, here we noticed that there are already segments we can reuse for data types, function names, and function parameter lists. This helped simplify our new grammar, as well as make it easy to centrally change those particular areas of the grammar in other dialects. Also consider whether there are entire new segments and grammars you should separately define in addition to the root statement segment you're writing. Introducing new and reusing existing segments adds structure to the SQLFluff parse tree that can make it easier for lint rules to analyze the tree. *A strong indicator that there should be a shared* *segment or grammar is when the reference grammar has a symbol that is reused* *from multiple other symbols/statements*. After writing the ANSI segment (and corresponding tests), it was time to move on to the PostgreSQL grammar. In this case, a quick glance at the `documentation `_ shows us that there are some notable differences from ANSI SQL: * You can only specify :code:`FUNCTION`. Other keywords like :code:`ROUTINE` and :code:`PROCEDURE` are rejected. * The `SPECIFIC` keyword is not supported. * Most importantly: PG provides some non-standard extensions which we'd like to include, like :code:`WITHOUT FUNCTION` and :code:`AS IMPLICIT`. However, we should also consult the `bison grammar for CREATE CAST`_. Bison grammars tend to be very lengthy and daunting, but the right techniques can help you quickly and easily find what you're looking for: * Search for a symbol by adding a :code:`:` to the end of it. * Start with the highest level of the thing you are looking for. For example, start with the top-level statement symbol. With PostgreSQL, all statements end with :code:`Stmt`. Putting it all together, if we search for :code:`CreateCastStmt:`, that takes us right to the definition for it. * Drill down into deeper parts of the parser to learn more. For example, we see :code:`function_with_argtypes` in the sequence; if we want to know what that means, search for :code:`function_with_argtypes:` to find it. Examining the Bison grammar can take a few extra minutes, but it can be rewarding. You'll be surprised what you might learn. I've found entire alternate spellings of keywords in there that were not in the documentation, and which testing showed were indeed valid SQL! The grammar in PG documentation is `human-maintained`_ and not auto-generated, so there can be and are gaps between what is parsable and what is documented. .. _`bison grammar for CREATE CAST`: https://github.com/postgres/postgres/blob/e0693faf797f997f45bee8e572e8b4288cc7eaeb/src/backend/parser/gram.y#L8938 .. 
_`human-maintained`: https://github.com/postgres/postgres/blob/master/doc/src/sgml/ref/create_cast.sgml

A good approach if you're still learning might be to draft a segment
from the high-level documentation, and then systematically go through
it with the bison grammar and verify it's correct (and that you're not
forgetting anything).

One aspect of bison grammars to be aware of is that they tend to be
very recursive, because bison doesn't have the high-level constructs
such as :code:`AnyOf`, :code:`Delimited`, :code:`Bracketed`, and so on
that SQLFluff provides. On the other hand, SQLFluff doesn't scale well
with recursion. Sometimes it's unavoidable, and in many cases (e.g. a
parenthesized expression) it is reasonable to refer to another segment
recursively. But many times the recursion is extremely trivial, and
should always be rewritten using an existing high-level SQLFluff
concept. For example, this bison grammar defines a bracketed
comma-delimited list which would be better represented using
:code:`Bracketed` and :code:`Delimited` in SQLFluff::

    func_args:  '(' func_args_list ')'
                    { $$ = $2; }
                | '(' ')'
                    { $$ = NIL; }
        ;

    func_args_list:
                func_arg
                    { $$ = list_make1($1); }
                | func_args_list ',' func_arg
                    { $$ = lappend($1, $3); }
        ;

Example 4
^^^^^^^^^

As an example of using the reference grammar to fix an existing
SQLFluff grammar, `issue #4336 `_ reported that array slices were not
being parsed correctly in PostgreSQL. A simple :code:`SELECT` statement
was given that I further simplified to the following test case:

.. code-block:: sql

    SELECT a[2:2+3];

Obviously, we know that a simple query like :code:`SELECT a;` would
parse, so it's surely related to the array access. I started by looking
up the bison grammar for PostgreSQL's :code:`SELECT` statement and
drilling down into it to find an array accessor symbol; searching for
:code:`SelectStmt:` proved to be a `lucky guess to start with`_::

    SelectStmt: select_no_parens        %prec UMINUS
                | select_with_parens    %prec UMINUS
        ;

.. _`lucky guess to start with`: https://github.com/postgres/postgres/blob/e0693faf797f997f45bee8e572e8b4288cc7eaeb/src/backend/parser/gram.y#L12497-L12504
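When exploring :code:`gram.y` locally, a plain text search is often the
quickest way to jump between symbols (an illustrative sketch; the path
assumes a checkout of the PostgreSQL source tree):

.. code-block:: text

    # Bison rules are defined at the start of a line, followed by a colon.
    $ grep -n "^SelectStmt:" src/backend/parser/gram.y
    $ grep -n "^indirection_el:" src/backend/parser/gram.y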
* Most importantly, and most immediately relevant to our problem: each slice
  bound is optional, and if it is present, then it is an expression.

Now that we've looked up the relevant PG grammar, we can dig into the
corresponding SQLFluff grammar in a similar top-down way:
:code:`postgres.SelectStatementSegment` --> we see it's mostly a copy of the
ANSI select statement, so --> :code:`ansi.SelectStatementSegment` -->
remember :code:`Ref` always picks the dialect-specific grammar first -->
:code:`postgres.SelectClauseSegment` -->
:code:`ansi.SelectClauseSegment.parse_grammar` -->
:code:`postgres.SelectClauseSegmentGrammar` -->
:code:`ansi.SelectClauseElementSegment` -->
:code:`ansi.BaseExpressionElementGrammar` --> :code:`ansi.ExpressionSegment`
--> :code:`ansi.Expression_A_Grammar` --> :code:`ansi.Expression_C_Grammar`
--> :code:`ansi.Expression_D_Grammar` --> notice this at the end of the
sequence --> :code:`postgres.Accessor_Grammar` -->
:code:`postgres.ArrayAccessorSegment`. As you navigate, always remember to
check for a dialect-specific grammar before falling back to the inherited
grammar (e.g. ANSI).

Finally, we have found the part of the grammar that corresponds to the
:code:`indirection_el` in the bison grammar!

.. code-block:: python

    class ArrayAccessorSegment(ansi.ArrayAccessorSegment):
        """Overwrites Array Accessor in ANSI to allow n many consecutive brackets.

        Postgres can also have array access like python [:2] or [2:] so numbers
        on either side of the slice segment are optional.
        """

        match_grammar = Sequence(
            AnyNumberOf(
                Bracketed(
                    Sequence(
                        OneOf(
                            OneOf(
                                Ref("QualifiedNumericLiteralSegment"),
                                Ref("NumericLiteralSegment"),
                            ),
                            Sequence(
                                OneOf(
                                    Ref("QualifiedNumericLiteralSegment"),
                                    Ref("NumericLiteralSegment"),
                                    optional=True,
                                ),
                                Ref("SliceSegment"),
                                OneOf(
                                    Ref("QualifiedNumericLiteralSegment"),
                                    Ref("NumericLiteralSegment"),
                                ),
                            ),
                            Sequence(
                                OneOf(
                                    Ref("QualifiedNumericLiteralSegment"),
                                    Ref("NumericLiteralSegment"),
                                ),
                                Ref("SliceSegment"),
                                OneOf(
                                    Ref("QualifiedNumericLiteralSegment"),
                                    Ref("NumericLiteralSegment"),
                                    optional=True,
                                ),
                            ),
                        ),
                    ),
                    bracket_type="square",
                )
            )
        )

Looking at this, we can make a few observations. The most glaring are that:

* Only numeric literals are accepted! No expressions. Clearly, that's the
  source of the issue that the person reported.
* But while we are here, notice another problem we can fix: when a
  :code:`SliceSegment` (a |colon|) is present, you're forced to include a
  numeric literal either before or after the SliceSegment. You can't have
  :code:`[:]`, even though that's valid SQL that PG can parse.

.. |colon| raw:: html

   :

At this point, it's a simple matter of simplifying & rewriting the grammar to
fix these shortcomings and better align it with the bison grammar, which was
done in `pull request #4748 <https://github.com/sqlfluff/sqlfluff/pull/4748>`_.

Testing your changes
--------------------

So you've made your fix, and you've tested that it fixes the original
problem, so you can just submit that change and all is good now? Well, no.
You want to do two further things:

* Test that your change hasn't broken anything else. To do that you run the
  test suite.
* Add a test case, so others can check this in future.

To test your changes you'll need to have your environment set up (again, see
the `CONTRIBUTING.md`_ file for how to do that).

Adding test cases for your changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Adding a test case is simple. Just add a SQL file to
`test/fixtures/dialects/`_ in the appropriate dialect directory. You can
either expand an existing SQL file test case (e.g.
if adding something similar to what's in there) or create a new one. I advise
adding the original SQL raised in the issue, and if you have examples from
the official syntax documentation, those are always good test cases to add as
well. For example, the `Snowflake documentation has an example section`_ at
the bottom of every syntax definition, so just copy them all into your
example file too. You should also use the reference grammar to exhaustively
test various pedantic combinations of syntax. The SQL doesn't have to be
runnable. It just needs to parse correctly into the right structure, and be a
statement that can get past the parsing stage of the database engine. The
documentation often includes simpler examples that might not reflect all the
real-world possibilities. While referring to the reference documentation /
bison grammar, try to come up with a statement that uses as much of the
grammar as it can!

Be sure to verify that the SQL statements in your test are *actually parsable
by the database engine!* An easy way to do that is often to copy/paste the
statement into the console and try running it, or to use a CLI parsing tool
that *uses the same source code as the database engine* (e.g. pgsql-parser).
An error is ok (e.g. invalid column name), as long as it's not a syntax error
from parsing. Check the reference section at the top of this document for
dialect-specific resources.

.. _`test/fixtures/dialects/`: https://github.com/sqlfluff/sqlfluff/tree/main/test/fixtures/dialects
.. _`Snowflake documentation has an example section`: https://docs.snowflake.com/en/sql-reference/sql/select.html#examples

YML test fixture files
^^^^^^^^^^^^^^^^^^^^^^

In addition to the SQL files, we have auto-generated YAML counterparts for
them. The YAML contains the parsed version of the SQL. Having these in our
source code allows us to easily see when they change: if someone redefines a
syntax in a way that changes how a SQL statement is parsed, the SQL won't
change but the parse tree will. By checking the YAML in with any pull
request, we can spot that and make sure we're comfortable that the change is
expected. In most cases (except when adding new test cases, obviously!) you
would not expect unrelated YML files to change, so this is a good check.

To regenerate all the YAML files when you add or edit any test fixture SQL
files, run the following command:

.. code-block:: bash

    tox -e generate-fixture-yml

You can also do the following to only generate for a particular dialect, or
only for new and changed files, which is often quicker:

.. code-block:: bash

    tox -e generate-fixture-yml -- --dialect postgres
    tox -e generate-fixture-yml -- --new-only

It takes a few minutes to run, and regenerates all the YAML files. You can
then do a :code:`git status` to see any differences.

When making changes, make sure to check the post-parse structure from the
test output or from the associated YAML file: check that each query element
is typed correctly. Typical bugs are that a standalone keyword (such as
:code:`INTERVAL`) is parsed as a function name, or that an element that
should be a :code:`date_part` is parsed as an :code:`identifier`. Typically
there is no need to write assertions by hand, but it's the developer's
responsibility to verify the structure from the auto-generated YAML. One
should not assume that everything is working just because no parsing error is
raised.
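To make this loop concrete, here is a minimal sketch of the whole fixture
workflow. The fixture file name :code:`array_slice.sql` is purely
illustrative (it is not an existing fixture in the repo):

.. code-block:: bash

    # Create a new test fixture (hypothetical file name, for illustration only)
    echo "SELECT a[2:2+3];" > test/fixtures/dialects/postgres/array_slice.sql

    # Regenerate YAML for new and changed fixtures only (faster than a full run)
    tox -e generate-fixture-yml -- --new-only

    # Review the generated parse tree and confirm each element is typed correctly
    cat test/fixtures/dialects/postgres/array_slice.yml

    # Check that no unrelated fixture files have changed
    git status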
Running the test suite
^^^^^^^^^^^^^^^^^^^^^^

For the basic setup, see the local testing section of the `CONTRIBUTING.md`_
file first.

There are a few ways of running the test suite. You could just run the
:code:`tox` command, but this will run all the test suites, for various
python versions, and with and without dbt, and will take a long time. Best to
leave that to our CI infrastructure. You just want to run what you need to
have reasonable confidence before submitting.

Testing a single fixture
^^^^^^^^^^^^^^^^^^^^^^^^

The :code:`dialects_test` is parametrized to automatically pick up all files
under :code:`test/fixtures/dialects/`. For example, if you're adding or
modifying :code:`dialects/hive/select_interval.sql`, you can test that with:

.. code-block:: bash

    tox -e py39 -- -s test/dialects/dialects_test.py -k hive-select_interval.sql

The :code:`-s` flag for pytest enables printing of the post-parse structure,
which allows you to quickly check that each query element is typed correctly.
The same can be seen in the generated fixture YAML file.

To run it a bit faster, you can invoke :code:`pytest` directly (this requires
that you have activated the project venv):

.. code-block:: bash

    pytest -s test/dialects/dialects_test.py -k hive-select_interval.sql

Running all dialect tests
^^^^^^^^^^^^^^^^^^^^^^^^^

The following command runs just the dialect tests, for **all** dialects:

.. code-block:: bash

    tox -e py39 -- test/dialects/dialects_test.py

The following command runs just the dialect tests, for **a specific**
dialect:

.. code-block:: bash

    tox -e py39 -- test/dialects/dialects_test.py -k ansi

Or, if making a dialect change to fix a rule that is incorrectly flagging,
you can just run the tests for that one rule. For example, to run the
:sqlfluff:ref:`LT01` tests:

.. code-block:: bash

    tox -e py39 -- -k LT01 test

Final checks before committing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For formatting and linting it's usually enough to rely on the
`pre-commit hook`_.

.. _`pre-commit hook`: https://github.com/sqlfluff/sqlfluff/blob/main/CONTRIBUTING.md#pre-commit-config

Run all tests (but only on one Python version, and without dbt):

.. code-block:: bash

    tox -e py311

I like to kick that off just before opening a PR, but it does take ~10
minutes to run. If you also want coverage & linting, run this instead (it
takes even more time):

.. code-block:: bash

    tox -e generate-fixture-yml,cov-init,py311,cov-report,linting

Note that the coverage tests require several environments to run (Windows,
and dbt), so they can report missing coverage when run locally. The rest can
be left for the CI to check. Regardless of what testing you do, GitHub will
run the full regression suite when the PR is opened or updated. Note that
first-time contributors will need a maintainer to kick off the tests until
their first PR is merged.

Black code linting
^^^^^^^^^^^^^^^^^^

These tools are run automatically by the `pre-commit hook`_, but can also be
run manually for those not using that. We use `ruff`_ to lint our python code
(being a linter ourselves, we should have high quality code!). Our CI, or the
:code:`tox` commands above, will run this and flag any errors. In most cases
running `black`_ on the python file(s) will correct any simple errors (e.g.
line formatting), but for some you'll need to run `ruff`_ to see the issues
and manually correct them.

.. _`ruff`: https://docs.astral.sh/ruff/
.. _`black`: https://github.com/psf/black

Submitting your change
----------------------

We use the standard GitHub workflow, so simply fork the repo, clone it
locally, make the change, push it to your fork, then open a pull request back
to the original SQLFluff repo. There's lots more info in our :ref:`using_git`
guide if you're new to Git.

Once you open the PR, the CI tests will run, and should complete after 5-10
minutes. If all is green, then a maintainer will pick it up as soon as they
can. Having a good, easy to understand, small PR with all the tests passing
makes it easier to review, and so more likely to be merged quickly.

Questions
---------

Feel free to open up any issues on GitHub, or join the :ref:`sqlfluff_slack`
for any quick questions to the community/maintainers.
sqlfluff-3.4.2/docs/source/guides/contributing/docs.rst000066400000000000000000000122671503426445100232570ustar00rootroot00000000000000Documentation Contributions
===========================

Contributing to the docs is one of the easiest and most helpful ways to help
the project. Documentation changes require relatively little specialist
knowledge, apart from being familiar with how to use SQLFluff, and the docs
are read by a very wide range of people.

Documentation takes two forms:

1. Embedded documentation found in function and module `docstrings`_.
2. The free-standing documentation which you're reading now, and hosted at
   `docs.sqlfluff.com`_ (built using `sphinx`_ and `ReadtheDocs`_).

The two are somewhat blurred by the use of `autodoc`_ (and some other custom
integrations), where documentation is generated directly off `docstrings`_
within the codebase, for example the :ref:`ruleref`, :ref:`cliref` and
:ref:`dialectref`. To understand more about the custom integrations we use to
generate these docs, see the `generate-auto-docs.py`_ file.

.. _`docstrings`: https://en.wikipedia.org/wiki/Docstring
.. _`docs.sqlfluff.com`: https://docs.sqlfluff.com
.. _`autodoc`: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
.. _`generate-auto-docs.py`: https://github.com/sqlfluff/sqlfluff/blob/main/docs/generate-auto-docs.py
.. _`ReadtheDocs`: https://about.readthedocs.com/

Docstrings
----------

Embedded documentation of functions, classes and modules is most useful for
*developer-focussed* documentation, as it's most accessible in the places
where those developers are working: *directly in the codebase*. We enforce
that docstrings are present and correctly formatted using the
`pydocstyle rules for ruff`_, which we have configured to enforce the
`google style of docstrings`_.

.. _`pydocstyle rules for ruff`: https://docs.astral.sh/ruff/rules/#pydocstyle-d
.. _`google style of docstrings`: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html

Sphinx Docs
-----------

The main documentation (which you're reading now) is built using `sphinx`_,
and written using `reStructuredText`_ (files ending with :code:`.rst`). The
`sphinx`_ project offers a `reStructuredText primer`_ for people who are new
to the syntax (and the SQLFluff project uses `doc8`_ in the CI process to try
and catch any issues early).

On top of those docs, there are a few areas worth highlighting for new (or
returning) users, which are either specific to the SQLFluff project, or not
particularly clear in the sphinx docs:

* `reStructuredText`_ is very similar to, but differs from (the somewhat more
  well known) `Markdown`_ syntax. Importantly:

  * :code:`*text with single asterisks*` renders as *italics*.
    Use :code:`**double asterisks**` for **bold text**.
  * :code:`code snippets` are created using the |codesnippet| directive,
    rather than just lone backticks (|backquotes|) as found in most
    `Markdown`_.

* To create links to other parts of the documentation (i.e.
  `Cross-referencing`_), use the :code:`:ref:` syntax:

  * Docs for all the SQL dialects are auto generated with associated anchors
    to use for referencing. For example, to link to the
    :ref:`postgres_dialect_ref` dialect docs, you can use the |postgresref|.
    Replace the :code:`postgres` portion with the :code:`name` of the dialect
    you want to link to.
  * Docs for all the bundled rules are handled using a custom `sphinx`_
    plugin, which means you can refer to them using their name or code:
    |LT01ref| resolves to :sqlfluff:ref:`LT01` and |layoutspacingref|
    resolves to :sqlfluff:ref:`layout.spacing`.
  * Docs for any of the python classes and modules handled using `autodoc`_
    can be referenced as per their docs, so the
    :py:class:`sqlfluff.core.rules.base.BaseRule` class can be referenced
    with |baseruleref|. You can also use the :code:`~` prefix (i.e.
    |shortbaseruleref|) so that it just renders as
    :py:class:`~sqlfluff.core.rules.base.BaseRule`. See the docs for
    `Cross-referencing`_ for more details.

.. _`sphinx`: https://www.sphinx-doc.org/en/master/
.. _`reStructuredText`: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _`reStructuredText primer`: https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
.. _`doc8`: https://github.com/PyCQA/doc8
.. _`Markdown`: https://www.markdownguide.org/
.. _`Cross-referencing`: https://www.sphinx-doc.org/en/master/usage/referencing.html
.. _`autodoc`: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html

.. |codesnippet| raw:: html

   :code:`...`

.. |backquotes| raw:: html

   `...`

.. |postgresref| raw:: html

   :ref:`postgres_dialect_ref`

.. |LT01ref| raw:: html

   :sqlfluff:ref:`LT01`

.. |layoutspacingref| raw:: html

   :sqlfluff:ref:`layout.spacing`

.. |baseruleref| raw:: html

   :py:class:`sqlfluff.core.rules.base.BaseRule`

.. |shortbaseruleref| raw:: html

   :py:class:`~sqlfluff.core.rules.base.BaseRule`
sqlfluff-3.4.2/docs/source/guides/contributing/git.rst000066400000000000000000001364341503426445100231130ustar00rootroot00000000000000.. _using_git:

How to use Git
==============

Many contributors may not be familiar with Git, and it can be a confusing
world for those new to it, with perplexing terms like *clone*, *fork*,
*branch*, *merge conflicts* and *rebase*. This guide aims to give those of
you new to Git some information about the best way we think of working with
it, and also to serve as a quick reference to some of the Git terms and
commands to use.

Introduction to Git
-------------------

This section will give some basic background to complete newbies to Git. Feel
free to skip to the next section, :ref:`using_git_for_sqlfluff`, where we
talk about how we use it on SQLFluff, if you understand the basics already.

What is Git?
^^^^^^^^^^^^

Git is a *distributed version control system*. That mouthful basically means
it's a way of keeping track of changes to our source code and other content -
especially when many, many people are changing various parts of it. The
*distributed* part of it is what makes Git so interesting (and so
complicated!) - there can be many copies of our code, and that can cause fun
and games when trying to keep it in sync!

The original and primary copy of a code base (called a *repository* or
*repo*) is hosted on a server (e.g.
GitHub), people will be working on copies on their local machines, and people
may have *forked* a copy of the repo to another one also hosted on the server
- and then that forked copy may also be copied locally to your machine. Add
in different branches in any of those copies, and it can quickly become quite
confusing.

Git often involves working with the command line, which might be less
familiar and a bit intimidating for those of you who are less technically
minded. Graphical front end tools exist to try to replicate this command line
functionality, but it's helpful to have some familiarity with using Git on
the command line, and with a guide like this, hopefully that becomes a less
daunting prospect!

What is GitHub and how is it different than Git?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

GitHub is not Git, but it is one of the most commonly used instances of Git,
and it adds various features on top of the core versioning of code that Git
handles.

The main thing GitHub gives you is a Git server to store your code, and a
nice web front end to manage it all through. Using the web front end you can
view (and even change!) code, raise issues, open and review pull requests,
use GitHub Actions to automate things (e.g. test code) and even host wiki
pages.

In this guide I've tried to differentiate between Git concepts and commands
and those specific to GitHub. Other instances of Git that you might be
familiar with, or use in work or other projects, include GitLab and
Bitbucket. They have many of the same features as GitHub.

GitHub also has a graphical front end tool called
`GitHub Desktop <https://desktop.github.com/>`_ for working on Git locally
and syncing it back to GitHub. Check out the :ref:`github_desktop` section
for tips on how to use it.

SQLFluff makes extensive use of GitHub to help us manage the project and
allow all the many disparate contributors to collaborate easily.

.. _installing_git:

Installing Git
^^^^^^^^^^^^^^

While it is possible to work just using GitHub's website - especially if just
commenting on issues and adding your advice - managing the code really is
best done locally on your own computer and then *pushing* changes back up to
GitHub.

Git is very popular and widely available (see `installation instructions`_
for Windows, Mac & Linux). You may already have it installed, so to check if
that's the case, open a command line and type::

    git --version

If you see a version number returned then you've passed the first step! If
not, then for Windows I recommend installing and using Git Bash, which is a
Linux-like command line. For MacOS the built in Terminal available under
Launchpad is fine, and running the above version check will prompt you to
install XCode and Git. For Linux I presume you'll be familiar with how to
install this.

.. _`installation instructions`: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git

Git Repos
^^^^^^^^^

A Git *Repository* or *Repo* is a collection of all the code that makes up a
project. Well, that's not strictly true, as a project may also depend on
other programs and libraries, but typically those are not stored in the
project repo - only the code specific to this project is stored in the repo,
along with config files that are used to install any necessary libraries
needed to run the code (e.g. using a command like :code:`npm install` for
node modules).

The main SQLFluff repo is available on GitHub at:
https://github.com/sqlfluff/sqlfluff.
However, we also have a few other repos for the VS Code extension and the
like, available at https://github.com/sqlfluff.

Git Branches
^^^^^^^^^^^^

A repo will usually contain a number of branches. These are copies of the
code where you can work independently on a particular item. The name branch
is used because, like a tree, these can diverge from each other - though,
unlike a tree, they are usually merged back when the work is complete.

There will be one main (or master) branch which everything should be merged
back into when ready. Traditionally these have been called the :code:`master`
branch, but many projects are trying to use more inclusive language and have
switched to using the name :code:`main` or similar instead. SQLFluff moved to
using :code:`main` in 2021.

Creating a branch is very quick and is integral to how Git works. Git stores
branches in an incredibly efficient way and doesn't literally keep a copy of
the same code, but basically only the differences. So do not feel like it's a
big deal to create a branch (it's not!) - frequently creating small branches,
and merging them back into the main branch when ready, is the best way to use
Git. Creating large branches, or reusing branches for lots of different
changes, is not the best way of using Git and will lead to issues.

GitHub Pull Requests
^^^^^^^^^^^^^^^^^^^^

Once your changes are ready to merge back to :code:`main` you open a *pull
request* (often shortened to PR), which creates a special type of GitHub
issue which can be used to merge your changes into the :code:`main` branch. A
pull request is really a GitHub concept, and at the end of the day is
basically a fancy way of actioning a merge in Git. Bitbucket also uses the
term Pull Request, while GitLab uses Merge Request. It should also not be
confused with :code:`git pull`, which is a Git command to pull down changes
from the server (e.g. GitHub) into your local copy.

An example pull request on GitHub is shown below:

.. image:: github_example_pr.png
   :alt: Screenshot of an example pull request on GitHub.

In this pull request there are the following tabs:

* Conversation - this allows you to give some info using GitHub markdown
  (including screenshots if you want). Reviewers can comment, and ask
  questions for you to answer, before merging the pull request into the
  :code:`main` code.
* Commits - this shows a list of links to all the individual changes you made
  to the code. It's not that useful a tab, to be honest!
* Checks - this shows all the automated checks run on your code so we know
  it's good! These are set up in the main repo using GitHub Actions (or
  similar) and the results are also shown at the bottom of the Conversation
  tab for open pull requests.
* Files Changed - this is one of the most useful tabs and shows each line of
  code changed. Reviewers should look through this tab, and can click on
  individual lines to make `comments or code improvement suggestions`_ which
  are added to the Conversation tab, and the person who opened the pull
  request (called the pull request author) can then answer or address the
  concern (including accepting any suggested code changes directly into this
  change with a click).

.. _`comments or code improvement suggestions`: https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request

You can tag people to review your pull request, assign it to someone to deal
with (not used much, as it's kind of a repeat of the author and reviewers),
add labels...etc.
At the bottom of the Conversation tab you will see the following:

.. image:: github_example_merge_panel.png
   :alt: Bottom of a pull request with "Squash and Merge" and "Close" buttons.

This shows that on this PR all checks have passed and it is ready to merge.
Clicking the big green "Squash and Merge" button will copy (the "Merge" part)
all this code into the :code:`main` branch with one single commit (the
"Squash" part). Usually you don't need all the 100s of commits you have made
while developing this code change, so "Squash" is what you want, but you can
change it if needed.

You can also close this pull request if you change your mind, with the Close
button at the bottom, or add a comment with the Comment button if you have
made a big change since opening it that you want people following the pull
request to be aware of.

Please note you do **NOT** need to Close and Reopen the pull request (or even
open a new pull request) when you need to make changes based on review
feedback - simply pushing changes to the branch will cause any open pull
request from that branch to automatically be updated, and checks to
automatically be rerun. It is expected (and a good thing!) to change your
code based on feedback, and this is very much part of the workflow of pull
requests.

GitHub Forks
^^^^^^^^^^^^

As well as branches, GitHub has the concept of *forks*, which basically means
taking a complete copy of the repo (and all its branches at that time) into
your own GitHub account. You can then create a branch in that fork, and then
open a pull request to merge code from your branch on your fork, all the way
back to the original repo (called the *upstream* repo). It may sound like an
Inception level of abstraction and confusion, but it actually works quite
well once you get your head around it.

.. note::
   There is some confusion as to the name *fork*, as traditionally that term
   was used when you wanted to take a project in a different direction than
   the original developers [#f1]_ - so you *forked* the code and never merged
   back again. In GitHub a *fork* is used to make changes outside of the
   original repo, but usually with the intention of merging them back into
   the original repo once complete.

.. [#f1] https://drewdevault.com/2019/05/24/What-is-a-fork.html

Why would you fork when you can just work in the original repo? Well, most
projects don't want people messing with the original repo, so they restrict
permissions to only allow core contributors to create branches in the
original repo. Others **must** therefore fork to make changes, and then open
pull requests to the original repo for review before they are committed.

And it's important to use the correct terminology when working with forks.
Tempting as it is, the original repo should always be referred to as
"original" or "upstream", and never "main" or "master" - which refer to
branches within a repo. Similarly a "local" copy, or "clone", refers to the
copy on your PC, as we shall see, and that can be of the original repo or a
fork.

Another extra bit of hassle with a fork is that you must keep it reasonably
up to date with the original, upstream repo. To do that you periodically
merge or rebase the fork back to the original repo, which pulls down changes
into your fork. We'll explain how to do that later.

Cloning a Git Repo
^^^^^^^^^^^^^^^^^^

To work on a project in GitHub you would normally *clone* a repo, which
simply means taking a copy of it on your local PC.
It is possible to make small edits on the GitHub.com website, but it's quite
limited and often doesn't allow you to run code locally to test it, for
example.

You can clone a repo by clicking on the green Code button on the repo's home
page (make sure you do this on your fork and not on the main repo):

.. image:: github_clone_button.png
   :alt: Screenshot of the clone button in GitHub.

This offers a number of options:

* "Clone with SSH" is the recommended way; it is
  `a little more complicated to set up`_, but allows you to interact with
  GitHub without entering your GitHub password each time, and is basically
  mandatory if using `2FA for your GitHub account`_.
* "Clone with HTTPS" works, but requires you to enter your password each time
  so gets a little tiresome.

Once you have copied the SSH or HTTPS URL, simply go to the command line on
your PC, change into the directory you want to create the copy in, and type
the following (assuming SSH)::

    git clone git@github.com:sqlfluff/sqlfluff.git

You can clone a local copy of the original repo, if you plan to (and have
access to) work on branches of that, or you can clone a fork of the original
repo. The above example command clones the original repo location, and not
the fork location - you should change the git address to the forked version
when working from a fork.

After running this command you'll see the repo being downloaded locally. You
can then branch, edit any of the files, add new files, or even delete files
to your heart's content. Any changes you make will only be on your machine,
and you then *push* changes back up to GitHub. We'll cover that later.

Just like with a fork, you need to keep any local copy up to date with both
the original, upstream repo, and the GitHub version. This is done by using
the :code:`git pull`, :code:`git merge` and :code:`git rebase` commands.
We'll explain how to do all that below.

.. _`a little more complicated to set up`: https://docs.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh
.. _`2FA for your GitHub account`: https://docs.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh

.. _git_merge_conflicts:

Git Merge Conflicts
^^^^^^^^^^^^^^^^^^^

When keeping all the different copies in sync you will inevitably run into
the dreaded "merge conflict" - a rite of passage every developer must go
through. This happens where you've changed some code, but so has someone
else, and their change has been merged into :code:`main`. So when you attempt
to merge (either by syncing :code:`main` back **to** your branch to update
your branch with any new changes since branching, or by attempting to open a
pull request **from** your branch), Git will give up and say "I don't know
what to do here - you deal with it!".

In actual fact, dealing with merge conflicts is very simple. When you open
the conflicted file you'll see something like this::

    If you have questions, please
    <<<<<<< HEAD
    open an issue
    =======
    ask your question in Slack
    >>>>>>> branch-a

In this case someone changed the line to "open an issue" and merged that to
:code:`main` (aka HEAD), and you've also changed it to "ask your question in
Slack". Git is warning you that the line has been changed since you branched,
but you also changed it. You simply need to decide which version you want,
and then delete all the other lines (including the ones starting
:code:`<<<<`, :code:`====` and :code:`>>>>`). Then :code:`git add` the
"resolved" file to your branch. You can even do it `directly on GitHub`_.
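If you prefer to resolve conflicts locally, the full sequence of commands is
short. This is a minimal sketch, assuming the conflicted file is
:code:`README.md` (an illustrative file name)::

    git status         # conflicted files are listed under "Unmerged paths"
    # Edit README.md: keep the lines you want, and delete the
    # <<<<<<<, ======= and >>>>>>> marker lines
    git add README.md  # mark the conflict in this file as resolved
    git commit         # complete the merge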
Merge conflicts get a bad name, and people think they are scary to deal with,
but Git actually makes them fairly easy. It will also usually only complain
if the exact same line has changed - two people working on different parts of
the same file usually won't see any merge conflicts. Of course if you're both
working on lots of the same code, across lots of files, they can be a real
pain to deal with - this is one of the main reasons to resync your branch
back to the original :code:`main` branch frequently, and also to work on
small PRs rather than big unwieldy ones!

.. _`directly on GitHub`: https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/resolving-a-merge-conflict-on-github

.. _`using_git_for_sqlfluff`:

Recommended way to use Git for SQLFluff
---------------------------------------

When working on SQLFluff you must fork SQLFluff to your own copy and work on
that. SQLFluff contributors do not have access to create branches in the
original repo. To create your own fork do the following:

1. Click the Fork button in GitHub and wait for it to complete.
2. Clone your fork locally - for me (tunetheweb) that means running this
   command on your machine:
   :code:`git clone git@github.com:tunetheweb/sqlfluff.git`
3. Add a link to upstream -
   :code:`git remote add upstream git@github.com:sqlfluff/sqlfluff.git`
   (change this to the HTTPS URL if not using SSH)

It is also **strongly** recommended **not** to work on the :code:`main`
branch of your forked repo. When creating a new branch you will usually
branch from :code:`main`, so once your :code:`main` has extra changes in it,
it is no longer possible (or at least easy!) to create a clean branch for
other work.

If you are only working on one thing at once, then using :code:`main` in your
fork may seem the quickest and easiest thing to do, but you'd be surprised
how often you may want to pause that work for a bit and work on something
else instead - and that's one of the advantages of using Git. For example, if
you are waiting on a pull request to be reviewed, you may want to work on
another feature in the meantime on a fresh branch, which is completely
independent of your other work. Or perhaps someone discovers an urgent, and
easily fixed, bug in the code that you can quickly fix, before coming back to
the current work. See the :ref:`switching_between_branches` section below for
more info on how to switch branches.

Working on a separate branch to :code:`main` allows :code:`main` to be kept
in sync with upstream :code:`main`, which allows new branches to be created
more easily. It also allows you to merge upstream :code:`main` into your
branch periodically more easily, and it keeps each future pull request free
of a long history of unrelated commits. Finally, it also allows you to
completely reset your :code:`main` back to the same as upstream if you get it
completely messed up, without losing any history in other branches.

.. _resyncing_to_upstream:

Resyncing your main branch to upstream
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

As discussed above, it is recommended to use the :code:`main` branch in your
fork **only** to sync up with the :code:`main` branch in the original
SQLFluff repo. To do that you can do one of several things:

1. You can merge upstream :code:`main` into your :code:`main` branch -
   however if you have any changes, then this will add a commit message for
   each upstream change into your :code:`main`.
   This means the next time you branch and open a pull request you'll see a
   long history of old commits, which is a bit noisy and annoying when they
   have nothing to do with your new pull request. Of course you shouldn't
   have any changes on :code:`main` (because you're following the advice
   above and not using it, aren't you?) so this shouldn't be an issue, but
   you never know...
2. You can rebase your :code:`main` branch to be based on upstream
   :code:`main`. This is a special Git command that basically makes it look
   like you just forked your :code:`main` off of upstream :code:`main` and
   then applies any changes you have made in your repo on top of that, so it
   should lead to less risk of old commit messages, but it will still add
   commits for any changes you added. And again, you shouldn't have any
   changes on :code:`main`, so this shouldn't be an issue, but you never
   know...
3. You can blast away your :code:`main` and basically reset it back to
   upstream :code:`main`. This sounds extreme but is actually my preferred
   option, to ensure :code:`main` is super clean and reset back to the same
   as upstream :code:`main`. Note this will (intentionally!) blast away any
   changes from your :code:`main` and you will lose them completely but,
   again, there shouldn't be any changes here if you're following the above
   advice.

So I prefer option 3 - it's the cleanest and ensures :code:`main` is exactly
the same as upstream, and that there will be no long history of commit
messages in your next branch and pull request.

Commands to force reset your main branch to upstream
""""""""""""""""""""""""""""""""""""""""""""""""""""

You can use the below commands to reset your local fork to upstream and then
push those changes to GitHub.

.. note::
   You should ONLY do this if you are happy to lose all changes to your
   :code:`main` branch and completely reset it as if you'd just forked it. If
   you have changes in flight on the :code:`main` branch then you will need
   to either wait until they have been merged, or move them to a new branch,
   before you can do a clean reset of :code:`main`.

Check if upstream already exists::

    git remote -v

Add an upstream remote, if not already added previously::

    git remote add upstream git@github.com:sqlfluff/sqlfluff.git

Then force reset your main branch::

    git fetch upstream
    git checkout main
    git reset --hard upstream/main
    git push origin main --force

After this you should visit your forked repo on GitHub and check you get a
message that *"This branch is even with sqlfluff:main."*:

.. image:: github_fork_status.png
   :alt: A forked repo which is even with upstream.

Creating and working on a branch
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

So, when you're ready to make your first changes, do the following:

1. Resync your local copy to upstream as per above (unless you've just forked
   it, in which case you can skip this step)
2. Make sure you are on the main branch - :code:`git checkout main`
3. Branch :code:`main` to a new branch with a meaningful branch name. For
   example, to work on an issue to add new Postgres Create Table support,
   you'd run this command - :code:`git checkout -b postgres-create-table`
   (note the :code:`-b` which means "create a new branch please").
4. Make your changes using your favourite code editor (I recommend `VSCode`_
   myself).
5. If making code changes then test them - follow the instructions in
   `CONTRIBUTING.md`_ to set up the environment and then use
   :code:`tox -e generate-fixture-yml,cov-init,py39,cov-report,linting` to
   run most of the tests.
6. Add any new files you added in this change that you want tracked in git -
   e.g.
   :code:`git add test/fixtures/dialects/*.sql`. You also need to do this to
   re-add merge conflict files that you have resolved. You typically do not
   need to add other files that are already in the repo that you have
   changed, as Git will track those already.
7. Commit your changes - :code:`git commit -a`. This means "save all changes
   to Git (for tracked files)". If you only want to commit some files, then
   :code:`git add` just the files you want and then use :code:`git commit`
   without the :code:`-a` option. When you commit you need to add a message.
   Git will automatically open your default Git editor - usually :code:`vi`,
   which can take some getting used to, but basically type your message and
   then type :code:`Esc : w q !` to enter command mode (:code:`Esc`), start a
   command (|colon|), save your file (:code:`w` for write), quit (:code:`q`),
   and force override any errors on save (:code:`!`). See the next section
   for an alternative method if you're not comfortable with :code:`vi`. And
   by the way, if you want to cancel the commit at this stage, remove any
   message you added and type :code:`Esc : q !` to quit without saving any
   changes, and Git will see you didn't add a message and cancel the commit.
8. Push your committed changes to GitHub - :code:`git push`. If this is your
   first push of this branch to GitHub, then git will tell you that you have
   to use a slightly different command:
   :code:`git push --set-upstream origin postgres-create-table`
9. If there are any changes already in GitHub (e.g. if other people are also
   working on this branch), then you will get an error, and need to do a
   :code:`git pull` to pull them down locally before you are able to push any
   more changes back up to GitHub.
10. Repeat steps 4-9 as often as you want until you are happy to open a pull
    request to merge this back into the SQLFluff code base.

.. _`VSCode`: https://code.visualstudio.com/
.. _`CONTRIBUTING.md`: https://github.com/sqlfluff/sqlfluff/blob/main/CONTRIBUTING.md

.. |colon| raw:: html

   :

Committing changes - to vi or not to vi
"""""""""""""""""""""""""""""""""""""""

My preferred method of committing changes is to use this::

    git commit -a

This takes all the changes for existing tracked files and adds them to the
commit. New files still need to be added with :code:`git add`, but all files
currently tracked by Git are automatically included without having to use
:code:`git add`. This then opens the default Git editor (usually :code:`vi`),
lists the files for this commit and any files not included, and allows you to
add the message and complete the commit. If you close :code:`vi` without
adding a message then it cancels the commit.

However :code:`vi` can be quite a barrier to people, as it's quite a
confusing editor with a "command" and an "edit" mode, and it needs you to
remember command sequences (like :code:`Esc : w q !`). For those not familiar
with Linux and :code:`vi` this might be quite a stumbling block.

You can provide the message on the command line, so you don't have to go near
:code:`vi`, using a sequence of commands like this::

    git status
    git add file1 file2
    git commit -m "Committing file1 and file2"

This does require you to add the files or folders first, so it's a bit more
painful than :code:`git commit -a`. Alternatively you can use the :code:`-a`
and :code:`-m` switches together::

    git status
    git commit -a -m "Committing all open files"

The downside is that, unlike the :code:`vi` method, it won't show you the
list of files it's going to commit, so it's a bit more dangerous - hence why
I prefer the :code:`vi` method instead.
However, if you do a :code:`git status` before you commit, you should see the
files that will be committed. Plus you can always revert changes if you need
to.

So, in summary, the :code:`vi` method is preferred, but it can be more
complicated for those not familiar with it, so you can give the commit
message on the command line instead - just take care with it.

Keeping your branch up to date
""""""""""""""""""""""""""""""

It is also recommended to merge any changes that have happened to the
SQLFluff code (in its :code:`main` branch) into your branch periodically, in
case they affect your code, and it is particularly important to do this just
before opening a PR.

To merge changes into a forked repo from upstream :code:`main` do the
following:

1. Commit any changes in the branch you are working on.
2. Update your local copy of :code:`upstream` - :code:`git fetch upstream`
3. Merge the changes from upstream main into your branch -
   :code:`git merge upstream/main`
4. Commit the merge - :code:`git commit -a`
5. Push all your changes up to GitHub - :code:`git push`

Or to merge from a branch on the main repo do the following:

1. Commit any changes in the branch you are working on.
2. Update your local copy of :code:`main` - :code:`git fetch origin main`
3. Merge the changes from main into your branch - :code:`git merge main`
4. Commit the merge - :code:`git commit -a`
5. Push all your changes up to GitHub - :code:`git push`

.. _switching_between_branches:

Switching between branches
""""""""""""""""""""""""""

Git allows working on several branches at once. This allows you to work on
something else while you are stuck on one bit of work (waiting for answers,
or pull request feedback, or you just fancy a break!). Use
:code:`git checkout` to switch between branches, and use :code:`git status`
to ensure all your changes are committed when switching between branches.

For example, let's say you are working on the :code:`feature1` branch::

    git checkout main
    git pull
    git checkout -b feature1
    # Make some changes
    git commit -m "Commit my changes for feature1"
    # Make some more changes
    git commit -m "Commit some more changes for feature1"
    # Push changes to GitHub.com if you want to (always good to do this in case your computer dies!)
    git push
    # Note the first time you push a new branch you will need a slightly different push command:
    # `git push --set-upstream origin feature1`
    # Helpfully, git will tell you this if you try using just `git push`.

And then you want to take a break from :code:`feature1`, in which case you
should create a new branch - in most cases you want to branch from
:code:`main` again, and not from the :code:`feature1` branch, so make sure
you flip back to :code:`main` again if this is the case.
If working on a fork, you should also check your :code:`main` is up to date
first - see the :ref:`resyncing_to_upstream` section above::

    # Check your branch is clean and everything has been committed
    git status
    # Create a new branch from main (note I've not included the resyncing of main to upstream here for forks)
    git checkout main
    git pull
    git checkout -b feature2
    # Make some changes and push your new branch to GitHub
    git commit -m "Commit some changes for feature2"
    git push --set-upstream origin feature2
    # Make some more changes and push those too
    git commit -m "Commit more changes for feature2"
    git push

You are now free to switch back to :code:`feature1` if you want, using
:code:`git checkout` (note you don't need the :code:`-b` flag, as that's only
needed to create a new branch that doesn't exist yet - just switching between
existing branches doesn't need it)::

    git checkout feature1

And then switch back to :code:`feature2` later::

    git checkout feature2

The primary concern with multiple branches like this is getting yourself
confused! :code:`git status` is your friend here, to double check that you're
on the correct branch and all outstanding changes have been committed.

As with lots of things in Git, there are many ways to handle branches
(including the :code:`git branch` command), but I'd encourage you to stick
with a few commands and use :code:`git checkout`, :code:`git commit`,
:code:`git push`, :code:`git pull` and :code:`git status` as much as
possible, as you can do most things with those and it's already getting
confusing!

The other alternative is to just create a new folder, clone the whole repo
again, and manage it completely separately. This can be easier, safer and
less confusing for those less familiar with Git, if working on a limited
number of branches. However it doesn't scale very well, and is not the way
you're going to get the most out of Git, so as soon as you go beyond a second
branch I'd strongly encourage you to get used to checking out between
branches. I do encourage separate folders, however, if you work with
different repos (e.g. if you do some work directly on the main SQLFluff repo,
and some work on your own fork), as switching the repo that a folder points
to, while also possible, adds yet more confusion to an already complex
thing! 🙂

Opening a Pull Request
^^^^^^^^^^^^^^^^^^^^^^

Once you are finished making changes, you should take the following steps to
open a pull request back to the original repo to get your code into SQLFluff:

1. Merge in any changes that happened to the SQLFluff code since you branched
   (see above).
2. Run all the automated tests:
   :code:`tox -e generate-fixture-yml,cov-init,py39,cov-report,linting`.
3. Make sure all your changes are pushed to GitHub.
4. Open a pull request in GitHub.
5. If the pull request closes an issue then you can add "Closes #123" or
   "Fixes #123" in the first comment, and GitHub will automatically close
   issue #123 when the pull request is merged. If it doesn't fully close the
   issue, then you should instead say something like "Makes progress on
   #123", which will create a link on the original issue but not close it.

As mentioned above, you can make more changes to your branch and push them up
to GitHub, and the pull request will automatically be updated. There is no
need to close the PR and reopen a new one.
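Putting steps 1-3 together, a typical pre-PR session on a fork might look
like the following sketch (the branch name is the illustrative one from the
earlier example)::

    git checkout postgres-create-table
    git fetch upstream
    git merge upstream/main    # bring in any changes since you branched
    tox -e generate-fixture-yml,cov-init,py39,cov-report,linting
    git push                   # make sure GitHub has everything before opening the PR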
Actioning feedback from pull requests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When your pull request is reviewed, the first bit of feedback you're likely
to see is from the automated tests that run every time a pull request is
opened. They take a few minutes to run, and will then give you a satisfying
green tick, or a scary red cross.

Merge conflict checks
"""""""""""""""""""""

The first check that GitHub itself will do is check for any
:ref:`git_merge_conflicts`, and these must be resolved before the pull
request can be merged. If you merge :code:`main` into your code before
submitting a pull request, then it's unlikely you'll get any of these (unless
someone's got in real quick while you were opening it!), which is why it's
recommended to do that. But other pull requests can be actioned while yours
is being reviewed, so conflicts can still happen. Smaller pull requests,
touching fewer files, reduce the chance of this, so again, where at all
possible, break up changes into smaller batches.

Linter checks
"""""""""""""

Another check that we perform is linting the code in the pull request. This
runs automated checks for errors, or code styling and formatting issues that
don't match the code conventions we use. Python code is linted with
:code:`flake8`, and you can run this command on any files to see similar
linting issues. You can use :code:`black` to auto-fix most flake8 issues,
though some need to be manually addressed.

Code review feedback
""""""""""""""""""""

After the automated tests have passed, the code will be reviewed manually by
a maintainer, or another contributor. They can ask questions, or make
suggestions to change the code.

Look upon a code review as an opportunity to learn and improve your code.
Feedback can be tough to hear after you've worked hard on code, but be aware
that it is meant with the best possible intention: to provide feedback to you
for this and future commits, and to keep our codebase to a high standard. It
is not a personal slight upon you or your code, and if you are getting
annoyed with feedback I suggest you take a break, step away, and read it
again later in a fresh light. Of course, if you feel that a reviewer is
acting inappropriately then please raise it - we have a `Code of Conduct`_
and want all contributors to feel welcome. Feel free to also reach out to a
maintainer if you would like to discuss something privately.

When a reviewer makes a code suggestion, you can accept it right in GitHub
and it will automatically update your branch. As this happens in GitHub
directly, just make sure you do a :code:`git pull` next time you are working
locally on your code, to pull down these changes. It's quite slow to accept a
lot of changes this way, so if there are a lot of changes then it's sometimes
better to make them locally in your favourite editor, push one commit with
the fixes, and then mark each of the suggestions as resolved. Any suggestions
on lines which have changed since they were raised will be marked as
**outdated**, which makes them easy to spot.

Reviewers may also make small, seemingly pedantic changes - usually they will
include the word "nit" in these, to indicate this is a "nitpick". Like the
linting, they can seem needless, but they help maintain our code quality.

It should also be noted that not all suggestions may be valid! Reviewers can
make a mistake just as easily as the pull request author (more easily in some
ways, as they often will be reviewing based on reading the code rather than
testing it).
Feel free to push back on suggestions when you disagree with them. However,
it is best to get consensus between reviewer and pull request author where at
all possible, so explain why you don't think you should make the change being
suggested, rather than just ignoring the suggestion or resolving it without a
comment.

After addressing feedback, please re-request a review by clicking the little
two-arrow icon next to the reviewer name, or make a comment (e.g. "All
feedback addressed. I think this is good to merge now."). Sometimes it's
difficult to know if someone is still working on feedback, and a pull request
may be left unintentionally, getting out of date, because reviewers are not
aware that it's good for a re-review or merge.

Only SQLFluff maintainers can merge pull requests, but **every contributor**
can review pull requests, and merging becomes a lot easier (and so more
likely!) if someone else has already had a look through the changes. Please,
please, please help us by :ref:`reviewing_pull_requests` to help spread the
load!

Draft pull requests
"""""""""""""""""""

It is also possible to open a draft pull request, if you want early feedback
on your code or approach. Please remember to convert it to a full pull
request when happy.

Additionally, if making a large number of changes (for example if you need to
update every dialect), then it's best to do this in only one or two files
initially, open a pull request (whether draft or full), and get feedback on
your approach before you spend time updating all the files! It's much easier
to review code changes if they are not drowned out by lots of identical
changes in the same pull request, and it's much less likely to result in
merge conflicts. Then the rest of the files can be added to the pull request,
or a separate one opened for those (maybe a new pull request per dialect).

.. _reviewing_pull_requests:

Reviewing pull requests
^^^^^^^^^^^^^^^^^^^^^^^

As mentioned above, we **strongly** encourage contributors to help review
pull requests. This is a voluntary, collaborative effort for us all, and
depending on one or two people creates a bottleneck and a single point of
failure for the project. Even if a review pulls up nothing, it is important
to approve the pull request - this indicates that it has been reviewed and is
just as useful (if not more so) as commenting on code or making suggestions.

Do also be conscious of how feedback will be read. We have many first time
contributors who may not be as familiar with language (either programming
language or English language!) so do try to avoid technical terms,
colloquialisms...etc. Though we are aware of some very commonly used acronyms
and terms (which we've included in our :ref:`glossary_of_git_terms`) like
LGTM ("Looks Good To Me").

Do be mindful as well that pull request authors have voluntarily spent time
on this, and we wish to encourage that and foster an inclusive environment.
Offensive language is forbidden by our `Code of Conduct`_.

Do remember that a code review is also about reviewing the author's changes,
and not about showing off your own knowledge! Try not to get side-tracked,
but instead raise a new issue if you want to consider something else that
comes up during a code review but is not directly related.

On that note, do also remember that code can be improved incrementally. Small
changes are much better in Git.
So, while it's OK to point out a fuller solution, do remember that
`Perfect is the enemy of good`_, and accepting a change that is an
improvement, then improving it further in future iterations, can often be
better than holding out for a perfect solution that may never come. Of course
that doesn't mean we should accept code that regresses the quality, or seems
like completely the wrong way of doing it!

And finally, we strongly encourage positive GitHub reactions - particularly
for new contributors. They give quick, encouraging feedback and add a
playful, fun tone:

.. image:: github_reactions.png
   :alt: GitHub Heart and Rocket reactions.

We discourage the negative ones though (thumbs down 👎, or confused 😕).
Better to add a comment (politely!) explaining your concerns, and then if
others agree with you, they can "thumbs up" your comment. This keeps things
on a positive tone and also means your causes for concern are fully
understood.

.. _`Code of Conduct`: https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md
.. _`Perfect is the enemy of good`: https://en.wikipedia.org/wiki/Perfect_is_the_enemy_of_good

.. _github_desktop:

GitHub Desktop
--------------

GitHub Desktop is a Windows and MacOS app that provides a visual interface to
GitHub. It reduces the need to use and understand Git via the command line.
This section will provide some tips on performing some common tasks via
GitHub Desktop.

Installing GitHub Desktop
^^^^^^^^^^^^^^^^^^^^^^^^^

First make sure you have Git installed. See our section on
:ref:`installing_git` for more details. You can then download the install
file from https://desktop.github.com/, with further instructions from their
`Installing and configuring GitHub Desktop`_ document. Your main tasks will
be to `Authenticate with GitHub`_ and follow
`Configuring Git for GitHub Desktop`_ so that the systems know who you are.

.. _`Installing and configuring GitHub Desktop`: https://docs.github.com/en/free-pro-team@latest/desktop/installing-and-configuring-github-desktop
.. _`Authenticate with GitHub`: https://docs.github.com/en/free-pro-team@latest/desktop/installing-and-configuring-github-desktop/authenticating-to-github
.. _`Configuring Git for GitHub Desktop`: https://docs.github.com/en/free-pro-team@latest/desktop/installing-and-configuring-github-desktop/configuring-git-for-github-desktop

Cloning the SQLFluff repo
^^^^^^^^^^^^^^^^^^^^^^^^^

If you have not done so already, you will want to clone a copy of the
https://github.com/sqlfluff/sqlfluff repo onto your computer. The simplest
way is to follow `Cloning a repository from GitHub to GitHub Desktop`_, where
you go to the repository on the website and select "Open with GitHub
Desktop". This will open a window where you can click "Clone" and the job
will be done.

.. _`Cloning a repository from GitHub to GitHub Desktop`: https://docs.github.com/en/free-pro-team@latest/desktop/contributing-and-collaborating-using-github-desktop/cloning-a-repository-from-github-to-github-desktop

Navigating GitHub Desktop
^^^^^^^^^^^^^^^^^^^^^^^^^

Once you have cloned repositories, you will be able to select them via the
"Current repository" toolbar button, just under the menu on the left. By
default the sidebar will show you what edits have been made to the
repository, and the main section shows actions you may want to perform.

Updating your repository (Pull origin)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Over time the original repository will get updated, and your copy will become
out of date.
.. _github_desktop_creating_a_branch:

Making your own edits (creating a branch)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You want to create your own branch before you start, as you very likely do
not have permission to edit the SQLFluff :code:`main` branch. A branch is a
way for you to group your own edits so you can later submit (push) them for
review. Then, when they are approved, they will get merged back into the
main branch.

Before creating a branch, make sure you're currently on the :code:`main`
branch and that it is up to date (see above). If you click on the
"Current branch" tab in the toolbar you will see all the public branches in
play. To create your own branch, enter a new name in the textbox at the top
and click the "Create new branch" button.

Publishing your branch
^^^^^^^^^^^^^^^^^^^^^^

At the moment your branch is only known to you. If you want others to see
it, then you need to publish it. GitHub Desktop will prompt you to do that.
Once published, you and others can select your branch on the GitHub
website.

Editing your branch
^^^^^^^^^^^^^^^^^^^

You can edit the repository using your favourite editor. As you edit,
GitHub Desktop will show you what changes you have made. Note that you can
change branches at any time, but I suggest you commit and push any edits
(see next) before you switch, as things can get confusing. If you are
working with multiple branches, always keep an eye out to make sure you're
on the right one when working.

Committing and pushing your edits to the web
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Every once in a while you want to store and document your changes. This can
help you or others in the future. You also have to commit before you can
share (push) your changes with anyone. You can quickly commit your current
edits via the form at the bottom left. Once you have commits, you will be
prompted to push them to GitHub. I typically do this straight after
committing.

.. _getting_your_changes_accepted:

Getting your changes accepted
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

At this point you have a branch with edits committed and everything pushed
to GitHub. Once you are happy with your work, you want it to be reviewed,
approved and merged back into the main repository. For this I switch back
to the website, as it is there that you will be communicating with
reviewers. To get this stage started you need to create a pull request. Go
to the `SQLFluff repository on GitHub`_, make sure your branch is selected,
then click the Pull request link and follow the instructions. This will
notify the reviewers, who will help you to get your changes live.

.. _`SQLFluff repository on GitHub`: https://github.com/sqlfluff/sqlfluff

Keeping the forked repository up to date
""""""""""""""""""""""""""""""""""""""""

The main branch of your fork should be kept in sync with the original
repository (rebased), especially before you create any branches to make
edits. Details on how to do this are in the :ref:`resyncing_to_upstream`
section.

Making your own edits
"""""""""""""""""""""

This is done in exactly the same way as before (i.e. in
:ref:`github_desktop_creating_a_branch`). Create a branch from your
:code:`main` branch (making sure it is up to date using the above process),
publish the branch, edit the files in the branch, commit your edits, and
push back to GitHub. A command-line sketch of this fork workflow is shown
below.
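For reference, here is a rough command-line equivalent of the fork workflow
above, using only standard Git commands. It assumes your fork is the
:code:`origin` remote and that you have added the original SQLFluff repo as
a remote named :code:`upstream` (both names are common conventions rather
than anything set up automatically), and the branch name :code:`my-fix` is
just an example.

.. code-block:: bash

   # One-off setup: point an "upstream" remote at the original repo.
   git remote add upstream https://github.com/sqlfluff/sqlfluff.git

   # Bring your fork's main branch up to date with the original repo.
   git checkout main
   git fetch upstream
   git rebase upstream/main
   git push origin main

   # Create a branch for your edits, commit them, and publish the branch.
   git checkout -b my-fix
   git add .
   git commit -m "Describe your change here"
   git push -u origin my-fix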
With a forked repository, the process to get your edits accepted is much
the same as before (i.e. in :ref:`getting_your_changes_accepted`). Go to
the web page for your copy of the repository and create a pull request.

.. _glossary_of_git_terms:

Glossary of terms
-----------------

This is a list of terms for those less familiar with Git/GitHub:

* **branch** - a copy of the code within a repo, where you may be working
  on code that is not ready to commit back to the main branch. Note that
  Git actually only stores differences, so it's not really a copy, but
  that's just an efficiency in Git and you can consider it a copy to all
  intents and purposes.
* **fetch** - a git command which downloads all changes from a remote repo
  (i.e. a server) to a local one.
* **fork** - a complete copy of a repo and all its branches.
* **LGTM** - shorthand for "Looks Good To Me" - typically used when
  approving a pull request.
* **local** - a complete copy of a repo on your PC.
* **main** - the primary branch of the SQLFluff repo. Some other repos use
  :code:`master` for this.
* **master** - an alternative name for the :code:`main` branch used by some
  repos.
* **merge** - to copy changes from one branch into another branch.
* **merge request** - another name for a pull request, used particularly in
  GitLab, an alternative to GitHub.
* **origin** - the server version of the repo (the opposite of local).
* **pull** - to fetch changes from a remote repo, and then merge them into
  this branch in one step (see the short example after this list).
* **pull request** - a way to merge changes back to the main branch. A pull
  request is a special issue type that allows the potential merge to be
  reviewed and commented on before it is merged.
* **rebase** - to bring a branch up to date, as if it had been created from
  now, while maintaining the existing changes on top.
* **repo/repository** - a git project, which is basically a collection of
  files, and which may exist in several branches.
* **upstream** - the original repo that a fork was created from.
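To make the relationship between **fetch**, **merge** and **pull** concrete,
here is a small illustration using standard Git commands (the remote name
:code:`origin` and branch name :code:`main` are just the defaults used
elsewhere in this guide):

.. code-block:: bash

   # "fetch" downloads remote changes but leaves your branch untouched...
   git fetch origin
   # ...so you then "merge" them into your current branch explicitly:
   git merge origin/main

   # "pull" performs both of those steps in one go:
   git pull origin main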
q{Ndvۗ_9;v4^zz曟jHEoA$@@@@K]vI7=yXfC\uI{Ȩ w_KUNܷE     pJB@[R| ,Nئ$@)&xgZA˟{   PgI#YN]Rͷ]*䘀s/(\@R gd9V!6    oC͎z gvJ=-G',+3с}F@@@^w֒L`eN*vIvl 8ޠ+r>l@@@@ Ǚ !y֌%fCb6 @8+%;    Sm%@18tuB 9+p:"gk@@@@pGOw33@zo@@@@ =[a3G@@@@@;      g      ܹ0D@@@@@@ `R;,޵/9lY.̵/+O*X@m^±:N_?MH&Kê"u,ߟ43e^]ˆw#cNGUΟ,~ >'@IDATY HK(2xAA 6m{w1^y|@HNK.@TT=+dR_Ю<@o}W*Z UNʙ`'$huoy|XlZBZN|PGl/h]Wi}M_S=L/#4t_\4(tzl][qЗN2;Wʽ1ץfo\W$pI&3HR*]X|>Uz Sn8^ǂy @^Yj^C\"Z4,ꫭvyU?MOڜ-5%cL @ (QBEͷޑs>3|ԙWd k3/p +f%E_ٕb|[Ə4`yդUkg,O.<7n*J/p/!.Uo'3%K>em7[̻Ƚ'V<"SV@;v!X$>K]sۺ~s'E p .5o4k,וu8+//JW[~\X+#oGY1W,Pyvc.gvti_w1UdEI-Sɶuvmvw-eFҺJS]ion^/ԇ:ՐNf?^moZ!z)T% ^wrK[/~dlܽ9TkȪ͝cz:QnM*ԓEs_uڞBMfیax7Be[ft2]kؖ)Zie΍VtW {[v++QTX/)Kȸd/+{rsg_1]5];9ou=j=sn~kgs귮CRUGܯi-+ddc4ݻu^={Ȕ_#=!}zky2UfY뮕*|NOO-[Hr^<3'x-[͸}TRoIq=xEq RlYg7zmY7nf OGivA d5kRDmibJ3Mʗ675cv鶣ɧ]@0S~ƚoKEeL ytC~XlӤw.!ǎ+Q۴s.9egp1r=pќR|a9S]du9RrTjJo|X!1ֈSvUjT*]Yz3ׄPc-~N,>l _n߱CҢgF1/Wsf%mܑ\rP[gɥ̸Y34exm4^^U㟈.rL.{emc!+uy˱ ]U8#\|3K=;fM{]vd&2+ @spAG(Oc 4& =f)]/%ɋG$z6b/GTn"O÷n "'>楠;:\!;BP5Aۜc[L=^~G|hŝ{P pyjޫE{Mv돼@(L/`iG~bcv_.h~Wf3F5&8U™2y7r1R+NnW?"o/{?ZK&)a]6(:.H\zuj\\cxQS5Щ8ؓ|iR(=z~h0WϟP.טe׼z˧k}><`1B܁K[&7,)S3&2ߓ [L3oР }FH}` Lz>k@ɷ%!Nww=dqF<3ݲE 9q@g>Hf?G=wfMÆ1{yL{aMM[zBKԓ/ 'MD֌wJUVߨY+VtkסGj)*Qÿ%ϻ} ό~Nnq<Y.MjtMK懮΂Lj=g^=fL>iEy˗/oE{Iv[43Ϛ5kE[WvE:t8J  2vftq=΃e֥oC :=vP8S9ar2~y4-6̸gnS\|-nuMK:իin9mtFbDL-9#{@ci1;NF,rʺ rArt{M(RTTz{WmKTrʵظ_n~w5kYZNqĹf㸍6ޞb7cŽrQ52- =2ťSf|͗eUvf*ˬKEǃӱ܁w/}9Ҙ l]ʱ =TQ+Ρq2ҭQ yTs̮0NiQ}<[YQu˘sednW 2wnt9y`m_=|ORE=L} gXw㟶ˤ,s|DLE@}syuߌk8ޜMzMOyﲗ{^V}} %~s]kMڀA'ΝK/!{fܷo0'e\$ 9YtIʦ[䞻a;>o{͛5.;;\_5 ;`kCrg՜'ώy^M,ԇ7~T/*ڍ1{y>]NsӤznw/+~L>G~kdҌ{_w/#כڬGW^kD[v,MAJE偓+I݀Ju^zzxzg u/lvbsȚ-i򼹞.Y@ZSۗ;Tp'?'>˜Ow}\wFIgmez/'1o7]~on5?jwgoea׶fkh[3=o+,ҿB{-;ֿ;L+{W{n&whQ<-0{2Q>m}'ypb2|}+/'z ŦU*HlOO?/ZPIUܩ}Xuyd΋~4r4TnC[Wn*~Klr]E[y%mdWd)2Mrt^Ei>jƚpoŃ` ӒR[ѹScn/ҳvGR">Ӧu ٺ5P 6P_.*4{88ig?W(^V^e|tV!Ph+ijԠ[=v;8|Cr'cY`խ7(<4R&1Npnc_xpn؝o='L>t㏎=-ׇ~6ȥ#<;M0Ҥ䯿[E/mM8QN@fXlsER9s1OK "ٱswuT?qŭPhR\(P`qgزHqwX\C-PE/w2o^򒼤Ixso4sR]*? #L84yL!&ӎUc?䷹?*O!/> 28`9nvڇ…K/XЄ&o:܆CB1 ir9N-xOC%__ٮ_]8EB>R,ɗ/OOzj4k~_^g:2 kΑܒj^l<~2[?D͘W*wN HGа\/-q.O9+ >+'Aqvbb"D8t5f;AaK[,e%w+PaW.)_₭ ot.On-ְ0[!$dye ;%C{ܽl.&3^-_ͯ1QA B?*2P:)w~)--sّ[oSxm;ixW_zAСCO?"{7Ò?pe1NC#x末"4D X>]/7Q D}!?~>osDDc ף D_T(6'J{1ckQ'0n >*9X/ض9y<{Z3IH#h/ʎ7l+ t`/2kO/rWhzqlW(pnJ #?Zts'[Mf>,S!%ΖmnЮcVCxP .2n*`[CEeI!S vAh5l#<|ubʼn* yܢ9f/Dl d(]S)V[[cBz316tL'57BNsZavŸ.+Xeu,^^#Pvq79Ycpi o:XGw#6|#Iϛ2g~Z<)'Oɯ(|5]{俲z.rA]k. Gɔޑ} ׫< B]u͵d֞~vөx]ˌ>O>"7p6/CPO>Y_ zk-iw(hwwO+?̙)~8LzagE;xM<#2 cϢ^|Y᥀vN9?4xPPaDG :l.?K>̜8||؉%rcE Ι}@s|w񏎟^e:Tqt$&}pN/e8W=GvXAF 2"2Mi/[ɗ*LYoBt6g}걇&|?ʂ;Wg ggyyVo$:pzP}×&l'}ZV2Wi+ybm fC;t[^.V:A{&igEyouB謷 46pSDEo"q.Z;VvHl`g7# 0KzXmk_+  NtCu~qӢ;Pޝ<1z$1><'wd=^-+.Tl9_v'0_W0\+ mf?xٺ8$ F3؎r:j`1Kb_^(@T!FwBs{A:sąG^vPnSڄ;Vod' ;ԔxF;??YVȭ8e8|ƝfyJySE.wB}Ā\l0tsB:KYя9!0q@f }l)}7ege3pa _~}ݙu^?;cL]_I;Lpy AkyNy/}]\o!r8WvG4şj'9O0u֎nH'|pI@7L;*UÞfx/5kпnUr„'|i29旕O>m~!XhC(N8tA{ྻ֏5U^}S޽z9]h雙A;Ӥ#8Lsʫ7ޑnI= ga,-t60v˯{Pq䩃CʷHjڮ者!e}N@ÓO:1Nw|hB$_/:FoS±Y&]x`~X5 M20x$/[55ttK1vJ;ywa:7wf:#o([e~qɃX(Ci$w'hMG¯yCyҦ9k(h X^Kc/W-0^*l'Omg$ Q/BımCgkk:!<XHQ$EۼhNz=3 VE-zk}\'\g'Bv#O9z+:3m"q.bG{}lճNE{R{ kBس]5L0?c }ֳիz:ː׾/710ИEy 'dm.Mkp|tsiPo4]2n&gkTnFcFi/g۾͛ ~sE8wy'jAq@(!O*2ۿ!qnĘ[c͸Þ [/۹wvŰ^]]?P4x"Ak] { oՃhbHz9X5M,.5Q |t(Tn5x"L">xF< 0?uz/iR[|h~vpky1Nx ɽ="zb\w¯t_ Cn-:[$@$ {6@'!ṗdʢA&#!x]ÿ́Wq7H? 
c5w.7x ]9A(ѧ7g.>ZO v}tbZ/; eQ/!ï![0kC2F_g|+#rY۶Vvz8V/y[e r"9_ ڡ ySAG}^<∀ omR/Bkn/G;}K⦝a^1os/?|pj=N_@$x6zNrKh4Ֆ\ 5:hgY>\=&^-II;kϋ3̔s:L˯A><_{xmBN=E-e쳗Egii-.˫:3*wwH[nQJ/7kl>q:!ua=3|l4;"E=^g9P淕~_qH6Flv~Y^G֖EmIhgp1me1oUR!?,y"eȱ5녅=ސ޲F2k#NcRm{⠿].qt:Ck FChgd&G˕CW5P͑hE; 7L# 9`-=R =S-ڙڃv|bCۮvCyݡ^A9bu8Pض1GoLP~tıq, 6@%[;Qg l>|f+* 7%r# ?B&3iW}z?4g]dE;[~xRsAC~`aga`vP]kŮe†AheyUqwvv p-M[!Rn C+8 l ~a]x06𵞜s]8{4.}wwJA$5/l wYhE; i~j]?IH#]G+f7)ݷ;Sm 8yR꾌C ƙpW\ A޼[k|WQgt] mgn6u|וl0B,Q'BAEM^a'/?X8yu ,=x$,칝-fr yx2+1ݻؙ%蜾[&gלDBj);QUz6a[hLZ5 Y:w;So}vڞAM];, o^^xuolhnW}S%:68M$@@_l/ tRDq}wn%Tq]D{;d ;Lf0"^oMہXOzm/};9gBTCI䈃a!Tڼ|oīoo&M#Ȑ*iT[ާ,-+XmG X{D0<3K|s*2{-3{f1C9Ƀ8?4`0{ǀ,)ܹXi +mN1p[̟/Cj7_d5h@"gtٲV.\h6ӯo_ Ճ_oeawJNrϏ~^^b%V tO<<߇ME?+zuĀ-uy]oqqqƛy,wo u 9G.k'5a? qu۪x&1.KZͱCO8jO!'M8AS:[xSͷqƃL~ 0ݺvuڷ`B1f($ =s+tDbש5GC5]?t$mebxsIJe?;Ym Ʃy#H?4P u!!uo98?ܧKzLBwAw{ Zf."̾V4J v`WZkEHڄ6̍@cC!펗SFx2#o2n5EGyw<2yp2v*f&[37^FTuֳ@!WKMlV>p;+6OP̽_:l㍉wh1xɒ[Y֧Oog܎x?_]Pn`8D= NXRsc73!ht{055Ug 7R+LF;?9sSaqq!=*xmc8f5--ʖ>o>Bkoa߉ >$Qk^w̦XZk sߖoÎ{1~bg9=;@;vSÆme&Pd…,tN=kfF!W+mAJKK wuu Dx5+_"p|7ޗkBe-Z$p|qwV 2b3imA0C' B:sksł&`'O.;9 !2":,[~^V._e^&_ڛpdB# D8]wp|!6\zMeidmxvyS~ga;:)T!4Jڮƶ՜s3{^WVxʄP3ضlH\ V=?@w;{~sr 5^ܹuj}=wPs#Ϯ101׺BG "F(xfJhA M_>[ƽ~2Is o[!< 9e׫LNs,ٲێpXvJd̫'ʩS+xAؠZ֫iCj[l F$}fZ&X>@ #[ 9V=S}R{A0|Uu_#4kbޭ>&Yu(Y[ ,xgq;RT\,LzLȥ!6wfdžrkl|pG |7zﱲ~AXNr@x&E>x+~“cR~_4EvO?WԉO=mGek̶s帹;!X:G~f0yzlBw~muۺ턷߽}wQM/pk`A6 ܶ_oerj̽2F hCy3vv 9~wDo-1Mq62x+s=܀(C;P w!P6ki'vT Esؽ 'Lav!j0C8̇y܄t/OOOv͢})2bp^0sַ溃(gyc#6L+]fcP#o\*ްD&Uvs?< gtu4֞auysiB,_o}^E`B߇㸭Q[^wߟXf]ezcA kjSz欚-ns AIJ_#|%nfwΊ4l0x6NkXFnG>˕j #vpb]-\r g <5aE7]? 1oO(wX8˕7nWFlBuvCDKk֘mϯ5ap\Nz\Z Uwz4 }s\B>/_[vx̸ 9qA8O~/^i'C=XE{ަNlGWs̨w5?\c{LS)oˑW?S慄_%rez5{QT߆gz-y^֫380?hBQ< :1="Pzh 7\.Q>Dά}õ{4)Rv_6Yʫe>cUﳍmIH=[6fFus 9tm0Y2m+\BQ}VlL*Pyؐ} ÒZػ,-1ZRyyVy__V, VWsC̦ڕ\֬]k!ۛn9dxu]>z=LgL#&bw.+:fDr=py>-筷q@8Y)y1"v w1uAYMsx=C0>>\ ᙈ{[ oRkn uxstۖlwz/wr=?q\7u?۶`4S-v9# @%Wxm&F}/h1fg2e rjԆ^졢Lrl(ZsA̱[ 6gDV|Օ&Ԡ;]'pTy ҡۡSIY!P˕s' 8EY\zt3O/}Kw(\-̗ ٥6oS\¥NB@IDAT"T;=LxKVi?~ݰw|,G_lq74lQ}ܸmCwhiw{zf#}[&~=?VE/Tiᵿr ٧.#e `r)'kyitpN//YP/s»~8m {^rskHm휫^YwluXE^ng_[Daz-ǵGՎ)M׺ l=yzO|-0fc||-[X^cyÒr AtM]"JN{ g7.Ul],h1vD\Tc<>a ű:CCaݦXsv{x#x:pHhω}[, k_o>rYg470͑gEWl8Ꭰꥡ2כ8Wu`VcLVAT8Bb FlC3ECoۙrI_sOg _\Ck~8$E9}g='ty|HHC{ad Zg]mo.o3ж׶ߖi._|p4@K_]gr <-G85,toC?u4n Ч!}^[خ9w2Tn*Ծ>rz86P}=㍽?v{=X7='lr;mM}a?rtC H4Bݧlk&ǘqM3{h܋4oO0|8b`ͪo5B!O-kȫ͉eT%=_9p 8w/U5U^xv~ ᅟ,#8^?A;).\=s|,! ۮ"< c-6~}αd7&ǟڑ&?!(4"n}\M6o=Nx=ㄗwyJ^7ՙ6Kv>=S |ܒq7 ¸6d.)P\V?6}Zs_[nn4ٰAx5V rkkv+2siC^=uz@Xիט={zZ:vTU_~/_aILLln_ksoG{޷„Xqv?k~M;Rs]ƑKož-ۚ)N6&ᶽ%A(e*%C wٖ뢃s*S;-.d8oW,:!ȎM-F;qk{0bmZ7ض:ё\TVrQ!y#.B»̉L1\Ë4gXb\}GזZs-faNKKtͮBqnTW;%\-w(]b{ ]o-Nk^#wKs*P;["CS|?r\!q ~疼?^sǛC.ذɛ&G$@ u_lXe-,d\@$T:3~6k[ޚ3A7L L69܅56Yg~{S>9n{Q8^'r#t<*ÂTJجU{mBAXZ,f_ ZZλ_i"xG3w=w9Xz,~.Զ۱qC737Kww칹okN*9}cO$Ljj_iWU{ε`ucDa]a~ΙgE$@$@$@$@$@$@$@$ypywu-]^Fh$@$@$ }xVHHHHHHHHH-Pk F%''8B^wݶ # h ŕ;<N~YA#        @-(s@\\ _$Jwha$@$j<+h$@$@$@$@$@$@$@$@$(ܵen]HHL#'o}iՎ[kׇ# UKU^T&p} ضC@||$''ɱ4yǗщ1-QQ1.]$@$@M%P]#j|:ԡƄ!<&<8< @[pVvM ))IAcd[k'_,h$@$~kP5}S5x~#K"x6HHHHHHHHH- PkKV&,U?&F.y鼝{u;.F¦hG"       , w:8tԢӶ\E>i||  FDGEIBBib        V#@вJ%'>*+5&:1eIH hظ8Ii$@$@$@$@$@$@$@$@$) P۔vMmׇ#          NE S wHHHHHHHHHHHH:( w$@$@$@$@$@$@$@$@$@$@$@$@u<7$@$@$@$@$@$@$@$@$@$@$@$@zl            E]:            J]=pl6 @"@sO @%@ᮃ86HHHHHHHHHHHHsp׹'HHHHHHHHHHHHpAM$@$@$@$@$@$@$@$@$@$@$@$йP\Ǔ{C$@$@$@$@$@$@$@$@$@$@$@$A P&           \(uɽ!           
(uf t.:ސ tP:cIHHHHHHHHHHH: wxroHHHHHHHHHHHH:( wuf'%u> @DPFV%RjΪ\HHHHHHHHHHHHh!|?m>ǜ{J$@$@$@$@$@$@$@$@$@$@<Ҝ}pj\$f:J*K[\+            MMjViN{(5i1!L jW HHHHHHHHHHHH`f54]sqع6)KKL4Oe$@$@$@$@$@$@$@$@$@$@$@h0&CŤX<2?T%PvHHHHHHHHHHH- @f5洁]sq[8/Jl OCe$@$@$@$@$@$@$@$@$@$@$@mNh 5%%ZLKtTQZƴNV@$@$@$@$@$@$@$@$@$@$@$@mE4xApz\E8_9۾}BkM  ɱIFY' @C]T-)K;c"eFłM`i*y2mɗ)O$@$@$@$@$@$@$@$@$@$@$@-"p1&]KcAMϖ͔& Ʋ|]4           جKb ';F?p @nqs$@$@$@$@$@$@$@$@$@$@$@$@$@AP HHHHHHHHHHHHH Pkk !@."            &@ᮭs{$@$@$@$@$@$@$@$@$@$@$@$@$ P8HHHHHHHHHHHHښ& @@,            hkښ8G$@$@$@$@$@$@$@$@$@$@$@$@AP HHHHHHHHHHHHH Pkk !@."            &@ᮭs{$@$@$@$@$@$@$@$@$@$@$@$@$ P8HHHHHHHHHHHHښ& @@,            hkښ8G$@$@$@$@$@$@$@$@$@$@$@$@AP HHHHHHHHHHHHH Pkk !@."            &@ᮭs{$@$@$@$@$@$@$@$@$@$@$@$@$ P8HHHHHHHHHHHHښ& @@,            hkښ8G$@$@$@$@$@$@$@$@$@$@$@$@AP HHHHHHHHHHHHH Pkk !@."            &@ᮭs{$@$@$@$@$@$@$@$@$@$@$@$@$@ly'RCWVVJUUTWS#5EEEI4~11Xxx#b            ̈́@L~i3&R)_iYSwyI$G#           < P qܫHL:UWX+ˤW&"5R0x/:&^b0*.A%:>IbJx))*ћo KQIWHz2R^8U8A$@$@$@$@$@$@$@$@$@$@$ІTX}%59Q2R$!>PDs(y(KAa1U|%RYZ30yQp1fYbbkZ!%JLb&#S<--M#InAQ] AF$@$@$@$@$@$@$@$@$@$@$@mA!m"+=Uzt͌G«.??_KJ\Q+R^Ԝvb9pt4*:Nb5ϝDKMUzj~X*)Z|yXz&Zӳ%:1CU̳,ÍybmY%#V7&1kg<'          hLE8_VQ^Qan;!av]KfZDvb|>噰B/_+뤢u "5uU5F,/ U瘯L uQђ[yEo 3yqqPwp;SE?Ī'^rDOf>3+3S׋ۥݯM5\!OV5`#;K(Rz8h$@$@$@$@$@$@$@$@$@$@$@$.@1NZ5vCXW,鞝6n] vsH宒R^R,BfvUe]b_z-]R-]9e굦('!CRrHudRRj- 5VCiF%.YY^g1x-_cvOlTJ #Jr$@$@$@$@$@$@$@$@$@$@$ /V4ʵrϻ·=P7Ɯ 1a9RqTtzA$J-Zr7/+*KzEל'e%Ribc(׿wOInkR={HrRzKlhNz{U^]HB!uΖx #ف 9fE;_zHHHHHHHHHHHH4 8$ AHINlQλ㮠PrrbQUEnX1כPfX {dR\xeWeDRRYPŻ 3^yyT#13ZR3$]_|~Q!G3X6m;4J v)ҽ{wsbmȓs]n]L^nHHHHHHHHHHHnW,鞝-ִ_ h,k5F%HYToZ6LL䰫R S#TUw]zUQ*FkK {/]StjIx^c.*e%'IzUIzUh̘YrIyUji}(O?z(>__䋏3u!&yJ$bD I#Xe)—$          h;:yҜ26gUH,/Ĵ<hpSJ=q.U +MrT-*հM)cFTK+vi)ɒ,%Yi2l'J5F=v zt5_X$)JKW4!Vl @jG coN+!]ΊvyxZ5qBf"JwTD+r^;JUٿJn"iɉZOTC:Z*II$*IIS/䳫H_j{Wk$$&ȧ)Zoz/6X̛/}'wLhkEkM'%ѷQ>c{a!gG ttVh) 6sU$&.EixLc(UG/ΕʊJ˗27*{NhXDY_(咒nQFV.]CnH/1 "Q4fDhvU6|#^}UjZObސ+UW#/,ٚ.MCWFUi8x[qSomw=^]2KFi2{IOJޙ*eJ_ Hː:JNdR4fάPBLLM4Oۛ**B߸a[JzŠM*b%e2xAC:xYTKBe + OQiZx(̑uasQ bcbTy)WX**P!25>Nu=u(]_ˬQqiF:s/*Ũ\\LkKUL|xڦJړ*#rKKdAQ<4+|w/N뛝%}{voZ2MmOL@3tYlz%Kbbz/hG j[ێZŦ @ xYyaUIVVo?+ѮW&*ډv0w5f x] շ?Po Cy$51^Eh#ڥ'$ih*̋S.IsוP5(/5e2I=|,thbYq5*"DIBgjOoA壹LU%--ص}c<"CMKuta6H#           h:ptss 咜eN](]RyzҫS ېu׮ )q IcUdS;xz(I*%@S1-Fը'O=*ŰRX,&*FSW- zpYr[k %V׫(/ 9统pph0iVuKqTaB+T-*.reSGzuxzūkMMIx4          :Ewz"WkBXhcPk'jT5ǹ[Tbr%Gm5TҒU89":O>lSJeY*̩j\tU64̬8S/A3›_:*dz`XŽ<ΙCmu*B9bӶ@ee@ψ)DݺŝznC^걤u>`+KD=Qsr%-UIfS|{D$@$@$@$@$@$@$@$@$@/ܕin"sJ70CÏvx!e̤zU=|* Ȑ$Zjl*U/_hRTKKՋ@ٍEFHCwl: N@mfrWIٙxhW;xRH44D?frB9hГ~DE'RPP"To8Cy}N#Wx> wk~uljC%jx;3Cv&*R$k(Ͳb)ӐZba*! S_BI{YV%kr!_nFA8EvXBñ1ƱA9@#          JC w*d(n8eyAUeEtj|?{'Uwvjٖܛ08` `0&`$/HL>`BmIftlܻ-*˖լmz̞hVZm9~9{yQ\<3W v><&]3gh { h6M+Qr}/USb:/븽0de B\K4fɖB+F\BGq1kENĽ9r=\\tQ0,E9vu-G^4!O[9aW'K uR9;2k$fV+g{EdDv!h" " " " " " " " " " V|`,nb.eh8W_+q!K[.8洶Z.C>*[Z]0eΰ^ ᰲu  1!1hF"χ0,iG-v@ܣp3 YsXp̵%[ љ' e/E_ύu)W‡zvosol;Y?T߼]cB =! #W KG>F2 u 7GK/ZumWdX^9"!歃PE:K#fPo=dٌ ٪#./IA:W^4}qcF %lAkByK+/P\#*κBV" !v~TN)ڭYtw@(mY3v VBivc!8=]wޘY=ToD@D@D@D@D@D@D@D@D@D@f2Jb&! ,(>پمt.:a, 5IlC6'yԺ?v䧿pWxKܹҙEQg?C+`LĻpnpނ81c; t|.ra)6gs t.$s Q>LfyBr\ 'K;7Z:/q[ yC!1.Z.yYPRl|U o]ƿL(#-pe,8D4aA[S(Wv e"8o5۾m-B Du #&er]t,__SK[u ޘ," " " " " " " " " " "0~M)ܥ&wS:2)yλx@m\eAl[pmٺ^ܾ#a56 [>[ɕ-(_BW \.DaރS?;`gs X%6X~vw[;uj歷T2cȱoXosη>P?Q1j8mUvE;9N3t *q1)ڱx1dO:vн0C.!" 
" " " " " " " "  VV̥@ŐP;uepಫ8*+/Y`׬5B¥؂i܁Nk@IDAT3.uClηbKZq4p aD.Dn> E ^9#ab8Wa`p!&8p~FcDNr]x q/NX]( "\'A{&r TwƗnڮzE@D@D@D@D@D@D@D@D@D@D@D` 4pG7wyю(Z9BgFyю.<ėvv(UxqI!&vi;Ն|ʖB~[Y9pĵ"\`Y]688`[Y_6cR {r۸r8_;ĸ9]ۺ;:9bZZY:1)8?.[uGˣX" W(RzAϯ4p@Թ&K!_2ܹkQ٢" " " " " " " " " " " "h>.1b6e9Ӣj˗íxEю2͗, CK!Bimqx lQ3o}-8S&P]p[~ ;`2{ƞYͶmmA ,]?`)-iZvE ['\x XZЮ0z1:t%!*BG29?t^أܟa)1\eN18=K-"# Í7гղPix8j pɸᠵwcϯPKN8;%f~}a-촹pYN?xꬫ%iXfwIǻf>ScӐ$G.\\yK gY}$:ыu+lkv8Xn3W!\e%Z]d2ai۶yu7"^K $b!e;\o#]qh^ܺY)ٖ-[8\,y Ƭ5c8 SpE˘/Z.`oeݰJ#dP %Â)fkh Yg UѮAN;&?Xv,:DD@D@D@D@D@D@D@D`x[1{]Cکt#@юBd~79$ލL_YC0"x_p4VA.ݧ^H|1Wytŧ~&wuľr]#n 6΄(\ mߝ_רxS~wBxauRmoւ}F}mn+d67cאg>D_v

04D4n][c`B`GZs]ڬ)Zt!{Ӫ=1JgY /owr\*;15=ݮsUv؆w _?sj? ?oc[&u5tMj5|f_ӱscr=9`Xݝgӟ@hG\\ 뗿zuYBW'>i~}z`">4L󟝔KNwЛ!w;+5{/y7Zʯ]^iWWÚY>eR֜[Oa3sv!:dӎ G ŃpA.mr-loŝ߬Y6yȖ2t"V`G"Y:A2\t_28B%&fT2;Ƹ"06Oy7˵|)A8>~q}a31FuǪœ;X7js2X&Kc7ގF;GnͺMN;h"wn ]-|OD@D@D@D@D@D`Ѓ~R okc5SApW ywZ:ncڶ1U:EQBo n E ݧ36S\O|s}S;S͌i/N5?6ߏ}`#qZ?ř\~ٰϼox1ݞ?׉t|+ýu\#nէvަtQ+]v!3=ZE$QD4oy+s yn˦讃prʩE:!a;)q|ZzsͳlR5 ^򰭌tֱQ`rWcɭItǖ)g;~c݇DBth24reg k9)p+iȡD7"sP]v E;ψ;~7ԇc_H֏]/OhDюuvXSX䳷Q~]~w?O 7w ^~K6pt/ 5pG'`8b%,s~HXb]-:+RnW*3S@^:x!"Ne*WͲXnjY* ;ĺ۲. G^ނmg6NٳCɵC.dfu[;jdJ |J(YG-NZ+B APg;˰TD(m2lfS @%_򗶰xWr"n9AJwQ&DEu '( ż}Ag<:$ל?@AO+'NWw3yG hw' SHKoz8|IQb'Et̋W_wvKE4zeZt$M]F%wዂU.:N.ϗpEP=ȹC9BaĶh BeY(!!ݖ~[ a5k/KrVV?֞xE(BZoJ>lZ.- <}x9s{"QlCcLvh?k!˹O#11ų43rUv}V;[^UG9l+fz`_ fzgbv7蹻>svytwъvσٳwU@Sy.LMݱIjw(F_+vV[ʯ]]unCؗ[y6};2/nr:Fdm\e5: Rs%C/(NQcqpr,bZ"п x͈"ZZ 1-ݖFAgy ms.ew>ƎYrJE{ot[ hZ:ֻς}Aȟs!-xr`MK΢V<آhyB5g%2\ yw]E\9P }}VY 4{J帣^h&pjgO;yG7= nsM~h.ѥxȞ_W QyrUvXr\k*N10 [_(wFWt|y0tӱ܇gԬ_䨣`Ew<_9hJG&$q=9N"nF wHQ&Z-ӻJ,\wtޱPaE;/P dvb;gnU#+ SIQ+4j>3ۖPi#lm=~[u^ F7]uɶ# ̺, X6KɈŠQP,GFM6Q+3MKQLiċkyos:1}\SNcF県~NEHoK=~oLy{‹lC:O|s{ۓSF@NL;lX|>:$-M\\<:BwqJ+@HVCcaWw@W 1\q ]1w!3kG%[[{~m Y1`-\:͛eք Z;-j!GrFHEnيwDJI念`E;'u,E"} U1ې,$6ssXXJa(J0MmIbn; y*9A"]P ,evo]6/IR=i:Uʙ=039ڞj|DR;;j]d}j_&=ެS!DOVO Zw8_+N5hGloF粛H=D޶/"v^=NM'1cEZ8b8XchLut19>$&v^Sֻ调 x X Sh޶RV.[O1kIO2Z-|u,_7g(j%k.s\})̣im.:Ӌ|zRZI玹XJiı9@5AWhEa99uBᰃx:'XB$;z m*5fGxsaqN]?{#=f^+o[pA.ZF]k/Đ\irͷٷZ׍w]g㛽Kj4-FC3u q 6S# w^}O(6:x:&kӋt\cajE;:U(|3'?!7yI}%2 L]Gc_?][tŌ7CЭ/rpW9vu}jCc3˵ۛuߩ>k/2*w͇?piE à2$,|1;v%›hOǎ_ w508hpG"1b/꘯.u,pV%D%B+qZ ǜc.έWekiKZ D(Զgϲg/hs3 q, Ԅ./0晻.0pB#A_AQ+|6 ?Ea3};/Q<B 7OSYwsMe{&\۶mN=U?ш}eo~m//~ߐ8HюٟiyxV}(zr[q!v5Y˴_ HЌL9_ t+~ꡯfIӁ]q tv}uD;nu: yײE!ʾ~_{ZQDN_K }7jUϗ?j;dO;dw{iĶvԮM)]{^ SeStQ`t rD4^Jn8 w<ĶAx6-h-oI{bp%!.52/[{(fޔ!WfIkGhOjom|BVaqW!|v9^ B^6Nu*{}pGюhWYF>>l,Γ K~(L&Nu!ޭ-r%b75:<>[nU~k[k\`~u]̽_]\fO ٕ'`wu;v#>f}7ۮG~}~}ÍMEGqmtbOm=#pu{OK$ n2LyGlޝÐo7 L+^N>.3zRbh: ڻȊ}at0pNNrt AorѠLkrv_i'Ȫ}64>O3cdl׻uv;_u5ڿQxF4~:0(?}75s)ߣ[m:.Wc eHt͂@Gѫ"hygYhv8>`%ߧ5 %vpԊ8Ƈ @Hkۆz jE!=dhvۆBDi* ﺗ.~ "]'4 V\ AKXAxlX!^ȅǶ)) __Qcay,[;ĻxWqNa&T}x|=g񦛇aN[U'pcW;_f-]UО/-o[]A׼bXCzm݋ɧu8~m v]ztn&" " " MG`%7s폆 < CX3m_8[>ЯiѼ߬}~`OYkE9QxO tZ7hǶ24f}xLe* Y(6 9SHS;-L!ku2gH혩kwS/QO!Gee2;x|dO]w9/3l6юoJ8Dkmע,bŢZ7m!dh2ׂ@gw~" gZY*)Bn&"E;P:z1W_ J;#'] aW~h4WIk+o3Xf?^;@>covмfIǾ6"݇1x~|:1s>^oiT/!Y ̽)XTg6w0'8(x(x}j ks92= LxrǕzE;^Vp@JwcrҐ/OWPi$1O m!mE(x4fPo39Kྋ,_Bs!E EsIK͚i oV[Et-Oֆ2ϒliqSL,tnEݏ}}QWRoA,Qd>ACGt6{kvg`m_o0Ov꿧ĹZ7Lm9ɏ}sZ<%8|'(ftlm=(غKJooL`;$ڍj0l!/aZ>L(q?\vƗ.|./o|(L_){d]vdѼrmX[7DP pxם[sf[M-h8r*,:s]77dWpa<2-2\K弝 {ffK!]m9*hk(ANRѶ2.M~{/s;J e>uW/pP?BpHT(̈́q)0C>-^}]o_7?#8S4SO?cz.+;bjgOO}#7=/|ɮչWqrU_˗UׯN_3c 25isNg W~6׿9Wxacֿa͕k_g۳kV3 ⟽ ۾ssio܋j?9!ƛonc=injxۛSVTG>և/XgwO7?<4rsP<3~h{LDݝԚsJ5^TB!9{7>$M [<̝J7\ptr\8燏+ֶ9u2=Q^ẉzŧ~Z+ֺ.xU54 ҚٕЗmmmY Ի,j:ZFAb ŰB P{I/a[" /i!kjpujhkѨ9κֺ]1+F3{vvҥ.m=VV{3P,CesgۊGrΕx,c5}_?:׼}?sr+cB ~)xpuu37nʓN׼|7!x}N<4x}UѮL(}GZCﲿ놢koK m t?P\ƳVtK[3bCюuwc?h?kuOu~fsWEPu$C;7Ҧ#jBo6/OOTWfD@D@D@D` %>r m&,tԇ }TllaFçD;UwaWh'D;<8 r1;;{15-x|^ě쳼ܳ)Gf:vW2'?t=ɨs$ދwxY0=-J3;T&׵ D'"l0B9`-K Z"6G{qXg7!2g6wA(nb2aNxx1,nA }1q(8׫_J1 8iEBq/7nNP_x'?TX]qa?zy3k?s7esPQHJ}Q N:9@]%+<`Ȝx l=??0yaÏ9a5ف/sj~Ql8FSڦpI30c;Bs>Ltf7}ƕxĊÆmӂ49t Q,\_Uzkv,Y]C{ ǿ_߽՜L,?T+ W:̛NΕ@aAN98MZkDbo`D9nL᳗빽ցǗ CMn󡫽?/u/~Fض[ݩ/owj57>N6lLzPӮMԠy8^!쩪`k@$)d y'P"^"7jK,Dg*XxHy4 9r?Gi.㡕텬mC l`[4"1 3O6"Ε@zJ6P8heL]>sm DLON;go:;䐃?eAz« Qr_c'p( s1m\?˪;ԓz a1#߉O:8֙s2bKc 92o\'{ۗň;M1_~t[?)Gi}+/.ԉw{]۟Mn~LG;Șm$A#pb\}kP\O!+Cr&?:a[3N[U" Fw9(Xcg_K3%pnNO;jQzgwkgg{QntZc;08Km~6:gS|ΉX(hQb6/r1?/y 吋%Ѧ"2uV!&hG{%DBb *D6󄐆`~S&g ^̣lY/v[z ڝ0H_\v?oڞZFgkC/ -Eg,}U;GeWn=Ptt܍F>I#>Ϝ!?xLqgѹy x3gx])#=gjy}|P 3]Gc#Ա;eGb*}Nu.\PI:责·s&,pdrC9B%n 
‰QKF#EJpEcT,3֏ccj\cF!1]74Xc. T6 Ѱde̼vLў~){_!OmKv,ᐅe23n#R|t~N9ye2Y_~9F[n*u0tT>cnzޛ7rە<[aF7C{ =#@a}|vo~q#j{.?pXg`,駬t |v/[Zݗ/g#_h:6~jypP7\jHG]mxM wU"[&aO ݴ9@D Z86Z7Xϱ67'w^K23!(^хO.tQ6_Ȥ^ۚyaL{`+ݖe^s e9_/L湚n?~=K lC?sT/5tP/5K"c34ں^cg:;p^`m>+ڎw>E;/qϯ=&5i6o?Zu(A-A< HZ!ȵ˛#_%Be"SyKamBb-c9*-Н_8\u%DLq.T[}p6Zo6mCZj_5 xj-Z?b<70 B,G-g #7N`'Tq};;S /;,w7"?7zaH06ȬT33wݺm[B`i;ꆡsՆOˣ'pt#(݇DF2/cYvj rX( w>Lf;ªap4embe{{'d|mcL~^uܶ{M]ob7" " " Ao@r.X[<u=k𣭏 FXT3\v?eApg28ٗ9p|1\+]Wxõwh7A9;S"}p9S1X)ϥ3>.{ϟ{E/RO6W.iu/8מI$v?3l,͵nj8eZ|U(+]wu;6lbݝ֒lu^Io[/~^u$!W]bݶ~下X w8d3Ƿ6\ X7A:uC%,%x3D}H^ڶr8G38nmG~ºmWe.mՋ]2{]eжlo|SHlwٕvҏ #P/-Xeݕ^-K-ٳgٖ-=v[/c6L晧6Vpx?D:nc9pG dК,+?dn~L[᜙: :iffw` >dqiߏP-ؚ ysᨃSb]ǧ! CH(DY}ys;-9cZf:8ZJ㍄(v`RC/c/1P@_2?A(_e>]? sq;ij#SOoDRr1Fej {Yv5߲{pj[\:t禬mͩ\]ǼD?;Ӷvi)W}V_ͪl.hqPްF1 t_?q=u("_yňRԣk a? ev0g@\m6澇[lr9+Cs Y=۞۲B}B6slEn>8!vPJ2cB uttM0@IDATpIn֘CxhI[W!gMo}͚ 2Qz wGͷjx%+SL/l˙5mr]5!4g2K9 y(ܱ~=pwy[gu︇]6HѓBܣs39S XW<`^*Gj*" " " " " Ӗ;LTD` u9Su!@Z+0c痟{{1u;IɵE]- ͏lpmˏn+COy~7۹gj? ~_[{\f@[kcOY8r]KAX"eBc ؕÉ>%3s1[Z:b\,j}4[ yECi~ )K䭛mGq'g>~6gnݧo}Yn\:K*'k(=3=zg_󧞲:_;s޹@ ȗrCkw۷T?B$2wlFeKO8fܩ*ӛsO>(x5*%u.Ld~Ƽv:ѹ9g;Qm:{3 .m9raÜ{]ڃ6a?K^cL1wݖ-[|)nBs&"$=I!;QZyzr<8K=ruj ҥA<ٞ|?͚3Ǯ7vKi+WڍmAxr9a-\DD^Tdk.З,A+W kI u qX!=!&q-ӜșGNM[[:8眳?9['\%w#]/o}Lj'ƞ[w"__:ەg]"mV0%K63N~*IkXV&Q7rGՙ׿Nw}ȥ-l _sUo~͚(Qzǜh7s6oL}׹U'oƽ?̘'[DXˤpmvPW[61)2dҼ}0֣Gמ4,LB8 AkH{nVZ&o=ayڻfۊCޱtmsF|f@v.H8ha[+!e~8zSpY8B^D(l 8!W 90W])ʖ07M[0Ϝ{ IRúe!6n·מY% vvͱ.Y^W@K%@1sjsԑU߰ooB7{.}]Wv_ctk 'oy}_? 7l/UE;KY=V3ӛ;6g׬g]oznN _vk^g aẑ|n~LD(HRsYϼ},~SM`9<йs"F3{a>Q[puvBޟګ?Ǿ/vo@s)e{OW_y]_oݍs]+ݺ.jWk~wyo/3Іp'?![yq cz~!~qiT_ݓ&FcfFm"Џ0h7mL&c}Vh޷G}Q;-\s7o0ĸ[8Q+"݁K}g=۵qK-M#4sEWR[-g\Nd2nQhަX*SC˜E)B::BP6$Ew2Bp+,w_[Sp Ce#o{i|}Rh~^jmm-^l_r}Q%KO>f ͵g֬eo}66;ahr6j- Kpr:,(!]ɼvLok䓘-1.91ͅĔy-v9s"t=Jxp./^ʛa%?%y:;m^;-L_} nNlWYI_B `d"v^ ;/O# `"Lg*9!9?~À.,uHg3tς"͂hoI7s8f,zQ{O&[q1֊(ESyi᎗B9s!4{{{m05߾n6ۺy!mܸۊp`Bd& t/A]"^2aryw e'$cxlޒկy_}nHXGG1<% wR'Z_si|;ﲻB^l9o x!䯋"3|<|(j]\yalc;\X8z^Z;ishH11||*" " " " " " " " " " " " 3huT=P6Y.|e2[uJR}ֻ~m`m=6g3GX u6 E#h8[`s,9o1l-[a1TD@D@D@D@D@D@D@D@D@D@D@D@Fݧ0t2!⵴wςCWpVZ3A_"a-:FW)$܍JRPcJ~Xr9ay {"ee|XE:ÜzQ~Q7v}1IX"" " " " " " " " " " " " (Z#*Z'" " " " " " " " " " " " SL@D@D@D@D@D@D@D@D@D@D@D@D@D whL1 wS \F$5u" " " " " " " " " " " " "0$M1pND@D@D@D@D@D@D@D@D@D@D@D@p׈։p7u:hD@]#*Z'" " " " " " " " " " " " SL@D@D@D@D@D@D@D@D@D@D@D@D@D whL1 wS \F$5u" " " " " " " " " " " " "0S|>NxrͺaZ$k"i.& dV" " " " " " " " " " " " "0 r܍v<GUYK^%ku~9ƅOp71Uq" " " " " " " " " " " " "01$M G"" " " " " " " " " " " " " n\tL  wQH>," " " " " " " " " " " " C@pT-" " " " " " " " " " " " "0.ƅOOL5Elz=Uge+ӫj5Ζj YӱHWEm3n[ vcD4 )ނ]a[Vp7.3V ҍ'" " " " " " " " " " "o`>~Q>=$ъ}P/'볶nk~jW" " " " " " " " " " " 3]x<;|alwS^jX d e{_&**" " " " " " " " " " " "0V4Q;zibXq3޸ f:v>h7/'" " " " " " " " " " SDF!j VnoyEN;*߻*}=BVggh]MD@D@D@D@D@D@D@D@D@D@Daa فcwԒ$a-{AI$Fg27HL{R0ݮcR>Cr~U*" " " " "ٻ x+2HiPNQ[z׺^Zب(* M4Ú3{f9Ԝ>Þ{3~B@@J9|l1t]i(rX2o5W+m`      Py|sOܕ>F@@@@@@`;N@@@@@@* p@@@@@@q       PU&       @s@@@@@@ @6@@@@@@w      Tw       @@@@@@@M@@@@@@@      @]8l      8@@@@@@*A`@@@@@@ p9      @YM@,]̬ZTVͬV ]l     T}wUEϕ7%KZ\wEI@@@@@@ +s#A`ʴ}yo?j7긣6e y3k_k9x=MM fS/UtV4yKquW~{jڛ#{;Nf=vI p^pF Z67'{צ lmw窛>̼[Qֽ{ ҫͼ |{Uq\Rs?t;nxA;-_~3|?r]d0<Ġ&x$'->q"&Ou3 ُ6l%ac[2tY]VX^Y3^Ħq_V6ŘAV? ߡ֦[ie.LQ93pcMkj\Y-}Vuwٜj'V`QvjҴg:U4G9̴~1/]8Oǡnv0l;uhkz|6TRΗS;tlֿu$oW[nl0u0Mx`S:AAU]5C06~u.a3un>mJi/_w/ٮ_C??ٴ{FMju`Voۀu&@/?PK-U ^9V      @Mer& P ~UCA;?b?۟HhGKW_6i0S˗c,RQӅܫjB^fTzuM6 o1UvQAOPF seԟ` ikԨbz߯\fx= h痭W V*>vٱFM`iۺs[>AA V M:DA;f㓛 D_dhR˟TAu!#hA1c:kgȖ͛_Շ"@@@@@ 7w91 |ksv~0U]WƄ oVXa3dj.5.09^`e唵&,J:O*SKu>[$? 
%7!.;$MfcIf.M'O W6[i.(T|m2җmɚ7k'q oBe|5Ky_>TIM pt|͟ :     Yh*3 E`޼7@0PD5l9fF_,˅ & lܥ{N);l$/3Áy`;,W~u5)kfN;_]O4ۭ -J Z4KVjyTh&uk~J t,;/XCWއ1Wݔi?[WgsD}R@@@@@r pS!P6K8^߬"zx3fҏӒ&-Ngm'_2\%o&s#N S.\rs)p`PzgjJb{l^  ޫ6/ sQMumkxg͞c W23MP4w]wt7j60ۓyL:t}*653mfQ&W5Ο=7)սnڤq }scs5@@@@@ +}V͚6;̱sZrf*/ZDчQ}>{'y)o~"-}J|.pG*ƣ;`8y.x7/e-{mn~ם5S3Gr~׽['3&gj2}bla"sm qRIvMCyy펂ITyQFv:m Nui<Ku], @@@@l4Yَ[Gbs\n UҾsN?1hOSO84\// UjgrLl4~x]|'9L_~Y9ģs4ʎRU9*i6p <lFa`\5w]Q;]01z߲E3C_8 訌5B V-i_JY._* -~[6i,D?M>V9Zq ʚ     /Pg*-C#ļ 769aAtG.˫RWռ_6LM06n^w6Gami38Sp-bJD₅ Zjג+WYn3 pg%]Wܟs XuY$u5㤉4o׫5K1*񈅶9s4(7_/Eu^H1@@@@hOt?V<ʀu`EDFSQ#P  )R Zk22Ja \&M7tNXđ(S\Yew5n+:Jr:2^ױMgR@@@@@m!       K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB*@IDATB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ܥN @@@@@@ P      @R'g      MA@@@@@@ uwB@@@@@@&       Y!      qwqj@@@@@@H]]@@@@@@ 5      .@.urV      @\]܄@@@@@@R p:9+D@@@@@@ .@.nB        K"       p7@@@@@@ tIFhGA@@@@@(3;[      @UA#8߿*wA      -Pуv_w}Y      @ A;P       P%h}&pWGe!     T s!      %PقvowuY      @(,h~ 7]E:l       @|@#0-ˏ5+6       X e_ (ȸ+! #     @eI]Gl      @ BNO'"     @U I]K@@@@@@wK)l|LTIஜY-      @\6>|iL{h~ČqG~$e'3>\A9~> zќUAUcUNY}l2Q~lSPyG8~0z ra{F\yAZjـ]gӥsGՔNGA -VTsg^0o!5c,9Lf3MU;Q)9 ol*ez0{B9bx͊+LM^2G@@&PA;o]U-eD@JA;@Y:>2Sd֩5pرU0Se ZK/:7˺uO9|闦[Vy?&^yɚ5k=v۩ܶ+V5M{!^zZ_9h/D{T!@p=hc^ j7b=7n?Vϊot'pWs@@@ pW^R AvIi6707 nj`>[l#znW9z5/ǎ yygT:l)f Uaipnq+;   dx[3F)?4nqчe{YˉϘ ^]BL.\d{ْvYjk O~.,XL(i,̛5 wܾ3wkHᢇr|Ѱe?b>+?{=K\yFTzcO:lܨ{ஶ_y|W}6+[C4ml_W+1K8M}%|}܋CC5Ou.=ԡ932)kwFe4k}ymA;V~yھ7i_ૌe3>`:kEPykλ2mް{׶o2;nԩ];8_>[4oju=:|kxwpUÏ?e>k.1PAQu]u˵nX;6>[2_c_ y18ӻf/t9}ͮ;o?:Cֈ2ۤIcs|뾇}o̭wse\ ulnuaˎfkWfp.6PѾ]kcܾ٤{W7=cVfV sϿ4Oپkm׽=SrA*6aN;`C@g_tT]1 7ds/v+cRQ6n9p wkaocqjjT}CEK{Txx vB卻u1'pS~b{cY O{isu.p}IJ4{৞ _U's&h{xصQ+}`:o4[uΉ6CrE>2=LƢt rG 1/ɽm\G O)2=Y-{~t^>\cZNq?O,Yj?9|a3 EWʚSþϺ\-]Ԝ|ynQ.~&|g\yV%]|E}1O}oAIq{}ѺvhƜtѦz fq?tJt;?`/?}a/?}o}-{U"  @]{]]wwS( dF # (H~fmvz;v1e6K [Y:Kny! +WgBcsU7ƂvZ9G_y=qz #Od4PKY/ϾtA?W6Mp$ 'gŒy5wG~M~b88=:ˋ;bA;-[W_{l`˗hP+c?PSS>m2i+zyl>hF {e_ǖw77zwh^9+t xqh"w=ڛ0l ї+3+VgeOף,5toNnm}13o|p[ -q&|c{1уCqɳo7klsO]tz۬ dX*uU(,˯9v<:t^E?5u /ß+{_yȨIH/f?'~cNonvmOm,7Gp 8^ue?*/W6^M-}Fg{nK<'| '̆_VQ?qG_pi~N¯XN$~wVa~ŽƊy /곌Oo]{Fg0b.<炟*gB2PUP~؛3{tf|5>\0  T@%A? S  p#@0Ue[\nQ6j o-7}mm{ qV-zm ۼoTשfs<<ƺv/CoQ.;mg+z_?f3O!˱?ve4MM_iW,Ν:a1Z@GAk|MzU~o*^v&͛K2f> 72eӮmk7vQ5n=.#FP:mfI3o@7l6RT_M̀7]tq\n^xb\VmUhQ/eYis躛tz߭k'|p曉߻W҃wM..4_[633Rm6SEy,H=xI.ح}E! "vYf,ik_C:Wugw*N x(Tܢ*Lټg`I2QRоmMJYoNCֳ_{U&m2F~0EM^{u͘qo{OfM2`5ywkDA֯>(aTe@c4Y8/#ZQld`,Dm]]ebP\ lqWR֖u|׎2[0ͧ2!w~[ftӏ,Mf/|/Rox[6Tf[X/~罬A؎BDC^s{}{d_3<ۦg\n/zp&h_Uc\םw k. 
G[z xH/K7jdTtgVSMΆ,66[uJY^-z"[Q.瘗ޗNE_mv^0u?d6y^~oPQ?Ҷ }Mwqo@@*.@.w+\Fn=Qtf Z5kƚgO _ۇ pn1`L)FMT6ⴱMe*jbPAhQOAG|9̌Yy}A)CC UMMXCb_&5U}Cߴ0"1OLcM{LTQY<\V3o'sYe3ei=UmvRe l|鷵Z?K`1z^i|J5Ϥh;Sf”A}Ho۬Vt봓LAeW,}\ɭ yC_۷sW7~g_RӄjUˮ/.5Ut]ipݺtrX5WnU>uN!#p6nal4i'kC/޲+ס][x͔P / 5;j"W\wTtѢsYA(盯S\lf+^{d[RQp/:퇚 iʹ꼹𼳂{ty1po=szUSO IE4aώpNeK%ڜl9N4G:(PLMҮZ>$ϻOz?4Pg. j狛9} ,:&Qk |i_4~ϐ~.9Uhӎ>dU槂s I //=?l`bE_NaB1zG]Ѻl~ sgig/s]}/ \tQw[.l&}9hI2@e%gmr/\4NAJAQq?OGkz˔)mmձ>MX]t Y>gnx5N<,sgVޱP vV᠝[,g᠝F;ឲA;?2M|4󀝷M҈psil3;Kct ^|eULt퇃v~~gZX_g_N?y q~ tF꓊aKÂʌ*j[ZS`eR-)([%Se^Z>gک^Y*AsU(?bar޴~\WL?ظ#oVYye8¶sKpl~>{Fi .5VϓjQ9mA".(8p]{Oߧ6뱱{˯}Щ׫_^u~]{aZ%_|:wC0   PaHf =|/^} %/dMSb y2(dE.eSi>4j 2ՇˢEmԼ/Άx.GB&Lx|moae2A^SU᭚WpƜY۶qlqmT;sW]mVQIs HCv _3mo8ٞW D(%\ᠲr-gu4nr.rVgreN l3fqO.Zue_9R6%?_:,lE@8Pmze`~e* +mRiCOMqqM`6+VMթ(7Xdsr:wP`͐K8Rp'[3 Y&=lovL:#Sm8)l >I kmZIEliҼquEMc`?ο*pUڥS{2 ƍvqt&?W>w8Oj;|RpF7KesRE焚Spi}%E9%YX}u|l/ft EKzykݺOo龳˲TϊO*M7vպB׫y?mjkS߳?ҬE?   @PKUz[V[O5MBϜ!/0*{hٱP@Η_VP}B j2QM (lN4_/qV=Ԯ(Er> \nz5_?5̀ӯM9~9l!+?ٜjZ.\TQӟ?oҫhT٨zAN~z׆,h&@Sg+)K>Z?5yW[DYnA;s:wys͍g_q0e` *~נv#혭(R6Tt(cJeCl_Y:9^}e+@ HEKAM-[ ҕ-]ٚU+K}Xٯ]jtyg!7ɧm*ڏ8eW,kn~7}<6t9)lZT%{Ci}e\zKz+hY?GIӇݶ_Q=?iIuŹƊyuIVeu_r_s껐j^FMSvHٷ\ma@@wm\Va;NKD-4͚P [}D G%K@`j?TPyF}vQVÕ%aݺuLmKd\_dYxz(e%5yGR}m^|_Sza7vk/4^w*KC5;l۷4VSPGᠨ~_f)ء_χ+o޼43f d| 7gm0[K7K;CMM$zl]]گzu':%2%]TײU*]I}ީׂN71a#y) we&ؾy7ouqm3ُ>y";MobOgER5mc   Pq)v.L޿f=w˘LuzM)֔#~K@Y+_|vx/~;8gFi+lSώnVF^ӦlkTth6 ]qɿc}䲟&9f(oΟs6`mP{47\\L g˺yufS4Luef2eխk` |ԱW0'%OYfsF{e:bZQϘ T ZrH NkpujJQ}%5%V)m +ct]xb#2fu5WjRxA祲:f̜f y^?f m27 eу~rͶ-c 2C=9r OWaeeӟc_; ?ՏF?bPƲ~PҢsBM"\e+齯 pjAp~ޞeUzoΥMwuyaFbFfY}s̑Y5|Vϯ\!JrC@@#\EA:?{\ uPP}@ Ǘ{n\'.n2eCe 嗙,Ši|֟~ȁA;D6&M,ۇYzד8Q ?j%-e^Է`?\1i򏉓η}پ6n>\hYQJK*?Lon4i|aucM&}m6VAѢlh3~|CّO>LUUr4EhQA&!e& |r+—@lM?营)SKZm6&q&~lh_|ŭS*k Wٰ[l\rW#>s9UI7]6n>#8,4?ܹ$ӔWzGbp| ?h߮uA,Ѹl?v$aK1[I.mڒԗgEt/ei^,@@2Hj>sM+ִ#,|~]Pb0-Q3Qf ^A1οe_\g+ʬ6OLE'|N,^5Vw?{V~5ksֹϾn3|[oYL0pCc?evI<s/d͢vmqFn%)a V7m/˄-)}t_Rpw=C}m^ ;->[lnF|5Ũ@KB?5q>G>ؖ-]fl4jow;̌Ϋi?vŮsg.Xi֩500_n_* N%3&;?9D}}?7\?Zmzh;L 2c5Գ/E(ckNnX;ngUt/%k$s1g-﷩ג Zܽ]y曞6_^]>q9uG+牾sט>P3gv+v9Yw>ۧp~ϊ=#6i>4}-Zu6ε .:Nsfa1@@*@V²"weg˒+o&S7q{ Ȑ`zxy)"X>!7],3 ^A'#rmQ%wnP懲梙sw9<n۔9=~|v:h?_`*DՃr'dU^ Lty@?ϫ_G3 zu M)K5omq;A޿y6 p%WA[nEA%e֥cVEApbmۺ_d9i:] L i;MwIǸkO۠z;AF6 b =XrN ,rt佣(@;3s82?-73groe.;Q-5mFwys2z^jv;儣f=]M6EI%/]gOy)6ο}Qi6[8,5ww'qUO4{&RJUVYe) ;g" "bʢHEAAmB%My}y3y3Yf2$[y}7>s/L" y9zŽwJJ0{ GgDm_"ԹL5C}i=lzY%/L@38KS8 \=n ~րҢu0]\?sϜMkgʡ([JYY ϕ&5i- A!ݮuv駘Vv2^ M{m^LqQl@IDATJZ} >GɓX뗒{VPӦLzM?"[.YQ]TS̈́!=FI/m.1#uc ]e2A~)}8o~`Wr9:קR=SMc\ێ'z^b۰mgҹ4 XRET+- ܚENp֩lO_[~ pi3l[L< ࢁFQ#+\W0;8 3#Cn24.1k;}أp2Ǽt7lWuarv{{n>wG=O"vo[G_ y!vlnpH=0nb]r>N w@tb>돿z]'|(IOOL ^X[Bm}vefZ7o`~n6w١?⼹v   HS9,s֒` h^)W\g v|a=ଇ78k ,&_BVAK1\Gz-NPB@!:u,CՌp_Ƅk'问:Ǚ7 ~yt=ˆV5Eˮֹ.;ΗȚMy(5Ƨܹ>MwϭZ4xleo[:̥MfݗuiQE\ h= ,،s, dY͢'%ÇFS?4[B?+}_Ӿ|:<㜌L]atH< >kyq'Vz?h,Zj_oӳyF"?5CjMj&ɂ+s6鰾'[Y#ϝsO@ |' i׮'?vլ=YzO t ^g_O_--N@'=9XOfo=˿O_amC?34`IA@@z"0Ϝ_`X@{^p:׿mf1o*Wݤ"͠e]"q.g__+D@@@` b/8/˝amOuH9îg+/yNb|w:])=y6s'O>Gg-Ţo,+VE>9Gd2>ϝuX̏tVuHSlٴ~׌dvx@}-g V*m"  [~= ByyΐYYYfrC3w.w߾H^}&hI6 9vh{E=xcO'o`@@@@@@b/@.@@@@@@N:a ds׌"     @g"M2[X.      Y]g wp[4]E@@@@@1sώO}mߺY|؈     al+Q׋:h(F :^ @@@@@@` 9_aEph"m@V:g|s߾U}      0l0T"^4!h6@^Fokǥ9۟~JUa#      '1h1zbN.Y\;rNs۞      0xllAc s+b~u#F.RA@*3TDYdd?      И4Ơ QĪL"3h O6_ug?Ub#     X iLABeۅMvL.ڢgcHV-t|Fɭyg #     00tN;!ؠ4W4&XXs!iEz#!Y&]u NjGGKH=      0DZeF[N@/SLPA;1X6Sb}R·{+W;sgL1M )qk[Ba      M#d;sڅSU姘EГqcro}T׷J }O`C^>hlV#؆     Q@3 eɞ3>]fK 9<gM3H8NH9P       Еk1*&HA` %'& zS@@@@@@H4栱AW!p_7b}9e3lč      YNcS|pgvDV2w@@@@@@;:443]QN|ݑa MMY*j[f      esd3<%Ag"7>jZe[u6Mmi     L 9)AS$+-Q4`W`F4XB"7>&W< @@@@@N\      ]؊      @LŔ!      /@߅      T]L9      ]؊      @LŔ!   
   /@߅      T]L9      ]؊      @LŔ!      /@߅      T]L9      ]؊      @LŔ!      /@߅      T]L9      ]؊      @LŔ!      /@߅      T]L9      ]؊      @LŔ!      /@߅      T]L9      ي    @tB6$5R__/.;@:$55U%3#CRRR:v- K O.OJ> Lj꤭- " 2LFd˸2p /!wQ@@@@ hBkj5@\C]](//L sƂnԦ +Z籶| 1r'g{>ؼ\ɫnf]T' lS%1YH8hmmfi6?55:鏷C Gόw]\t@@@۷fhI5_]h!cXЦ J#ZL"/7Wg}c2'#INJNр*T }hinlJiin?~EjeKAG@@@@-v6hcBEi*m6@h PS@;FIeegm V4N3tA-2IfnVgy@yw$|^-u5u=7w;~#   H@c暠]ime0W"@4 ugN2,kd6M`EZZ uzq+g{[q Pk{_Pë@@@@@A.PQeY`2̓<@ fNF glʎ-gow:s:EOc 6: vrQG;>xqɭCL ;%SdIvJd$KckT7ʍRT34.@n\    @7VxξCpwXsˍR;Y|)cFn*3Ǘg=M~~|+嘣nojjvY@SS{֎B76{i0P(V *٩3OyuӛRa}c#q"'7qA    @4t:oƜw+Y9킃};r 2h-++n\(wݽXZZC}/D` ,կC;K/r]~_r=Clғvq*yr#1^ȸ׀    P@Жw,}xncsڥex[u{RvfTԡ1 ,3,ٳA{/sY;%===8V@UN\[f L͝(sF`ms4403\fZR?!!AdeO,ė@Jb$Zsqwqb@@@@yV~"t(Ц{v:<oqU/ڣl[g;~k=Yb\oJqVg>s걂ě@ ՘t8̚:I Ir%/-63  0P W~#   J );K&|=>3 ΗN5YrCi;C,yhN<5đ=(ÀNA;.3'-'57xw}g.Y*Y6E˗'P&O$Ǎ sTǮ:Y3'/33CL,&V_?hl4Ï0ziӦʘѽOuN <999WY+uuN3a:.³TmTUU9[r:_Y~;LQGm7}'d[׻|wneq6]rЁ-?]y=o57BN9Ā_]x膯{}~_]@ο\u7r-9un. jX__5Ty? 7'_{qzV4>:t^kE Cl7 .k׹뺰aF'}N?w' a4@-vw[˱SuJý=GΕ٣zGQ,w/͖aNu M}&979 y13':^X/ \~@Ӳ6ז].&e25ߘ/:AA[Iq^8ϡ/j!ΰ~dh0N\u󚕚 -+KkpV߭[7Ԟ;Cʹ^@@@@ Г HNϗ_ 8\r]c9Z\|\rΣ'vp!vzn sEnV? O==dN>#rKSS`&$zoNeW|C4eMrw i{$/~? whʏ!W/ZuYc2ʟmWo9nH9}vaN{;4,yGKno4XѶOQ9kLidcgl_ 9=orDA>A(vO}Wt'v~̔NA;V|x _L~E!=)ʹo@f_;Sr^l x .{@@@@@ɒI'a35/O:24 xFyDzS~{ܬ2=~ܸry3EWVV&\@Q"WO: ͫs֭["}oN4Wm2vo-ĉwY|dc?4Ch^d~2kmgA>h}A7@yY:3` 2z(:%_½=+2ce˟Յ\*O,}L GtdB}N6ۀY S>sKHs'ue'lA3v"u}ٛw8m%5ݓV`İl!idv,y՛]s@ZҴ/ 2̭Ygڇ *1Mf[jR3ܤݯmM2A--M&5CsڢYcduz)YwZtXYf=#cqYaSk2q٩uFCPent[23éf&'ϭyjx1\:x 7/s[k<ӓRyz🶄Jҫ͵"   /O7si}v&O!ܷ FwGou谁;ddKK<-I?XwClmஶVxJv)aw~O:r;7~9V91Gߓ <& 0 ̳ωm_ۺ1C<{]w޾,2]h'˦CRE_G[n#pŻ#`83ɧCk3Y,+iC\Gr =6Cr?O:3vu„ re⹗6heV1ܢ֓g <|{^\ ǚ@>vwhV!mon} |AF^vsn < d٢Á i/to͏>etS6 ,՛};=RZ/]nOu8YZ4j.B?/>    }uoaEYv^4 [4s-ܠ~QGaWsԀwD yfcum;o9{~|t|٭Y#7hg+}#嘯tEznk|]{Opu4^͋ƩS4%]5 Xy횙H![-TvfLR0޺~ޠtE@Nmafj:NlX'_1yڪNnM˶moN+@Ӭ@oN[uneIvyӍ1h@3TьEoNow׳hf!wzQ    @_x-Ҿ[v7X:ͼӒ)wVwțoOMP/\Cj $'KEEEf^> Ǚ[[wm޲Ů}˷|E3yNj>vx5ش={g!VfasGۢ2sfkYCTt`s4M[9")fHoYU?_-3ժjuCVڢAb{.̮|zl|=)sS&O طjg2fm2t[`'ryZsҤifPe]݀n:齢s2W< >E?m79uVdNU;ʺd's΅*ހ{=3gww9Լpm0Jsk`v?Ќu}'\w=x<[̍W4! i@@@@>(*hC [ ;kvyrA?@^|?Ӗ*أ2 zxWeKqq@^q]]{6N ͊LK k3jHjT{{{o]-W\vܹpdv= zj<_zgޜMO4io-%g?dCO?e7c=9u=\     &]vY-{ǹ$ ]^1CcC˖-?I|'n_?q`UNf`B\S- 4voINlϮk1Y farɢӺyl3lk:zuAlx _Π{_A\     eKX1K;X >6xû{I'0??#U حmt_{BWd5kii<>gԾM0ީW^^di thœN<^\|ot v/xg3Vޔ&/P͖cFt E[X0ԧq]1{e }=:jHYpECڲbE`Ǻu\[ l g5OғB:`3V4򦠶v+;#X`w il gfՍVw>yݧØ&ק'  /D@@@OXРhܚtTi"y/w |rƙ7uiJVVVV8]WZ2\vg'x +_;lq)f*իln^{0E\)uk, 蒯;i@[=Y7%:-:&/̳;o|û:Cen;,-_y5>G63M?hOnopC_+O<{xin644l=f]t='$ '%re%wT 4:KP }1{ SG6{t"*ָ˺05w HjzfȭuCv;§A{]_.<Ѹ!Hwʛf] 9^R.@@@@ @OJO}vݟ֩7F֏x}ߒ'Y:s}'d2ud'{Ȼ-;wo|8>3C!w!{HvVsi}G.<ɃCa~[;Kgu3-_;7藖*?}!'Î89]g:CCyg'#GرcZGٿ>'SL 7vj#r,xA]v5Cj\|ud=xrɧ9C%><]֬Yp+:Fzt z?}r;~Vlpp>c>*2lxɥvNJQz.l|{뇲Ȏ49l~fεFZJR&]=ڪRV_nҹ,~O9ݶרms4O0l|`2ӹkD}퐢mŨ=k6̎l;I]OaLvP9:MW@@@@@KXpLym!oru0fhl5[ rIn܀1+ߛy|snPS+/=Dݲ6EvfRz/mȄ-%JfJoV^I&X*oNm)oخCvO[}6fy)]}-4pH pa    ;U^z$!96[Ͷ<͆](?2wfOiܭ7/aÆunKI ϗ}N =P`Β -pɟ?Ѐݯ-pdef-?yxin,hSr=[{}G匯@B?X\uer_h#j7z ݯO[;^'-srR`ݮ2F%Ocۤ^P:%d7'w A\G#u6"W_GE3e5ljCtvX.iuN gbBs4=RkϖeVo6D< QŴ۱G3ŻN@:,ydw|\L;m[<`@w+]K3^)1Czs@0B[i{_ i@@@M`@I4mlyUգ|Rgζ3մ=$r4 y.v[^6l([)'˴S5좉.wWL6֭[M4[K)//[HzzL0AL'{>ܬ˿@.dJ4#WZ[[JIF)Xﺽy ~o^9l;=_1aNrTf3ϝfU7暒.z~LfɑԤdonަyi9l# Go3|pGoJNN/'MثSh4Co  u.@}DRɽY޴14kDCăYH,PM؂    0|dvͶ{N!  n\    i7;1kZ&:O]w=IOhU6!  
p7^p.@@@@]=Ha1ùEpa_&p4CRV@wVffߞ@A @n\    tOww""p9g>( `9Q @@@@@@> pק4      @uωZ      >q@@@@@@'@{NB@@@P%8@=M6O={?/]sn@@@@`$&8KDC~pJONsv4pN-B^)    HNNv~<.KD]aǏ%gp:ڃq IDAT(м#hm ySs#   C@ -=~<.KD] סQYEw7o ySs#   C@ c0*kkki\1~9z4%QZ.3 yB^%    HMMtikk%[!p\" /gZʈ;AMy#0(<߅]@@@III\ecc+@(+/٫]4]$}T[Y!G`PԘ{]y=.    HHH4񑓓\J)VJ ݹ<@kkJeɘ3E?sW?*+!{Tj>cT̽E}}t''}@@@@RRRDY/ZꤰHB @iniN33W?{[233eR nPjIgKr*:R/)vD{[,@<m*@@@@` hƇγf&&&Ji6CSm޼bYa0Es<}\ pf`]}yԛێ14Nv۝l;KY2KfHzRyJevILNKNK1)`~֍Xuf&hmnu:cjޏ p=D@@@~!dޙ/5Nx/QU>۠X.B3w|&?hmmi2Y2G%+du&" 0Yx Μv9٢vzK!p/@@@@@rrr{HcJYwf]3Ht(M  @C iN&hgϘa2ͼx_XWQkJa77HGA`hf]zrB3V4`E3)h}"p @@@@>fV ,)6,ɲk11egf @6'cړNznvɌd)nhP͌(WJv@@@@B 9Dwe}P@Q}hN ؅;cܑm#3:X#C5P;^ x}e    0fM@fGA$@@@@@@ p@@@@@@X(s@@@@@@ pF __ZE@@@@@ ؘz1 $ M477G      -`c6қD,Q[q[4     yHoC7j@VFFeumm      6ac ިqLٙNe۫2#@@@@@7uhC3z1 ,-G      @ X>4Bஷry N2h@@@@@84NcDc#Ј#66owqr0     X@cТ1H6St@ lV!Kڇ͒Q#KJJru"     atN;k7pDm'$p( ťbʐ4INNvKc΃     x4~, RY]txLʹV2^q`B+4!     QiNj^U#յR(--n&^\tN      ̺$IOKtΌx>;?@w~*lC@@@@@@ 1>C@@@@@@w>(lB@@@@@@ b-@@@@@@ p&@@@@@@b-@.@@@@@@w>(lB@@@@@@ b-@@@@@@ p&@@@@@@b-@.@@@@@@w>(lB@@@@@@ b-@@@@@@(IENDB`sqlfluff-3.4.2/docs/source/guides/contributing/github_example_pr.png000066400000000000000000004371731503426445100260100ustar00rootroot00000000000000PNG  IHDR*˽ giCCPICC ProfileHWTS[RIhH ҫZJ AŎ.*vŊVW@ւ/Tu(*oB+̝/;;VO*Eȓ#BXcRXv@ \/@U'W|qg<JrrTgC'BRTxg~x-i<, {gHb Z x˛A})0ͬe bU^B˥gi*|F"ʙ4;%1ZC#J)"T1_΁L](!DLq8bZ)n"/:dվLd~(rjoDB>Y$JL U89bM9 QjE"ǸL x$"De+̔ǫKbDbn/%Fꃝㇹ`-B ;iP>&z 04L;\(IJPīTinZF(y = xr\*x .Q'^_ P&l n넿T#d ?"PHBG_Y dO!Q Vϒ zKO #wl|o.l6dՌb#Kk@F %FÉ MJxDNh#ܞ(.(퇫k}-phuhgF ~x Y:neUX?[߽ ŅRP)v?tQŚ1Xoȏ9U_5 v; ;c#J<o@;}*+)wqp+N)Pn<$T8KTbïŕ𝇱\\P~kT_oyo)$q_B 4C-v07A0#A,H`s 9`X 6-` :pp :@ 3qD$ Cx$IG @#s2d9ٌT#"9ҰLLJr ۃ5|k:8g, H< L|߁'C J ?0EL(!NNxO$D[7܋l4"z^b#M" IR,G* ֐vI=d ٍNN#KrNQ3r/EbMR%eJ;CP9 )=[ _b4jf` Kgeleblzzezu=WocbL&\ϼ4d{p!{\`AР`uO,0eup#F627(hCӌ_4615019ai4 66]izԴÌah&6[ivKf*X'Y] I{-[R-},3-WZ6YvYYnUcuǚbc-^m}M|:\"{vt |*kD{-ȡ#(v\:0wdXհN4'SSCgsssVӆ/~fWO\.w]u]G6qspU]srw!q˓9sg/o/o+tu7}||%=˯o_N9;!u^怶@V`z ^PUУ``Agl{v6{UK,`g1 - m K [ "<+&+3bZDc$!2*rYM ϭv9c(ZTBڨGѲQ訑Vc#l~M7rxg v&O I\x7.IԔ<.:CJh1s!(UZFJKNۖ=6l쪱<Ǖ1v&MȝpdDy*^w7c]F_)t˅22g> Z! :Zrbs#Jr$''N2U(-Eɶxy}<_T)~R<, ,,왜<)):L]8YQx/iiMͧϙp{̌M,g͛>;b999s.//~77enCM w}aÕG,9J=:h߱cݍY7Ml{b̉k'Gl>u'ΰ;p9suŃ      d,x             M       3      @˂7*       @      Y @.       ;>      d,x             M       3      @˂7*       @      Y @.       ;>      d,x             M       3      @˂7*       @      Y @.       ;>      d,x             M       3      @˂7*       @      Y @.       @l"[i,^j.[aVUVTAd@[jժZ5k2_ԬQ=kL@@@@@U~,+`7{3ђ=D*@ 0    Iw,63g Fma]A:vzjP@JP7V+r͢% ;CmڤiPo     e'Pߧ2.wG]MY(r      @vˎZ      Trw#     dx      @% pW?>      @vˎZ      Trw#     dx      @% pW?>      @vTˎjP @ }V6V6W1kؿkZJR_NZ5TVTZ5M. `ih i7IV@ߴñy@ @2#@@ܼUf媕6XZ.gr]" ^SZuSF5Ћ!ɶWChi%@]]Cb \iK9 f$yv(^Z5LXE@2dJ4KI}C-(8rhoUSN]DQ| bJ{&W.^r5 yne[첼/vG @Y Tݢŋ!;}`~kZރM75rҲ=6-QvkdEeszk,nQ^CJ$Jip}W.a͉|oܕˁ@&ۍ` ޡI,bq]4ZX]e d}aI@ʛ@ =cv?xZ߯3665V'wiec.[aDsYQiL֫S+;+H@ ce^%c>}Nm(V p4@<enShDS)? JylOvV qji3VC@J] ;xykVw(> XhYxIwՏDTEՕ ?$bQPjtRkWFn75IV.Tۛ5*w?l8,[,]UN$%P7pRo+! 
vĝF&}Nuӕz3b?} ?&6޸i[o&Y1frO<9CAҎIy)L򎥷3eԛ~`iPitۚJoDU{&66}TRmlih퓑p?߳f9s7ب٦Vf͚[^eƾ3-^a]>^LO*uh3ͦ[li7d1/SȻ1w^yV{۹=60;x"Yk.;mi.蜊xYqLv,_l֦gNiSߴik  e&Piwat WLͥ9ݧin;sI5k֘Qa|3eQۙ;z2sy BPG\!w[)@[QV15W/oUI /n9=B3ZJQwXΞI.hjϲAߦn/4/WGmP\Yƾ;Όz-h۫WoC \Gh ,]̬\Y~:̨yyAWNd[Mr7lfɒ%_&7F5QontN ̷הTeatǎ̩c=p+ZYaSRYR֭iԨQL"'t?Mg-ZlTF:Rfd it*Z9L'Dmt>cQMn洓3ڴtUmj)O3_2_L s׭7 ]o=w}L͚m_ilMj{Ne8~^rr-lcS]JJEϸ: A@i_vwڠݤߦkotizUwT9c?n͘i.lZIȳ{,\doif :}f̝o:oe۝w!4Ŵ7q'пsL-}_dݧ_#V9cJtxgnoRg%d@*̿1m椒ʦMjtG;\*# Ǔu5nF*~ _~zj^8hw=wOs&<`իyŗ':]y9E}-'u 5kTg),ɶ^8Wr]zեvnm oͶG`co=Lzؼ9g2>KϬ9Fw)~Un@}ƯUݸ?7i# aeA*톪8#_-TO>k>+/HB{d,.4\ڛdһw~;u*RfMs''yvuZ6T<!]*DX@ p]GBщb;"A;ҍu?4x{ɧ1)r`C3?^}tMF/BMRNwi:[lnG[$UJ*kײsU-7cL7744k Qlwe숫݄{gn:Vl={٬YSӴ|iٻgɁ;sL=tMxԜs^jﺯ_oNM_:SI1/S_9ߍ %Z=VusI+ȜHt¿O԰YجMP"uSs)\,6yn[owf[0˞gʹ[7nf3RKTw`I7hW+noޏի e +rŽچ׹xq__bS5&\3޺%TonZv8!.F@ eۑXe~y"f537Aww#H6yy!hJӎ;y텑/^~`{eڵ=\ۥQBfgh{Ͱ/?ٿվbL#Z ?\銨>^]j:psG^n=įًL}ӭq~vɦݢT 1>1˯ۭlJS3 :y.^Wwb~~-'3PtT|h9͓w.4 6m#?͟8f_K.^o0m:|/mm{t룏? vկoka!/hڟwig}Xjw.<_H;]"#4"O0]l 0SZFW~_r PnGHb:6yQ 2\TO JY6l;#_29{^)v}rvE+ol\3W}_v ]|A;@cdn޺u}+wG3O?ɦb~#3W~~[j+4O;nswV,d{ 0cm zO55.\}y:K2ANA kԩ2^U ;bs37PrrAW wYNT>p ĺyN跡Ton輳cyjVȢvBnd?>v=\l.]-^bnM<~©BvL6cpm*ɶڵ=ToKu6j~OlU[]ʯ٠.nSH\ۢ9X]r&Q+v \)z :Gĝh.W^lkVK)+o7ƾWByv}U4Wk)ԞiY?j=cZ`9h.Bpuwziy{}mhl&uV|gͷTeŦ,}>t|{)^k:ˆh5V`; E.=֌X72GE)Rs=ty^N\yLMEnݦi^4=Ho/NP+lNV@({we$]q~{QG5=+FlU"/.O>Üw%E2~G~7燚;Q|gOnxᇟ ßuAT}l'L4_}x9kRh=PkkO˂Z/Y履y֎2eOve|<کvѥW kǛaڋ2]6ov7<.[ f"}0#s)e=F"Ա \b;(*?[Yy|c)c9t?vC_͟xQ6*xgvRBSÁ;un)9g :lbGt8 <ݏ=wMX#\IkUޒ?NM~Pѿ ||0C~[7Yw']~j8vֻz>|Ljs Gݴ~} /oE35?w?ɝ>:E!+6=ZQT@_NAG  R[( sF+jG|͵;:D=UcJ,ZqBwic}M{N;)D@ c2FM`mMp;#n~ym huyoͧQ[^1((hZK.2-mI]|TP޾#z}s'O? {ufmj\0QϾK:v }c>Ϛo<7O7f^ۥS*^ئ1-vp }01;LSϦ >^t6̐aW}Ҩ h_޴ְ='L;(KzO:wn{6bS!;N#u=QF*;QҦDMGYe/R.s޹zW>b{Y۶' }nߋTܕThEZ@L$^M}>h޷.ȿhS1æ3ձv([1nwsn3GQޝw=f}ώ W76[ǥhۤߦ'(pL=TPmm{OOtp 0g`Xw :hopЮݨdu.PjK};jI~ ,З 6/MH [oam9{Q-Mq:ſQtqO)+˨=A;KGz5zTKuRQlN-?A;sN6<ێ|g_{~Uޱڻx? ~crjTUﶾl`uBE~}gC]lJb#T`ot4􅠭U&ͧ|-m:\# 35jN|MAOkSg͇z1T~UۿUK~3ױƻGt/z {>vA;1A"E>Fv](/m@P#b$I~*Nxԫ?N-pE  pRًJe?sjĔFܩyn;b;׮vycXMEco=v*ͩԋ*wGq}&7Ƽ5p'ܿ}AJI[ (]`'6'aw= {T@N#Td}QP;}6euAȗ^y-4>nEhҸ5.ǟ揼ۋ(z/-x#3nKחϙmN3i{2txeU݇ ~ʽy7(?8Tb(&xo5tSlʑFrhM_TJǔ>}RAE]J*ej{1WIGj)VvT6.ZR=\poWAWG|~O=zO:o.=z=Rn]7Q- Ω-5>-0]7.kY]%rdx7y0U$_FyytGGbX \HqvP_n[VGjTwֵP*كXT:ڐtP.^$،OxG#2ԾՈ1oﶮ6+Sz^Vq{6wY@޶y~&}u}ݺ߱GZ(mWvѵQSI~c6ٻ hu^ ʄyzB#b]W0ˏ R.v+Kdg/a{tM]-_>ruOm2=WϨ:*eCws>ժW -LI+0̰nEPܩݳ7T{mvj۹rQ~f;S\?{JTɬN*:8Lx4:E i}9Uxdk\d]5][p|6K5՞VZުO? =as︎{˃McIen^ۉsIc~Smc;:bSHoS<2g_m8{k(S>Oh;I?ƿ_e.:wt9\4o;}[ru-Nj߳b"..EMbRp5r{p7yjwY)կ \uIs3dU#*M=k.F֫cd }>wwS{͜htŏӅ!\-ǥ]vGެ bB۶(ejwfSe{}Zf}TC|]8Ƌ~^Vo{;WӖ h9Ж]Ɨ6Y@hAv4?ڹ9u r/>H{~ݺnT~CHmO;7 iqܩ/:wG{ڎG=t#hlΦe8hkiGi+uΫsR۠2ٚ ay4'ަڶ-\(CGt~'wfHN" @fr2Egp~d*ˤ^ZwVs:uꘃ(4ŚMiP*Jp9Iwם_}lsZൟ߻1ۥE`93ͷ1{2NG/~lCyүҟ~%V^wGCfxk tr=]ێg;yby[z۔M[S02mD.SYlx+OӦ:,ijF:RKTPRmI(2AN F?H -3Ȏ~%r|M7w܌6XnRFi)eeIsz RnåJ>U#F: ?_\go矛mGܥRk*,jц&|8v(>ÚӗARJ;Լ +-c(ERdO#Z6 FݦuԠ J8ڶ\i}Gj9]%څzp݈!'t^)uԁe^ ܉J.}eB:mePyyJ(y[xqxF綞Qv լQ#Zp?FiSw=պێ! 
@|Eŷ^_J#q{p<|X7-xqn2[?e.ԖS#,mϝWl.Jڱ6їyC9NЏD>-m/ton߂JᇟźyMC#_W݌>*ڗ4!`4Gٹ֏r[k^O?Lrz|*= eJ̝[Ʀ+lr騢z"o{|Iߎnӹv㹯7zC6pj)cJ~d*(=R^gee|Q/ݮ.K~hԱRܨWy%[~Q=ȕNPvLBUd>%Se}| ?ڴዜ-ďܖFi.N_4JKcކGg9juڦF * P*ugJ %-voS[kxY]c}xk\xNg{.\y%K/%t41 TÝbS 'Tx8Á櫌U]>e4GcG+5uZFi5וQ1bb+M'[(h+K5Jysr_ۦ1O/5Y d+( FEC\u_|5r8j+(/͚t8ے~;>$PoujsI66r77q^xl|eSk@2&Piwh.KWFV$pU~"+DyBLF~bO`wewαi9v7iR0JbQ(Ń]9U xJOtdJsNt7,N1_Z4ryJS)FfT-s}k ]uA8BLFҦQsG@bwe/=5ラ?wOS;9Cgj|(Ѕhw4|*\#X2db4L86pJYbSȎ(lܰ Y=ƋOy|M={ٹo=J:Ni\n/Kitٴ;4ΕGNkpߦz=2 7Up %z!:= _p 9|?|>^?V[^7 93*:6+F|ܚSGN?ɜpQQO~hSɽ>_=,sA4nT/>LeNM=nS{zuj$migK^9cK: h⿟q[ѥM_vӟ2 tߛvYq"A;`vjk˴j(mʯ`KQ3f,v3^'Rs ǹ6tڞx Jډ}ш/:?,r԰#?jL>&/Oph fhtG{sSڴ RO*M1Gi6ol`RJRtn}Eꛎ'׫lo;?IfMǑw^pF.AXҨC1/9unN>f6R)su#~C!z.Z۝a:+?::ejjFS-bWF)2}p~sa*UoEov6JM$cv8Iz}q복f;A+`uPIn߮:~CؔۄWz?q*)en<'5gk`|C=7vWM-MN s<394:/t/oybc{~:}o/[Q!\Sm;u*g/ /sufVdJJ2b@Ȝ@ ܉X;e'"vrsmn# c=O}y)2Ld(Di .݅Ȁg̏.Eۆ6KIMk_Էs) iO>`IytN#MRw+Ez? RFoެkWUڡHz>_ITG?hLzho5{1PAl(KlJZD@IDAT3MnQA{MɖmtpҦbЪ<6p@iiKtnZw|ӏ4{N(u}oHxoiob6S\ֶY4o1{1ҿGq#J~dmm*h#;X0Mw߸WڵvU:ɡOûmĿ;w1o>V:ўKWm&6t{\* >6B;  P;,cM{q׾n}Ix;lfn]ļ}a+s;RLmosd'L9 Sxw=^mN?ŭ2L{Qǧ|MUX80gw'[8N\&c\h'GS ڟcG[/}/m_EJ ϣPv[ڋ~u5O3n]#)Ǚ03AX}n-6OgZ2ܘt/HȠ$_nuw 6pS\<_@_^n񽖫ڋ=~}_TV}jE!+cL$U68[>(E9Ӧg'{w[='~n(~Ӆ_J/6ٺ/N4JLvZw>(~q6ķ J%ﺹ@tl>ߵ N:1]UFm.Z+JS^vmQp V*.]_<ȢWt͞Sj˂ u5cRߓQWhXiAd܋hf6wۿqi\T|2?oĿy~sߋXF8%Mev'VMr|PE-5} .@.a"ANdyKx:Y};o~z]Ӛ:'"?u=qR=mأ 6u}k|bmGB(xy͵E^zUsǚC<(hybS~6>d)vn;Lu?f Z6u}=e nk$?Ŕ<>%;ϟںsbgʋ#_6?]iLS2VyX-ymRyQ@oy(O>(/GrkxdK<sbH}#m/_z ]/6M6;S" @*u0~j4RRzv_EnKcԎӼ)/1޸$#-tɶW{#5m={|Ԝzq6u6F)3zoLCʥ^dNiRMkv|geS153͉itL6݌0֡#Tn|ӵmn۔ߧkomގӅO>`t֗N:u'5jr4>s3{KIFnN/ڵZI}?R9H;j'otն1/3h 9PܚqGs|J<ئ /$_$Tv+.I'\ WsGF}Ѧѡj舑&9h?Qn8;7,wGI%*ivs{zJYN6杠mP&ZzvtW>s1Up͗CloH~g:'Q>ec_͵iJ%3y_U@'T&;͚nbuhےFŽ6B[9sk=]=c9Tk;S7ƾtbpse?<)jS@[ԥQRiU7ȀAأwϧ;--CN@(3s#8 _g~w.?﷟;ojFVo[}txi"/SO{vpxI؀^¯A:ӈB_k/q+ȅd[\6S5&_fm .PiG1E6XlNǢc.FE߂R*6tU E9vhp5W]bntaVgGC/t`\D|Ů ]pvf^d-P$YTwVjXS:̝\_(PjtV[~jDHG\ۈUTmguvޡ}x]gs±GDo~Cذ҇a-BS`+\fs} ~#-tv(%V5}n1ꣶ'.uW0cOju~YyTʬY;T FuKsZz"qz/.;*`NuM,q6XJIu@@ u*6gxewpV{ԑh8?OKsĝ.5Lin;םok4z"DkGy̲,Yb/`u#GD[sԐؤq"'Qf=;ѬiӨxCs'h^z6ݢ$4ںzn.iZ'yO,\hl?")MhɲBA: iyhCbVUolʲٚs~gNsS;0o|^n6inuњ&._¬3 L`U^iG״9L{8VlxN7_OwySQʺ.Izy0HK+hmG31Imr䈅Xi;ύtS;9ac.g|4מ'i~b}t19xnZ׌Fsoj)^0~5(=R{0B׾U7]{7{n }lpfOsMotm=gKM]iO#Tn$qcAJ'%K|HSh$r޶r*;_|يR?1ֿ@5 1]z( Owi|"w~Ӛnܻo"@m067/Gaf5|2k̊ˊJTPlnַ^w͍XRѼ_̫oAwmS$h\Z`I `L6ɾ?7%e"}|,QQ4WUY"~ݚec @v(^SNpi-KZZ??ϦZjG}i~:LγGi{Hlm7"cڛx S[{YO:˶>>i?m֟Φ8nf,s}ݏ~q|~E7pEq Y]}ԉ j @QV b`~䷖5Uw*@WC}Zz%f_'\i JpK;.\u>xhN.,Hlm7"cڛtsV,j([@)O:(wܦ'O/Zǎ@eXӥ[e[i֬*6pX!p8q@iWCq\4ڍd&Y9C(P7pe$q U]e}9n*gk0M jjjjzij6f4+Gv?JYZUSj @&mh2S v#٣IV@ɶߴÅy @e pWqJ AJFsTګ &rdXv#Hw  @nM@@@@@@_]G@@@@@@o2      =      @]%x9D@@@@@@ p5D@@@@@*!"     d!      @% pW d@@@@@@ e{D @@@@@@*J&s      /@.#j      P U7CD@@@@@~j_Ej PM466@@@@@`]p      @:%d[       #7c @@@@@@.@.l@@@@@@%n      ]]I        K܌5@@@@@@HA@@@@@@ pk       vwi'e      $.@.q3@@@@@@@ N@@@@@@H\]f      @ܥ "      X@@@@@@ K;)D@@@@@@ qw      i pvR6      @7c @@@@@@.@.l@@@@@@%n      ]]I        K܌5@@@@@@HA@@@@@@ pk       vwi'e      $.@.q3@@@@@@@ N@@@@@@H\]f      @ܥ "      X@@@@@@ TKx+W4yyy&Uի]k?*UUjڿjժիQäj      P VGaKs˖/7++\uS)DڵjZk:O=       Tݚ5k̒%K̒K  5ks Uq#\24/x6PyvԈ\exu[ԭ[V֒,\,Y¬3HƊ2Ǎ     @Иjv@V5L:̆gj֨Paw -Z,(em;" )WH:^I~[+m oͧf ae) ͞_$!+*@@@@@Ȅ@q5^iT… e7B#4]u%}ӼyTթc6pCx`1{n0n 뮋Vd=P96@@@@@HL@)k3**k !fꭟFKyr( `4N*5\Y毶!ӏS~z~^f|s-0;W *9#Efv     14jk׬5xl\4jP/Z\؀FJ9VvoxklJi;NSeM=(E#ϚiVjvnr     T pU1 _3gsGYyWwyvyc v7.^ZJٰASM#YnTڭ\UG@@@@@$PZ xgCNfeŜw&pȎ^;7W5MƬ^[.>CU4yY2 64QfD+=&Q{I@@@@@ 3gKYf|Ӎ˜Z ̳.ZF՞ɳ9HUsٹY^? 
Wу V4NA;i@@@@@@ (ơX G|]V4_ڜ9seL5;]K̊=1odV6w5r`,pRWCS*еz      M@q*v;<S Qzs9S;Zfg,Y|[vu ::Fky)KpUprTK=@@@@@@ su(c {=eeVXajj~9۬h3:MXw+rz5 DS@@@@@@r+Xb*>Ru; K5{5VV5'2*Kt[Ǥc1Xu:l/Vίb*^U     ,.@J^ԖȺ#V5ˬ^7c1w:l/>XnI@@@@@@|@ʲFY7o^0ݚoA;+xcs KNNN@@@@@^ʾ&dMnfEZj_L w@1e l. g<(6~ )4A cÂ]# **{ޱ+*RCH3lMi7s]wwvvv濛ew9gHHHHHHHHHj@DwaqKpmd W{ JCv0 0 4!m(D.|mOTfe& @%]FFKR\l}]| ):0 @ 5ʦ͛Mx|2:* jR/MC_JMB{2$?-ʰiѢEpY5            Рr4)^-щRZ\*A+)b6CdP.:>F$&x.*:Zs $K0j߮]CԞ$           G &!O>VU n}F/,z˅k2ܢ]bB oe]tِNvbnj9_Z?ͪUE{޽zJyNNwYrl޼Yz!mWv^vI#$Kejp϶® \ZVަu+}Iԑ^um[-|9͡/>%'@U#?+?/O=g9-Z$䫮C98n@}3ȰNBǞ|Έvh ϨjmiLY TC  7"y_ρj0q7 4k3Hϧ\[A6gdJWݶt֭Ft@|ng`&I?d^s7^v9y'*d&:vmB<w؏| T`!2/z#mӭWLkMCFg #4s4i`9s!0,-E浃݌;v>G~)˖ =^pk%,3M\u앭ʴ~`އJO[*NvgmN}riC>qeeŶC.#EDIR9~mڴz\ԛОa3ýJMTŇaJ PqzRAMŻ`M^E )ڔSV@Uw>uq׶D%KW.-.YA Հ&Yb;:*f(˺-Ά:~3oF}a;$3nɃGF>¤aϛo#M/'!tN5~x!8eC׭kgyM\ΫːɹBB<$Bjj 4iXSkt{G۾__N6l$"p<\r3}B'{ޑj*r}7i™r$g6$ࡢ b4,/.~+޲U.lĮ`j(*1Vwv* Wo 7pWbR$a6LLK $RwAzvhbۧva[AU$L]sg!Vb.ŋ=2{EK>aؿ?t2YpYֈZ}a `?4ı#֩4tk,Yg_O +2=]>oޯҥY~N{/^"tY RC0>} \Rywo?KM@w}ܧ/<{G1ZZy,/frܱc| v׬6KV-U,tLsk֮7_tiZ:`ll`=? !h˼QE10ra܆pv- !^V333KVZ7=vfq 0TJ5 8ǭkI㹧j۷T[LKxim(&2~)̇gq e;#ߋAv0(,*4LVE+Vd;i/{?}kV+I^mCP {2[${?ʯ* Dum t<k׭WBܩkưt}{~m۶ ?oпg۵kR}ùBhJi$@$@$@$@$@"0)ѷ"WkGh?]N9ir0g؎ԤAH2| Omz~h~_Kvu> a1Xt[4t!!D&'жnR3g#AO_5<`aVϖu[x.;kj({`Aឬɽy7h?#4kYO;[]BC6l&DBAan< @ wa p/H8i~77bWVL8GҪsa_B}Y>;kQ!v :js}e?nj\?Z9\=:bmr|2]mnʭe"UZ>WV(k_xsoeu/@J+ڿ+rۜcW05K}5$﷝hgH9%B=mvSlWƟs=%+9|ɏ?um{GʠTڇzfU!9Ncx؉_euߣ:f%v9vQb|>y}΃rkpQ1v˷ s3_Q_,G:LN>aL)sw|oe!Ϙ us[w>/S?J>>N?~#o}^mȠri':M6{w<}3/h2i:U` !:/hw6:y qo9Cew`۸Wx}/}AߓƎ D_G1ݷQ[n*0HHHHH \I`XopG;w; |qT8zݏod|C0w=SnTf6<ǻ Qxן;eu,=3̮9NÁ4 `QW>ko![cpl?:(ZCY${Ifė^}Ӭ?JF@BXCnŸ s]ЎW^G~/!aP@TtQ{s|wfI soЏ}=mK܃ق{>;i.9U-Q7'S 1F Pob`аh,J|[3sڕH U,*+) j 'Y=!V]q5wy9&Ztd筸X Vձ_ȧ}슫O> ;nq̝~> Vgӯ\C^tw_혣RxS,wr3]۶SzkBܾy1+yyǼ e^FF@drz>kZ"q>TF }Wvq;PMS#VgˆK&\cu6ͷYF]38B1 2=q]rɞ^zS.V)ϐ<Ë8leL{v1x Ԫ D&{/p7N!kz5 aEr EKh'PZmq@+ݻ{DuyN06^r Vsn^Y= "#]x9/|H[M?LޔM$@$@$@$@$K`ֲjIe}C{߂em̑:Ms/]t2Isn76h.[n~tȁnvDGfp6 FSGguDwxӴ>/-=~Ǚuvs/K\e`뜃+Y:_kHS+xb/V~JV{PڗݓUd.xA(Cq @6d&9#7az Po]IpgzÚoeSaY gpvUigzkk:QZFIawvl/{^a-[Ȅ.!7j=f}l[o ϟ/uEea-n9g~o8!.o36x{>U:^=?/.4r&~|}Nw>uGl}0AGV[^~,p7@zĽue #Ph7tb_%fAbB a`5rdD/G|amPPPߙ /RC#P7/뾇}=66O=Ͽa|C/VèQ{ 7 5ielӭkEթLrw:o_D"?W ~w\ )v'j_`"@XQ^w<͎?V3|Ch料{iRρmyCۙ~0s4jg{\(KLryg~X>=#!={h']? r'?|Ϣ }:ᚽ=n95kBX˺2sB8;Xn#"P>`̥GC6:7]m=vmԥʸKd̘T׎\ Y9g/tCmSe&m)?uQO`<qr֬'X1bWMjV}qu}[́p"xV ndEluRC~ \YT .ptk{khYkPQs4:"ʛ>h@emA9x" a40O f HN5O>Zr~r!o򱧞ɗ_蜢6?0JS~U     %PT\b8ƄYp~7QRp,~nrȠv) wu'T"K&q V t5DmC֥sʤ tJ vO߀8C=hWx"pvjD᱉32M?@nvEe\)1N=Xzcw՗ 4uvރ pl,0>4W9RNBIܓHԙU{2$W2l.:2û'TƗ]4l9مMY @U Fل<]&#;r ˟%8‰g; Wтk7l^V b DhbGw mto *{OS+Xe|.랜d71G#l9;W"E.=XGVZat329{Գ3Cz>}Cs /nL ~+xq!$`5LĔ`=g0aAziDXk}mW}quQ %^0%Wlzf:j<3k''(;R[m#+eNxVYo^=<7JmڜXlaJ; 4wSsƝ):tq{ 8Ӹ?@T';>2Ć}K^F-=5[NX!Y!B5YU[(3|xڰ͜ 䁋 )`J½w$E: š^^uo">QG7zbqϫN61>lݱ哯w'~k!tH$@$@$@$@@˳ :w*]#x/n^ m2)~c; !؈5!5{p-]^+]}=iμ_ͮ5*Nm޽"~{Knz۟l''U Nv j>G=+kWZMq׽{| :jStw[m[Cot]ƪ.FZz}^={$/Zl#L0'q'N\G@֯w+9P]yA;tRU9SO>шif33>gŗ#{<̓b]T~y|T_z0E{: uO>gߊ;'jz?gfnqJ[B: yYZZWOZ1ՋuQ&W2^=>XkA EE yߣp$ &(Yjy`cKwUֽ[Z}uӝSAtXoeVÜz =}^u/n v"d)Ba.S y~sxN?\m"#~ھvfn9EX8me,w=zt!>[zts}nryZ"*7U6M=e\!ե7瀻M8w Q.HHHH?c}#=:2ݞHeu [SwEEG'B5,w9\TXoZvm+؜D]qkǷ0Y= > 臰}7k|WSXv}-mbi7.ZEb|bPh `YwJwsW2/x'^2ZkGfyZkVWXU##LO'D\w|N Asw1c+/+lu*;0P>9&&k zaSv$yqh+!U.s]!xx6CV@C0e7 , ufHm8vMa'za`{o]eéi "`w1GVy:J4&atjߒ׳a`yJ*#U+ouVP.%CWLjkt-ܶAZg:vSO>);w/sˬY_;/ny2;}g;Cwog۽y/T&@%>:~i^HF:{˖ix}251!wm2lv`@]tߖÜ3~y)n}r_V!8o_mȦnFHx7@!Æ9z$G߇TڬrmLxֆﺳ]\&&&8+^`$̫הo(k_)G|xYs= [݇z< Hp֩_K~H葵ʤTY׭k*Qj/3M$Q $ 46 q $Uk4j;,s+7{ gk*n]*[/E'|gMuwTi{l.!m0{ЪHj%Aɺ6էp玸>6c҄q! 
RM*0*X9_Wt/\4l!c{2:CX&K^#M֭Kc&?-Yjmۧ칌sv3Ͽ9sky\\8ɓ%w>Ye!85]q{{U 3Xs g}#/Lng7劀տK*Xg%{>|ݏaCـe d5vog{dĮÜ~'W=hZ.ާ83'C;cU΍ȃlkgz Po.`&\f`[뭴,>#ީ]vZKxLڮzUyz;-;;[zlsQ::G=|YrGظoEZv[Nrwɤ /6SNg\Wk';df>}-[}F[Ϟ0QH ۑp8lmnʗ\|{;rWbcew-Oч]^B0Lo\0"#Skl#FAke/Nx)kn\9F{%_՝/u+X,gdfV*f݇;i3_ e+3u86+P]_g Tʊ?̞PA8 ;*<*F gG|x:FZ>pK8z깙k=x\x5weh{>'4u~`Te{'x) K zWup>Y8s <&o}sF]}jWUiEhq @S ]ll QajbGqS<[/xtu <抋o5:LfsaaE=/wс= ?Me(`<|~ٗ_;F'E\S3`.TsĠi_t(_d4Lth;p>sg.qUR +"ɺ6fͫb&nվ/UU4ٰXr i\6O*Ɵ-**XMr̲6:a3{&~u3v]ԣ@v-};/3Y%zkݪv)z>P`6zcUd$87xЎv:0t1"s|X5Oec:r~Y#*:}rء;o9P6lc+?QCxϋm#2<[ LSf2h*dC9ƬuN8(RyF ~S~;#u^2l}y'5So6^Į&Ι:?, .vyskc n.HdaC)𓿫2*v6T\sp ч;S=[vyAx 1j@Gw kh>FfΝΝ:͛eL\CAZ*`mxB}~ʹf"CxڭX*`qh ChKLef:!wuk8o;oxKbdbΰ?,ԏwv>YjmzezC=ѿw^^{hmzh7su{4%LЏz{MTe<ދ G1_2 ec sD=BkzU_BSx5)M6AL$@$@$@$@$ w-ߐ[#P>d׷އc0W60w') w?.rP6͌o}U;l$rN;Flw({(>6|[CC_V߽o?n.<a^N;G}6b>u{0܋aVDsik(Wx}-H:vrBMqlC7Jlm~G%%%ŝa"#}s̢ŋ?;w9=wyP.J"h7cK.+ʼK [DoW_y1nc /8<ÍNGM'A7::6^d1l[ ;8a}n9NqBkaDFpIQ/ xwg2m.pLQ!.1GjFkqa?wx 'THBNb, ;1SϛdnVx𢌗V$%D+w ?.l:Y= qpm_҉ _\ jՇ~˯X!pI bpO"')hzyyM8봠 ha&sC t"43O9Mm>zl4GM`W-"f    hBS[.+;GZq5jsղv٢ڵm#ޒ-Y}@ٹfno\ c:L dC42L+b0te}GS܍0 mô-ufF|~!|"ꍍ2TU0s&bq>&Ǽ5!!f|ɋ0]kuo߮DUx۴yq/Y90& ه:vx4pHC6:a-Y;b) .\h UL.m=v)ם@ٙY[l[RϠ9ZȁN`wףcB/Njbw~7pW uTV!kـ{1/?_;e"gvmL|P 3†Bx%*aE1w kw<լ?tdj[p1(>}`0Q۶mm ̒ / {_uoJρ$@$@$@$@$~ʵfcqf{} dn1*欛-qn/Ӿ.r2D; wF.-]i*0_HD(hB?pޘ% iwpvβ>c;W'Xkۦ Mp n8XzTWdjgRP?g wWzWV:VW7=g\p7vz0+{&A -?͞'3v5U.7zwZ\pUYM;U[W%~XMٔյIHHHH7}FDx|J*|XC a9^ޫoE˹w栻uο5 81D¹h$&P]rTw=^ 0Ydݎx`λZ=YxJ9l}4& @s%69D0{꜂F1]0uy.w&t3RP/+%     H 0Bf^Ity\sEBYA1Bdv0@ n4U 鷙9f﵍ip`clURzk0(j:+.hhG6nY]0I5HH=>.W[&x\aU>dy豧8?a`9M$@$@$@$@$@$PC۴ԹƋ%[Uk6HyWC<ܗ< ixp5wdƝunsc{Zh7¥zpf@(7 v izM;O#+=d 9Se=FV: »Ykhv\ @CHKMN?Il߯!N_+iȎr-Sɛz<̏y hPLd۴M.J]`E#         H$ᥭ9!suk**.1SEbY!0 ̄8INL`HAϳ4R &%&&JJJ Yeյzd=JUH+-,d W.ڣG(˖?׉?4XB\si)paV4         tIe"~$@N;piղ24m+)Q*hH xg<ۂ } >bR^PShcKi~Ejv-%oBxml# @'Р]ƶnJ6mެy4R8 w=g</X/8=t/KCCxL EH HHHHHHHHHHHH> '^;-գ sU*) - eժJ 4 .s6m AVZIYHHHHHHHHHHHH .>>^ڶmkCb(ڌ#D&X HHHHHHHHHHHHRS%=-M$'7Ywf F$@$@$@$@$@$@$@$@$@$@$@$мFRshL+EGGKl%Ok,`@#           h~"΢o߾$`#[6i;vh#ڊ64            I ;Y;t6hHkjf=Fmnlj׍!           "q0:vt<ڜw&i4            K b"+a7db紣hd.+B$@$@$@$@$@$@$@$@$@$@$@5"ZϴB2=-MdRRRR`DЌ(a/Л8 6/-7m@[&1-.IHHHHHHHHHHHym oӦˆ dsF$'%_ilwM")-- )QzLl$dIKmZJ"'h,a?X۶m%-5ùHHHHHHHHHHHHhĄٰqZbRZ«A7\(^w7UĤ'ZDuQeVEx!i$@$@$@$@$@$@$@$@$@$@$@$@$&h;TWN$CrsJjJjY B,-ᢢU FSDHńnJZliKHHHHHHHHHHHH|4*X-Ll')tL[^PKZzscbA MƔiEh$@$@$@$@$@$@$@$@$@$@$@$@$@a۵3!43.'7y,^;] 8C+P]IIYɒ.I#            4Z6 ~[n-[dn{III&&ĵlYxᤩ:K-Z)7m#    @IDAT        z';K BY;ixeH~~<'/VŋuP΍/aCdL+VO" Hhϋ%C鄧:C+..\^wO:t(a71o[n#Fj0e縗^}w+G޻ܴ}>29MKeB9grQtȩr ld{kn?g;ؕw*ij92hߎ`r_UsKf~Ingx^e~          st3sMmE>X@>,V ]rP{+4=otcb^:{Du|{p&۴sw ,%0ޥ0HHHHHHHHHHj@ǎSB:kN!!2LWSh+z=oLЗ𸃵M5<Qn>^.?zo3'p^̼bj'XȴW`W9{&Y 4$ w InŒWe$ec󥸨)))Ibb#*t%''Wjrz^Z Sǎҭk-bNJ233e%>>^z%))-\{[-Oo`J+͊vMN$ΪR^vK8=$G$@$@$@$@$@$@$@$@$@$@F]"{|$\~S=$#缻+Ͽ4'\8R{9 &v}7^嬯去 3\ 9c|3_zE{Yb;Yu*]scOzu\'.5~uü5*BD~7_w2&Ŭ: Pֆ=vVDOY'|bf+KO޽tH~xo*2Mا\9-37*Pg^           ^cI JnnY8@C>P>p_ //yISNC˸Oq݄I93_/G9.!rsLڅ* kg>wˀtmo3n6_Ϊ2ܦS\a s̮.:M{?(^/ qQ9uTn}S{{AHOk#WV;s_/_.1A30?          !@.b.+6ɠGTycS洫2cΩS4d? 
ʞ~m^{)ӜCgtㅇ_Kwvw<]})#-_\zav[W%01P/\۰r۹GO1?gl^|Iry۝:mw!2>BLmеUpvuVI$@$@$@$@$@$@$@$@$@$@(*Cy 4%%sScx>c𣏗~l0龍gYZu0Zi9xЎN imvUN9lyw$k'+-Pΐ7ozM|nAAe^%o?X=خQam23gx}4          4+DErEWyv}rΑO<ބ) 8jr1o֭w|zζ{|~O9&^y Y_wUSLp;CC^^Z&!dJbV%Dۥ>$ {R+?z>䉍ێhk׼)f!DD @{bH X)r =|g;{б%QQ6n̰Y-pm^S&P!]3OȽ<$/ ^~anm{o*n='^7/uR.Xhr@6uۦ?{fEͅHHY* ޻{{"+X6@ Jelܙw<$̛LIC"5iյ]8?g oa^9+v\Q^ڮ}ׄ @.2N`ԗ<` ̙?ĿW+mH6Gp[tǯgŗ ĒZƮ?.V~9a%C5ڎ{ۙdxN$@$@$@$@$@$@$@$@$@$maz+7+ӔrLd٭yXR3Tɜ9To/Ť)ŧPQ: *ŧTqJ-R0x,V&`"u~\.^܇j9?ȈgsKŝw?-7f7% D *%'!pIѧ ܹsUf-۪e+-X$+_N,QJoE-[OW~'.\Wݐ-_Vc3f2e4bE#*?wӏ\ץ}TuRܭz&y[OSg/r+m; vf-i#˘޿'^=E+G{vkxZ97 $7*;x#0pp6x>zÇkQi޻jq5W>pȑb[oKEf=bab7IcEJӧψbUnoA]Byg>q2a ׼=gYy(8sI)pX\odAz8%*lͻ:co~Eu+nA%Cam8          %)Tw'd#nUd-{y6+w(F`14=c{Sמbk;kX+jkl)n)X3O[AYغ կ{?ѤE[+n(O(e2M)@={q[{58]q oZqo;7Jt'         &`ZѣDATjѿOOthF-eYl*QsgrIM[o<"?`U7o .]«|V/w 鴇m\kQT.|w0~<wWe:Țe`nd{%1gs`vgcT\ *̋  D-"Q$}sXiI6A6b8K}zvZ{aL-(.]J ^E`ۊ׭7I;c\\AyFjMG1;{~_+j[ P4`鞱Og $0.pxW74q5sp'!          'pRcJ%{U#.>B2{ȑ]Ȟ=b߻,Y24*XHHHHHHHHHHH PѢH07 4G$u^+`;G!          -s|Z            &TESn0-$@$@$@$@$@$@$@$@$@$@$@$@1Kz>8 @4.ri!           YTlIHHHHHHHHHHHwєL @.fN$@$@$@$@$@$@$@$@$@$@$@$M`ZHHHHHHHHHHHHbw1|p            h"@]4B$@$@$@$@$@$@$@$@$@$@$@$٬烓 D*)7            %@]f=HHHHHHHHHHHH PqM ,*b6$@$@$@$@$@$@$@$@$@$@$@$@Dh HHHHHHHHHHHH f PqY'           &TESn0-$@$@$@$@$@$@$@$@$@$@$@$@1Kz>8 @4.ri!           YTlIHHHHHHHHHHHwєL @.fN$@$@$@$@$@$@$@$@$@$@$@$M`ZHHHHHHHHHHHHbw1|p            h"@]4B$@$@$@$@$@$@$@$@$@$@$@$٬烓 D*)7            %@]f=HHHHHHHHHHHH PqM ,*b6$@$@$@$@$@$@$@$@$@$@$@$@Dh HHHHHHHHHHHH f PqY'           &TESn0-$@$@$@$@$@$@$@$@$@$@$@$@1Kz>8 @4.ri!           YTlIHHHHHHHHHHHwєL @.fN$@$@$@$@$@$@$@$@$@$@$@$M`ZHHHHHHHHHHHHbw1|p            h"@]4B$@$@$@$@$@$@$@$@$@$@$@$٬烓 D*)7            %@]f=HHHHHHHHHHHH PqM ,*b6$@$@$@$@$@$@$@$@$@$@$@$@Dh HHHHHHHHHHHH f PqY'           &TESn0-$@$@$@$@$@$@$@$@$@$@$@$@1Kz>x$u8}L$b&G8VFG$`'޵M$@A8z <ܹҥ.\ -5`;^.SZ˕ӳ-\'uOi0 bp'+$@$@$Pq򋩕Ξ=91{|Ql ǺJ=",+6nڜ$ysE(qGJO UXoJEjr& PޝH #7&_%|M̙@|Cr` ퟊+W.Vq $w0TvQj^feAbOоڈ;v&ɛ/6*:mnnjb*\ T^SWמ}\8]:b([([RȓnBSz-?[t[<\HIqǽEA%wϞDLŤ)ӓ垩_gq,[R|4+_ϜZ3z;r<R>amFA$@$@ .0H*UPeıjUSO?SC!,^ բBG߉u2pr课jQdqQx1  n/+">dk=ٮ?V?J;v_vQ_j]3թ%}9*y@ Wqm>N1`0+LqonjsuBs#F5cȡM3"]9wޅu$ 쫍8qUϐ> S0x;ԛ)I._dɿRRc2UQ>.ZFѬIÀk[Mc Яd{'mֵ3gډ4iRΠaϜ mp _Os) ގDFrh)06#Z   HH_s8R p-i"i pJΓ9oP*4Ɖkճ4pb7ri6ĉfQn*}p(W0*ҌJ՞*?)z Mqꃘn#GDMvk5?kn*7nᨴC&ji >ڶ5 ~/(*Wy~)PD!tʀ¤VO(6;~b@[n~H i{O&%sLdbVŊ>h%.Փg e+v{Ve/+ .¤--+KJ;<@;)\1\O!@mFjE> Ow3d IH׋=:q\* ٭]w6vm[r)UiD%`6 Xq`~؁aVy]Nlg8p w+֮۠ܿ;QaSl|q tA׮CkV8 #nطo;~ؼ#U|Nlz-kSvRncI˸Rxe7oNlȑ='P 5rk޴x|9qk[7}-tC|]+׍@N,א1nsQK^{5Ⓩ6_~%JH+L2~H"H{[%ZTsfNH oіvmc뫯`fLjK/*6]->,W_ eV8a;<W Uj+3rmFjE> O2$$E@{ mڼRܵhXM6QSvUWbEoVx&Dy_bJ]>@GpwF :]:Y ی)' D7*;d }v+g},횂KrߝgϩYdvED|y], ]{>d7Jt}MZKPI,XR(Nj3d3cK^ҥrz{̙y`9rEWW,Yأ~>?gEBvtu ِ̙I`=2d JaeN *VfyC6kXRv:uqnG<w}[J%(!>*Iuu s׿ӧO'2ft(C9رUyb]3#x͖5ۗ[>J#|\Y&PNniO,!]K.WaqD=6aƜiux32uZ{8x59S|=׹sſ(xb7e^,s08GQޱ? މysl/%GJ=7YCP,Q+9tj3 Zm^+3fpm|KB{V̙C }@S>x6̺(}Or۾}Ly\{[~ϟ?/.^׫?=_8$W~D[mnW2gdqwb`[ AթSU9+ Nu17~/.wY/ZīC# i>3ct_FX!6x_ճ?w7՞vHӍ uTarl'r ']6|se-_CQn.;|O/U@~_2mFp}6 k]E[~?OoʟWd͚յf 2Md~K!  
[binary PNG image data omitted:
 sqlfluff-3.4.2/docs/source/guides/contributing/github_fork_status.png
 sqlfluff-3.4.2/docs/source/guides/contributing/github_reactions.png]
sqlfluff-3.4.2/docs/source/guides/contributing/plugins.rst
.. _developingpluginsref:

Developing Plugins
==================

*SQLFluff* is extensible through "plugins". We use the `pluggy library`_
to make linting Rules pluggable, which enables users to implement rules that
are just too "organization specific" to be shared, or too platform specific
to be included in the core library.

.. note::

    We recommend that the module in a plugin which defines all of the hook
    implementations (anything using the ``@hookimpl`` decorator) should be
    able to fully import before any rule implementations are imported. More
    specifically, SQLFluff must be able to both *import* **and** *run* any
    implementations of ``get_configs_info()`` before any plugin rules (i.e.
    any derivatives of :py:class:`~sqlfluff.core.rules.base.BaseRule`) are
    *imported*. Because of this, we recommend that rules are defined in a
    separate module to the root of the plugin and then only imported within
    the ``get_rules()`` method.

    Importing in the main body of the module was previously our
    recommendation, and so may still be the case for older versions of some
    plugins. If one of your plugins does use imports in this way, a warning
    will be presented, recommending that you update your plugin.

    .. code-block:: python
        :emphasize-lines: 7,8

        # The root module will need to import `hookimpl`, but
        # should not yet import the rule definitions for the plugin.
        from sqlfluff.core.plugin import hookimpl

        @hookimpl
        def get_rules():
            # Rules should be imported within the `get_rules` method instead
            from my_plugin.rules import MyRule

            return [MyRule]

.. _`pluggy library`: https://pluggy.readthedocs.io/en/latest/
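As a concrete illustration of that ordering constraint, here is a sketch of a
``get_configs_info()`` implementation which is safe to keep in the plugin's
root module. The ``forbidden_columns`` setting is purely illustrative (it
mirrors the example plugin in `sqlfluff/plugins/sqlfluff-plugin-example`_);
declare whatever settings your own rules actually read.

.. code-block:: python

    from sqlfluff.core.plugin import hookimpl


    @hookimpl
    def get_configs_info() -> dict:
        """Describe the configuration options this plugin's rules accept.

        This hook must be importable (and runnable) *before* any rule
        classes are imported, which is why it should live in the root
        module alongside ``get_rules()``.
        """
        return {
            "forbidden_columns": {
                "definition": "A list of column names to forbid.",
            },
        }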
Creating a plugin
-----------------

We have an example plugin in `sqlfluff/plugins/sqlfluff-plugin-example`_ which
you can use as a template for rules, or the
`sqlfluff/plugins/sqlfluff-templater-dbt`_ which you can use as a template for
templater plugins.

A few things to note about plugins:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Currently, only Rules and Templaters can be added through plugins. Over time
we expect more elements of SQLFluff will become extensible with plugins.

Each plugin can implement multiple Rules or Templaters.

We recommend that the name of a plugin should start with *"sqlfluff-"* to be
clear on the purpose of your plugin.

A plugin may need to include a default configuration if its rules are
configurable: use plugin default configurations **only for that reason**!
We advise against overwriting core configurations by using a default plugin
configuration, as there is no mechanism in place to enforce precedence between
the core library configs and plugin configs, and multiple plugins could clash.

A plugin Rule class name should have the structure "Rule_PluginName_L000".
The 'L' can be any letter and is meant to categorize rules; you could use the
letter 'S' to denote rules that enforce security checks, for example.

An important thing to note when running custom implemented rules: run
``pip install -e .`` inside the plugin folder, so that your custom rules are
included when linting.

A plugin Rule code includes the PluginName, so a rule "Rule_L000" in core will
have the code "L000", while "Rule_PluginName_L000" will have the code
"PluginName_L000". Codes are used to display errors; they are also used as
configuration keys.

We make it easy for plugin developers to test their rules by exposing a
testing library in *sqlfluff.utils.testing*.

.. _`sqlfluff/plugins/sqlfluff-plugin-example`: https://github.com/sqlfluff/sqlfluff/tree/main/plugins/sqlfluff-plugin-example
.. _`sqlfluff/plugins/sqlfluff-templater-dbt`: https://github.com/sqlfluff/sqlfluff/tree/main/plugins/sqlfluff-templater-dbt

Giving feedback
---------------

Would you like to have other parts of *SQLFluff* be "pluggable"?
Tell us about it in a `GitHub issue`_ 😄.

.. _`GitHub issue`: https://github.com/sqlfluff/sqlfluff/issues/new?assignees=&labels=enhancement&template=enhancement.md
sqlfluff-3.4.2/docs/source/guides/contributing/rules.rst
.. _developingrulesref:

Developing Rules
================

`Rules` in `SQLFluff` are implemented as classes inheriting from ``BaseRule``.
SQLFluff crawls through the parse tree of a SQL file, calling the rule's
``_eval()`` function for each segment in the tree. For many rules, this allows
the rule code to be really streamlined and only contain the logic for the rule
itself, with all the other mechanics abstracted away.

Running Tests
-------------

The majority of the test cases for most bundled rules are *"yaml test cases"*,
i.e. test cases defined in `yaml`_ files. You can find those
`yaml fixtures on github`_. While this provides a very simple way to *write*
tests, it can occasionally be tedious to *run* specific tests.

Within either a `tox` environment or `virtualenv` (as described in the
`contributing.md`_ file), you can run all of the rule yaml tests with:

.. code-block:: sh

   pytest test/rules/yaml_test_cases_test.py -vv

...or, to run just the tests for a specific rule, use the ``-k`` option to
select only those tests:

.. code-block:: sh

   pytest -vv test/rules/ -k RF01

The :code:`-k` option simply searches for the content of the argument being in
the name of the test, which will match any single or combo tests for that
rule. By convention, any test cases for a rule should include the code for
that rule.

.. _`yaml`: https://yaml.org/
.. _`yaml fixtures on github`: https://github.com/sqlfluff/sqlfluff/tree/main/test/fixtures/rules/std_rule_cases
.. _`contributing.md`: https://github.com/sqlfluff/sqlfluff/blob/main/CONTRIBUTING.md
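Similar test cases can also be expressed directly in Python, which is
particularly useful for plugin rules using the testing library in
``sqlfluff.utils.testing`` mentioned earlier. The sketch below is written
from memory rather than from formal documentation: the helper names
(``RuleTestCase`` and ``rules__test_helper``) come from
``sqlfluff.utils.testing.rules``, so check that module in your installed
version before relying on it. The rule code and SQL strings are illustrative.

.. code-block:: python

   import pytest

   from sqlfluff.utils.testing.rules import RuleTestCase, rules__test_helper

   # Each case pairs SQL which should pass the rule with SQL which
   # should fail it.
   test_cases = [
       RuleTestCase(
           rule="LT01",
           desc="Single spaces around binary operators.",
           pass_str="SELECT a + b FROM tbl\n",
           fail_str="SELECT a  +  b FROM tbl\n",
       ),
   ]


   @pytest.mark.parametrize("test_case", test_cases)
   def test_rule_cases(test_case):
       """Run each case through the bundled rule test helper."""
       rules__test_helper(test_case)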
Traversal Options
-----------------

``recurse_into``
^^^^^^^^^^^^^^^^

Some rules are a poor fit for the simple traversal pattern described above.
Typical reasons include:

* The rule only looks at a small portion of the file (e.g. the beginning or
  end).
* The rule needs to traverse the parse tree in a non-standard way.

These rules can override ``BaseRule``'s ``recurse_into`` field, setting it to
``False``. For these rules, ``_eval()`` is only called *once*, with the root
segment of the tree. This can be much more efficient, especially on large
files. For example, see rules ``LT13`` and ``LT12``, which only look at the
beginning or end of the file, respectively.

``_works_on_unparsable``
^^^^^^^^^^^^^^^^^^^^^^^^

By default, `SQLFluff` calls ``_eval()`` for all segments, even "unparsable"
segments, i.e. segments that didn't match the parsing rules in the dialect.
This causes issues for some rules. If so, setting ``_works_on_unparsable`` to
``False`` tells SQLFluff not to call ``_eval()`` for unparsable segments and
their descendants.

Performance-related Options
---------------------------

These are other fields on ``BaseRule``. Rules can override them.

``needs_raw_stack``
^^^^^^^^^^^^^^^^^^^

``needs_raw_stack`` defaults to ``False``. Some rules use the
``RuleContext.raw_stack`` property to access earlier segments in the
traversal. This can be useful, but it adds significant overhead to the
linting process. For this reason, it is disabled by default.

``lint_phase``
^^^^^^^^^^^^^^

There are two phases of rule running.

1. The ``main`` phase is appropriate for most rules. These rules are assumed
   to interact and potentially cause a cascade of fixes requiring multiple
   passes. These rules run the `runaway_limit` number of times (default 10).

2. The ``post`` phase is for post-processing rules, not expected to trigger
   any downstream rules, e.g. capitalization fixes. They are run in a
   post-processing loop at the end. This loop is identical to the ``main``
   loop, but is only run 2 times at the end (once to fix, and once again to
   confirm no remaining issues).

The two phases add complexity, but they also improve performance by allowing
SQLFluff to run fewer rules during the ``main`` phase, which often runs
several times.

NOTE: ``post`` rules also run on the *first* pass of the ``main`` phase so
that any issues they find will be presented in the list of issues output by
``sqlfluff fix`` and ``sqlfluff lint``.
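Putting these fields together, a rule opting into several of these behaviours
might be declared like the sketch below. The class name is invented for
illustration, and ``RootOnlyCrawler`` (one of the bundled crawlers in
``sqlfluff.core.rules.crawlers``) is used here to give similar once-per-file
behaviour to the traversal options described above; check ``BaseRule`` in
your installed version for the exact set of available fields.

.. code-block:: python

   from sqlfluff.core.rules import BaseRule, RuleContext
   from sqlfluff.core.rules.crawlers import RootOnlyCrawler


   class Rule_Example_L001(BaseRule):
       """An illustrative rule, evaluated once per file."""

       groups = ("all",)
       # Evaluate against the root segment only, rather than crawling.
       crawl_behaviour = RootOnlyCrawler()
       # Skip unparsable segments and their descendants.
       _works_on_unparsable = False
       # Run in the post-processing loop rather than the main loop.
       lint_phase = "post"

       def _eval(self, context: RuleContext):
           # ``context.segment`` is the root (file) segment here.
           return None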
sqlfluff-3.4.2/docs/source/guides/index.rst
.. _guides:

Guides & How-tos
================

This section of short guides and articles is meant to be read alongside the
rest of the documentation, which is more reference-oriented.

Setting up SQLFluff
-------------------

.. toctree::
   :maxdepth: 1

   setup/teamrollout
   setup/developing_custom_rules

Troubleshooting SQLFluff
------------------------

.. toctree::
   :maxdepth: 1

   troubleshooting/how_to

.. _development:

Contributing to SQLFluff
------------------------

It is recommended that the following is read in conjunction with exploring
the codebase. `dialect_ansi.py`_ in particular is helpful to understand the
recursive structure of segments and grammars. You may also need to reference
the :ref:`internal_api_docs`.

.. _`dialect_ansi.py`: https://github.com/sqlfluff/sqlfluff/blob/main/src/sqlfluff/dialects/dialect_ansi.py

.. toctree::
   :maxdepth: 1

   contributing/architecture
   contributing/git
   contributing/dialect
   contributing/rules
   contributing/plugins
   contributing/docs
sqlfluff-3.4.2/docs/source/guides/setup/
sqlfluff-3.4.2/docs/source/guides/setup/developing_custom_rules.rst
.. _developing_custom_rules:

Developing Custom Rules
=======================

It's quite common to have organisation- or project-specific norms and
conventions you might want to enforce using SQLFluff. With a little bit of
Python knowledge this is very achievable with SQLFluff, and there's a plugin
architecture to support that.

This guide should be read alongside the code for the
`SQLFluff example plugin`_ and the more technical documentation for
:ref:`developingpluginsref`.

What Plugin do I need?
----------------------

When thinking about developing a rule, the following thought process will
help you decide what to develop:

1. When do I want this rule to show a warning, and when should it definitely
   **not** show one? What information do I need when evaluating whether the
   rule has been followed or not? This information will tell you about the
   two important *locations* in the parse tree which will become important.

   * The *trigger* location: i.e. when should the rule be *called* for
     evaluation. e.g. :sqlfluff:ref:`CP01` triggers on keywords, because it
     only needs the information about that keyword to run, but
     :sqlfluff:ref:`LT08` triggers on ``WITH`` statements even though it's
     only interested in specific pieces of whitespace, because it needs the
     full context of the statement to evaluate. You may wish to examine the
     parse structure of some example queries you'd want to handle by using
     ``sqlfluff parse my_file.sql`` to identify the right segment. This is
     then specified using the ``crawl_behaviour`` attribute on the rule.

   * The *anchor* location: i.e. which position will show up in the CLI
     readout back to the user. To continue the example from above, while
     :sqlfluff:ref:`LT08` *triggers* on a ``WITH`` statement, it *anchors*
     on a more specific segment just after where it expected whitespace.
     It specifies this using the ``anchor`` argument to the
     :py:class:`~sqlfluff.core.rules.base.LintResult` object.

2. How should the rule evaluate and should I implement an auto-fix?

   For the simplest rules, the logic to evaluate whether there's an issue
   can be *very simple*. For example in the `SQLFluff example plugin`_, we
   are just checking the name of an element isn't in a configured list.
   Typically we recommend that for organisation-specific rules, **KEEP IT
   SIMPLE**. Some of the rules bundled with SQLFluff contain a lot of
   complexity for handling how to automatically fix lots of edge cases, but
   for your organisation it's probably not worth the overhead unless you're
   a **very big team** or **come across a huge amount of poorly formatted
   SQL**.

   * Consider the information not just to *trigger*, but also whether a
     custom error message would be appropriate and how to get the
     information to construct that too. The default error message will be
     the first line of the rule docstring_. Custom messages can be
     configured by setting the ``description`` argument of the
     :py:class:`~sqlfluff.core.rules.base.LintResult` object.

   * Do use the existing SQLFluff core rules as examples of what is possible
     and how to achieve various things - but remember that many of them
     implement a level of complexity and edge case handling which may not be
     necessary for your organisation.
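   As a sketch, a simple organisation-specific rule combining the *trigger*
   location (via ``crawl_behaviour``) and the *anchor* location (via the
   ``anchor`` argument) might look like this. The rule name, the target
   keyword and the message are all invented for this example; see the
   `SQLFluff example plugin`_ for a working reference:

   .. code-block:: python

      from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
      from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler


      class Rule_MyTeam_L001(BaseRule):
          """Do not use keywords our style guide forbids.

          The first line of this docstring is the default error message.
          """

          groups = ("all",)
          # *Trigger*: evaluate this rule for every keyword segment.
          crawl_behaviour = SegmentSeekerCrawler({"keyword"})

          def _eval(self, context: RuleContext):
              """Called once for each segment matched above."""
              if context.segment.raw_upper == "FORBIDDEN":
                  return LintResult(
                      # *Anchor*: the position reported back to the user.
                      anchor=context.segment,
                      # A custom message, overriding the docstring default.
                      description="Avoid the keyword 'FORBIDDEN'.",
                  )
              return None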
* Do use the existing SQLFluff core rules as examples of what is possible and how to achieve various things - but remember that many of them implement a level of complexity and edge case handling which may not be necessary for your organisation. 3. How am I going to roll out my rule to the team? Thinking through this aspect of rule development is just as important as the technical aspect. Spending a lot of time on rule development for it to be rejected by the end users of it is both a waste of time and also counterproductive. * Consider manually fixing any pre-existing issues in your project which would trigger the rule before rollout. * Seek consensus on how strictly the rule will be enforced and what the step by step pathway is to strict enforcement. * Consider *beta-testing* your new rule with a smaller group of users who are more engaged with SQLFluff or code quality in general. .. _docstring: https://en.wikipedia.org/wiki/Docstring Plugin Discovery ---------------- One of most common questions asked with respect to custom plugins is *discovery*, or *"how do I tell SQLFluff where my plugin is"*. SQLFluff uses pluggy_ as it's plugin architecture (developed by the folks at pytest_). Pluggy uses the python packaging metadata for plugin discovery. This means that **your plugin must be installed as a python package for discovery**. Specifically, it must define an `entry point`_ for SQLFluff. When SQLFluff runs, it inspects installed python packages for this entry point and then can run any which define one. For example you'll see in the `SQLFluff example plugin`_ that the ``pyproject.toml`` file has the following section: .. code-block:: toml [project.entry-points.sqlfluff] # Change this name in your plugin, e.g. company name or plugin purpose. sqlfluff_example = "sqlfluff_plugin_example" You can find equivalent examples for ``setup.cfg`` and ``setup.py`` in the python docs for `entry point`_. This information is registered *on install* of your plugin, (i.e. when running `pip install`, or equivalent if you're using a different package manager) so if you change it later, you may need to re-install your plugin. You can test whether your rule has been successfully discovered by running ``sqlfluff rules`` and reviewing whether your new rule has been included in the readout. .. note:: If you're struggling with rule discovery, **use the example plugin**. It can be much easier to take a known working example and then modify from there: 1. Copy the code from the `SQLFluff example plugin`_ into a local folder. 2. Run `pip install -e /path/to/where/you/put/it`. 3. Run `sqlfluff rules`, to confirm that the example plugin is present to demonstrate to yourself that discovery is functional. 4. Then edit the example plugin to do what you want now that discovery isn't an issue. You may have to re-run `pip install ...` if you change anything in the rule metadata (like the entry point, filenames or plugin location). .. _pluggy: https://pluggy.readthedocs.io/en/latest/ .. _pytest: https://docs.pytest.org/en/stable/ .. _`entry point`: https://setuptools.pypa.io/en/stable/userguide/entry_point.html .. _`SQLFluff example plugin`: https://github.com/sqlfluff/sqlfluff/tree/main/plugins/sqlfluff-plugin-example sqlfluff-3.4.2/docs/source/guides/setup/teamrollout.rst000066400000000000000000000126671503426445100233330ustar00rootroot00000000000000.. 
.. _rolloutref:

Rolling out SQLFluff with a new team
====================================

Rolling out SQLFluff, like rolling out any other linter or style guide, is
not just about the *technical* rollout, but also how you introduce the tool
to the team and organisation around you. *The effect of SQLFluff should be
to change your behaviours, not just your SQL*.

With that in mind, it's worth reminding ourselves what we're trying to
achieve with a tool like this. A set of potential success criteria might be:

#. **Faster comprehension and collaboration** by the team on a shared
   codebase. This includes more effective (and more enjoyable) code review
   on top of code which is easy to review and build upon.
#. **Easier and faster onboarding** for new team members. By adopting a
   style which is clean and *consistent with other organisations* we make it
   easier for new people to join the team.
#. **Improved adoption of shared SQL** from other sources. If the SQL found
   in open source projects is easy to read and *looks familiar* then you're
   more likely to use it. This means more reusable code across the industry.
#. **Productive discussions around style**. By defining your organisation's
   style guide in code, it means you can version control it, discuss changes
   and ultimately give a concrete output to discussions over style. *You
   like leading commas? Make a PR to .sqlfluff and let's discuss with the
   team what the implications would be*.

Consider which of these success measures is most important and most
desirable for your team. *Write that down*.

The following steps are a guide, which you should adapt to your
organisation, and in particular its level of data maturity.

1. Assess the situation
-----------------------

This step is done by you, or a small group of people who *already think that
linting is a good idea*.

* Run ``sqlfluff lint`` on your project with the stock configuration to find
  out how things work *out of the box*.
* Set up your :ref:`config` so that things run and that you can get a
  readout of the errors which you would want the team to see and *not the
  ones you don't*. Useful tools for this include :ref:`sqlfluffignore`, and
  the ``--exclude-rules`` or ``--ignore`` options in the CLI (see
  :ref:`cliref`).
* Identify which areas of your project are the worst and which are the
  tidiest. In particular, any areas which are particularly tidy already will
  be particularly useful in the next phase.

2. Make a plan
--------------

There are three sensible rollout phases:

#. **Pre CI/CD**.
#. **Soft CI/CD** (warnings but no strict fails).
#. **Hard CI/CD** (violations mean deployments fail).

In each of these phases you have three levers to play with:

#. Areas of the project in which to apply rules.
#. Depth of rules enforced (this might also include whether to ignore
   parsing errors or not).
#. Whether to just lint changes (:ref:`diff-quality`), or to lint all the
   existing code as well.

Work out a sensible roadmap of how hard you want to go in each phase. Be
clear about who is responsible for changes at each phase. An example plan
might look like this:

#. **Pre CI/CD** we get the tidiest area of a project to a stage that it
   fully passes the rules we eventually want to enforce. The core project
   team will do this. Liberal use of ``sqlfluff fix`` can be a lifesaver in
   this phase.
#. **Soft CI/CD** is applied to the whole project; team members are
   encouraged to write tidy SQL, but not *required* to.
#. **Hard CI/CD** is applied to the tidy areas of the project and also to
   any changes to the whole project. Anyone making changes is *required* to
   write SQL which passes the checks.
#. **Hard CI/CD** is applied to the whole project, not just changes, with
   only a few particularly problematic files explicitly ignored using
   :ref:`sqlfluffignore`.
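As an illustration of those levers, an early-phase ``.sqlfluff`` file for
the *Soft CI/CD* stage might look something like the sketch below. The
dialect, rule selection and ignore settings are example values only; adapt
them to your own project:

.. code-block:: cfg

   [sqlfluff]
   dialect = ansi
   # Early phases: surface only the core bundled rules while the team
   # adjusts, rather than enforcing everything at once.
   rules = core
   # Example only: don't fail on parsing issues in messy legacy areas
   # while they're being tidied.
   ignore = parsing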
3. Build the need
-----------------

Bring your team together to introduce both linting as a concept and SQLFluff
as a tool. At this stage it's **really important** that the team understands
*why* this is a good thing.

Consider whether to discuss the whole plan from step 2, or whether to only
talk about the first few steps. Aim to make this an empowering experience
that everyone can get involved with rather than *another piece of admin they
need to do*.

At this stage, you might also want to consider other tools in the SQLFluff
ecosystem such as the :ref:`SQLFluff pre-commit hook <using-pre-commit>` and
the `SQLFluff VSCode plugin`_ or `SQLFluff online formatter`_.

.. _`SQLFluff VSCode plugin`: https://github.com/sqlfluff/vscode-sqlfluff
.. _`SQLFluff online formatter`: https://online.sqlfluff.com/

4. Do, Review & Reassess
------------------------

Once the plan is in motion, make sure to start putting in place norms and
rituals around how you change the rules. In particular:

* How would someone suggest changing the style guide or enabling/disabling
  a rule?
* How do we assess whether the changes are working for the team or whether
  some are creating unnecessary stress?

It's normal for your usage of tools like SQLFluff to change and evolve over
time. It's important to expect this change in advance, and welcome it when
it happens. Always make sure you're driving toward the success measures you
decided up front, rather than just resisting the change.

5. Spread the word 😁
---------------------

Did it work? If so, spread the word. Tell a friend about SQLFluff.

If you're lucky they might share your views on comma placement 🤷‍♀️.
sqlfluff-3.4.2/docs/source/guides/troubleshooting/
sqlfluff-3.4.2/docs/source/guides/troubleshooting/how_to.rst
How to Troubleshoot SQLFluff
============================

It can at times be tricky to troubleshoot SQLFluff as it exists within an
ecosystem of other tools, and can be deployed in a wide range of ways. This
step-by-step guide can help you narrow down what's likely going wrong and
point you toward the swiftest resolution.

1. Common Errors
----------------

There are a few error messages you may get which have relatively
straightforward resolution paths.

Parsing Errors
^^^^^^^^^^^^^^

SQLFluff needs to be able to parse your SQL to understand its structure.
That means if it fails to parse the SQL it will give you an error message.
The intent is that if SQLFluff cannot parse the SQL, then it should mean the
SQL is also invalid, and help you understand where and why.

For example, this is a simple query which is not valid SQL:

.. code-block:: sql

   select 1 2 3
   from my_table

When running ``sqlfluff lint`` or ``sqlfluff parse`` we get the following
error message::

   ==== parsing violations ====
   L:   1 | P:  10 |  PRS | Line 1, Position 10: Found unparsable section: '2 3'

Furthermore if we look at the full parsing output we can see an unparsable
section in the parse tree:
.. code-block::
   :emphasize-lines: 12,13,14,15

   [L:  1, P:  1]      |file:
   [L:  1, P:  1]      |    statement:
   [L:  1, P:  1]      |        select_statement:
   [L:  1, P:  1]      |            select_clause:
   [L:  1, P:  1]      |                keyword:                      'select'
   [L:  1, P:  7]      |                [META] indent:
   [L:  1, P:  7]      |                whitespace:                   ' '
   [L:  1, P:  8]      |                select_clause_element:
   [L:  1, P:  8]      |                    numeric_literal:          '1'
   [L:  1, P:  9]      |            [META] dedent:
   [L:  1, P:  9]      |            whitespace:                       ' '
   [L:  1, P: 10]      |            unparsable:                       !! Expected: 'Nothing here.'
   [L:  1, P: 10]      |                numeric_literal:              '2'
   [L:  1, P: 11]      |                whitespace:                   ' '
   [L:  1, P: 12]      |                numeric_literal:              '3'
   [L:  1, P: 13]      |            newline:                          '\n'
   [L:  2, P:  1]      |        from_clause:
   [L:  2, P:  1]      |            keyword:                          'from'
   [L:  2, P:  5]      |            whitespace:                       ' '
   [L:  2, P:  6]      |            from_expression:
   [L:  2, P:  6]      |                [META] indent:
   [L:  2, P:  6]      |                from_expression_element:
   [L:  2, P:  6]      |                    table_expression:
   [L:  2, P:  6]      |                        table_reference:
   [L:  2, P:  6]      |                            naked_identifier: 'my_table'
   [L:  2, P: 14]      |                [META] dedent:
   [L:  2, P: 14]      |        newline:                              '\n'
   [L:  3, P:  1]      |    [META] end_of_file:

SQLFluff maintains its own version of each SQL dialect, and this may not be
exhaustive for some of the dialects which are newer to SQLFluff or which are
in very active development themselves. This means in some scenarios you may
find a query which runs fine in your environment, but cannot be parsed by
SQLFluff. This is not a *"bug"* per-se, but is an indicator of a gap in the
SQLFluff dialect. Many of the issues raised on GitHub relate to parsing
errors like this, but it's also a great way to support the project if you
feel able to contribute a dialect improvement yourself. We have a short
guide on :ref:`contributing_dialect_changes` to walk you through the
process.

In the short term, you can also ignore specific files from your overall
project so that one problematic file doesn't become a blocker for the rest.
See :ref:`ignoreconfig`.

Configuration Issues
^^^^^^^^^^^^^^^^^^^^

If you're getting either unexpected behaviour with your config, or errors
because config values haven't been set correctly, it's often due to config
file discovery (i.e. whether SQLFluff can find your config file, and in what
order it's combining config files). For a more general guide to this topic
see :ref:`setting_config`.

To help troubleshoot issues, if you run ``sqlfluff`` with a more verbose
logging setting (e.g. ``sqlfluff lint /my/model.sql -v``, or ``-vv``, or
``-vvvvvv``) you'll get a readout of the root config that SQLFluff is using.
This can help debug which values are being used.

2. Isolating SQLFluff
---------------------

If you're still getting strange errors, then the next most useful thing you
can do, both to help narrow down the cause and to assist with fixing a bug
if you have found one, is to isolate SQLFluff from any other tools you're
using in parallel:

1. If you're using SQLFluff with the :ref:`dbt_templater`, then try to
   recreate the error with the :ref:`jinja_templater` to remove the
   influence of ``dbt`` and any database connection related issues.

2. If you're getting an error in a remote CI suite (for example on GitHub
   actions, or a server like Jenkins), try to recreate the issue locally on
   your machine using the same tools.

3. If you're :ref:`using-pre-commit`, :ref:`diff-quality` or the
   `VSCode extension`_, try to recreate the issue by running the SQLFluff
   :ref:`cliref` directly. Often this can make debugging significantly
   easier because some of these tools hide some of the error messages which
   SQLFluff gives the user to help debug errors.

.. _`VSCode extension`: https://github.com/sqlfluff/vscode-sqlfluff
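For example, a first isolation step for a dbt project might look like the
following. The file path is illustrative, and ``--templater`` simply
overrides the templater for this invocation:

.. code-block:: sh

   # Run the CLI directly (rather than via pre-commit or an editor
   # plugin), forcing the simpler jinja templater with verbose logging.
   sqlfluff lint models/my_model.sql --templater jinja -vv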
Minimise the Query
---------------------

Often SQL scripts can get very long, and if you're getting an error on a
very long script, then it can be extremely difficult to work out what the
issue is. To assist with this we recommend iteratively cutting down the file
(or alternatively, iteratively building a file back up) until you have the
smallest file which still exhibits the issue. Often after this step, the
issue becomes obvious.

1. If your file has multiple statements in it (i.e. statements separated by
   ``;``), then remove statements one at a time until SQLFluff no longer
   shows the issue. When you get to that point, add the offending one back
   in and remove all the others.

2. Simplify individual statements. For example in a ``SELECT`` statement, if
   you suspect the issue is coming from a particular column, then remove the
   others, or remove CTEs, until you've got the simplest query which still
   shows the issue.
sqlfluff-3.4.2/docs/source/index.rst
📜 The SQL Linter for Humans
============================

Bored of not having a good SQL linter that works with whichever dialect
you're working with? Fluff is an extensible and modular linter designed to
help you write good SQL and catch errors and bad SQL before it hits your
database.

Notable releases:

* **1.0.x**: First *stable* release, with no major changes, to take
  advantage of a point of relative stability.
* **2.0.x**: Recode of rules, whitespace fixing consolidation,
  :code:`sqlfluff format` and removal of support for dbt versions pre `1.1`.
  Note that this release brings with it some breaking changes to rule coding
  and configuration, see :ref:`upgrading_2_0`.
* **3.0.x**: :code:`sqlfluff fix` now defaults to *not* asking for
  confirmation and the `--force` option was removed. Richer information
  returned by the :code:`sqlfluff lint` command (although in a different
  structure to previous versions). See :ref:`upgrading_3_0`.

For more detail on other releases, see our :ref:`releasenotes`.

Want to see where and how people are using SQLFluff in their projects?
Head over to :ref:`inthewildref` for inspiration.

Getting Started
^^^^^^^^^^^^^^^

To get started just install the package, make a SQL file and then run
SQLFluff and point it at the file. For more details, or if you don't have
python or pip already installed, see :ref:`gettingstartedref`.

.. code-block:: text

   $ pip install sqlfluff
   $ echo "  SELECT a  +  b FROM tbl;  " > test.sql
   $ sqlfluff lint test.sql --dialect ansi
   == [test.sql] FAIL
   L:   1 | P:   1 | LT01 | Expected only single space before 'SELECT' keyword.
                          | Found '  '. [layout.spacing]
   L:   1 | P:   1 | LT02 | First line should not be indented.
                          | [layout.indent]
   L:   1 | P:   1 | LT13 | Files must not begin with newlines or whitespace.
                          | [layout.start_of_file]
   L:   1 | P:  11 | LT01 | Expected only single space before binary operator '+'.
                          | Found '  '. [layout.spacing]
   L:   1 | P:  14 | LT01 | Expected only single space before naked identifier.
                          | Found '  '. [layout.spacing]
   L:   1 | P:  27 | LT01 | Unnecessary trailing whitespace at end of file.
                          | [layout.spacing]
   L:   1 | P:  27 | LT12 | Files must end with a single trailing newline.
                          | [layout.end_of_file]
   All Finished 📜 🎉!

Contents
^^^^^^^^

.. toctree::
   :maxdepth: 3
   :caption: Documentation for SQLFluff:

   gettingstarted
   why_sqlfluff
   guides/index
   configuration/index
   production/index
   reference/index
   inthewild
   jointhecommunity

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
sqlfluff-3.4.2/docs/source/inthewild.rst
.. _inthewildref:

SQLFluff in the Wild
====================

Want to find other people who are using SQLFluff in production use cases?
Want to brag about how you're using it? Just want to show solidarity with
the project and provide a testimonial for it?

Just add a section below by raising a PR on GitHub by
`editing this file ✏️ `_.

- SQLFluff in production `dbt `_ projects at `tails.com `_. We use the
  SQLFluff cli as part of our CI pipeline in `codeship `_ to enforce certain
  styles in our SQL codebase (with over 650 models) and keep code quality
  high. Contact `@alanmcruickshank `_.

- `Netlify `_'s data team uses SQLFluff with `dbt `_ to keep code quality in
  more than 350 models (and growing). Previously, we had our SQL Guidelines
  defined in a site hosted with Netlify, and now we're enforcing these rules
  in our CI workflow thanks to SQLFluff.

- `Drizly's `_ analytics team uses SQLFluff with `dbt `_ for over 700 models
  as part of our CI checks in GitHub. Before SQLFluff, we had SQL best
  practices outlined in a google doc and had to enforce them manually
  through PR comments. We're now able to enforce much of our style guide
  automatically through SQLFluff.

- `Petal's `_ data-eng team runs SQLFluff on our 100+ model `dbt `_ project.
  As a pre-commit hook and as a CI check, SQLFluff helps keep our SQL
  readable and consistent.

- `Surfline `_'s Analytics Engineering team implemented SQLFluff as part of
  our continuous integration (CI) suite across our entire `dbt `_ project
  (700+ models). We implement the CI suite using
  `GitHub Actions and Workflows `_. The benefits of using SQLFluff at
  Surfline are:

  - The SQL in our dbt models is consistent and easily readable.
  - Our style guide is maintained as :code:`code`, not a README that is
    rarely updated.
  - Reduced burden on Analytics Engineers to remember every single style
    rule.
  - New Analytics Engineers can quickly see and learn what "good SQL" looks
    like at Surfline and start writing it from day 1.

- The `HTTP Archive `_ uses SQLFluff to automatically check for quality and
  consistency of code submitted by the many contributors to this project.
  In particular our annual `Web Almanac `_ attracts hundreds of volunteers
  to help analyse our BigQuery dataset, and being able to automatically lint
  Pull Requests through GitHub Actions is a fantastic way to help us
  maintain our growing repository of `over a thousand queries `_.

- `Brooklyn Data Co `_ has a `dbt_artifacts `_ dbt package which runs
  SQLFluff in CI to lint pull requests automatically. It uses the
  `GitHub Actions workflow `_ contributed by Greg Clunies, with annotations
  on pull requests to make it easy for contributors to see where their SQL
  has failed any rules. See an
  `example pull request with SQLFluff annotations `_.

- `Markerr `_ has tightly integrated SQLFluff into our CI/CD process for
  data model changes and process improvements. Since adopting SQLFluff
  across the organization, the clarity of our SQL code has risen
  dramatically, freeing up review time to focus on deeper data and
  process-specific questions.

- `Symend `_ has a microservices platform supporting our SaaS product.
  We use SQLFluff in the CI/CD process of several of our data-oriented
  microservices. Among other things, it validates our database migration
  scripts, deployed using `schemachange `_, and we have near-term plans to
  implement it for our `dbt`_ projects.

- At `CarePay `_ we use SQLFluff to lint and fix all our dbt models as well
  as several other SQL-heavy projects. Locally we use SQLFluff with
  pre-commit and have also integrated it into our CI/CD pipelines.

- The Core Analytics Team from `Typeform `_ and `videoask `_ uses SQLFluff
  in the production `dbt `_ project for building our data warehouse layer
  for both products:

  - We use it locally in our day-to-day work, helping us to write cleaner
    code.
  - We added SQLFluff to our CI processes, so during a PR we can check that
    any new or modified SQL file has a consistent and easy-to-read format.
sqlfluff-3.4.2/docs/source/jointhecommunity.rst
.. _sqlfluff_slack:

SQLFluff Slack
==============

We have a fast-growing `community on Slack `_, come and join us!

SQLFluff on Twitter
====================

Follow us on Twitter `@SQLFluff `_ for announcements and other related
posts.
sqlfluff-3.4.2/docs/source/production/
sqlfluff-3.4.2/docs/source/production/cli_use.rst
Using SQLFluff directly as a CLI application
--------------------------------------------

The :ref:`SQLFluff CLI application <cliref>` is a Python application, which
means it depends on your host Python environment (see
:ref:`installingsqlfluff`).

The `exit code`_ provided by SQLFluff when run as a command line utility is
designed to be useful in deployment pipelines. If no violations are found
then the `exit code`_ will be 0. If violations are found then a non-zero
code will be returned, which can be interrogated to find out more.

- An error code of ``0`` means *operation success*, *no issues found*.
- An error code of ``1`` means *operation success*, *issues found*. For
  example this might mean that a linting issue was found, or that one file
  could not be parsed.
- An error code of ``2`` means an error occurred and the operation could not
  be completed. For example a configuration issue or an internal error
  within SQLFluff.

For details of what commands and options are available in the CLI see the
:ref:`cliref`.

.. _`exit code`: https://shapeshed.com/unix-exit-codes/
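For example, a shell step in a deployment pipeline might use those codes to
treat lint findings differently from operational failures. This is only a
sketch, and the path and messages are illustrative:

.. code-block:: sh

   sqlfluff lint models/
   status=$?
   if [ "$status" -eq 1 ]; then
       echo "Linting issues found."
   elif [ "$status" -eq 2 ]; then
       echo "SQLFluff could not complete (e.g. a configuration error)." >&2
   fi
   exit "$status"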
Adding ``diff-quality`` to your builds
--------------------------------------

In your CI build script:

1. Set the current working directory to the ``git`` repository containing the
   SQL code to be checked.

2. Run ``diff-quality``, specifying SQLFluff as the underlying tool:

.. code-block:: text

    $ diff-quality --violations sqlfluff

The output will look something like:

.. code-block:: text

    -------------
    Diff Quality
    Quality Report: sqlfluff
    Diff: origin/master...HEAD, staged and unstaged changes
    -------------
    sql/audience_size_queries/constraints/_postcondition_check_gdpr_compliance.sql (0.0%):
    sql/audience_size_queries/constraints/_postcondition_check_gdpr_compliance.sql:5: Unquoted Identifiers must be consistently upper case.
    -------------
    Total:   1 line
    Violations: 1 line
    % Quality: 0%
    -------------

These messages are basically the same as those provided directly by SQLFluff,
although the format is a little different. Note that ``diff-quality`` only
lists the line *numbers*, not the character position. If you need the
character position, you will need to run SQLFluff directly.

.. note::

    When using ``diff-quality`` with ``.sqlfluff`` :ref:`config-files`, and
    especially together with the :ref:`dbt_templater`, it can be really easy
    to run into issues with file discovery. There are a few steps you can
    take to make it much less likely that this will happen:

    1. ``diff-quality`` needs to be run from the root of your ``git``
       repository (so that it can find the ``git`` metadata).

    2. SQLFluff works best if the bulk of the configuration is done from a
       single ``.sqlfluff`` file, which should be in the root of your ``git``
       repository.

    3. If using :ref:`dbt_templater`, then either place your
       ``dbt_project.yml`` file in the same root folder, or if you put it in
       a subfolder, then only invoke ``diff-quality`` and ``sqlfluff`` from
       the root and define the subfolder that the ``dbt`` project lives in
       using the ``.sqlfluff`` config file.

    By aligning the paths of all three, you should be able to achieve a
    robust setup. If each is rooted in different paths it can be very
    difficult to achieve the same result, and the resulting behaviour can be
    difficult to debug. To debug any issues relating to this setup, we
    recommend occasionally running ``sqlfluff`` directly using the main CLI
    (i.e. calling :code:`sqlfluff lint my/project/path`) and checking whether
    that route gives you the results you expect. ``diff-quality`` should
    behave as though it's calling the SQLFluff CLI *from the same path that
    you invoke* ``diff-quality``.

For more information on ``diff-quality`` and the ``diff_cover`` package, see
the `documentation `_ on their GitHub repository. It covers topics such as:

* Generating HTML reports
* Controlling which branch to compare against (i.e. to determine new/changed
  lines). The default is ``origin/main``.
* Configuring ``diff-quality`` to return an error code if the quality is too
  low.
* Troubleshooting
sqlfluff-3.4.2/docs/source/production/github_actions.rst000066400000000000000000000033221503426445100235200ustar00rootroot00000000000000Using `GitHub Actions`_ to Annotate PRs
=======================================

There are two ways to utilize SQLFluff to annotate GitHub PRs.

1. When :code:`sqlfluff lint` is run with the
   :code:`--format github-annotation-native` option, it produces output
   formatted as `Github workflow commands`_ which are converted into pull
   request annotations by GitHub.
2. When :code:`sqlfluff lint` is run with the
   :code:`--format github-annotation` option, it produces output compatible
   with this `action from yuzutech`_, which uses the GitHub API to annotate
   the SQL in `GitHub pull requests`_.

.. warning::

    At present (December 2023), limitations put in place by GitHub mean that
    only the first 10 annotations will be displayed if the first option
    (using :code:`github-annotation-native`) is used. This is not something
    that SQLFluff can control itself and so we currently recommend using the
    second option above and the `action from yuzutech`_.

    There is an `open feature request `_ for GitHub Actions which you can
    track to follow this issue.

For more information and examples on using SQLFluff in GitHub Actions, see
the `sqlfluff-github-actions repository `_.

.. _`GitHub Actions`: https://github.com/features/actions
.. _`GitHub pull requests`: https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests
.. _`Github workflow commands`: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message
.. _`action from yuzutech`: https://github.com/yuzutech/annotations-action
sqlfluff-3.4.2/docs/source/production/index.rst000066400000000000000000000005431503426445100216270ustar00rootroot00000000000000.. _production-use:

Production Usage & Security
===========================

SQLFluff is designed to be used both as a utility for developers and as part
of `CI/CD`_ pipelines.

.. _`CI/CD`: https://en.wikipedia.org/wiki/Continuous_integration

.. toctree::
   :maxdepth: 2

   security
   cli_use
   diff_quality
   pre_commit
   github_actions
sqlfluff-3.4.2/docs/source/production/pre_commit.rst000066400000000000000000000075641503426445100226660ustar00rootroot00000000000000.. _using-pre-commit:

Using :code:`pre-commit`
========================

`pre-commit`_ is a framework to manage git "hooks" triggered right before a
commit is made. A `git hook`_ is a git feature to "fire off custom scripts"
when specific actions occur.

Using `pre-commit`_ with SQLFluff is a good way to provide automated linting
to SQL developers. With `pre-commit`_, you also get the benefit of only
linting/fixing the files that changed.

SQLFluff comes with two `pre-commit`_ hooks:

* ``sqlfluff-lint``: returns linting errors.
* ``sqlfluff-fix``: attempts to fix rule violations.

.. warning::

    For safety reasons, ``sqlfluff-fix`` by default will not make any fixes
    in files that had templating or parse errors, even if those errors were
    ignored using ``noqa`` or ``--ignore``.

    Although it is not advised, you *can* tell SQLFluff to try and fix these
    files by overriding the ``fix_even_unparsable`` setting in your
    ``.sqlfluff`` config file or using the
    ``sqlfluff fix --FIX-EVEN-UNPARSABLE`` command line option.

    *Overriding this behavior may break your SQL. If you use this override,
    always be sure to review any fixes applied to files with templating or
    parse errors to verify they are okay.*

You should create a file named ``.pre-commit-config.yaml`` at the root of
your git project, which should look like this:

.. code-block:: yaml

    repos:
    - repo: https://github.com/sqlfluff/sqlfluff
      rev: |release|
      hooks:
        - id: sqlfluff-lint
          # For dbt projects, this installs the dbt "extras".
          # You will need to select the relevant dbt adapter for your dialect
          # (https://docs.getdbt.com/docs/available-adapters):
          # additional_dependencies: ['', 'sqlfluff-templater-dbt']
        - id: sqlfluff-fix
          # Arbitrary arguments to show an example
          # args: [--rules, "LT02,CP02"]
          # additional_dependencies: ['', 'sqlfluff-templater-dbt']

When trying to use the `dbt templater`_, uncomment the
``additional_dependencies`` entry to install the extras. This is equivalent
to running ``pip install sqlfluff-templater-dbt``. You can specify the
version of ``dbt-adapter`` used in `pre-commit`_, for example:

.. code-block:: yaml

    additional_dependencies: ['dbt-bigquery==1.0.0', 'sqlfluff-templater-dbt']

See the list of available `dbt-adapters`_.

Note that you can pass the same arguments available through the CLI using
``args:``.

.. _`pre-commit`: https://pre-commit.com/
.. _`git hook`: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks
.. _`dbt templater`: `dbt-project-configuration`
.. _`dbt-adapters`: https://docs.getdbt.com/docs/available-adapters

Ignoring files while using :code:`pre-commit`
---------------------------------------------

Under the hood, `pre-commit`_ works by passing specific files to *SQLFluff*.
For example, if the only two files that are modified in your commit are
:code:`file_a.sql` and :code:`file_b.sql`, then the command which is called
in the background is :code:`sqlfluff lint file_a.sql file_b.sql`.

While this is efficient, it does produce some unwanted noise when also using
:ref:`sqlfluffignore`. This is because *SQLFluff* is designed to allow users
to override an *ignore* configuration by passing the name of the file
directly. This makes a lot of sense in a CLI context, but less so in the
context of being invoked by `pre-commit`_.

To avoid noisy logs when using both `pre-commit`_ and :ref:`sqlfluffignore`,
we recommend also setting the :code:`exclude` argument in your
:code:`.pre-commit-config.yaml` file (either the `top level config`_ or the
`hook specific config`_). This will prevent files matching the given pattern
from being passed to *SQLFluff* and so silence any warnings about the
:ref:`sqlfluffignore` being overridden.

.. _`top level config`: https://pre-commit.com/#top_level-exclude
.. _`hook specific config`: https://pre-commit.com/#config-exclude
sqlfluff-3.4.2/docs/source/production/security.rst000066400000000000000000000056241503426445100223720ustar00rootroot00000000000000.. _security:

Security Considerations
=======================

A full list of `Security Advisories is available on GitHub `_.

Given the context of how SQLFluff is designed to be used, there are three
different tiers of access through which users may be able to manipulate how
the tool functions in a secure environment.

#. *Users may have edit access to the SQL code which is being linted*. While
   SQLFluff does not execute the SQL itself, in the process of the
   :ref:`templating step ` (in particular via jinja or dbt), certain macros
   may have the ability to execute arbitrary SQL code (e.g. the
   `dbt run_query macro`_). For the Jinja templater, SQLFluff uses the
   `Jinja2 SandboxedEnvironment`_ to limit the execution of unsafe code. When
   looking to further secure this situation, see below for ways to limit the
   ability of users to import other libraries.

#. *Users may have edit access to the SQLFluff :ref:`config-files`*. In some
   (perhaps, many) environments, the users who can edit SQL files may also be
   able to access and edit the :ref:`config-files`.
It's important to note that because of :ref:`in_file_config`, users who can
   edit SQL files which are designed to be linted will also have access to
   the vast majority of any configuration options available in
   :ref:`config-files`. This means that there is minimal additional
   protection from restricting access to :ref:`config-files` for users who
   already have access to edit the linting target files (as described above).

#. *Users may have access to change how SQLFluff is invoked*. SQLFluff can
   be invoked either as a command line tool or via the python API. Typically
   the method is fixed for a given application. When thinking about how to
   restrict the ability of users to call insecure code, SQLFluff aims to
   provide options at the point of invocation.

In particular, as described above, the primary risk vector for SQLFluff is
the macro environment as described in :ref:`templateconfig`. To restrict
users from bringing arbitrary python methods into SQLFluff via the
``library_path`` configuration value (see :ref:`jinja_library_templating`),
we recommend that for secure environments you override this config value
either by providing an ``override`` option to the :class:`FluffConfig`
object if using the Python API or via the ``--library-path`` CLI option:

To disable this option entirely via the CLI:

.. code-block:: bash

    $ sqlfluff lint my_path --library-path none

To disable this option entirely via the python API:

.. literalinclude:: ../../../examples/04_config_overrides.py
   :language: python

.. _`Jinja2 SandboxedEnvironment`: https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment
.. _`dbt run_query macro`: https://docs.getdbt.com/reference/dbt-jinja-functions/run_query
sqlfluff-3.4.2/docs/source/reference/000077500000000000000000000000001503426445100175345ustar00rootroot00000000000000sqlfluff-3.4.2/docs/source/reference/api.rst000066400000000000000000000031621503426445100210410ustar00rootroot00000000000000.. _apiref:

Python API
==========

SQLFluff exposes a public API for other python applications to use. A basic
example of this usage is given here, with the documentation for each of the
methods below.

.. literalinclude:: ../../../examples/01_basic_api_usage.py
   :language: python

Simple API commands
-------------------

.. automodule:: sqlfluff
   :members: lint, fix, parse

Advanced API usage
------------------

The simple API presents only a fraction of the functionality present within
the core SQLFluff library. For more advanced use cases, users can import the
:code:`Linter()` and :code:`FluffConfig()` classes from :code:`sqlfluff.core`.
As of version 0.4.0 this is considered *experimental only* as the internals
may change without warning in any future release. If you come to rely on the
internals of SQLFluff, please post an issue on GitHub to share what you're up
to. This will help shape a more reliable, tidy and well documented public API
for use.

Configuring SQLFluff
~~~~~~~~~~~~~~~~~~~~

You can use the :code:`FluffConfig()` class to configure SQLFluff behaviour.

.. literalinclude:: ../../../examples/04_config_overrides.py
   :language: python

Instances of :code:`FluffConfig()` can be created manually, or parsed from
configuration strings or files.

.. literalinclude:: ../../../examples/05_simple_api_config.py
   :language: python

Supported dialects and rules are available through :code:`list_dialects()`
and :code:`list_rules()`.

.. literalinclude:: ../../../examples/03_getting_rules_and_dialects.py
   :language: python

Advanced API reference
~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: sqlfluff.core
   :members: Linter, Lexer, Parser
sqlfluff-3.4.2/docs/source/reference/cli.rst000066400000000000000000000001631503426445100210350ustar00rootroot00000000000000.. _cliref:

CLI Reference
=============

.. click:: sqlfluff.cli.commands:cli
   :prog: sqlfluff
   :show-nested:
sqlfluff-3.4.2/docs/source/reference/dialects.rst000066400000000000000000000040631503426445100220610ustar00rootroot00000000000000.. _dialectref:

Dialects Reference
==================

SQLFluff is designed to be flexible in supporting a variety of dialects. Not
all potential dialects are supported so far, but several have been
implemented by the community. Below is a list of the currently available
dialects. Each inherits from another, up to the root `ansi` dialect.

For a canonical list of supported dialects, run the
:program:`sqlfluff dialects` command, which will output a list of the current
dialects available on your installation of SQLFluff.

.. note::

    For technical users looking to add new dialects or add new features to
    existing ones, the dependent nature of how dialects have been implemented
    is intended to reduce the amount of repetition in how different elements
    are defined. As an example, when we say that the
    :ref:`redshift_dialect_ref` dialect *inherits* from the
    :ref:`postgres_dialect_ref` dialect this is not because there is an
    agreement between those projects which means that features in one must
    end up in the other, but that the design of the
    :ref:`redshift_dialect_ref` dialect was heavily *inspired* by the
    postgres dialect and therefore when defining the dialect within SQLFluff
    it makes sense to use :ref:`postgres_dialect_ref` as a starting point
    rather than starting from scratch. Consider when adding new features to
    a dialect:

    - Should I be adding it just to this dialect, or adding it to a *parent*
      dialect?
    - If I'm creating a new dialect, which dialect would be best to inherit
      from?
    - Will the feature I'm adding break any *downstream* dependencies within
      dialects which inherit from this one?

.. We define a shortcut to render double backticks here, which can then be
   referenced by individual dialects when they want to say how backtick
   quotes behave in that dialect. They would otherwise be interpreted as
   markup and so not shown as back quotes.

.. |back_quotes| raw:: html

   ``

.. include:: ../_partials/dialect_summaries.rst
sqlfluff-3.4.2/docs/source/reference/index.rst000066400000000000000000000002111503426445100213700ustar00rootroot00000000000000.. _reference:

Reference
=========

.. toctree::
   :maxdepth: 2

   dialects
   rules
   cli
   api
   internals/index
   releasenotes
sqlfluff-3.4.2/docs/source/reference/internals/000077500000000000000000000000001503426445100215335ustar00rootroot00000000000000sqlfluff-3.4.2/docs/source/reference/internals/config.rst000066400000000000000000000052441503426445100235370ustar00rootroot00000000000000.. _fluffconfig:

:code:`sqlfluff.core.config`: Configuration & ``FluffConfig``
-------------------------------------------------------------

When using the Python API, there are additional options for configuration
beyond those specified in the :ref:`setting_config` section of the main
docs. Internally, SQLFluff uses a consistent
:py:class:`~sqlfluff.core.config.fluffconfig.FluffConfig` class which is
then made accessible to different parts of the tool during linting and
fixing.
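As a brief illustration (a minimal sketch, assuming only that the bundled
``ansi`` dialect is available), a config object can be created and queried
directly -- the access methods involved are described in more detail below:

.. code-block:: python

    from sqlfluff.core import FluffConfig

    config = FluffConfig(overrides={"dialect": "ansi"})
    # Individual values can be read back out of the combined config...
    assert config.get("dialect") == "ansi"
    # ...or whole portions of it retrieved as a mapping.
    core_section = config.get_section("core")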
As described in the :ref:`nesting` section of the configuration docs,
multiple nested configuration files can be used in a project and the result
is a combined config object which contains the resulting union of those
files. Under the hood, this is stored in a dict object, and it's possible to
get and set individual values, using
:py:meth:`~sqlfluff.core.config.fluffconfig.FluffConfig.get` &
:py:meth:`~sqlfluff.core.config.fluffconfig.FluffConfig.set_value`, but also
get entire portions of that config dict using
:py:meth:`~sqlfluff.core.config.fluffconfig.FluffConfig.get_section`.

Methods for creating config mappings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When instantiating a
:py:class:`~sqlfluff.core.config.fluffconfig.FluffConfig` object, there are
a few options to set specific config values (such as ``dialect`` or
``rules``), but to access the full available set of features it's best to
pass in a :obj:`dict` of the values you want to set.

This config :obj:`dict` is a nested object, where the colon (`:`) characters
from the ``.sqlfluff`` config files delimit the keys. For example, take the
following config file:

.. code-block:: cfg

    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = lower

This would be represented in the config dict as below. See that the nested
structure has been created by splitting the keys on the colon (`:`)
characters:

.. code-block:: python

    configs = {
        "rules": {
            "capitalisation.keywords": {
                "capitalisation_policy": "lower"
            }
        }
    }

The following methods are provided to allow conversion of a selection of
file formats into a consistent mapping object for instantiating a
:py:class:`~sqlfluff.core.config.fluffconfig.FluffConfig` object.

.. autofunction:: sqlfluff.core.config.loader.load_config_string

.. autofunction:: sqlfluff.core.config.loader.load_config_file

.. autofunction:: sqlfluff.core.config.loader.load_config_resource

.. autofunction:: sqlfluff.core.config.loader.load_config_at_path

.. autofunction:: sqlfluff.core.config.loader.load_config_up_to_path

The ``FluffConfig`` object
^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: sqlfluff.core.config.fluffconfig.FluffConfig
   :members:
sqlfluff-3.4.2/docs/source/reference/internals/functional.rst000066400000000000000000000020241503426445100244250ustar00rootroot00000000000000:code:`sqlfluff.utils.functional`: Functional Traversal API
-----------------------------------------------------------

These newer modules provide a higher-level API for rules working with
segments and slices. Rules that need to navigate or search the parse tree
may benefit from using these. Eventually, the plan is for **all** rules to
use these modules. As of December 30, 2021, 17+ rules use these modules.

The modules listed below are submodules of `sqlfluff.utils.functional`.

:code:`segments` Module
^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.segments
   :members:

:code:`segment_predicates` Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.segment_predicates
   :members:

:code:`raw_file_slices` Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.raw_file_slices
   :members:

:code:`raw_file_slice_predicates` Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.raw_file_slice_predicates
   :members:
sqlfluff-3.4.2/docs/source/reference/internals/index.rst000066400000000000000000000012371503426445100233770ustar00rootroot00000000000000..
_internal_api_docs:

Internal API
============

Anything within this section should only be necessary for people who are
developing plugins or rules to interact with SQLFluff on a deeper level, or
people who've decided to help the project by
:ref:`contributing to SQLFluff `.

As these docs are some of the least commonly used, you may find that not all
modules are documented directly here, and so you may find it instructive to
read this together with docstrings and comments directly within the SQLFluff
codebase on `GitHub`_.

.. _`GitHub`: https://github.com/sqlfluff/sqlfluff

.. toctree::
   :maxdepth: 2

   config
   rules
   functional
   reflow
sqlfluff-3.4.2/docs/source/reference/internals/reflow.rst000066400000000000000000000031221503426445100235610ustar00rootroot00000000000000.. _reflowinternals:

:code:`sqlfluff.utils.reflow`: Whitespace Reflow API
----------------------------------------------------

Many rules supported by SQLFluff involve the spacing and layout of different
elements, either to enforce a particular layout or just to add or remove
code elements in a way sensitive to the existing layout configuration. The
way this is achieved is through some centralised utilities in the
`sqlfluff.utils.reflow` module.

This module aims to achieve several things:

* Less code duplication by implementing reflow logic in only one place.

* Provide a streamlined interface for rules to easily utilise reflow logic.

* Given this requirement, it's important that reflow utilities work within
  the existing framework for applying fixes to potentially templated code.
  We achieve this by returning `LintFix` objects which can then be returned
  by each rule wanting to use this logic.

* Provide a consistent way of *configuring* layout requirements. For more
  details on configuration see :ref:`layoutconfig`.

To support this, the module provides a :code:`ReflowSequence` class which
allows access to all of the relevant operations which can be used to
reformat sections of code, or even a whole file. Unless there is a very
good reason, all rules should use this same approach to ensure consistent
treatment of layout.

.. autoclass:: sqlfluff.utils.reflow.ReflowSequence
   :members:

.. autoclass:: sqlfluff.utils.reflow.elements.ReflowPoint
   :members:
   :inherited-members:

.. autoclass:: sqlfluff.utils.reflow.elements.ReflowBlock
   :members:
   :inherited-members:
sqlfluff-3.4.2/docs/source/reference/internals/rules.rst000066400000000000000000000002371503426445100234210ustar00rootroot00000000000000:code:`sqlfluff.core.rules.base`: Base Rule Classes
---------------------------------------------------

.. automodule:: sqlfluff.core.rules.base
   :members:
sqlfluff-3.4.2/docs/source/reference/releasenotes.rst000066400000000000000000000411251503426445100227620ustar00rootroot00000000000000.. _releasenotes:

Release Notes
=============

This page aims to act as a guide for migrating between major SQLFluff
releases. Necessarily this means that bugfix releases, or releases requiring
no change for the user, are not mentioned. For full details of each
individual release, see the detailed changelog_.

.. _changelog: https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md

.. _upgrading_3_0:

Upgrading to 3.x
----------------

This release makes a couple of potentially breaking changes:

* It drops support for python 3.7, which reached end of life in June 2023.

* It migrates to :code:`pyproject.toml` rather than :code:`setup.cfg` as the
  python packaging configuration file (although keeping :code:`setuptools`
  as the default backend).
* The serialised output for :code:`sqlfluff lint` now contains more
  information about the span of linting issues and initial proposed fixes.
  Besides the *new* fields, the original fields of :code:`line_pos` and
  :code:`line_no` have been renamed to :code:`start_line_pos` and
  :code:`start_line_no`, to distinguish them from the new fields starting
  :code:`end_*`.

* When linting from stdin, if no violations are found, the serialised
  response before this version would simply be an empty list (:code:`[]`).
  From 3.0 onwards, there will now be a record for the *file* with some
  statistics, but the *violations* section of the response for that file
  will still be an empty list.

* The default :code:`annotation_level` set by the :code:`--annotation-level`
  option on the :code:`sqlfluff lint` command has been changed from
  :code:`notice` to :code:`warning`, to better distinguish linting errors
  from warnings, which now always have the level of :code:`notice`. This is
  only relevant when using the :code:`github-annotation` or
  :code:`github-annotation-native` formats.

* The previously deprecated :code:`--disable_progress_bar` option on
  :code:`lint`, :code:`fix` and :code:`format` has now been removed
  entirely. Please migrate to :code:`--disable-progress-bar` to continue
  using this option.

* The :code:`--force` option on :code:`sqlfluff fix` is now the default
  behaviour and so the option has been deprecated. A new :code:`--check`
  option has been introduced which mimics the old default behaviour. This
  has been changed as it enables significantly lower memory overheads when
  linting and fixing large projects.

Upgrading to 2.3
----------------

This release includes two minor breaking changes which will only affect
users engaged in performance optimisation of SQLFluff itself.

* The :code:`--profiler` option on :code:`sqlfluff parse` has been removed.
  It was only present on the `parse` command and not `lint` or `fix`, and it
  is just as simple to invoke the python :code:`cProfile` module directly.

* The :code:`--recurse` cli option and :code:`sqlfluff.recurse`
  configuration option have both been removed. They both existed purely for
  debugging the parser, and were never used in a production setting. The
  improvement in other debugging messages when unparsable sections are found
  means that this option is no longer necessary.

Upgrading to 2.2
----------------

This release changes some of the interfaces between SQLFluff core and our
plugin ecosystem. The only *breaking* change is in the interface between
SQLFluff and *templater* plugins (which are not common in the ecosystem,
hence why this is only a minor and not a major release).

For all plugins, we also recommend a different structure for their imports
(especially for rule plugins which are more common in the ecosystem) - for
performance and stability reasons. Some users had been experiencing very
long import times with previous releases as a result of the layout of
plugin imports. Users with affected plugins will begin to see a warning from
this release onward, which can be resolved for their plugin by updating to a
new version of that plugin which follows the guidelines.

Templater plugins
^^^^^^^^^^^^^^^^^

Templaters before this version would pass a :code:`make_template()` callable
to the slicing methods as part of being able to map the source file.
This method would accept a :code:`str` and return a
:code:`jinja2.environment.Template` object to allow the templater to render
multiple variants of the template to do the slicing operation (which allows
linting issues found in templated files to be mapped accurately back to
their position in the unrendered source file).

This approach is not very generalisable, and did not support templating
operations with libraries other than :code:`jinja2`. As a result, we have
amended the interface to instead pass a :code:`render_func()` callable,
which accepts a :code:`str` and returns a :code:`str`. This works fine for
the :code:`jinja` templater (and by extension the :code:`dbt` templater) as
they can simply wrap the original callable with a method that calls
:code:`render()` on the original :code:`Template` object. However, it also
opens the door to other templating engines, and in particular to *remote*
templaters which might pass unrendered code over an HTTP connection for
rendering.

Specifically:

* The :code:`slice_file()` method of the base templater classes no longer
  accepts an optional :code:`make_template` argument or a
  :code:`templated_str` argument.

* Instead a :code:`render_func` callable should be passed which can be
  called to generate the :code:`templated_str` on demand.

* Unlike the optional :code:`make_template` - :code:`render_func` is **not**
  optional and should always be present.

Rule plugins
^^^^^^^^^^^^

We recommend that the module in a plugin which defines all of the hook
implementations (anything using the :code:`@hookimpl` decorator) should be
able to fully import before any rule implementations are imported. More
specifically, SQLFluff must be able to both *import* **and** *run* any
implementations of :code:`get_configs_info()` before any plugin rules (i.e.
any derivatives of :py:class:`BaseRule `) are *imported*. Because of this,
we recommend that rules are defined in a separate module from the root of
the plugin and then only imported *within* the :code:`get_rules()` method.

Importing in the main body of the module was previously our recommendation
and so may still be the case for older versions of some plugins. If one of
your plugins does use imports in this way, a warning will be presented from
this version onward, recommending that you update your plugin.

See the :ref:`developingpluginsref` section of the docs for an example.

.. _upgrading_2_0:

Upgrading from 1.x to 2.0
-------------------------

Upgrading to 2.0 brings several important breaking changes:

* All bundled rules have been recoded, both from the generic :code:`L00X`
  format into groups with similar codes (e.g. an *aliasing* group with codes
  of the format :code:`AL0X`), but also given *names* to allow much clearer
  referencing (e.g. :code:`aliasing.column`).

* :ref:`ruleconfig` now uses the rule *name* rather than the rule *code* to
  specify the section (see the example below). Any unrecognised references
  in config files (whether they are references which *do* match existing
  rules by code or alias, or whether they match no rules at all) will raise
  warnings at runtime.

* A complete re-write of layout and whitespace handling rules (see
  :ref:`layoutref`), and with that a change in how layout is configured (see
  :ref:`layoutconfig`) and the combination of some rules that were
  previously separate. One example of this is that the legacy rules
  :code:`L001`, :code:`L005`, :code:`L006`, :code:`L008`, :code:`L023`,
  :code:`L024`, :code:`L039`, :code:`L048` & :code:`L071` have been combined
  simply into :sqlfluff:ref:`LT01`.
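To make the configuration change concrete, here is the same rule
configuration expressed in the old (1.x) form and the new (2.x) form, using
the keyword capitalisation rule (previously :code:`L010`) as the example --
both section names and values are taken from elsewhere in this guide:

.. code-block:: cfg

    # 1.x - referenced by rule *code* (no longer valid in 2.0).
    [sqlfluff:rules:L010]
    capitalisation_policy = lower

    # 2.x - referenced by rule *name*.
    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = lower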
Recommended upgrade steps
^^^^^^^^^^^^^^^^^^^^^^^^^

To upgrade smoothly between versions, we recommend the following sequence:

#. The upgrade path will be simpler if you have a slimmer configuration
   file. Before upgrading, consider removing any sections from your
   configuration file (often :code:`.sqlfluff`, see :ref:`config`) which
   match the current :ref:`defaultconfig`. There is no need to respecify
   defaults in your local config if they are not different to the stock
   config.

#. In a local (or other *non-production*) environment, upgrade to SQLFluff
   2.0.x. We recommend using a `compatible release`_ specifier such as
   :code:`~=2.0.0`, to ensure any minor bugfix releases are automatically
   included.

#. Examine your configuration file (as mentioned above), and evaluate how
   rules are currently specified. We recommend primarily using *either*
   :code:`rules` *or* :code:`exclude_rules` rather than both, as detailed in
   :ref:`ruleselection`. Using either the :code:`sqlfluff rules` CLI command
   or the online :ref:`ruleref`, replace *all references* to legacy rule
   codes (i.e. codes of the form :code:`L0XX`). Specifically:

   * In the :code:`rules` and :code:`exclude_rules` config values. Here,
     consider using group specifiers or names to make your config simpler to
     read and understand (e.g. :code:`capitalisation` is much more
     understandable than :code:`CP01,CP02,CP03,CP04,CP05`, but the two
     specifiers will have the same effect). Note that while legacy codes
     *will still be understood* here (because they remain valid as aliases
     for those rules) - you may find that some rules no longer exist in
     isolation and so these references may be misleading. e.g. :code:`L005`
     is now an alias for :sqlfluff:ref:`layout.spacing` but that rule is
     much more broad-ranging than the original scope of :code:`L005`, which
     was only spacing around commas.

   * In :ref:`ruleconfig`. In particular here, legacy references to rule
     codes are *no longer valid*, will raise warnings, and until resolved,
     the configuration in those sections will be ignored. The new section
     references should include the rule *name* (e.g.
     :code:`[sqlfluff:rules:capitalisation.keywords]` rather than
     :code:`[sqlfluff:rules:L010]`). This switch is designed to make
     configuration files more readable, but we cannot support backward
     compatibility here without also having to resolve the potential
     ambiguity of the scenario where both *code-based* and *name-based*
     references are used.

   * Review the :ref:`layoutconfig` documentation, and check whether any
     indentation or layout configuration should be revised.

#. Check your project for :ref:`in_file_config` which refer to rule codes.
   Alter these in the same manner as described above for configuration
   files.

#. Test linting your project for unexpected linting issues. Where found,
   consider whether to use :code:`sqlfluff fix` to repair them in bulk, or
   (if you disagree with the changes) to change which rules you enable or
   their configuration accordingly. In particular you may notice:

   * The indentation rule (:code:`L003` as was, now :sqlfluff:ref:`LT02`)
     has had a significant rewrite, and while much more flexible and
     accurate, it is also more specific. Note that :ref:`hangingindents` are
     no longer supported, and that while not enabled by default, many users
     may find that enabling :ref:`implicitindents` fits their organisation's
     style better.

   * The spacing rule (:sqlfluff:ref:`LT01`: :sqlfluff:ref:`layout.spacing`)
     has a much wider scope, and so may pick up spacing issues that were not
     previously enforced.
If you disagree with any of these, you can override the
:code:`sqlfluff:layout` sections of the config with different (or just more
liberal) settings, like :code:`any`.

.. _`compatible release`: https://peps.python.org/pep-0440/#compatible-release

Example 2.0 config
^^^^^^^^^^^^^^^^^^

To illustrate the points above, this is an illustrative example config for a
2.0 compatible project. Note that the config is fairly brief and sets only
the values which differ from the default config.

.. code-block:: cfg

    [sqlfluff]
    dialect = snowflake
    templater = dbt
    max_line_length = 120

    # Exclude some specific rules based on a mixture of codes and names
    exclude_rules = RF02, RF03, RF04, ST06, ST07, AM05, AM06, convention.left_join, layout.select_targets

    [sqlfluff:indentation]
    # Enabling implicit indents for this project.
    # See https://docs.sqlfluff.com/en/stable/perma/indent_locations.html
    allow_implicit_indents = True

    # Add a few specific rule configurations, referenced by the rule names
    # and not by the rule codes.
    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = lower

    [sqlfluff:rules:capitalisation.identifiers]
    capitalisation_policy = lower

    [sqlfluff:rules:capitalisation.functions]
    extended_capitalisation_policy = lower

    # An example of setting a custom layout specification which
    # is more lenient than default config.
    [sqlfluff:layout:type:set_operator]
    line_position = alone

Upgrading to 1.4
----------------

This release brings several internal changes, and acts as a prelude to
2.0.0. In particular, the following config values have changed:

* :code:`sqlfluff:rules:L007:operator_new_lines` has been changed to
  :code:`sqlfluff:layout:type:binary_operator:line_position`.

* :code:`sqlfluff:rules:comma_style` and
  :code:`sqlfluff:rules:L019:comma_style` have both been consolidated into
  :code:`sqlfluff:layout:type:comma:line_position`.

If any of these values have been set in your config, they will be
automatically translated to the new values at runtime, and a warning will be
shown. To silence the warning, update your config file to the new values.
For more details on configuring layout see :ref:`layoutconfig`.

Upgrading to 1.3
----------------

This release brings several potentially breaking changes to the underlying
parse tree. For users of the CLI tool in a linting context, you should
notice no change. If however your application relies on the structure of the
SQLFluff parse tree or the naming of certain elements within the yaml
format, then this may not be a drop-in replacement. Specifically:

* The addition of a new :code:`end_of_file` meta segment at the end of
  the parse structure.

* The addition of a :code:`template_loop` meta segment to signify a jump
  backward in the source file within a loop structure (e.g. a jinja
  :code:`for` loop).

* Much more specific types on some raw segments, in particular
  :code:`identifier` and :code:`literal` type segments will now appear in
  the parse tree with their more specific type (which used to be called
  :code:`name`) e.g. :code:`naked_identifier`, :code:`quoted_identifier`,
  :code:`numeric_literal` etc...

If using the python api, the *parent* type (such as :code:`identifier`) will
still register if you call :code:`.is_type("identifier")`, as this function
checks all inherited types. However the eventual type returned by
:code:`.get_type()` will now be (in most cases) what used to be accessible
at :code:`.name`. The :code:`name` attribute will be deprecated in a future
release.
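As a short illustration of that distinction (a minimal sketch using the
public parsing API shown elsewhere in these docs, with a throwaway query):

.. code-block:: python

    from sqlfluff.core import Linter

    tree = Linter(dialect="ansi").parse_string("SELECT foo FROM tbl").tree
    segment = next(tree.recursive_crawl("naked_identifier"))
    # Inherited types still match...
    assert segment.is_type("identifier")
    # ...but the most specific type is what .get_type() returns.
    assert segment.get_type() == "naked_identifier"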
Upgrading to 1.2
----------------

This release introduces the capability to automatically skip large files,
and sets default limits on the maximum file size before a file is skipped.
Users should see a performance gain, but may experience warnings associated
with these skipped files.

Upgrades pre 1.0
----------------

* **0.13.x** new rule for quoted literals, option to remove hanging indents
  in rule L003, and introduction of ``ignore_words_regex``.

* **0.12.x** dialect is now mandatory, the ``spark3`` dialect was renamed to
  ``sparksql`` and datatype capitalisation was extracted from L010 to its
  own rule L063.

* **0.11.x** rule L030 changed to use ``extended_capitalisation_policy``.

* **0.10.x** removed support for older dbt versions < 0.20 and stopped
  ``fix`` attempting to fix unparsable SQL.

* **0.9.x** refinement of the Simple API, dbt 1.0.0 compatibility, and the
  official SQLFluff Docker image.

* **0.8.x** an improvement to the performance of the parser, a rebuild of
  the Jinja Templater, and a progress bar for the CLI.

* **0.7.x** extracted the dbt templater to a separate plugin and removed the
  ``exasol_fs`` dialect (now merged in with the main ``exasol``).

* **0.6.x** introduced parallel processing, which necessitated a big
  re-write of several innards.

* **0.5.x** introduced some breaking changes to the API.

* **0.4.x** dropped python 3.5, added the dbt templater, source mapping and
  also introduced the python API.

* **0.3.x** drops support for python 2.7 and 3.4, and also reworks the
  handling of indentation linting in a potentially not backward compatible
  way.

* **0.2.x** added templating support and a big restructure of rules and
  changed how users might interact with SQLFluff on templated code.

* **0.1.x** involved a major re-write of the parser, completely changing
  the behaviour of the tool with respect to complex parsing.
sqlfluff-3.4.2/docs/source/reference/rules.rst000066400000000000000000000026351503426445100214260ustar00rootroot00000000000000.. _ruleref:

Rules Reference
===============

This page is an index of available rules which are bundled with SQLFluff.

* For information on how to configure which rules are enabled for your
  project see :ref:`ruleselection`.

* If you just want to turn rules on or off for specific files, or specific
  sections of files, see :ref:`ignoreconfig`.

* For more information on how to configure the rules which you do enable
  see :ref:`ruleconfig`.

Core Rules
----------

Certain rules belong to the :code:`core` rule group. In order for a rule to
be designated as :code:`core`, it must meet the following criteria:

* Stable
* Applies to most dialects
* Could detect a syntax issue
* Isn’t too opinionated toward one style (e.g. the :code:`dbt` style guide)

Core rules can also make it easier to roll out SQLFluff to a team by only
needing to follow a 'common sense' subset of rules initially, rather than
spending time understanding and configuring all the rules, some of which
your team may not necessarily agree with.

We believe teams will eventually want to enforce more than just the core
rules, and we encourage everyone to explore all the rules and customize a
rule set that best suits their organization. See the :ref:`config` section
for more information on how to enable only :code:`core` rules by default.

Rule Index
----------

.. include:: ../_partials/rule_table.rst

.. include:: ../_partials/rule_summaries.rst
sqlfluff-3.4.2/docs/source/why_sqlfluff.rst000066400000000000000000000133561503426445100210450ustar00rootroot00000000000000..
_why_sqlfluff:

Why SQLFluff?
=============

SQL has been around for a long time, as a language for communicating with
databases, like a communication protocol. More recently, with the rise of
*data* as a business function, or a domain in its own right, SQL has also
become an invaluable tool for defining the *structure* of data and analysis
- not just as a one-off but as a form of `infrastructure as code`_.

As *analytics* transitions from a profession of people doing one-offs, and
moves to building stable and reusable pieces of analytics, more and more
principles from software engineering are moving into the analytics space.
One of the best articulations of this is written in the
`viewpoint section of the docs for the open-source tool dbt`_. Two of the
principles mentioned in that article are `quality assurance`_ and
`modularity`_.

Quality assurance
-----------------

The primary aim of `SQLFluff` as a project is in service of the first of
those principles: `quality assurance`_. With larger and larger teams
maintaining large bodies of SQL code, it becomes more and more important
that the code is not just *valid* but also easily *comprehensible* by other
users of the same codebase. One way to ensure readability is to enforce a
`consistent style`_, and the tools used to do this are called `linters`_.

Some famous `linters`_ which are well known in the software community are
`flake8`_ and `jslint`_ (the former is used to lint the `SQLFluff` project
itself). **SQLFluff** aims to fill this space for SQL.

Modularity
----------

SQL itself doesn't lend itself well to `modularity`_, so to introduce some
flexibility and reusability it is often `templated`_. Typically this is done
in the wild in one of the following ways:

1. Using the limited inbuilt templating abilities of a programming language
   directly. For example in python this would be using the
   `format string syntax`_:

   .. code-block:: python

      "SELECT {foo} FROM {tbl}".format(foo="bar", tbl="mytable")

   Which would evaluate to:

   .. code-block:: sql

      SELECT bar FROM mytable

2. Using a dedicated templating library such as `jinja2`_. This allows a lot
   more flexibility and more powerful expressions and macros. See the
   :ref:`templateconfig` section for more detail on how this works.

   - Often there are tools like `dbt`_ or `apache airflow`_ which allow
     `templated`_ SQL to be used directly, and they will implement a library
     like `jinja2`_ under the hood themselves.

All of these templating tools are great for `modularity`_ but they also mean
that the SQL files themselves are no longer valid SQL code, because they now
contain these configured *placeholder* values, intended to improve
modularity.

SQLFluff supports both of the templating methods outlined above, as well as
`dbt`_ projects, to allow you to still lint these "dynamic" SQL files as
part of your CI/CD pipeline (which is great 🙌), rather than waiting until
you're in production (which is bad 🤦, and maybe too late).

During the CI/CD pipeline (or any time that we need to handle `templated`_
code), SQLFluff needs additional info in order to interpret your templates
as valid SQL code. You supply this by providing dummy parameters in SQLFluff
configuration files. When substituted into the template, these values should
evaluate to valid SQL (so SQLFluff can check its style, formatting, and
correctness), but the values don't need to match actual values used in
production. This means that you can use *much simpler* dummy values than
what you would really use.
The recommendation is to use *the simplest* possible dummy value that still
allows your code to evaluate to valid SQL so that the configuration values
can be as streamlined as possible.

.. _`infrastructure as code`: https://en.wikipedia.org/wiki/Infrastructure_as_code
.. _`viewpoint section of the docs for the open-source tool dbt`: https://docs.getdbt.com/docs/viewpoint
.. _`quality assurance`: https://docs.getdbt.com/docs/viewpoint#quality-assurance
.. _`modularity`: https://docs.getdbt.com/docs/viewpoint#modularity
.. _`consistent style`: https://www.smashingmagazine.com/2012/10/why-coding-style-matters/
.. _`linters`: https://en.wikipedia.org/wiki/Lint_(software)
.. _`flake8`: http://flake8.pycqa.org/
.. _`jslint`: https://www.jslint.com/
.. _`templated`: https://en.wikipedia.org/wiki/Template_processor
.. _`format string syntax`: https://docs.python.org/3/library/string.html#formatstrings
.. _`jinja2`: https://jinja.palletsprojects.com/
.. _`apache airflow`: https://airflow.apache.org
.. _`dbt`: https://getdbt.com

.. _vision:

Vision for SQLFluff
-------------------

SQLFluff has a few components:

1. A generic parser for SQL which aims to be able to unify SQL written in
   different dialects into a comparable format. The *parser*.

2. A mechanism for measuring written SQL against a set of rules, with the
   added ability to fix any violations found. The *linter*.

3. An opinionated set of guidelines for how SQL should be structured and
   formatted. The *rules*.

The core vision [#f1]_ for SQLFluff is to be really good at being the
*linter*. The reasoning for this is outlined in :ref:`why_sqlfluff`.

Most of the codebase for SQLFluff is the *parser*, mostly because at the
point of developing SQLFluff, there didn't appear to be a good option for a
whitespace-aware parser that could be used instead.

With regards to the *rules*, SQLFluff aims to be opinionated but it also
accepts that many organisations and groups have pre-existing strong
conventions around how to write SQL and so ultimately SQLFluff should be
flexible enough to support whichever rule set a user wishes to use.

.. rubric:: Notes

.. [#f1] Credit to `this article`_ for highlighting the importance of a
   good vision.

.. _`this article`: https://opensource.com/business/16/6/bad-practice-foss-projects-management
sqlfluff-3.4.2/examples/000077500000000000000000000000001503426445100151645ustar00rootroot00000000000000sqlfluff-3.4.2/examples/01_basic_api_usage.py000066400000000000000000000053741503426445100211430ustar00rootroot00000000000000"""This is an example of how to use the simple sqlfluff api."""

from typing import Any, Iterator, Union

import sqlfluff

#  -------- LINTING ----------

my_bad_query = "SeLEct  *, 1, blah as  fOO  from mySchema.myTable"

# Lint the given string and return an array of violations in JSON representation.
lint_result = sqlfluff.lint(my_bad_query, dialect="bigquery")
# lint_result =
# [
#     {
#         "code": "CP01",
#         "line_no": 1,
#         "line_pos": 1,
#         "description": "Keywords must be consistently upper case.",
#     }
#     ...
# ]

#  -------- FIXING ----------

# Fix the given string and get a string back which has been fixed.
fix_result_1 = sqlfluff.fix(my_bad_query, dialect="bigquery")
# fix_result_1 = 'SELECT *, 1, blah AS foo FROM myschema.mytable\n'

# We can also fix just specific rules.
fix_result_2 = sqlfluff.fix(my_bad_query, rules=["CP01"])
# fix_result_2 = 'SELECT *, 1, blah AS fOO FROM mySchema.myTable'

# Or a subset of rules...
fix_result_3 = sqlfluff.fix(my_bad_query, rules=["CP01", "CP02"])
# fix_result_3 = 'SELECT *, 1, blah AS fOO FROM myschema.mytable'

#  -------- PARSING ----------

# Parse the given string and return a JSON representation of the parsed tree.
parse_result = sqlfluff.parse(my_bad_query)
# parse_result = {'file': {'statement': {...}, 'newline': '\n'}}

# This JSON structure can then be parsed as required.
# An example usage is shown below:


def get_json_segment(
    parse_result: dict[str, Any], segment_type: str
) -> Iterator[Union[str, dict[str, Any], list[dict[str, Any]]]]:
    """Recursively search JSON parse result for specified segment type.

    Args:
        parse_result (dict[str, Any]): JSON parse result from
            `sqlfluff.parse`.
        segment_type (str): The segment type to search for.

    Yields:
        Iterator[Union[str, dict[str, Any], list[dict[str, Any]]]]:
        Retrieves children of the specified segment type, either as a string
        for a raw segment, or as JSON (or an array of JSON) for non-raw
        segments.
    """
    for k, v in parse_result.items():
        if k == segment_type:
            yield v
        elif isinstance(v, dict):
            yield from get_json_segment(v, segment_type)
        elif isinstance(v, list):
            for s in v:
                yield from get_json_segment(s, segment_type)


# e.g. Retrieve array of JSON for table references.
table_references = list(get_json_segment(parse_result, "table_reference"))
print(table_references)
# [[{'identifier': 'mySchema'}, {'dot': '.'}, {'identifier': 'myTable'}]]

# Retrieve raw table name from last identifier in the table reference.
for table_reference in table_references:
    table_name = list(get_json_segment(parse_result, "naked_identifier"))[-1]
    print(f"table_name: {table_name}")
# table_name: myTable
sqlfluff-3.4.2/examples/02_timing_api_steps.py000066400000000000000000000021371503426445100214000ustar00rootroot00000000000000"""Performance testing on parsing and lexing."""

import timeit

from sqlfluff.core import Lexer, Linter, Parser

# Set up and output the query
sql = "SeLEct  *, 1, blah as  fOO  from myTable"
print("Query: ", repr(sql))


def time_function(func, name, iterations=20):
    """A basic timing function."""
    # Do the timing
    time = timeit.timeit(func, number=iterations) / iterations
    # Output the result
    print(
        "{:<35} {:.6}s [{} iterations]".format(
            f"Time to {name}:",
            time,
            iterations,
        )
    )


# Set up some classes to process the data
kwargs = dict(dialect="ansi")
lexer = Lexer(**kwargs)
parser = Parser(**kwargs)
linter = Linter(**kwargs)

# Pre-process the lexing step for the parsing step
tokens, _ = lexer.lex(sql)
# Pre-process the parsing step for the linting and fixing steps
parsed = parser.parse(tokens)

# Time the steps
time_function(lambda: lexer.lex(sql), name="lex")
time_function(lambda: parser.parse(tokens), name="parse")
time_function(lambda: linter.lint(parsed), name="lint")
time_function(lambda: linter.fix(parsed), name="fix")
sqlfluff-3.4.2/examples/03_getting_rules_and_dialects.py000066400000000000000000000011631503426445100234060ustar00rootroot00000000000000"""This is an example of how to get basic options from sqlfluff."""

import sqlfluff

#  -------- DIALECTS ----------

dialects = sqlfluff.list_dialects()
# dialects = [DialectTuple(label='ansi', name='ansi', inherits_from='nothing'), ...]
dialect_names = [dialect.label for dialect in dialects]
# dialect_names = ["ansi", "snowflake", ...]


#  -------- RULES ----------

rules = sqlfluff.list_rules()
# rules = [
#     RuleTuple(
#         code='Example_LT01',
#         description='ORDER BY on these columns is forbidden!'
#     ),
#     ...
# ]
rule_codes = [rule.code for rule in rules]
# rule_codes = ["LT01", "LT02", ...]
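#  -------- LOOKUPS ----------

# The tuples above can also be used to build simple lookups, e.g. mapping
# each rule code to its description (a purely illustrative sketch using
# only the fields shown in the comments above).
rule_descriptions = {rule.code: rule.description for rule in rules}
# rule_descriptions["Example_LT01"] = 'ORDER BY on these columns is forbidden!'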
sqlfluff-3.4.2/examples/04_config_overrides.py000066400000000000000000000007711503426445100213750ustar00rootroot00000000000000"""This is an example of providing config overrides."""

from sqlfluff.core import FluffConfig, Linter

sql = "SELECT 1\n"

config = FluffConfig(
    overrides={
        "dialect": "snowflake",
        # NOTE: We explicitly set the string "none" here rather
        # than a None literal so that it overrides any config
        # set by any config files in the path.
        "library_path": "none",
    }
)

linted_file = Linter(config=config).lint_string(sql)

assert linted_file.get_violations() == []
sqlfluff-3.4.2/examples/05_simple_api_config.py000066400000000000000000000040531503426445100215130ustar00rootroot00000000000000"""An example to show a few ways of configuring the API."""

import sqlfluff
from sqlfluff.core import FluffConfig, Linter

# #######################################
# The simple API can be configured in three ways.

# 1. Limited keyword arguments
sqlfluff.fix("SELECT  1", dialect="bigquery")

# 2. Providing the path to a config file
sqlfluff.fix("SELECT  1", config_path="test/fixtures/.sqlfluff")

# 3. Providing a preconfigured FluffConfig object.
# NOTE: This is the way of configuring SQLFluff which will give the most control.

# 3a. FluffConfig objects can be created directly from a dictionary of values.
config = FluffConfig(configs={"core": {"dialect": "bigquery"}})

# 3b. FluffConfig objects can be created from a config file in a string.
config = FluffConfig.from_string("[sqlfluff]\ndialect=bigquery\n")

# 3c. FluffConfig objects can be created from a config file in multiple strings
# to simulate the effect of multiple nested config strings.
config = FluffConfig.from_strings(
    # NOTE: Given these two strings, the resulting dialect would be "mysql"
    # as the later files take precedence.
    "[sqlfluff]\ndialect=bigquery\n",
    "[sqlfluff]\ndialect=mysql\n",
)

# 3d. FluffConfig objects can be created from a path containing a config file.
config = FluffConfig.from_path("test/fixtures/")

# 3e. FluffConfig objects can be created from keyword arguments
config = FluffConfig.from_kwargs(dialect="bigquery", rules=["LT01"])

# The FluffConfig is then provided via a config argument.
sqlfluff.fix("SELECT  1", config=config)


# #######################################
# The core API is always configured using a FluffConfig object.

# When instantiating a Linter (or Parser), a FluffConfig must be provided
# on instantiation. See above for details on how to create a FluffConfig.
linter = Linter(config=config)

# The provided config will then be used in any operations.

lint_result = linter.lint_string("SELECT  1", fix=True)
fixed_string = lint_result.fix_string()
# NOTE: The "True" element shows that fixing was a success.
assert fixed_string == ("SELECT  1", True)
sqlfluff-3.4.2/examples/06_full_parse_api.py000066400000000000000000000050351503426445100210330ustar00rootroot00000000000000"""Showing how to use the python API to filter SQL files.

This example shows how to use the Linter class to parse, and
then process, SQL scripts. The methods shown can be very powerful
for searching and filtering SQL scripts.
"""

from sqlfluff.core import Linter

# Let's make a SQL script with a few statements in it
sql = """
SELECT 1;
CREATE TABLE tbl (a int, b varchar);
SELECT 100;
INSERT INTO tbl VALUES (1, 'abc');
SELECT 10000;
"""
print("SQL Script: ", repr(sql))

# Call .parse_string() directly on the Linter object.
# The result is a ParsedString object.
linter = Linter(dialect="ansi")
parsed = linter.parse_string(sql)

# Get access to the parsed syntax tree.
tree = parsed.tree

# The root element of the parse tree should be a file segment
assert tree.is_type("file")

# The children of that segment are a mixture of statements
# and separators. Each of those are also segments with the same
# available methods. We can make a list of their raw representations.
sections = [(segment.get_type(), segment.raw) for segment in tree.segments]
assert sections == [
    ("newline", "\n"),
    ("statement", "SELECT 1"),
    ("statement_terminator", ";"),
    ("newline", "\n"),
    ("statement", "CREATE TABLE tbl (a int, b varchar)"),
    ("statement_terminator", ";"),
    ("newline", "\n"),
    ("statement", "SELECT 100"),
    ("statement_terminator", ";"),
    ("newline", "\n"),
    ("statement", "INSERT INTO tbl VALUES (1, 'abc')"),
    ("statement_terminator", ";"),
    ("newline", "\n"),
    ("statement", "SELECT 10000"),
    ("statement_terminator", ";"),
    ("newline", "\n"),
    ("end_of_file", ""),  # There's a final "end of file" segment
]

# There are a few search methods available for filtering. In particular,
# when looking for segments of a specific type which might not be direct
# siblings of the parent, you can use .recursive_crawl().
# NOTE: In a performance sensitive application, we recommend you limit the
# search depth by setting the `no_recursive_seg_type` option.
select_statements = tree.recursive_crawl("select_statement")
selects = [(segment.get_type(), segment.raw) for segment in select_statements]
assert selects == [
    ("select_statement", "SELECT 1"),
    ("select_statement", "SELECT 100"),
    ("select_statement", "SELECT 10000"),
]

# We could make a file which only includes these statements by adding back
# in the statement terminators:
filtered_script = ";\n".join(select[1] for select in selects)
assert filtered_script == "SELECT 1;\nSELECT 100;\nSELECT 10000"
print("Filtered SQL Script: ", repr(filtered_script))
sqlfluff-3.4.2/images/000077500000000000000000000000001503426445100146135ustar00rootroot00000000000000sqlfluff-3.4.2/images/README.md000066400000000000000000000023251503426445100160740ustar00rootroot00000000000000# SQLFluff image assets

The logos, both as png and svg files are available here for use when
referencing the project. Contributions are welcome to these files in
addition to contributions to the codebase of the project. This is
especially useful if you have graphic design skills, but before planning
any large scale changes, do raise an issue for discussion on GitHub to
ensure that your planned changes are likely to be accepted.

With that in mind please follow some of the following guidelines:

- Be mindful that these files are linked to in several places, including
  the main `README.md` file which generates the pypi profile for the
  project. Don't remove or rename files without checking those references
  first.
- If you edit the `svg` files, please re-generate the corresponding `png`
  files accordingly.
- Please use https://svgcrop.com/ and https://tinypng.com/ to crop and
  minimise `svg` and `png` files before committing.

## Licensing

Unless otherwise noted, these works are licensed under the Creative
Commons Attribution-ShareAlike 4.0 International License. To view a copy
of this license, visit http://creativecommons.org/licenses/by-sa/4.0/ or
send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042,
USA.
sqlfluff-3.4.2/images/datacoves.png000066400000000000000000001176021503426445100173010ustar00rootroot00000000000000[binary PNG image data not shown]
sqlfluff-3.4.2/images/datacoves.png
[binary PNG image data omitted]

sqlfluff-3.4.2/images/sqlfluff-lrg-border.png
[binary PNG image data omitted]

sqlfluff-3.4.2/images/sqlfluff-lrg-border.svg
[SVG image data omitted]

sqlfluff-3.4.2/images/sqlfluff-wide-lrg.png
[binary PNG image data omitted]
ըg#2JL1P],KpmN;`:HaemMc \;&WA?>EQٺH1]UɁ6-,LN9e#|W}\yNK=w*j9J65,^>7sYSZcY3Ȓw ;߷6(s0Z\߱~~R;H=4븎*e qGwz·:z'ß XEGxz xnsJw΍;O/~po^=;7` &Fm[" w2g(baH mV]u$_jzTBWȓa:e#}%cs@ Gm72}0Di+P% hprx eqķ?42@:4Ja!EW[be.'Эȟl#smuCBI#_Xtbü |yʕp/P)7oE'tB}?hq|U%k[n)pGWHaߔ!y`񝸟EZl@4xd]_AuS7|#etzuL Il u5!%rT+ z2o"_6~!mfdz6mﱽ`ֿ\w0MvWM*1FPr"[&_RIok9@qW3plpTeS~tيS _4(v\ L`:E=f8F[T ׉ f.RCvy1'7X}jH0Siyˆ?E͕PrSG.ЂpCb&]5 hF&EeFFL@rFR ls@fܟ%rd6=,ltNkp43Tπ`TrCz2rR1:]RyC }ŷ j&@Iͽ9{nc;6q8o׎LgNu.ٻ vֿeorDNݖA\o(JsONmar/Fp]6 d$%:AF Y6~`+xALk;'s?zR-+ulfXiTjψ˓~1LG&,s@>C1]0OkOޤe#l`zY~d0%c((S6:LKy]^IZN 1V#{YRһK@JtBEʼ= Q®OK2o Gv@Jeo.9*\PI \ce6Vq7uMo\׬ٷ~6[ ܀fUn&y+Ȥ.Lͺ*& vu_0ڳ_0;H$3"Yj\=nLmW|H]D(^N>.-N/ Rkӥ H[pR'Lx` r( u篢zt`R=Lc$@J7N>Dg7B6Wu(柹^PK5ì+\!r q|H |?\Fr$XCWTWYt=W4nrʜWB ;|y>eESl;B@"ʼIHm+3SF:h"5\KP5Hz9}:`J4 B >}zdq_΂V#ꛬy˝G~d(ӂ4]@4%j2ZU?~.dcX오#|w=iQYXe*u|C(1& ?@Am9ϧ&ZSYi<3gD3=d1gvUZAKلBMDk@à8yo%vNVъ5>S v$P[+oQ'Ye >͞r7j= VꞅXyq}%sNQ,A " DZhN &> ^g_TFPyEԔ^`P 8 D(e[ xT%RQ &r4%߅,%Cfx1 `{I3cȢ_ _أ҅y}at?Dֱwl- %nҁ IWw<ר 7 Dua6z>t[_1rX~w[/ɏx)V&,;F)a "1~koU`~8Rm(Xt"K^wL v0qry-Le"njFJpeGN؈ԉ0c}VΠsR>Yj7'}7:4WI|VF64^jYGB_O D\*ʱYELNcx8'e =<ބ \RJx5u|z;Q)NҡrvB$<vo~_Zr<նɗ xH?kaT/O$!S茆;"G\jN Bfb6! )x@uc1^#~#8jFsʴٗ4E hEvΔ x]B0Aޒ31>>V6.s6Wd5W gO8@@\]G ~4I2&2q׳4kTԆ/>h7M\r5ELO*XlIs=j-%N\ѸIcDhfԋ]S| ;N %\0c19fC-b9m냺"Fp1H^vP%DL'0Wy㚑u@uMYf)VΞ_YG(1eE(!Kw=?(H,]jsheb; F-DOYts(e怾ߎr c1>Df\,]L}xl tWW]90[+%2=16!l`. 2.Gdy[B/a!gDc xZjr 7٩rA#(NvdZZNH :Z6̮f%=bylD&%_DdJ(#^lfwSzx '>H[9G3MG,8uLZA7c1cg6eʸ0ג OŰ hq+/03DIg6 ΓD{XLӍw`$rDXɗ+ hK%m.Sm`4yi؉1-IL1c?2Dsޜ ;XA:Qkz'q hF~P2 (P!Lrt5L5hT}삆1=8;W]1cmE,g' F*nQ+#Җ㋽v@*b<~Wi a /#3F60m'ԙ`1clc@MeX.{FXQ~,n`]/o%:>w1#4O^*;`9-?d1c`H 5}np/<4&VG6,%[h|b}?u{)sp,c uLO@Ǭ? >6>_J~H_wY /a{g|f†jw @% $7};| -e}?7wpkQ%>p幭>5xi[v)M54MwLf˝cG%J7w:Eool6t1'<{r+V_ƚeݷ=<3wTCs;v6 51u4ዛb O`39$b1 ioi~+6H]R&A}gJAƲ= e}Uv oyԟK(xzD \up`u4I<Ӽdq}zܴ+91shhM}:ΎKf/vjql-¬N)+ܢ$%3/ ϱb4JZ4n h2nX)Ĵ@qvsӲڐ4xz, .;uHd7=Ռ'TJALV6fﱂ@y]X yho3Yhhg˳LfXVrcEzfs%z纟o90xo'|)] u0!9L$0ݚ՜N$#d5 ^mΰa)-9*+ĆWBbX=Ó֖]UƈDB޾m۪:/!EB#Fw lǩ>R:\NO …tM֨RmIxUMc-dZOw k}I=@n2I(wgcC FɦE$iU/(@kۥe5 q g.uM$'G3lӧ@NV9h8I1!Ȥz4hDɥS*ظSH+n(lQ*c`hTX;]4Y¦ł,X0)#$[!(H$2tv q JmX]yJ&nr5\0|*,73癝ytާ5Uľ?1ܰ[z&̿w@/= _iCCPICC profilex}=H@_[*+8dh,q*Bi+`r4iHR\ׂUg]\AIEJ_Rhq?{ܽ S(DŽlnUCFaZ"w~/ x9&̦q'(OtAGKq.A=' :tx8$+*{˜8+kݓ0WW\98@$PF&"H~?jU#P ~?ݭQt1Ų>€h-ز'Rj$BG6pq֤=ryD]%Mo7[o魵 C]-x]ۿgZr iTXtXML:com.adobe.xmp $:|PLTEMMMMMMMMMMMMMMMMMMMMMLLLLLLNNNMMMKKKNNNMMMLLLMMMMMMMMMMMMMMMLLLMMMOOOMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM%%% ...)))<<l0 l7qmAm^l?4 lh@f٦Pu8њ{O&;XFjץt98؉uXY? vl%oi0[Rf/ A}MҀ/@?^黅ނ-&@X}9/_y*@C߆`K UYeNQGt@Ud`Bxߒ&?/_w4ނ$迟Vpsc:7$˛_flBn0Wztޛg pFj0c_+9w04}޿0%w?U6m 7'1R5{>}K/v_y1T0>{5qzM]s__%:sgy]Ŏ3???uo_veY~N?y] s5xC5o2/b~Ngk5/D|"cҚ0U5wPO:a6$OM 7!7S) NfbA]ոj <~s;"_˲uye;xIOa f Dy$ǽ5k'g3>jΪf?Mc컠 ȓGW' ~#2}w,GwW⛝ \r\|7,E>B\trүyޅ["+ůK5Dߋ,'w4/c_&({7x&BpG1[D{1wvz#t) m5sGؐԆķH{)gT%Sd8!># e=2M̸[?}@[D6ESC3Ly_vqqO~9˅PM=n"$HPpr GhM, 3SD#*˹ٗe7^*P5oR?=㪰gB@ {MW];~WIۄKQ&d$BLFҬT_k6 5NC^!/%A#cPI}&}x}f縶.9z?siM]tI/åw Itsu9E[p6$FH"{<=f߲#j kf}'mE $HյhN.3Fu xpMc1B^6 tȫuϵ|<=emYF_6v_s7s8YC$J>Up=]u,%h̞{\w8ܜ68y4!q1r?$*Lb=r<]A8N?js~('pTXOp1+Tr]B9ЯNP`JR_srVN)*b}2׶=psznBD~P$z0T/%,z.7 ,Zs|(9ATK*D ڣ7}JpX=k-K~q|o(Z-anN_ S~]\ԭ QvߎA<!>sgV=yL9فW7pJ 8c7Dq*>eckzCktӟܓ?=דӑnJ}`yx$yyQkkn *d7XM.L~w8jq'?+m6wA]"5D+͞5i/ Ǔ4nEa7glѷ៺, _x$G#s$G`ncݭP˯x ;+0I&adQ@u/}x\ߩgamȋR&|Pb7- @(pb5m'L[}ĺ*O? `Oג;gK6 EY?aK̇jD4p3 'P)>'D%/{M xg+MFq'g.T3~ysÒǀȣ6p}ИN?4Y+RbBj# 2`)ؙӉoo. 
P[`v%>c~@]^+FT5,G]&?rY; tE$y(#U"wjK[@ Q3 k.ަo0bl6fh[8ݵ9_)@9ޜN@ GOSM\ܬffR3Q\",︮ {,#l#psbbҬ F} *1.QnM26I<1`< "vl/;.lc\0^kLX6(B -@Cdjipr9kn=|## b̺ b/ߚUt\t6:663]qY}'p^ZPiFp/~`*g#$0^\Bɖ,hvqd:|g{G Owp1dVnKRmR lY9\|K2>P(@I6镳~3#-9dKv4_^me0BeR57} |! h, )l1MNX]2'(cZi$ ɧ]­/]bU(@Lo/!(ԽRJ-i+uE!8&yo#A YN|QM55|`{[*XbE95zJ$HN,dRG*_ eYjl+X rfDǓ% xWW=r}X=L_*iC7N ߇ NH'yQ iFiZHKfymqZ|LpG":qg~H-"e` AL7}_<• 8 _Jϖb B1IV/$Ԑ ;M^qy]v@8(@X|gA!ɟ k9 >RK1PC`"F{G .[%9ؒs±zf=.oV 4*¿?> -]`V/fX -iB|$M!8ty).V Az~b1LڞTzb?i.R4;; ɫM&ĴD\? <($Э0\#ߏ9ɴD}c.p|D=`j904 i|H*/k2/ 'Wh3UABJ6ӛMڻΟ, A8XrC!̣5{;?^_  ^.PK0?0n/͇ Tm3񡵥N;B}lxo l)C4{ovW3ȯ_hOHvwWYMQx;"@ּF"&>޵ott+=` NRc!W}MK ܂7ϽMx̨3$].Oxc^3vJWGp\!6~8a5ηF(qҎsq]`d^j0߈G gOHdaɓT9L& poK#g؁]{}빖3 Jl@g4fƿQ1ѬgwF$)'iS+̫mXVlFjOF4]_ 򈭠Ϩo!*EFV ЄD6[>"^ 8EW"1ʡEr̗9L. >mc\Sۻ"V;nDZn_W4 7kΦ πk/>40 .|X}.}:Ոf`kڲv~:<5JA%'އm@ߞn\u=,5F~XhM /c"KaO\7eh V u7 uǕ9(0`%LFn3zuA6܀()if05e^`VRѨ|0Έ(7(6 pr~Et倜\ KYy '~Ƿ<\xقè!ĬMoQizZ js_}9Lo36YmoH2SrӉ%}ayZݥ߸` \Kó߅=4 p'IwWThkzލp͸OKz!@#v/m&I;?Z&x*| h^2` "]dXHoǹ.f~~87DPI NwN=!=Lw`[\/Sօqݿ{%(Lo}<'1:BueLa@pDz?I{xT>\L 0}> {.7Aiʃ>tq(2u>kYFepGH; SOtџu3u&z ]GKؑ4_$]T%"d yRn'9Զ/~ ])>VtI3?!N]g;@'cv."^0DV{^rCѐ!U->X-Vm_TAB^z-;&>Cb`)C6GQ5$@3w'Th1@kr/ 5xHoyߕJ&vyBQZM"A/9nM S燮zCD|Eԝ 8% ÃH.噈s e %d,+"3A^Iĺ"Tg4p$vrDRIUa'B6L(uƤ;HnG2r?ȵi %pd4I! &<bԄ.c>6d'm:-'iP&)r[ IDATu4dt DI_$Opf+Or@/ 2 IU\4B0$@\]D38 U[đF/bǑ$Bg$TC_QXt!^KURkY?#\y:+{+x4۩g% ڋ㸼NsVo֫;*p+ \c?hBbj3Mv8 ^oҠaᱛך;ZW" &rB#r='*2?3X9msGINe|~GjMD'#GؒM<3Y&bh;I}eJ=Σvw8 F;awLՖ'+VYhqnm>רi%ƌE`!iO"p 9! rAwGj2X^tАa(p Ge%HӋx{'=(Sᩋ=Q.K9|rװ@FLԊ)Pq@z]EV{uA힜 UnD8=̾篾sݢ@w`'Ș%s]:,6i :S+3 XU\X|őbt /EX~P+@}X:D糎ŵsL[gCс=B0ҝO"Y$K]);Iٱ$@Z-xT˝C=6 }U¥tzK=0<-џ:@#r h9x߬Tnv7 VEg:v3E+nVH_nHcf Z`"Uc-V\jxcXwH>UVy\,e;ޢKg%^cX3b %>]bT1oQ;\|WZrkڋ`|)rÕ0{j.`3|~#t ~0 pa`2ZH2!`/ycz4 ,NUb91#֕(skȿv$[Z}@z hOK+ŀb>L݊~|Zh6,&p*Ny=s pV[}~岞UB(1%u;Υ# V6t!BY :JJ]/qDMA[Tfϲ;E)m׬ O n9 FDw8HVW s ~P[/@ڌ -]X-eA!V7y4&⌢B;szJ|:G6t%b⿆^u a DK`sU2ZkPC]d.4Uu<8y" f DM5V$k׭_ҝy_ @wd]Vkq'(|8{#?1 vO<3gu8~yqB%61W"6ݪ#,. G$%t'=/ăź*c=oME:Nh.|(0M s!v4Du<M,Dlǐt|V$wjN> լqc4b[*94HH fMfvt!5lҷ wyt+J^8{4 tyI! 5i "fM]z~JY:7H(uoV]V~ڪMy,ҋiĀB5H[kȯ bqKX^$1{ЌSh!>-q=8Bc[<#P!Źl(=GaB+KѢH!&\HouWPUjZX-Ӥ6c;mX!=@_j[hiaMQ p4OUZۉw{g%&C׽A*JٮB?{Ti0K7Qěͳj-р$j1`0KΚ+VUX t@a ˚/칉_\1D68/0R&Bc"z/;켻 ((U}oNhi4wС%Ci]W\sW 8nN,_ޣ%?{tihJ'{gi &vtD(|t ` 9S5t)5hp-x$mL*0xBZ eKw+} Ɯi]"`{c"$ҝviPkӾ'á0-B/H{M-$ڡ8p1.GWm&әLb'f,qPAC9߀]#BS% b41Ύ߸ }FOw,2~X>ZE'yྣ못&B/p /?|BFBOߜtgqZӣ//!ͻ^S&%Kȃ4 F޳^Qk/ثqIy_!LO&E`e" ڡa xO?HcM90=+65A|xvX:<ШC]ع-f"tBNHqʤJ5| 1@zg^vlj,,t'c"2sV_3w^u$0I⫴H!K6FfEk)G0} ozSФݷΚ{4?u7qu:J1T7g7y Y[IVVR-tq.nDH?IOlN\'v h|B!zZw:o#PY델tfvᕫ/V +v}ƇW^4g*8'7)0tN"љ_V15Ɂߎo7 _#+}^t/goT p#A>P(E#t-J'nLrMCl:MZ$X8k !%.GHa2ʮIW']4}ҋxfOO`Ǣ$\nUֈc1Pͧ1 4j|uR;:+R9{.Wj,,)I0^3=oT+.N9@C|+(U+asV/B͜rqv@۵{>n(ݥg:r-Y4<L CG]8ef^)B$=w4[c(gԾ(oj܌o݊<@ngigys9lC?8)l@g"@*@swN~{o? 
|>Bkq[1 <3C GlS^mEV/z澦@SQ38'DE`9x4_po6_­j4[.τNt{F9xX9'4 kFI7׷F5 14=I.gYPki(`?]; ||.'J #:n̯\av43՛" W1` lC*k,[Nk+;|-rZƨęZV/*pN(ETDuC4B> ShcG9n_93"3O6_c}ɰY\<,a6Үb3 qxY(FmQ*@󋪹yfq'Z@0։¾yGF'I* Jux .{uȐ@zq#~|W3Э ^.ƫ:l+T]/Ӱ ]rH:0_ c$.̀(gtk9|πu0CȔX`҃ڣhKang|eHި;LBe\[DP@F 0iLHbF{uYl+R&ڟ Pr&AԉbY)oH3xNwk5aYG bo 0&o: =3B@YR"We&AΙ901&m fܼNQT/&ڮ0(yKO3[- ,2;%s!BQ1f6Nhtf(U5,c0n%]fo[ހgyH~ǥ=ih_V* lU}o_cP hqt-OO;c Gt7NK!@W+rOǡUE~x:@KIYX+:όX7/@Cӣ/@uEh~Fj)*iMS#a%XD<{'5zxb8rnW1XwM<P*who ꕑ|O+-RtIiak-d؅J Wo|mK<7v ,(@lfòl 7oEW |M1BKg՛*f( _n({C\ 4Jpn>c \TܺΆ2VyzfAs1ϰ͋8+8ur_`&Wq1u+Rco+gD@9q*pC-~|Np;zQ.+p2X-@ڲ}ABv*'2 L,WkVf69[q%AOܾU:PnCC375fi34Tk|̅S <0Cbp"U^H(jo\ӥ*v1oZK5 "M;oM tB_ Q{i W<5d (꫹~dOhg 8 q'favF_[-Pfixg|kn3`3VA4W3GOh3ecZ*8b6f8 \oYD`9J|: MFT|<(U%`3Ngv9т@awR|4G TNHҀ׻ 8ymyI.} aL7Lzml~T/@O,3n`+AyH_֕ OXӪ寘j1%5uVFa!{i~ Sn >`L̜S9^w.\C#ٴSދ..w? ݬFQֲE6]Rgd#Ս믅4|_ |HU{JGKwסFY`n}[jBn0oH'vr0q~1 䃕0Wu QF ӴRm0"HtɊ/$f!'r {K:q ~bGKk3kAєghTI ,2ZSβI妮_ ?PXR<}<+hwQT7~^ayZ±M _z~U^Bhᐝc9]rSdm*+5@{Y>6D/@ s; ΄ vko*㤼0I".nV^/q$uf`>,g\ X7܀kSwn*j) rYV*@W s^V84L3 +FWݫnrڎ ~up'H,oĞwc#0zǬ5-˭NS;!b wO vwVgZ8 z4M^gڡz>`΅\,SiaW鸊!tZ usg \S*)k qo7W ] %?P̭zط4|T^2XVPj{KX"4iWa?k[((r|ɴ+% /E%f/s϶VN=A - ϘQ̗>4Ա]&@K;N-% N[_l3R_Uf6tu0LM QHi4; p s\ڀpM`r=`*F)6"siJ᯦ϯ?d;KZ>#W؎Qˠ4hUwGW4. KUVEn>iSU,xC{Qޥ7SB򡪗_r8t`Vum.t׳ 00ضT YsV8ΟuIDATF]f%1(m*wy.w<Yx(:ֵIo&A^KbVRP9X:5 !MxpOVI8 KNV']襫Ff4[rb/C aO D^B?=qEzzB^Es,UJѰdXv -o wX QmD=ٚ'~q>i_FnQ@k"tcpVZ 0 `K75m-tOQ(U |Ħ~.Rfp(, 5Ki Qjh,%Cnͷghhd*n+WNVit_vBMưKK̀<6Sȑ{㶩Q&7pE,vֱ 25< {}`|:P>yuD2Z2$cW#+tQ+pGPe 0e0Y2wF sY>^9'sӸ1@G_QXM%Dh s|eY4p)pk+^۰{UFOW6hn"*}/a32+ǜ( )g:hRdIWBhUYFPmT TW^yYUҷʰ'Fl]=i`ػ.GU ʇ|[`򴆥3r܁=!;,{Gt{5.*ub \ր-lc!OJ.NOP4tٖ_vߒwbJWx{O: v*wR4h4x y޵΍ XYӠiC;%tiy`c- CW/58K}pQ5YNOuq>$m8qCO{2:<6Y vQpH[df?a2d.QF,ZJ>yW٩s7w\ f闒#f(=*a -L Pۀ;*@tǵ*{ IWLVrZ Qg(dBTWE#VDW=VQ̔Hv=GU bt E|Qɣ .ƿRC"X޶i~f~g $k;Z*TԖ459sO#įÖv8)bNF`div Hj-3c:BY\jz*|v_L`LzQ2IötJPAacA np +x rlh{QDU*oVQ%H J/ Hoa|N% YԸ>*hu<ˍ] 7^[J,Ǟv&Ft ,+%Е:-?y`+e=Q tltO<cST߾4|) !j2=UVN#ek+/3hwC5 .2}Cf@}Os3_,w 1çA_ 1$I6ѥ;[Zz@miSR$$H8Ґ+tbL7oLDnzx @ a)Q;0 U 8kr&\暚_+/=:;k8ĵZC]fAifр(@ ynsuj?|97H/vC= gffl)AE9QpY+b>?[樤 \S4B՟ t0.e3l] ivk[ COj4S\"לl{ELZGك}<8YY{Div'BYf/:nf pܱ+⇉gƦEZ 1k"Lvjxf>z->U %?6 iU$IXv+@tq00t'ee>0K?o]s!߸WV] N3V+«"?ҬL̫ҰUsmN2_lq!?l:Nu5ڭQ`F}!քC@#O SX}撛?xΈ4 L'SFLZ/d;x. & Х@ӜZ”&-q8JS r il؄B"tĄ1V00865|;2#qH7@>;4 MaA@Db ;(g|0/Ita# vZ.ЮKK]=XFXwɯޛkt^RhOD3QO7'9.^53qHc$OT8UŏLoPiYLČ?qfJV˨)S L&@jЁ ,@Bs! 8|,L X9<{S@!HMkx>qC^ƀVA@V׌&3Ӫ. xXL9}w0l;vҀ `3Mh4Hk3p^!(5 S2,pjd7 7I^PnNR=ڹS)<8>T':j9!\bƕ;Ro!@[QW-Z*Vtppka6ۀ)n 4xL.^Yڧ3yhq $Q?B3EFND%'{i9պ<Aun- e  a8r28\ܧjdiiF%8O1 eI7G##3"likYlY޿b:K9e!h~cz _{H~(r >U^H1LQL9?ILXu"1YUKyL`EIf^[1n pF=, 3l;tD8h pfKiTBe-&W=%ù5euAo8[po 0 tmvHЈ޾?~?tmi *zi3oܡ+-Fp>̬1u-)t.$*k>Y=?p3 J]'qi(MnD XZӅVVbچm;tf>dO 0҇Vis_2F"2jxR{Yk$)e܃ug6E%>+q?S=u)I1;vizdICW_O+e90+֗O8 5n:FnH b0`98?gf`ǃ<ÀF*Bv.oNk~T+H]ŻuλVf8XUMXcz|_5 | Wit0I!ؐN0Fpsh췪B׽y"NCɦDTt0Qww=X.*唞 h=VoSZ臅oQ<xBl}  u^RKwMn. 
˯0Ƚpl~kxal5oJfS?oIfv ^so?fm]`[j5[gL޽p[迍4򃓛,w 櫃IWĀnI[42gݣ_qe0= |hdJp1t~oFmp降fEf;'Qϫ}קQп=Nk&Q74/fcEu ՆrV0߶G֣  @\?upjF3z{ҔYUsG8Mx~t>5 As]T׳ $)<C [2<Ϙ7 FN6Im&\rWbY30<.˳:ed({Zӷ8/V{!5TfA/͕YP60|vh>81与4<.2/}U?43<|a9g3y u3WJ(8^)\/V?Z٠ Uki%*MjCH8O4^_9!Kfm*MTcͶyEdH.1^EC8~bߏ>.ꇟr &̚ >kDjBfRel!,c)%:Ocӵ!ޱN,4ej:\Vor-꡶ĉmUIIBM!juE^+[` ,mK'ܞXEN%CR'ĵ*RN4:%jىU$[Pet.R0 jFřކ/#}w Y <8OB]gNٓBs9\KLt% ǥL k JNC[GZzg- +U˷{!q%AlW~qsiJ>Q֏۪y:GDz_̎F6/\=e\˹g[ aVm(ZS8_ރN ݏמA"Hs |TKbza?W )B/Q#$&tHQ5%s $ہ/xEfL^#9M& ɇvzV"y!$I>wT@8&17PRu cg-,ԋF&FZq.Q(pu/IG ēouW/QD]bN2i 4Ϟ# E0xf""r5Yƹ~{٨u ۛ{×o2cx4-uāxv3A>Q:O4 Q̧]DMd5 p ӯߟoiziW ~hMy0-u`B"\ImtgyӢ_}czk_9QM8% oWRsjM9^* A~βsw u.NFqf_6[x!0}wJs*tl hb %Ϸ1tk|P\b3FAAAAAo  B!GEi`IENDB`sqlfluff-3.4.2/images/sqlfluff-wide.png000066400000000000000000000400651503426445100200760ustar00rootroot00000000000000PNG  IHDRig{PLTE!!!MMMMMMMMMNNNMMMLLLMMMMMM MMM MMMMMMMMMKKKLLLyyy &&&sssMMM555~~~ UUU"""OOOZZZnnn///FFFMMM>>>999,,,LLLkkk iiiCCCfff ???bbbBBB MMMMMM666888ZZZ ***QQQMMMxxx%%%MMMLLL>>>MMM LLL^^^MMMMMMMMMMMMppp///$$$MMMMMMEEEMMM333MMM``` MMMMMMLLLNNNHHH LLLRRRMMMMMMMMMMMM(((MMMLLLMMMܹƽ³խ222ttt;;;bbb VVV{{{fffQQQHHH}}}---^^^qtRNS 0"w,5>(9aO$ۥKFCS!쬗xd]睗 ~rи~äl)njѽ˥xgpᡑYȭﺷW}3¬jԲ:qS؃B_MFgZWQJ!{<IDATxw\g$S(e=(DP@D\,( :ZVֶj]ބ=DPpOKbmbj?| $#{q|"Vңݞ{@ik>84{A~V 4{,],Qxz;ΎiÇS:4Jx6W  siɷpѿ2@gE݆٬#q6+[lƠ"'ŗDZ=$kI`3Uws00>I}5LEIMnUd ܯQH9iCA"Dͭx0Y6Ӽ(\>Sݏ*fw#r?;Hz>(Ҷ޸wI{1\":qQW.͞-0j6@+p4X1Ppp69?9GmR?FXw굽( 8>C,0z:z{ZJ/(iN; U[>ڊ3%-w'+ou\6Jٹ欣jb16CPxp $,7bk]5ANC9 \N{y&Z,@{xhH5<% 'Hr%j/0*z"tϛ0k M!|0=$w,ˋ7TzF/y;|;g}ρo^gI bޖVu'=f[eYۭU"7M.=9w"?5|V載6ff$͈ez^Kpeig-M`z7 |˱K p/0WH KsgLyTBX\?Ên?),^6U]bjkYS~ 9 }yaO'&hne?$ [fl׊驗-ZzO|S,/fw ߷aSP <~Zƭ56 HA'¿C&R{FISѰEcᩋn5!bYâHyLXf̻-mW7E)vkB *2-ٯ=:-C@ hst4uPZiA*~F j@o;\=Ua~4bH7Ym^NN_3Б Nj `cN/t3ԶUǸSư\m~YeZjw0&쇆΄z3tȋav|(RŲv֡\G5jfV/]엹3~ۼB`8<V}C,RzHISf\kD﹨u@_x y1FBOQruA><چ^n ]~)֋s:=ZנDZV뱞zuoֺՊ]z^9왁boQψ]y|j:ZCۤe]J&/iAr z7'Gþ]Icht :.5(8FcL`U ::Oz5^INɰ T^CMQlT7R/Q2F`iUsI-Rʮ0(Pq(r^h-5uWߠ4bb H^ K3ѦH'&maOqXu_(k;^:#FY0@`K7V?p\a|&._#kܘvG[OrRkE1wwiu ݪ[d9DW# :_O=:M2df&:IN[_8#l<';Z 2Rݶ]vVdSSdO1P?.H6fטM=5#VJ&3" !( bsiIk5jyy䲭s$6}X2 d@5Xtf:h.&`I b썸#Y*D,[? |ҦKk(Kؔ[bK 'A_x;P묲+ #7w!,< xYnqFfX}Ԍj\LӍrRLTgкMQgتέμ:Ap& [8ޕNUΨ{&r#f=aʳtO8)4gMT;6bXЗ|cbib輒 #< QnQ2^.ٜ*Bc⟈* K& w ўS 73]{F[#!{A>ýJ_f\ЂwSsRYM? [xB/;%%W:JSn5lBf/i.3Nڀ9b! 
X_h9"5f 6`賞If2/(&t~| ,rCm9'j2PHZOّxwmP"0 \."XyرYMh90JaU)Qi9-AA!F8BOC~nCCCN3O;E aSIfjV]ٌoǹ-/J$̷_Elrpi*k 3UIHoWswx#,lEMd׫.!)kl-2Վۀ-XV*t3zۯb95eؙ~D@hcfy,^!Fa`fpl`ր4V/3\lcLb~ 1l)TjZB 4~L`n:˥ɤՅy w|uPHck)ʕfaiyaxwj7p6bTMaBߦ%X T7lzhX.hDي^nwvMs!b5:i{=hߘuE[%qci-.ɯo *g~8滆lUWfoF9Nn}QˢPЄ?zZ|S'62GI?-sR&!]}IL_}}Hɠ{R04#.Ω [Mm@u$9TqXj4=E=%nD}0R;rbn٠p5]jp Ё串*AϐYC?XÜ*hUVPUc+x@g'yl]ZR+>\ 8} =x1@ֶR 3{)E\a0ZԆ@z(vB ],e.F+q& cgl\c<&‚tah̚r"'8ޚ2wuBlƌzY-uyflaZ 7){r֐l3VpH·|6'% / 5z>0a|mfި"I1;5iIiRAs߉1[>G Y&e)hPjȪ .xyh0f:KѾf'%sWJD }]ܧ!6W#g=݀aiN<f/44!A!X )e=8,j3+s"𴰘@5JUKݍpܨ+]1?aohocB)Bք>5#]wɮY3*nʏ~(N>W媼Pzf֓$$=J[ۥHwj6 +LQv[g3Bp@I0U6t/}6ߵZVBbdItfp;1ƨOu}k@TkDg yQl{); i?>8̶* CZ@QS>F;u,`&T*R5l,m3¦Jeu.Y%dQty^J%:g)qm휲{7[w٥S%vc$/3{L&c2IhPrHDulߤ޽dv֭Q;zh³_qƗ]3OƆ[ۨɝqS$'isdH#V3_;YՓf֤M>fWmwk5u39x# TYO"~Rk%&[ͯ hj=}@TBL`j財=IyxlR{BA2q(w +Y$4O[=],UK7B1Ihd珄pa~1L5܇wUfݾ[b1TX\[M6K΢o^*"$ʼnc8AFW| jQP᤽~ϒFٌͱvUwRvtBdFērܐYhfUwwh3hL̊?BcfC,7߃?;0y50C@;H ")b(*.EኄYڂJiTڢ=T фLs4̻g,ұ9z87pj|4`SM#]s i۹K+&Li=;4{Ui\rr44}VLUnٷ'[;|J 5/af=f*,r-#CC|d$6 L]ejOO+8m˥"CB_$ /Iac!K4_Hr#8#'gUp0;xX5x|JkEOi kɰŵyDCIKF6$W؋)e>=\PP/ؖv>BTM[5Zi3q@еB:>[@QM8@dHj7^x,s9;uv RiSg7Nq7tEI7n9ЗdI{kTQD)J#SJ8u%#D<{1\ĕW.ܪڜ*Tv Y/r fHԬ!I i4#^^=*a7tdP 7]LT[eqFx eq.D]xȌE/5e<< #F(rK#_]=Ym~~mۉy1,Tm<٣еl5Q,J"T@o `Msu*,I1CK,xqۨXgfb}rd&KaǂeggDY[ Hx•FgÇ3]{+yz֓m7ۮ/l/OS[A։QpqJ39[ 1 {&Vy!nvfhZn|ӟ#%&yeIҽ3ְQ;Țr3ޚ# ,ň񺚧=w6iQB%Sq-#OWPHr7dh4G t q=:duK_G}Z=ZC 킑_$"AhshZMGmxp{ciJx*3ZS1p#r8cRс`ֳRqp6BXp$ S6UG;Ii M]V>zz:+j l?2ZRmDѻkܜMִP6;,yJ)ɘ{&¹DJCMaЇu]ݶDGu'cሺת4h/ۉ\ӑ"ض8Z3prиɜw3nt,+^C 0zvϗ}8^"R"ɰ n ƒ1gPww]5,΂Jp0;(ZZ{.[ ǰf&*@sA|${ ]Xc]#8}dRC=4}%D駱Ka)Rhbne2?Pq]`/ePfvċ!)6,+ȭ$Rfb,E=ck]Xy /8KJ3ELŁ v"YEnI徠`.k.zSs>JvSj}Ri):7z7a~l}Mi]gBTY#x3j)+3ח iM尃g +8,bﵢN"l< M';*t<ks"? ԓ|d]+ 0_6J>4T'H0"+ۄu5eZȗ)!A~6,I!s,2X؟ M'$8U3hN7i]$㯇z;[%!Cx ǔ?,+|VJWK£-x7QXqP)\JCm2V-!V&!&R_Q90D$v20OxuK4@ EA}ĝQM5-bcQsGK:PDNpK %ჭy\,7'hgGǚJ3 +tEt]T~@/)8P1)i2٤#x-IV\b uFcO6.ܐ gJ К7$reqC-*O4'TZn'B=.iӴH8CԔr# sc'*l+r|h3XkOД!,ͰqV%rq0u>;JC[T #`awaa3E+&ܪ L|I$ٓ+jf$atuRHV !}טC=6А7;}Gic:`<YTʜa =(1!vDD8rݭL3'+z\9>Hd%LbTb6L?4ۈL3J˃65!ሂs KUrgjA -9 ,4ÌP2=XYaHJ7e^d<&G4$zC G6CI H8ig<1-zCo4CWQ vIY`Xi%n ̇(`+M`~pH㋦M7|ٙa 9^*c"Q {ͧ֎s1yfrЍ#X;P 0ڶJ{=qȸrp0 M &]gtǑ, O(&G%ɣ+ikϊ$Ù?#JO0Ĝl8imqv"5*幵HFl `~azϘ$nPaTD'0Jp[ `(PFev ǟuI;Sn230/V[8P8|vϑ,ӡJ[ 78WO+fNqDxEJ(&r)*v*gc:FRD pVha}Ի|1P*My[iS;EipS.rw[Fw1ri:/WrTc*~'Mr엒Ϛ`’Տج p+0#旅2DOu)}VX+ڞV~s^]M"] Giu1\ 3'I2w"!XҌ DN6@ˋ|mG<]{dKi`a8cx[̦ !䥕!׳Q<0\=;YU֓o#Act(~e`f9bT.١zO=A &zEܟɴm[ABUqYn+ovSxqfs=z.0Aˌ|b.-5 < WC]W>Qi [g&`+ۉ"i]-y u8QYJb[W$c:(/ct!v?n($;R02 OJKPAJA=jX{927o3E61~ڒ$ؒ/12`9;.W/8لͰ_&wⒷzDxz (K9:m-vDkڭݡ4|>!Gݏ# w nޙGEUq00Cƌ3@,AȮ $& )%B, .$fai{.;,RKYe]M<5|KDxX @{3Dh0U8`TW*5sceBv\G@E{II|OF2GĮN0]U8` %7@c}?/'ޟYW5[sV,Xj|ӭsm|XEgZm'S"vp2MΦ2o&IN@3݊x׬1D4N^=IwԌpB·?(fH5Fz%޲ұ ;]l*%0'X|ey-9(Y{; Nb{&ksYPpE'0y< [d(>{a'4 QZ5mWm^|B 6#a50Љ\ >btH8%7(WN$#:JS$9V\ul.Iύv]{smjb؛.W5Y ћxXr31bUS>`d\ZVF>h !{K;/C +ͭXn~MB]]s *+ Czx|&74]*u#[JZFZ1YB8vw+`+YNwlKO7>,f^tj-S)UlMvGi@܅mE4h- pDjHH@h o@>dkpDzV>7װ'˯r6ݮ(yuT9ظB朖ss֜~g25*z9&6g\,aE*Q #LNJj c]4}2#DfvAx@66=&pKY|a'lћy\dDP%_Mih$,w)g@o<[dV҉o4,~WO\i4JPR׏>1 'Wi+V逺hF*;@ZNx>S7cl܍2NdJ=: }v2.|ѺF\cg߱24PdGLi!q|?h̯PJx6=U:%{1KR5O8Z(f_j@/7bՇ'pl޿H7|dA hyxPK:D˚bU |R\iAa4{SM@pV$rCdK6SObGK8Gj؞],,7`Kqx{Hנ>/;NQuc AϻOUEwKcnl;)yͧoQd[XǞ]Pa)G Lua1J.l.һ|MwdWo,6=%^_i؟FwftI8M!XOn.i}d"=Є~db2mJcJ[>omOcy:C[i ? 
sqlfluff-3.4.2/plugins/000077500000000000000000000000001503426445100150275ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/000077500000000000000000000000001503426445100215765ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/MANIFEST.in000066400000000000000000000000761503426445100233370ustar00rootroot00000000000000include src/sqlfluff_plugin_example/plugin_default_config.cfg
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/README.md000066400000000000000000000050361503426445100230610ustar00rootroot00000000000000# Example rules plugin

This example plugin showcases the ability to set up installable rule plugins. This interface is supported from version `0.4.0` of SQLFluff onwards.

For a step by step guide on using custom rule plugins, see the [guide in the docs](https://docs.sqlfluff.com/en/stable/perma/plugin_guide.html), or the more technical docs on [developing plugins](https://docs.sqlfluff.com/en/stable/perma/plugin_dev.html).

## Discovery

SQLFluff plugins use [pluggy](https://pluggy.readthedocs.io/en/latest/) to enable plugin discovery. This relies on the python packaging metadata, and therefore your plugin *must be installed as a python package* to be found by SQLFluff. This doesn't mean that you need to make your plugin *public* because you can install from a local path or private git repo (or any other location that you can `pip install` from). See the docs links above for more details.

## Plugin structure

This plugin follows the structure we recommend for any custom plugin:

* `pyproject.toml` defines all the package metadata, including importantly the `entry_point` configuration which allows SQLFluff to find your plugin once installed. See the [python docs](https://setuptools.pypa.io/en/stable/userguide/entry_point.html) for more detail and examples for `setup.cfg` or `setup.py` if you prefer that format instead.
* `MANIFEST.in` defines any *non-python* files to include when the package is installed. This specifies that we should also include the bundled config file for the rule. If you don't specify any new config keys for your rule you don't need this.
* `/src/sqlfluff_plugin_example` contains the main source code for the plugin. You should change the name to an appropriate one for your plugin and ensure that it matches the configuration in `pyproject.toml`. Within that folder you should find most of the methods are individually documented so you can understand what does what.
* `/test` contains a test suite for this rule. We recommend that you *do* create tests for your rule, and so we include an example `pytest` suite for our example rule here. This folder is not *necessary* for the rule to function, and so the level of test coverage you implement is up to you.

The test suite can be invoked by running [pytest](https://docs.pytest.org/en/stable/) on this folder. Great tests for your rules not only ensure consistent functionality over time, but can also be a great tool during initial development and serve as examples of how the rule operates to share with colleagues when rolling out your rule.
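As an illustrative smoke test (it assumes both SQLFluff and this example plugin have been `pip install`ed into the active environment, and the SQL string is arbitrary), you can confirm from python that the rule was discovered through its entry point:

```python
# Illustrative sketch: verify the plugin rule is discovered via pluggy.
# Assumes this example plugin is installed (e.g. `pip install -e .`).
from sqlfluff.core import Linter

linter = Linter(dialect="ansi")
# "bar" is in the plugin's default `forbidden_columns` (bar, baaz), so
# the custom rule should fire on this ORDER BY clause.
linted = linter.lint_string("SELECT * FROM foo ORDER BY bar\n")
print([violation.rule_code() for violation in linted.violations])
# The output should include 'Example_L001' among any core rule codes.
```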
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/pyproject.toml000066400000000000000000000007011503426445100245100ustar00rootroot00000000000000[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
# Change this name in your plugin, e.g. company name or plugin purpose.
name = "sqlfluff-plugin-example"
version = "1.0.0"
requires-python = ">=3.9"
dependencies = [
    "sqlfluff>=3.1.0"
]

[project.entry-points.sqlfluff]
# Change this name in your plugin, e.g. company name or plugin purpose.
sqlfluff_example = "sqlfluff_plugin_example"
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/src/000077500000000000000000000000001503426445100223655ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/000077500000000000000000000000001503426445100273005ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/__init__.py000066400000000000000000000037031503426445100314140ustar00rootroot00000000000000"""An example of a custom rule implemented through the plugin system.

This uses the rules API supported from 0.4.0 onwards.
"""

from typing import Any

from sqlfluff.core.config import load_config_resource
from sqlfluff.core.plugin import hookimpl
from sqlfluff.core.rules import BaseRule, ConfigInfo

# For backward compatibility we still support importing
# rules within the body of the root plugin module. This is included
# here for illustration, but also such that support for this import
# order can be tested in the test suite (and that the associated
# warning is triggered).
# See note below in `get_rules()` for more details.
# i.e. we DO NOT recommend importing here:
from sqlfluff_plugin_example.rules import Rule_Example_L001  # noqa: F401


@hookimpl
def get_rules() -> list[type[BaseRule]]:
    """Get plugin rules.

    NOTE: It is much better that we only import the rule on demand.
    The root module of the plugin (i.e. this file which contains all of
    the hook implementations) should have fully loaded before we try
    and import the rules. This is partly for performance reasons - but
    more because the definition of a BaseRule requires that all of the
    get_configs_info() methods have both been defined _and have run_
    before so all the validation information is available for the
    validation steps in the meta class.
    """
    # i.e. we DO recommend importing here:
    from sqlfluff_plugin_example.rules import Rule_Example_L001  # noqa: F811

    return [Rule_Example_L001]


@hookimpl
def load_default_config() -> dict[str, Any]:
    """Loads the default configuration for the plugin."""
    return load_config_resource(
        package="sqlfluff_plugin_example",
        file_name="plugin_default_config.cfg",
    )


@hookimpl
def get_configs_info() -> dict[str, dict[str, ConfigInfo]]:
    """Get rule config validations and descriptions."""
    return {
        "forbidden_columns": {"definition": "A list of columns to forbid"},
    }
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/plugin_default_config.cfg000066400000000000000000000000741503426445100343110ustar00rootroot00000000000000[sqlfluff:rules:Example_L001]
forbidden_columns = bar, baaz
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/rules.py000066400000000000000000000030451503426445100310060ustar00rootroot00000000000000"""An example of a custom rule implemented through the plugin system.

This uses the rules API supported from 0.4.0 onwards.
"""

from sqlfluff.core.rules import (
    BaseRule,
    LintResult,
    RuleContext,
)
from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler


# These two decorators allow plugins
# to be displayed in the sqlfluff docs
class Rule_Example_L001(BaseRule):
    """ORDER BY on these columns is forbidden!

    **Anti-pattern**

    Using ``ORDER BY`` on some forbidden columns.

    .. code-block:: sql

        SELECT *
        FROM foo
        ORDER BY bar, baz

    **Best practice**

    Do not order by these columns.

    .. code-block:: sql

        SELECT *
        FROM foo
        ORDER BY bar
    """

    groups = ("all",)
    config_keywords = ["forbidden_columns"]
    crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"})
    is_fix_compatible = True

    def __init__(self, *args, **kwargs):
        """Overwrite __init__ to set config."""
        super().__init__(*args, **kwargs)
        self.forbidden_columns = [
            col.strip() for col in self.forbidden_columns.split(",")
        ]

    def _eval(self, context: RuleContext):
        """We should not ORDER BY forbidden_columns."""
        for seg in context.segment.segments:
            col_name = seg.raw.lower()
            if col_name in self.forbidden_columns:
                return LintResult(
                    anchor=seg,
                    description=f"Column `{col_name}` not allowed in ORDER BY.",
                )
sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/test/000077500000000000000000000000001503426445100225555ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/test/rules/000077500000000000000000000000001503426445100237075ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py000066400000000000000000000016061503426445100305070ustar00rootroot00000000000000"""Runs the rule test cases."""

import os

import pytest

from sqlfluff.utils.testing.rules import (
    RuleTestCase,
    load_test_cases,
)

ids, test_cases = load_test_cases(
    test_cases_path=os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "test_cases", "*.yml"
    )
)


@pytest.mark.parametrize("test_case", test_cases, ids=ids)
def test__rule_test_case(test_case: RuleTestCase):
    """Evaluate the parameterized yaml test cases.

    NOTE: The test cases are loaded using `load_test_cases` above and
    then passed to this test case one by one. This allows fairly
    detailed rule testing, but defined only in the yaml files without
    any python overhead required.

    For examples of what features are available for parametrized rule
    testing, take a look at some of the test cases defined for the
    bundled SQLFluff core rules.
""" test_case.evaluate() sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/test/rules/test_cases/000077500000000000000000000000001503426445100260445ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-plugin-example/test/rules/test_cases/Rule_Example_L001.yml000066400000000000000000000004711503426445100316470ustar00rootroot00000000000000rule: Example_L001 no_forbidden_col_used: pass_str: | select a, sum(b) from tbl group by a order by a no_order_by_used: pass_str: | select a, b, c from tbl forbidden_col_used: fail_str: | select bar, baz from tbl order by bar sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/000077500000000000000000000000001503426445100214135ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/LICENSE.md000066400000000000000000000020611503426445100230160ustar00rootroot00000000000000MIT License Copyright (c) 2018 Alan Cruickshank Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/README.md000066400000000000000000000005171503426445100226750ustar00rootroot00000000000000# dbt plugin for SQLFluff This plugin works with [SQLFluff](https://pypi.org/project/sqlfluff/), the SQL linter for humans, to correctly parse and compile SQL projects using [dbt](https://pypi.org/project/dbt/). For more details on how to use this plugin, [see the documentation](https://docs.sqlfluff.com/en/stable/perma/dbt.html). 
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/pyproject.toml000066400000000000000000000040171503426445100243310ustar00rootroot00000000000000[project] name = "sqlfluff-templater-dbt" version = "3.4.2" description = "Lint your dbt project SQL" readme = {file = "README.md", content-type = "text/markdown"} authors = [ {name = "Alan Cruickshank", email = "alan@designingoverload.com"}, ] license = {file = "LICENSE.md"} classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Utilities", "Topic :: Software Development :: Quality Assurance", ] keywords = [ "sqlfluff", "sql", "linter", "formatter", "bigquery", "clickhouse", "databricks", "db2", "duckdb", "exasol", "greenplum", "hive", "materialize", "mysql", "postgres", "redshift", "snowflake", "soql", "sparksql", "sqlite", "teradata", "tsql", "dbt", ] dependencies = [ "sqlfluff==3.4.2", "dbt-core>=1.4.1", "jinja2-simple-tags>=0.3.1", ] [project.urls] Homepage = "https://www.sqlfluff.com" Documentation = "https://docs.sqlfluff.com" Source = "https://github.com/sqlfluff/sqlfluff" Changes = "https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md" "Issue Tracker" = "https://github.com/sqlfluff/sqlfluff/issues" Twitter = "https://twitter.com/SQLFluff" Chat = "https://github.com/sqlfluff/sqlfluff#sqlfluff-on-slack" [project.entry-points.sqlfluff] sqlfluff_templater_dbt = "sqlfluff_templater_dbt" [tool.setuptools.packages.find] include = ["sqlfluff_templater_dbt"] sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/000077500000000000000000000000001503426445100261435ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/__init__.py000066400000000000000000000003721503426445100302560ustar00rootroot00000000000000"""Defines the hook endpoints for the dbt templater plugin.""" from sqlfluff.core.plugin import hookimpl from sqlfluff_templater_dbt.templater import DbtTemplater @hookimpl def get_templaters(): """Get templaters.""" return [DbtTemplater] sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py000066400000000000000000001072371503426445100305240ustar00rootroot00000000000000"""Defines the dbt templater. NOTE: The dbt python package adds a significant overhead to import. This module is also loaded on every run of SQLFluff regardless of whether the dbt templater is selected in the configuration. The templater is however only _instantiated_ when selected, and as such, all imports of the dbt libraries are contained within the DbtTemplater class and so are only imported when necessary. 
""" import logging import os import os.path from collections import deque from contextlib import contextmanager from dataclasses import dataclass from functools import cached_property from typing import ( TYPE_CHECKING, Any, Callable, Deque, Dict, Iterator, Optional, TypeVar, Union, ) from jinja2 import Environment from jinja2_simple_tags import StandaloneTag from sqlfluff.core.errors import SQLFluffSkipFile, SQLFluffUserError, SQLTemplaterError from sqlfluff.core.templaters.base import TemplatedFile, large_file_check from sqlfluff.core.templaters.jinja import JinjaTemplater if TYPE_CHECKING: # pragma: no cover from dbt.semver import VersionSpecifier from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.core import FluffConfig # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") @dataclass class DbtConfigArgs: """Arguments to load dbt runtime config.""" project_dir: Optional[str] = None profiles_dir: Optional[str] = None profile: Optional[str] = None target: Optional[str] = None target_path: Optional[str] = None threads: int = 1 single_threaded: bool = False # dict in 1.5.x onwards, json string before. # NOTE: We always set this value when instantiating this # class. If we rely on defaults, this should default to # an empty string pre 1.5.x vars: Optional[Union[Dict, str]] = None # NOTE: The `which` argument here isn't covered in tests, but many # dbt packages assume that it will have been set. # https://github.com/sqlfluff/sqlfluff/issues/4861 # https://github.com/sqlfluff/sqlfluff/issues/4965 which: Optional[str] = "compile" # NOTE: As of dbt 1.8, the following is required to exist. REQUIRE_RESOURCE_NAMES_WITHOUT_SPACES: Optional[bool] = None def is_dbt_exception(exception: Optional[BaseException]) -> bool: """Check whether this looks like a dbt exception.""" # None is not a dbt exception. if not exception: return False return exception.__class__.__module__.startswith("dbt") def _extract_error_detail(exception: BaseException) -> str: """Serialise an exception into a string for reuse in other messages.""" return ( f"{exception.__class__.__module__}.{exception.__class__.__name__}: {exception}" ) T = TypeVar("T") def handle_dbt_errors( error_class: type[Exception], preamble: str ) -> Callable[[Callable[..., T]], Callable[..., T]]: """A decorator to safely catch dbt exceptions and raise native ones. NOTE: This looks and behaves a lot like a context manager, but it's important that it is *not* a context manager so that it can effectively strip the context from handled exceptions. That isn't possible (as far as we've tried) within a context manager. dbt exceptions don't pickle nicely, and python exception context tries very hard to make sure that the exception context of any new exceptions is preserved. This means we have to be quite deliberate in stripping any dbt exceptions, not just those that are directly raised, but those which are present within the `__context__` or `__cause__` attributes of any SQLFluff exceptions. This wrapper aims to do that, catching any dbt exceptions and raising SQLFluff exceptions, and also making sure that any native SQLFluff exceptions which are handled are also stripped of any unwanted dbt exceptions so that we don't cause issues when in multithreaded/multiprocess operation. 
    https://docs.python.org/3/library/exceptions.html#inheriting-from-built-in-exceptions
    https://github.com/sqlfluff/sqlfluff/issues/6037
    """  # noqa E501

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        def wrapped_method(*args, **kwargs) -> T:
            # NOTE: `_detail` also acts as a flag to indicate whether an exception
            # has been raised that we should react to.
            _detail = ""
            try:
                result = func(*args, **kwargs)
                return result
            # If we handle any other exception, check for dbt exceptions. We check using
            # string matching rather than importing the exceptions because the dbt folks
            # keep changing the names, and we don't really care which one it is, only
            # whether it's a dbt exception. None of them pickle nicely.
            except Exception as err:
                if is_dbt_exception(err):
                    _detail = _extract_error_detail(err)
                else:
                    # Any other errors are re-raised but only after stripping any
                    # dbt context errors they may have acquired. This includes any
                    # native SQLFluff errors.
                    if is_dbt_exception(err.__context__):
                        err.__context__ = None
                    if is_dbt_exception(err.__cause__):  # pragma: no cover
                        # This one seems to be less of an issue in testing, but I'm
                        # keeping it in for completeness.
                        err.__cause__ = None
                    raise err

            # By raising the new exception outside of the try/except clause we prevent
            # the link between the new and old exceptions. Otherwise the old one is
            # likely included in the __context__ attribute of the new one.
            # Unfortunately the dbt exceptions do not pickle well, so if they were
            # raised here then they cause all kinds of threading errors during parallel
            # linting. Python really doesn't like you trying to remove the `__cause__`
            # attribute of an exception so this is a mini-hack to sidestep that
            # behaviour.

            # Connection errors are handled more specifically (because they're fatal)
            if "FailedToConnect" in _detail:
                raise SQLTemplaterError(
                    "dbt tried to connect to the database and failed. Consider "
                    + "running `dbt debug` or `dbt compile` to get more "
                    + "information from dbt. " + _detail,
                    fatal=True,
                )
            # Other errors will use the preamble given to the decorator.
            raise error_class(preamble + _detail)

        return wrapped_method

    return decorator


class DbtTemplater(JinjaTemplater):
    """A templater using dbt."""

    name = "dbt"
    sequential_fail_limit = 3
    adapters = {}

    def __init__(self, override_context: Optional[dict[str, Any]] = None):
        self.sqlfluff_config = None
        self.formatter = None
        self.project_dir = None
        self.profiles_dir = None
        self.working_dir = os.getcwd()
        self.dbt_skip_compilation_error = True
        super().__init__(override_context=override_context)

    def config_pairs(self):
        """Returns info about the given templater for output by the cli."""
        return [("templater", self.name), ("dbt", self.dbt_version)]

    @cached_property
    def _dbt_version(self) -> "VersionSpecifier":
        """Fetches the installed dbt version.

        This is cached in the raw dbt format. NOTE: We do this only on
        demand to reduce the amount of loading required to discover the
        templater.
        """
        from dbt.version import get_installed_version

        return get_installed_version()

    @cached_property
    def dbt_version(self):
        """Gets the dbt version."""
        return self._dbt_version.to_version_string()

    @cached_property
    def dbt_version_tuple(self):
        """Gets the dbt version."""
        return int(self._dbt_version.major), int(self._dbt_version.minor)

    def try_silence_dbt_logs(self) -> None:
        """Attempt to silence dbt logs.

        During normal operation dbt is likely to log output such as:

        ..
code-block:: 14:13:10 Registered adapter: snowflake=1.6.0 This is emitted by dbt directly to stdout/stderr, and so for us to silence it (e.g. when outputting to json or yaml) we need to reach into the internals of dbt and silence it directly. https://github.com/sqlfluff/sqlfluff/issues/5054 NOTE: We wrap this in a try clause so that if the API changes within dbt that we don't get a direct fail. This was tested on dbt-code==1.6.0. """ # First check whether we need to silence the logs. If a formatter # is present then assume that it's not a problem if not self.formatter: if self.dbt_version_tuple >= (1, 8): from dbt_common.events.event_manager_client import cleanup_event_logger else: from dbt.events.functions import cleanup_event_logger cleanup_event_logger() @cached_property def dbt_config(self): """Loads the dbt config.""" from dbt import flags from dbt.adapters.factory import register_adapter from dbt.config.runtime import RuntimeConfig as DbtRuntimeConfig if self.dbt_version_tuple >= (1, 8): from dbt_common.clients.system import get_env from dbt_common.context import set_invocation_context set_invocation_context(get_env()) # Attempt to silence internal logging at this point. # https://github.com/sqlfluff/sqlfluff/issues/5054 self.try_silence_dbt_logs() if self.dbt_version_tuple >= (1, 5): user_config = None # 1.5.x+ this is a dict. cli_vars = self._get_cli_vars() else: # Here, we read flags.PROFILE_DIR directly, prior to calling # set_from_args(). Apparently, set_from_args() sets PROFILES_DIR # to a lowercase version of the value, and the profile wouldn't be # found if the directory name contained uppercase letters. This fix # was suggested and described here: # https://github.com/sqlfluff/sqlfluff/issues/2253#issuecomment-1018722979 from dbt.config import read_user_config user_config = read_user_config(flags.PROFILES_DIR) # Pre 1.5.x this is a string. cli_vars = str(self._get_cli_vars()) flags.set_from_args( DbtConfigArgs( project_dir=self.project_dir, profiles_dir=self.profiles_dir, profile=self._get_profile(), target_path=self._get_target_path(), vars=cli_vars, threads=1, ), user_config, ) _dbt_config = DbtRuntimeConfig.from_args( DbtConfigArgs( project_dir=self.project_dir, profiles_dir=self.profiles_dir, profile=self._get_profile(), target=self._get_target(), target_path=self._get_target_path(), vars=cli_vars, threads=1, ) ) if self.dbt_version_tuple >= (1, 8): from dbt.mp_context import get_mp_context register_adapter(_dbt_config, get_mp_context()) else: register_adapter(_dbt_config) return _dbt_config @cached_property def dbt_compiler(self): """Loads the dbt compiler.""" from dbt.compilation import Compiler as DbtCompiler return DbtCompiler(self.dbt_config) @cached_property @handle_dbt_errors( SQLFluffUserError, "dbt failed during project compilation. Consider running `dbt debug` " "or `dbt compile` to get more information from dbt. ", ) def dbt_manifest(self): """Loads the dbt manifest.""" # Set dbt not to run tracking. We don't load # a full project and so some tracking routines # may fail. from dbt.tracking import do_not_track do_not_track() # dbt 0.20.* and onward from dbt.parser.manifest import ManifestLoader return ManifestLoader.get_full_manifest(self.dbt_config) @cached_property def dbt_selector_method(self): """Loads the dbt selector method.""" if self.formatter: # pragma: no cover TODO? self.formatter.dispatch_compilation_header( "dbt templater", "Compiling dbt project..." 
) from dbt.graph.selector_methods import MethodManager as DbtSelectorMethodManager from dbt.graph.selector_methods import MethodName as DbtMethodName selector_methods_manager = DbtSelectorMethodManager( self.dbt_manifest, previous_state=None ) _dbt_selector_method = selector_methods_manager.get_method( DbtMethodName.Path, method_arguments=[] ) if self.formatter: # pragma: no cover TODO? self.formatter.dispatch_compilation_header( "dbt templater", "Project Compiled." ) return _dbt_selector_method def _get_profiles_dir(self): """Get the dbt profiles directory from the configuration. The default is `~/.dbt` but we use the default_profiles_dir from the dbt library to support a change of default in the future, as well as to support the same overwriting mechanism as dbt (currently an environment variable). """ # Where default_profiles_dir is available, use it. For dbt 1.2 and # earlier, it is not, so fall back to the flags option which should # still be available in those versions. from dbt import flags from dbt.cli.resolvers import default_profiles_dir default_dir = ( default_profiles_dir() if default_profiles_dir is not None else flags.PROFILES_DIR ) dbt_profiles_dir = os.path.abspath( os.path.expanduser( self.sqlfluff_config.get_section( (self.templater_selector, self.name, "profiles_dir") ) or (os.getenv("DBT_PROFILES_DIR") or default_dir) ) ) if not os.path.exists(dbt_profiles_dir): templater_logger.error( f"dbt_profiles_dir: {dbt_profiles_dir} could not be accessed. " "Check it exists." ) return dbt_profiles_dir def _get_project_dir(self): """Get the dbt project directory from the configuration. Defaults to the working directory. """ dbt_project_dir = os.path.abspath( os.path.expanduser( self.sqlfluff_config.get_section( (self.templater_selector, self.name, "project_dir") ) or os.getenv("DBT_PROJECT_DIR") or os.getcwd() ) ) if not os.path.exists(dbt_project_dir): templater_logger.error( f"dbt_project_dir: {dbt_project_dir} could not be accessed. " "Check it exists." ) return dbt_project_dir def _get_profile(self): """Get a dbt profile name from the configuration.""" return self.sqlfluff_config.get_section( (self.templater_selector, self.name, "profile") ) def _get_target(self): """Get a dbt target name from the configuration.""" return self.sqlfluff_config.get_section( (self.templater_selector, self.name, "target") ) def _get_target_path(self): """Get a dbt target path from the configuration.""" return self.sqlfluff_config.get_section( (self.templater_selector, self.name, "target_path") ) def _get_cli_vars(self) -> dict: cli_vars = self.sqlfluff_config.get_section( (self.templater_selector, self.name, "context") ) return cli_vars if cli_vars else {} def _get_dbt_skip_compilation_error(self) -> bool: return self.sqlfluff_config.get( val="dbt_skip_compilation_error", section=(self.templater_selector, self.name), default=True, ) def sequence_files( self, fnames: list[str], config=None, formatter=None ) -> Iterator[str]: """Reorder fnames to process dependent files first. This avoids errors when an ephemeral model is processed before use. 
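        For example (the model names here are purely illustrative): if
        ``model_b.sql`` selects from an ephemeral ``model_a.sql``, then
        ``model_a.sql`` is yielded before ``model_b.sql``, regardless of
        the order in which the two paths appear in ``fnames``.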
""" if formatter: # pragma: no cover formatter.dispatch_compilation_header("dbt templater", "Sorting Nodes...") # Initialise config if not already done self.sqlfluff_config = config if not self.project_dir: self.project_dir = self._get_project_dir() if not self.profiles_dir: self.profiles_dir = self._get_profiles_dir() # Populate full paths for selected files full_paths: dict[str, str] = {} selected_files = set() for fname in fnames: fpath = os.path.join(self.working_dir, fname) full_paths[fpath] = fname selected_files.add(fpath) ephemeral_nodes: dict[str, tuple[str, Any]] = {} # Extract the ephemeral models for key, node in self.dbt_manifest.nodes.items(): if node.config.materialized == "ephemeral": # The key is the full filepath. # The value tuple, with the filepath and a list of dependent keys ephemeral_nodes[key] = ( os.path.join(self.project_dir, node.original_file_path), node.depends_on.nodes, ) # Yield ephemeral nodes first. We use a deque for efficient re-queuing. # We iterate through the deque, yielding any nodes without dependents, # or where those dependents have already yielded, first. The original # mapping is still used to hold the metadata on each key. already_yielded = set() ephemeral_buffer: Deque[str] = deque(ephemeral_nodes.keys()) while ephemeral_buffer: key = ephemeral_buffer.popleft() fpath, dependents = ephemeral_nodes[key] # If it's not in our selection, skip it if fpath not in selected_files: templater_logger.debug("- Purging unselected ephemeral: %r", fpath) # If there are dependent nodes in the set, don't process it yet. elif any( dependent in ephemeral_buffer for dependent in dependents ): # pragma: no cover templater_logger.debug( "- Requeuing ephemeral with dependents: %r", fpath ) # Requeue it for later ephemeral_buffer.append(key) # Otherwise yield it. else: templater_logger.debug("- Yielding Ephemeral: %r", fpath) yield full_paths[fpath] already_yielded.add(full_paths[fpath]) for fname in fnames: if fname not in already_yielded: yield fname # Dedupe here so we don't yield twice already_yielded.add(fname) else: templater_logger.debug( "- Skipping yield of previously sequenced file: %r", fname ) @large_file_check @handle_dbt_errors( SQLTemplaterError, "Error received from dbt during project compilation. " ) def process( self, *, fname: str, in_str: Optional[str] = None, config: Optional["FluffConfig"] = None, formatter: Optional["OutputStreamFormatter"] = None, ) -> tuple[TemplatedFile, list[SQLTemplaterError]]: """Compile a dbt model and return the compiled SQL. Args: fname: Path to dbt model(s) in_str: fname contents using configured encoding config: A specific config to use for this templating operation. Only necessary for some templaters. formatter: Optional object for output. """ # Stash the formatter if provided to use in cached methods. self.formatter = formatter self.sqlfluff_config = config self.project_dir = self._get_project_dir() self.profiles_dir = self._get_profiles_dir() self.dbt_skip_compilation_error = self._get_dbt_skip_compilation_error() fname_absolute_path = os.path.abspath(fname) if fname != "stdin" else fname # NOTE: dbt exceptions are caught and handled safely for pickling by the outer # `handle_dbt_errors` decorator. try: os.chdir(self.project_dir) return self._unsafe_process(fname_absolute_path, in_str, config) finally: os.chdir(self.working_dir) def _find_node(self, fname, config=None): if not config: # pragma: no cover raise ValueError( "For the dbt templater, the `process()` method " "requires a config object." 
) if not fname: # pragma: no cover raise ValueError( "For the dbt templater, the `process()` method requires a file name" ) elif fname == "stdin": # pragma: no cover raise SQLFluffUserError( "The dbt templater does not support stdin input, provide a path instead" ) selected = self.dbt_selector_method.search( included_nodes=self.dbt_manifest.nodes, # Selector needs to be a relative path selector=os.path.relpath(fname, start=os.getcwd()), ) results = [self.dbt_manifest.expect(uid) for uid in selected] if not results: skip_reason = self._find_skip_reason(fname) if skip_reason: raise SQLFluffSkipFile( f"Skipped file {fname} because it is {skip_reason}" ) raise SQLFluffSkipFile( "File %s was not found in dbt project" % fname ) # pragma: no cover return results[0] def _find_skip_reason(self, fname) -> Optional[str]: """Return string reason if model okay to skip, otherwise None.""" # Scan macros. abspath = os.path.abspath(fname) for macro in self.dbt_manifest.macros.values(): if os.path.abspath(macro.original_file_path) == abspath: return "a macro" # Scan disabled nodes. for nodes in self.dbt_manifest.disabled.values(): for node in nodes: if os.path.abspath(node.original_file_path) == abspath: return "disabled" return None # pragma: no cover def _unsafe_process(self, fname, in_str=None, config=None): original_file_path = os.path.relpath(fname, start=os.getcwd()) # Below, we monkeypatch Environment.from_string() to intercept when dbt # compiles (i.e. runs Jinja) to expand the "node" corresponding to fname. # We do this to capture the Jinja context at the time of compilation, i.e.: # - Jinja Environment object # - Jinja "globals" dictionary # # This info is captured by the "make_template()" function, which in # turn is used by our parent class' (JinjaTemplater) slice_file() # function. old_from_string = Environment.from_string # Start with render_func undefined. We need to know whether it has been # overwritten. render_func: Optional[Callable[[str], str]] = None if self.dbt_version_tuple >= (1, 3): compiled_sql_attribute = "compiled_code" raw_sql_attribute = "raw_code" else: # pragma: no cover compiled_sql_attribute = "compiled_sql" raw_sql_attribute = "raw_sql" def from_string(*args, **kwargs): """Replaces (via monkeypatch) the jinja2.Environment function.""" nonlocal render_func # Is it processing the node corresponding to fname? globals = kwargs.get("globals") if globals: model = globals.get("model") if model: if model.get("original_file_path") == original_file_path: # Yes. Capture the important arguments and create # a render_func() closure with overwrites the variable # from within _unsafe_process when from_string is run. env = args[0] globals = args[2] if len(args) >= 3 else kwargs["globals"] # Overwrite the outer render_func def render_func(in_str): env.add_extension(SnapshotExtension) template = env.from_string(in_str, globals=globals) if self.dbt_version_tuple >= (1, 8): # dbt 1.8 requires a context for rendering the template. return template.render(globals) return template.render() return old_from_string(*args, **kwargs) # NOTE: We need to inject the project root here in reaction to the # breaking change upstream with dbt. Coverage works in 1.5.2, but # appears to no longer be covered in 1.5.3. # This change was backported and so exists in some versions # but not others. When not present, no additional action is needed. 
# https://github.com/dbt-labs/dbt-core/pull/7949 # On the 1.5.x branch this was between 1.5.1 and 1.5.2 try: from dbt.task.contextvars import cv_project_root cv_project_root.set(self.project_dir) # pragma: no cover except ImportError: cv_project_root = None # NOTE: _find_node will raise a compilation exception if the project # fails to compile, and we catch that in the outer `.process()` method. node = self._find_node(fname, config) templater_logger.debug( "_find_node for path %r returned object of type %s.", fname, type(node) ) save_ephemeral_nodes = dict( (k, v) for k, v in self.dbt_manifest.nodes.items() if v.config.materialized == "ephemeral" and not getattr(v, "compiled", False) ) if self.dbt_version_tuple >= (1, 8): from dbt_common.exceptions import UndefinedMacroError else: from dbt.exceptions import UndefinedMacroError with self.connection(): # Apply the monkeypatch. Environment.from_string = from_string try: node = self.dbt_compiler.compile_node( node=node, manifest=self.dbt_manifest, ) except UndefinedMacroError as err: # The explanation on the undefined macro error is already fairly # explanatory, so just pass it straight through. raise SQLTemplaterError(str(err)) except Exception as err: # This happens if there's a fatal error at compile time. That # can sometimes happen for SQLFluff related reasons (it used # to happen if we tried to compile ephemeral models in the # wrong order), but more often because a macro tries to query # a table at compile time which doesn't exist. if self.dbt_skip_compilation_error is False: raise SQLTemplaterError(str(err)) raise SQLFluffSkipFile( f"Skipped file {fname} because dbt raised a fatal " f"exception during compilation: {err!s}" ) # NOTE: We don't do a `raise ... from err` here because the # full trace is not useful for most users. In debugging # issues here it may be valuable to add the `from err` part # after the above `raise` statement. finally: # Undo the monkeypatch. Environment.from_string = old_from_string if hasattr(node, "injected_sql"): # If injected SQL is present, it contains a better picture # of what will actually hit the database (e.g. with tests). # However it's not always present. compiled_sql = node.injected_sql # pragma: no cover else: compiled_sql = getattr(node, compiled_sql_attribute) raw_sql = getattr(node, raw_sql_attribute) if not compiled_sql: # pragma: no cover raise SQLTemplaterError( "dbt templater compilation failed silently, check your " "configuration by running `dbt compile` directly." ) source_dbt_sql = in_str if not source_dbt_sql.rstrip().endswith("-%}"): n_trailing_newlines = len(source_dbt_sql) - len( source_dbt_sql.rstrip("\n") ) else: # Source file ends with right whitespace stripping, so there's # no need to preserve/restore trailing newlines, as they would # have been removed regardless of dbt's # keep_trailing_newlines=False behavior. n_trailing_newlines = 0 templater_logger.debug( " Trailing newline count in source dbt model: %r", n_trailing_newlines, ) templater_logger.debug(" Raw SQL before compile: %r", source_dbt_sql) templater_logger.debug(" Node raw SQL: %r", raw_sql) templater_logger.debug(" Node compiled SQL: %r", compiled_sql) # When using dbt-templater, trailing newlines are ALWAYS REMOVED during # compiling. Unless fixed (like below), this will cause: # 1. Assertion errors in TemplatedFile, when it sanity checks the # contents of the sliced_file array. # 2. 
LT12 linting errors when running "sqlfluff lint foo_bar.sql"
        # since the linter will use the compiled code with the newlines
        # removed.
        # 3. "No newline at end of file" warnings in Git/GitHub since
        # sqlfluff uses the compiled SQL to write fixes back to the
        # source SQL in the dbt model.
        #
        # The solution is as follows (note that both the raw and compiled
        # files have had trailing newline(s) removed by the dbt-templater):
        # 1. Check for trailing newlines before compiling by looking at the
        # raw SQL in the source dbt file. Remember the count of trailing
        # newlines.
        # 2. Set node.raw_sql/node.raw_code to the original source file contents.
        # 3. Append the count from #1 above to compiled_sql. (In
        # production, slice_file() does not usually use this string,
        # but some test scenarios do.)
        setattr(node, raw_sql_attribute, source_dbt_sql)

        # So for files that have no templated elements in them, render_func
        # will still be null at this point. If so, we replace it with a
        # function which just directly returns the input, but _also_ reset the
        # trailing newlines counter because they also won't have been stripped.
        if render_func is None:
            # NOTE: In this case, we shouldn't re-add newlines, because they
            # were never taken away.
            n_trailing_newlines = 0

            # Overwrite the render_func placeholder.
            def render_func(in_str):
                """A render function which just returns the input."""
                return in_str

        # At this point assert that we _have_ a render_func
        assert render_func is not None

        # TRICKY: dbt configures Jinja2 with keep_trailing_newline=False.
        # As documented (https://jinja.palletsprojects.com/en/3.0.x/api/),
        # this flag's behavior is: "Preserve the trailing newline when
        # rendering templates. The default is False, which causes a single
        # newline, if present, to be stripped from the end of the template."
        #
        # Below, we use "append_to_templated" to effectively "undo" this.
        raw_sliced, sliced_file, templated_sql = self.slice_file(
            source_dbt_sql,
            render_func=render_func,
            config=config,
            append_to_templated="\n" if n_trailing_newlines else "",
        )
        # :HACK: If calling compile_node() compiled any ephemeral nodes,
        # restore them to their earlier state. This prevents a runtime error
        # in the dbt "_inject_ctes_into_sql()" function that occurs with
        # 2nd-level ephemeral model dependencies (e.g. A -> B -> C, where
        # both B and C are ephemeral). Perhaps there is a better way to do
        # this, but this seems good enough for now.
        for k, v in save_ephemeral_nodes.items():
            if getattr(self.dbt_manifest.nodes[k], "compiled", False):
                self.dbt_manifest.nodes[k] = v

        return (
            TemplatedFile(
                source_str=source_dbt_sql,
                templated_str=templated_sql,
                fname=fname,
                sliced_file=sliced_file,
                raw_sliced=raw_sliced,
            ),
            # No violations returned in this way.
            [],
        )

    @contextmanager
    def connection(self):
        """Context manager that manages a dbt connection, if needed."""
        from dbt.adapters.factory import get_adapter

        # We have to register the connection in dbt >= 1.0.0 ourselves.
        # In previous versions, we relied on the functionality removed in
        # https://github.com/dbt-labs/dbt-core/pull/4062.
        adapter = self.adapters.get(self.project_dir)
        if adapter is None:
            adapter = get_adapter(self.dbt_config)
            self.adapters[self.project_dir] = adapter
        adapter.acquire_connection("master")
        if self.dbt_version_tuple >= (1, 8):
            # See notes from https://github.com/dbt-labs/dbt-adapters/discussions/87
            # about the decoupling of the adapters from core.
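            # In outline (a comment-only sketch of exactly the calls made just
            # below, using the dbt >= 1.8 API as imported on the next lines;
            # `generator_fn` is a stand-in name for illustration):
            #
            #     adapter.set_macro_resolver(manifest)
            #     adapter.set_macro_context_generator(generator_fn)
            #     adapter.set_relations_cache(manifest.nodes.values())
            #
            # Older dbt versions instead accept the manifest directly via
            # set_relations_cache(), as in the `else` branch below.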
from dbt.context.providers import generate_runtime_macro_context adapter.set_macro_resolver(self.dbt_manifest) adapter.set_macro_context_generator(generate_runtime_macro_context) adapter.set_relations_cache(self.dbt_manifest.nodes.values()) else: adapter.set_relations_cache(self.dbt_manifest) yield # :TRICKY: Once connected, we never disconnect. Making multiple # connections during linting has proven to cause major performance # issues. class SnapshotExtension(StandaloneTag): """Dummy "snapshot" tags so raw dbt templates will parse. Context: dbt snapshots (https://docs.getdbt.com/docs/building-a-dbt-project/snapshots/#example) use custom Jinja "snapshot" and "endsnapshot" tags. However, dbt does not actually register those tags with Jinja. Instead, it finds and removes these tags during a preprocessing step. However, DbtTemplater needs those tags to actually parse, because JinjaTracer creates and uses Jinja to process another template similar to the original one. """ tags = {"snapshot", "endsnapshot"} def render(self, format_string=None): """Dummy method that renders the tag.""" return "" sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/000077500000000000000000000000001503426445100223725ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/.gitignore000066400000000000000000000001041503426445100243550ustar00rootroot00000000000000# Ignore the temporary folder for the dbt project temp_dbt_project/ sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/__init__.py000066400000000000000000000000311503426445100244750ustar00rootroot00000000000000"""Init PY for tests.""" sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/conftest.py000066400000000000000000000053471503426445100246020ustar00rootroot00000000000000"""pytest fixtures.""" import os import shutil import subprocess from importlib import metadata from pathlib import Path import pytest from sqlfluff.core import FluffConfig from sqlfluff_templater_dbt.templater import DbtTemplater def pytest_report_header() -> list[str]: """Return a list of strings to be displayed in the header of the report.""" return [ f"dbt-core: {metadata.version('dbt-core')}", f"dbt-postgres: {metadata.version('dbt-postgres')}", ] @pytest.fixture(scope="session", autouse=True) def dbt_flags(): """Set dbt flags for dbt templater tests.""" # Setting this to True disables some code in dbt-core that randomly runs # some test code in core/dbt/parser/models.py, ModelParser. render_update(). 
    # We've seen occasional runtime errors from that code:
    # TypeError: cannot pickle '_thread.RLock' object
    os.environ["DBT_USE_EXPERIMENTAL_PARSER"] = "True"


@pytest.fixture()
def dbt_fluff_config(dbt_project_folder):
    """Returns SQLFluff dbt configuration dictionary."""
    return {
        "core": {
            "templater": "dbt",
            "dialect": "postgres",
        },
        "templater": {
            "dbt": {
                "profiles_dir": f"{dbt_project_folder}/profiles_yml",
                "project_dir": f"{dbt_project_folder}/dbt_project",
            },
        },
    }


@pytest.fixture()
def project_dir(dbt_fluff_config):
    """Returns the dbt project directory."""
    return dbt_fluff_config["templater"]["dbt"]["project_dir"]


@pytest.fixture()
def profiles_dir(dbt_fluff_config):
    """Returns the dbt profiles directory."""
    return dbt_fluff_config["templater"]["dbt"]["profiles_dir"]


@pytest.fixture()
def dbt_templater():
    """Returns an instance of the DbtTemplater."""
    return FluffConfig(
        overrides={"dialect": "ansi", "templater": "dbt"}
    ).get_templater()


@pytest.fixture(scope="session")
def dbt_project_folder():
    """Fixture for a temporary dbt project directory."""
    src = Path("plugins/sqlfluff-templater-dbt/test/fixtures/dbt")
    tmp = Path("plugins/sqlfluff-templater-dbt/test/temp_dbt_project")
    tmp.mkdir(exist_ok=True)
    shutil.copytree(src, tmp, dirs_exist_ok=True)
    if DbtTemplater().dbt_version_tuple >= (1, 8):
        # Configuration overrides for dbt 1.8+
        dbt180_fixtures = src.with_name("dbt180")
        shutil.copytree(dbt180_fixtures, tmp, dirs_exist_ok=True)

    subprocess.Popen(
        [
            "dbt",
            "deps",
            "--project-dir",
            f"{tmp}/dbt_project",
            "--profiles-dir",
            f"{tmp}/profiles_yml",
        ]
    ).wait(120)

    # Placeholder value for testing
    os.environ["passed_through_env"] = "_"

    yield tmp

    shutil.rmtree(tmp)
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/000077500000000000000000000000001503426445100242435ustar00rootroot00000000000000
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/000077500000000000000000000000001503426445100250145ustar00rootroot00000000000000
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff000066400000000000000000000002251503426445100266360ustar00rootroot00000000000000
[sqlfluff]
templater = dbt
dialect = postgres
# exclude_rules = LT12

[sqlfluff:templater:dbt]
profiles_dir = profiles_yml
project_dir = dbt_project
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/000077500000000000000000000000001503426445100273135ustar00rootroot00000000000000
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/.gitignore000066400000000000000000000001041503426445100312760ustar00rootroot00000000000000
target/
# dbt <1.0.0
dbt_modules/
# dbt >=1.0.0
dbt_packages/
logs/
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/.sqlfluffignore000066400000000000000000000000761503426445100323450ustar00rootroot00000000000000
# dbt <1.0.0
dbt_modules/
# dbt >=1.0.0
dbt_packages/
target/
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/dbt_project.yml000066400000000000000000000005141503426445100323350ustar00rootroot00000000000000
name: 'my_new_project'
version: '1.0.0'
config-version: 2
profile: 'default'
test-paths: ["tests"]

models:
  my_new_project:
    materialized: view

vars:
  my_new_project:
    # Default date stamp of run
    ds: "2020-01-01"
    # passed_through_cli: testing for vars passed through cli('--vars' option) rather than dbt_project
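    # Illustrative note (not part of this fixture): when linting through
    # SQLFluff, the equivalent of dbt's `--vars` is the templater "context"
    # mapping. The test suite sets it as config["templater"]["dbt"]["context"],
    # e.g. (values here are placeholders for illustration):
    #   {"templater": {"dbt": {"context": {"passed_through_cli": "testing"}}}}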
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/000077500000000000000000000000001503426445100305775ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/echo.sql000066400000000000000000000000651503426445100322370ustar00rootroot00000000000000{% macro echo(colname) %} {{colname}} {% endmacro %} my_default_config.sql000066400000000000000000000001471503426445100347210ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros-- Issue #335 {% macro my_default_config(type) %} {{ config(materialized="view") }} {% endmacro %} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/my_headers.sql000066400000000000000000000001621503426445100334370ustar00rootroot00000000000000-- Issue #516 {% macro my_headers() %} -- Materialization: {{ config.get('materialization') }} {% endmacro %} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/000077500000000000000000000000001503426445100305765ustar00rootroot00000000000000depends_on_ephemeral/000077500000000000000000000000001503426445100346575ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/modelsa.sql000066400000000000000000000000641503426445100356200ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralselect 1 from {{ source('jaffle_shop', 'orders') }} b.sql000066400000000000000000000000521503426445100356160ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralselect * from {{ ref('c') }} where id = 1 c.sql000066400000000000000000000000671503426445100356250ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeral{{ config(materialized='ephemeral') }} select 1 as id d.sql000066400000000000000000000000221503426445100356150ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralSELECT 1 FROM bar ephemeral_3_level/000077500000000000000000000000001503426445100340725ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/modelsfact_product_contract_values.sql000066400000000000000000000001561503426445100425460ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT * FROM {{ ref('stg_creditview_products') }} stg_creditview_products.sql000066400000000000000000000001651503426445100415620ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT * FROM {{ ref('stg_max_product_contract_seats') }} stg_max_product_contract_seats.sql000066400000000000000000000002261503426445100431110ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT 'Zaphod Breedlebrox' as numero_uno, 'Ford Prefect' as two, 'Vogon poetry' as trois 
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/000077500000000000000000000000001503426445100336225ustar00rootroot00000000000000AM03_test.sql000066400000000000000000000001251503426445100357610ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect birth_date, name from cows order by birth_date asc, name desc ST06_test.sql000066400000000000000000000003111503426445100360120ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- ST06 should ignore this as one of the select targets uses a macro select {{ dbt_utils.surrogate_key(['spots', 'moos']) }} as spot_moo_id, date(birth_date) as birth_date, name from cows access_graph_nodes.sql000066400000000000000000000004371503426445100401020ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Verify that the dbt context variable graph is accessible {% set graph_node = graph.nodes.values() | selectattr('name', 'equalto', 'fact_product_contract_values') | first -%} {%- set num_parents = graph_node.depends_on.nodes | length -%} select {{ num_parents }} as number_of_parents call_statement.sql000066400000000000000000000002761503426445100372700ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{% call statement('unique_keys', fetch_result=True) %} select 'tests' as key_name {% endcall %} {% set unique_keys = load_result('unique_keys') %} select 1, '{{ unique_keys.data[0][0] }}' disabled_model.sql000066400000000000000000000000461503426445100372130ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(enabled=false) }} select 1 ends_with_whitespace_stripping.sql000066400000000000000000000001221503426445100425560ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect 1 {% if target.database == 'test' -%} union all select 2 {%- endif -%} incremental.sql000066400000000000000000000011521503426445100365640ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- https://github.com/sqlfluff/sqlfluff/issues/780 {{ config( materialized = 'incremental', unique_key='product_id' ) }} select {#- Attributes #} products.product_id, products.valid_date_local, products._fivetran_deleted, dispensaries.id from products inner join dispensaries on products.dispensary_id = dispensaries.dispensary_id and products.valid_date_local = dispensaries.valid_date_local where not products._fivetran_deleted {% if is_incremental() -%} and products.valid_date_local >= ( select max(valid_date_local) from {{ this }}) {% endif %} indent_loop_4.sql000066400000000000000000000003761503426445100370270ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- This file tests the indentation of line 6, which isn't rendered on the last pass. 
select a, {%- for i in range(1, 3) -%} 1 as b_{{ i }} {% if not loop.last %},{% endif %} {% endfor %} from {{ source("jaffle_shop", "orders") }} indent_loop_8.sql000066400000000000000000000004021503426445100370210ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- This file tests the indentation of line 6, which isn't rendered on the last pass. select a, {%- for i in range(1, 3) -%} 1 as b_{{ i }} {% if not loop.last %},{% endif %} {% endfor %} from {{ source("jaffle_shop", "orders") }} issue_1608.sql000066400000000000000000000003411503426445100360700ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialized='view') }} with cte_example as ( select 1 as col_name ), final as ( select col_name, {{- echo('col_name') -}} as col_name2 from cte_example ) select * from final issue_1608.sql.after000066400000000000000000000003401503426445100371670ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialized='view') }} with cte_example as ( select 1 as col_name ), final as ( select col_name, {{- echo('col_name') -}} as col_name2 from cte_example ) select * from final last_day.sql000066400000000000000000000001661503426445100360670ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectwith last_day_macro as ( select {{ dbt_utils.last_day('2021-11-05', 'month') }} ) select * from last_day_macro macro_in_macro.sql000066400000000000000000000002141503426445100372310ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Issue #335 {{ my_default_config("table") }} with source_data as ( select "hello_world" as hello_world ) select * from source_data multiple_trailing_newline.sql000066400000000000000000000000271503426445100415300ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect a from table_a operator_errors.sql000066400000000000000000000000331503426445100375070ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectSELECT 1+2 FROM some_table select_distinct_group_by.sql000066400000000000000000000001151503426445100413470ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect distinct a, b, c from table_a {{ dbt_utils.group_by(3) }} single_trailing_newline.sql000066400000000000000000000000261503426445100411550ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect a from table_a src_jaffle_shop.yml000066400000000000000000000001441503426445100374140ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectversion: 2 sources: - name: jaffle_shop tables: - name: orders - name: customers templated_inside_comment.sql000066400000000000000000000000331503426445100413140ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{# {{ 1 + 2 }} #} select 1 
trailing_newlines.sql000066400000000000000000000000341503426445100377760ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ "" }} select 1 use_dbt_utils.sql000066400000000000000000000007411503426445100371330ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- get_query_results_as_dict() verifies SQLFluff can successfully use dbt_utils -- functions that require a database connection. -- https://github.com/sqlfluff/sqlfluff/issues/2297 {% set saved_var = dbt_utils.get_query_results_as_dict( "SELECT schema_name FROM information_schema.schemata" ) %} with orders as ( select * from {{ source("jaffle_shop", "orders") }} ) select a, b, c, count(*) as occurrences from orders {{ dbt_utils.group_by(3) }} use_headers.sql000066400000000000000000000001201503426445100365440ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialization="view") }} {{ my_headers() }} select * from table_a use_var.sql000066400000000000000000000001011503426445100357200ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Issue #333 select * from table_a where ds = '{{ var("ds") }}' utf8/000077500000000000000000000000001503426445100344315ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project.sqlfluff000066400000000000000000000000701503426445100362510ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8[sqlfluff] dialect = ansi rules = CP01 encoding = utf-8 test.sql000066400000000000000000000002111503426445100361230ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8{{ config(materialized='table') }} SELECT FIRST_COLUMN, SECOND_COLUMN FROM TABLE_TO_TEST where TYPE_OF_TEST = 'TESTING ÅÄÖ' test.sql.fixed000066400000000000000000000002111503426445100372210ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8{{ config(materialized='table') }} SELECT FIRST_COLUMN, SECOND_COLUMN FROM TABLE_TO_TEST WHERE TYPE_OF_TEST = 'TESTING ÅÄÖ' sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/vars_from_cli.sql000066400000000000000000000000661503426445100341460ustar00rootroot00000000000000-- Issue #1262 SELECT {{ var('passed_through_cli') }} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/vars_from_env.sql000066400000000000000000000000531503426445100341630ustar00rootroot00000000000000SELECT {{ env_var('passed_through_env') }} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/package-lock.yml000066400000000000000000000001611503426445100323550ustar00rootroot00000000000000packages: - package: dbt-labs/dbt_utils version: 0.8.0 sha1_hash: 7d83bffde03c3384a3a5621255309bd45e1bd150 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml000066400000000000000000000003331503426445100316130ustar00rootroot00000000000000packages: # Reference: dbt_utils compatibility matrix: # https://docs.google.com/spreadsheets/d/1RoDdC69auAtrwiqmkRsgcFdZ3MdNpeKcJrWkmEpXVIs/edit#gid=0 - package: dbt-labs/dbt_utils version: ["0.8.0"] 
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots/000077500000000000000000000000001503426445100313355ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots/issue_1771.sql000066400000000000000000000003461503426445100336700ustar00rootroot00000000000000{% snapshot dim_aggregated_brand_hierarchy_snapshot %} {{ config( strategy='check', unique_key='c1', target_schema='snapshots', check_cols='all' ) }} select c1 from foo {% endsnapshot %} issue_1771.sql.after000066400000000000000000000003561503426445100347120ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots{% snapshot dim_aggregated_brand_hierarchy_snapshot %} {{ config( strategy='check', unique_key='c1', target_schema='snapshots', check_cols='all' ) }} select c1 from foo {% endsnapshot %} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/tests/000077500000000000000000000000001503426445100304555ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/tests/test.sql000066400000000000000000000000261503426445100321530ustar00rootroot00000000000000select a from table_a sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/000077500000000000000000000000001503426445100275105ustar00rootroot00000000000000compile_missing_table.sql000066400000000000000000000003111503426445100344750ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models-- This Query triggers an exception at compilation time because it runs -- a query *at compile time*, which will fail. {% set results = run_query('select 1 from this_table_does_not_exist') %} select 1 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/compiler_error.sql000066400000000000000000000001361503426445100332540ustar00rootroot00000000000000{% set cols = ["a", "b", "b"] %} select {% for col in cols %} {{ col }} from table_a exception_connect_database.sql000066400000000000000000000003141503426445100355030ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models select {%- for col in dbt_utils.get_column_values( table=ref("select_distinct_group_by"), column="ids" ) %} {{ col }}{{ "," if not loop.last }} {%- endfor %} from table_a sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/unknown_macro.sql000066400000000000000000000002001503426445100331010ustar00rootroot00000000000000-- Refer to a macro which doesn't exist -- https://github.com/sqlfluff/sqlfluff/issues/3849 select * from {{ invalid_macro() }} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/unknown_ref.sql000066400000000000000000000002111503426445100325560ustar00rootroot00000000000000-- Refer to a relation which doesn't exist -- https://github.com/sqlfluff/sqlfluff/issues/3849 select * from {{ ref("i_do_not_exist") }} sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml/000077500000000000000000000000001503426445100275205ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml/profiles.yml000066400000000000000000000004421503426445100320660ustar00rootroot00000000000000default: target: dev outputs: dev: type: postgres host: "{{ env_var('POSTGRES_HOST', 'localhost') }}" user: postgres pass: password port: 5432 dbname: postgres schema: dbt_alice threads: 4 config: 
send_anonymous_usage_stats: false sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail/000077500000000000000000000000001503426445100305135ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail/profiles.yml000066400000000000000000000003761503426445100330670ustar00rootroot00000000000000default: target: dev outputs: dev: type: postgres host: localhost user: postgres pass: password port: 2345 dbname: postgres schema: dbt_alice threads: 4 config: send_anonymous_usage_stats: false sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/000077500000000000000000000000001503426445100304135ustar00rootroot00000000000000access_graph_nodes.sql000066400000000000000000000001321503426445100346630ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output-- Verify that the dbt context variable graph is accessible select 1 as number_of_parents sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/call_statement.sql000066400000000000000000000000241503426445100341270ustar00rootroot00000000000000 select 1, 'tests' sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0/000077500000000000000000000000001503426445100331275ustar00rootroot00000000000000last_day.sql000066400000000000000000000003301503426445100353650ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0with last_day_macro as ( select cast( date_trunc('month', 2021-11-05) + ((interval '1 month') * (1)) + ((interval '1 day') * (-1)) as date) ) select * from last_day_macro ends_with_whitespace_stripping.sql000066400000000000000000000000111503426445100373440ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_outputselect 1 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/last_day.sql000066400000000000000000000003441503426445100327350ustar00rootroot00000000000000with last_day_macro as ( select cast( date_trunc('month', 2021-11-05) + ((interval '1 month') * (1)) + ((interval '1 day') * (-1)) as date) ) select * from last_day_macro sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sql000066400000000000000000000001621503426445100341030ustar00rootroot00000000000000-- Issue #335 with source_data as ( select "hello_world" as hello_world ) select * from source_data templated_inside_comment.sql000066400000000000000000000000121503426445100361020ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output select 1 trailing_newlines.sql000066400000000000000000000000241503426445100345660ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output select 1 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_dbt_utils.sql000066400000000000000000000005261503426445100340040ustar00rootroot00000000000000-- get_query_results_as_dict() verifies SQLFluff can successfully use dbt_utils -- functions that require a database connection. 
-- https://github.com/sqlfluff/sqlfluff/issues/2297 with orders as ( select * from "postgres"."jaffle_shop"."orders" ) select a, b, c, count(*) as occurrences from orders group by 1,2,3 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_headers.sql000066400000000000000000000000701503426445100334200ustar00rootroot00000000000000 -- Materialization: view select * from table_a sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_var.sql000066400000000000000000000000741503426445100326010ustar00rootroot00000000000000-- Issue #333 select * from table_a where ds = '2020-01-01' sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/test.sql000066400000000000000000000001411503426445100265100ustar00rootroot00000000000000 with dbt__CTE__INTERNAL_test as ( select * from a )select count(*) from dbt__CTE__INTERNAL_test sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/000077500000000000000000000000001503426445100252455ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/dbt_project.yml000066400000000000000000000005701503426445100302710ustar00rootroot00000000000000name: 'my_new_project' version: '1.0.0' config-version: 2 profile: 'default' test-paths: ["tests"] models: my_new_project: materialized: view vars: my_new_project: # Default date stamp of run ds: "2020-01-01" # passed_through_cli: testing for vars passed through cli('--vars' option) rather than dbt_project flags: send_anonymous_usage_stats: false sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/profiles_yml/000077500000000000000000000000001503426445100277515ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/profiles_yml/profiles.yml000066400000000000000000000003651503426445100323230ustar00rootroot00000000000000default: target: dev outputs: dev: type: postgres host: "{{ env_var('POSTGRES_HOST', 'localhost') }}" user: postgres pass: password port: 5432 dbname: postgres schema: dbt_alice threads: 4 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/profiles_yml_fail/000077500000000000000000000000001503426445100307445ustar00rootroot00000000000000sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/fixtures/dbt180/profiles_yml_fail/profiles.yml000066400000000000000000000003211503426445100333060ustar00rootroot00000000000000default: target: dev outputs: dev: type: postgres host: localhost user: postgres pass: password port: 2345 dbname: postgres schema: dbt_alice threads: 4 sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/linter_test.py000066400000000000000000000065621503426445100253110ustar00rootroot00000000000000"""The Test file for the linter class.""" import os import os.path import shutil import pytest from sqlfluff.cli.commands import lint from sqlfluff.core import FluffConfig, Linter from sqlfluff.utils.testing.cli import invoke_assert_code @pytest.mark.parametrize( "path", ["models/my_new_project/disabled_model.sql", "macros/echo.sql"] ) def test__linter__skip_file(path, project_dir, dbt_fluff_config): # noqa """Test that the linter skips disabled dbt models and macros.""" conf = FluffConfig(configs=dbt_fluff_config) lntr = Linter(config=conf) model_file_path = os.path.join(project_dir, path) linted_path = lntr.lint_path(path=model_file_path) # Check that the file is still there assert len(linted_path.files) == 1 linted_file = linted_path.files[0] # Normalise paths to control for OS variance assert os.path.normpath(linted_file.path) 
== os.path.normpath(model_file_path) assert not linted_file.templated_file assert not linted_file.tree def test__linter__lint_ephemeral_3_level(project_dir, dbt_fluff_config): """Test linter can lint a project with 3-level ephemeral dependencies.""" # This was previously crashing inside dbt, in a function named # inject_ctes_into_sql(). (issue 2671). conf = FluffConfig(configs=dbt_fluff_config) lntr = Linter(config=conf) model_file_path = os.path.join(project_dir, "models/ephemeral_3_level") lntr.lint_path(path=model_file_path) def test__linter__config_pairs(dbt_fluff_config): # noqa """Test that the dbt templater returns version information in it's config.""" conf = FluffConfig(configs=dbt_fluff_config) lntr = Linter(config=conf) # NOTE: This method is called within the config readout. assert lntr.templater.config_pairs() == [ ("templater", "dbt"), ("dbt", lntr.templater.dbt_version), ] def test_dbt_target_dir(tmpdir, dbt_project_folder, profiles_dir): """Test with dbt project in subdir that target/ is created in the correct place. https://github.com/sqlfluff/sqlfluff/issues/2895 """ tmp_base_dir = str(tmpdir) tmp_dbt_dir = os.path.join(tmp_base_dir, "dir1", "dir2", "dbt") tmp_project_dir = os.path.join(tmp_dbt_dir, "dbt_project") os.makedirs(os.path.dirname(tmp_dbt_dir)) shutil.copytree( dbt_project_folder, tmp_dbt_dir, ) os.unlink(os.path.join(tmp_dbt_dir, ".sqlfluff")) old_cwd = os.getcwd() # Invoke SQLFluff from <>, linting a file in the dbt project at # <>/dir1/dir2/dbt/dbt_project. Prior to the bug fix, a # "target" directory would incorrectly be created in <>. # (It should be created in <>/dir1/dir2/dbt/dbt_project.) os.chdir(tmp_base_dir) with open(".sqlfluff", "w") as f: print( f"""[sqlfluff] templater = dbt dialect = postgres [sqlfluff:templater:dbt] project_dir = {tmp_project_dir} profiles_dir = {old_cwd}/{profiles_dir} """, file=f, ) try: invoke_assert_code( ret_code=0, args=[ lint, [ "dir1/dir2/dbt/dbt_project/models/my_new_project/use_dbt_utils.sql", ], ], ) assert not os.path.exists("target") assert os.path.exists("dir1/dir2/dbt/dbt_project/target") finally: os.chdir(old_cwd) sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/rules_test.py000066400000000000000000000101601503426445100251330ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import os import os.path from pathlib import Path import pytest from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.utils.testing.rules import assert_rule_raises_violations_in_file @pytest.mark.parametrize( "rule,path,violations", [ # Group By ("AM01", "models/my_new_project/select_distinct_group_by.sql", [(1, 8)]), # Multiple trailing newline ("LT12", "models/my_new_project/multiple_trailing_newline.sql", [(3, 1)]), ], ) def test__rules__std_file_dbt( rule, path, violations, project_dir, dbt_fluff_config ): # noqa """Test linter finds the given errors in (and only in) the right places (DBT).""" assert_rule_raises_violations_in_file( rule=rule, fpath=os.path.join(project_dir, path), violations=violations, fluff_config=FluffConfig(configs=dbt_fluff_config, overrides=dict(rules=rule)), ) def test__rules__fix_utf8(project_dir, dbt_fluff_config): # noqa """Verify that non-ASCII characters are preserved by 'fix'.""" rule = "CP01" path = "models/my_new_project/utf8/test.sql" linter = Linter( config=FluffConfig(configs=dbt_fluff_config, overrides=dict(rules=rule)) ) result = linter.lint_path(os.path.join(project_dir, path), fix=True) # Check that we did actually find issues. 
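    # (result.as_records() yields one dict per linted file; the mapping built
    # below keys them by filepath so this file's "violations" list can be
    # checked directly.)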
# NOTE: This test is mostly useful to distinguish between whether there's # a problem with the rule - or a problem with the file. record_map = {record["filepath"]: record for record in result.as_records()} print("Result Map: ", record_map) qual_path = os.path.normpath(Path(project_dir) / path) assert qual_path in record_map, f"{path} not in result." assert record_map[qual_path]["violations"], f"No issues found for {qual_path}." result.persist_changes(fixed_file_suffix="FIXED") # TODO: Check contents of file: # ./plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ # my_new_project/utf8/testFIXED.sql # Against a git file, similar to the autofix tests fixed_path = Path(project_dir) / "models/my_new_project/utf8/testFIXED.sql" cmp_filepath = Path(project_dir) / "models/my_new_project/utf8/test.sql.fixed" fixed_buff = fixed_path.read_text("utf8") comp_buff = cmp_filepath.read_text("utf8") # Assert that we fixed as expected assert fixed_buff == comp_buff os.unlink(fixed_path) def test__rules__order_by(project_dir, dbt_fluff_config): # noqa """Verify that rule AM03 works with dbt.""" rule = "AM03" path = "models/my_new_project/AM03_test.sql" lntr = Linter( config=FluffConfig(configs=dbt_fluff_config, overrides=dict(rules=rule)) ) lnt = lntr.lint_path(os.path.join(project_dir, path)) violations = lnt.check_tuples() assert len(violations) == 0 def test__rules__indent_oscillate(project_dir, dbt_fluff_config): # noqa """Verify that we don't get oscillations with LT02 and dbt.""" # This *should* be the wrong format path_1 = "models/my_new_project/indent_loop_4.sql" # This *should* be the correct format path_2 = "models/my_new_project/indent_loop_8.sql" # Get the content of the latter with open(os.path.join(project_dir, path_2), "r") as f: path_2_content = f.read() linter = Linter( config=FluffConfig(configs=dbt_fluff_config, overrides={"rules": "LT02"}) ) # Check the wrong one first (path_1) linted_dir = linter.lint_path(os.path.join(project_dir, path_1), fix=True) linted_file = linted_dir.files[0] assert linted_file.check_tuples() == [("LT02", 6, 1)] fixed_file_1, _ = linted_file.fix_string() assert ( fixed_file_1 == path_2_content ), "indent_loop_4.sql should match indent_loop_8.sql post fix" # Check the correct one second, we shouldn't get any issues. # NOTE: This also checks that the fixed version of the first one wouldn't # change again. linted_dir = linter.lint_path(os.path.join(project_dir, path_2), fix=True) linted_file = linted_dir.files[0] assert linted_file.check_tuples() == [] # Should find no issues. 
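
# A minimal, commented-out sketch (hypothetical paths; not collected by the
# test suite) of the pattern the tests above share: lint one dbt model with a
# single rule enabled and inspect the resulting (rule, line, pos) tuples.
#
#     from sqlfluff.core import Linter
#     from sqlfluff.core.config import FluffConfig
#
#     cfg = FluffConfig(
#         configs={
#             "core": {"templater": "dbt", "dialect": "postgres"},
#             "templater": {
#                 "dbt": {
#                     "project_dir": "dbt_project",  # hypothetical path
#                     "profiles_dir": "profiles_yml",  # hypothetical path
#                 }
#             },
#         },
#         overrides={"rules": "LT02"},
#     )
#     print(Linter(config=cfg).lint_path("models/my_model.sql").check_tuples())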
sqlfluff-3.4.2/plugins/sqlfluff-templater-dbt/test/templater_test.py000066400000000000000000000573661503426445100260210ustar00rootroot00000000000000"""Tests for the dbt templater.""" import glob import json import logging import os import pickle import shutil import subprocess from copy import deepcopy from pathlib import Path from unittest import mock import pytest from sqlfluff.cli.commands import lint from sqlfluff.core import FluffConfig, Lexer, Linter from sqlfluff.core.errors import SQLFluffSkipFile, SQLFluffUserError, SQLTemplaterError from sqlfluff.utils.testing.cli import invoke_assert_code from sqlfluff.utils.testing.logging import fluff_log_catcher from sqlfluff_templater_dbt.templater import DbtTemplater def test__templater_dbt_missing(dbt_templater, project_dir, dbt_fluff_config): """Check that a nice error is returned when dbt module is missing.""" try: import dbt # noqa: F401 pytest.skip(reason="dbt is installed") except ModuleNotFoundError: pass with pytest.raises(ModuleNotFoundError, match=r"pip install sqlfluff\[dbt\]"): dbt_templater.process( in_str="", fname=os.path.join(project_dir, "models/my_new_project/test.sql"), config=FluffConfig(configs=dbt_fluff_config), ) def test__templater_dbt_profiles_dir_expanded(dbt_templater): """Check that the profiles_dir is expanded.""" dbt_templater.sqlfluff_config = FluffConfig( configs={ "core": {"dialect": "ansi"}, "templater": { "dbt": { "profiles_dir": "~/.dbt", "profile": "default", "target": "dev", "target_path": "target", } }, }, ) profiles_dir = dbt_templater._get_profiles_dir() # Normalise paths to control for OS variance assert os.path.normpath(profiles_dir) == os.path.normpath( os.path.expanduser("~/.dbt") ) assert dbt_templater._get_profile() == "default" assert dbt_templater._get_target() == "dev" assert dbt_templater._get_target_path() == "target" @pytest.mark.parametrize( "fname", [ # dbt_utils "use_dbt_utils.sql", # macro calling another macro "macro_in_macro.sql", # config.get(...) "use_headers.sql", # var(...) 
"use_var.sql", # {# {{ 1 + 2 }} #} "templated_inside_comment.sql", # {{ dbt_utils.last_day( "last_day.sql", # Many newlines at end, tests templater newline handling "trailing_newlines.sql", # Ends with whitespace stripping, so trailing newline handling should # be disabled "ends_with_whitespace_stripping.sql", # Access dbt graph nodes "access_graph_nodes.sql", # Call statements "call_statement.sql", ], ) def test__templater_dbt_templating_result( project_dir, dbt_templater, fname, dbt_fluff_config, dbt_project_folder, ): """Test that input sql file gets templated into output sql file.""" _run_templater_and_verify_result( dbt_templater, project_dir, fname, dbt_fluff_config, dbt_project_folder, ) def test_dbt_profiles_dir_env_var_uppercase( project_dir, dbt_templater, tmpdir, monkeypatch, dbt_fluff_config, dbt_project_folder, profiles_dir, ): """Tests specifying the dbt profile dir with env var.""" sub_profiles_dir = tmpdir.mkdir("SUBDIR") # Use uppercase to test issue 2253 monkeypatch.setenv("DBT_PROFILES_DIR", str(sub_profiles_dir)) shutil.copy(os.path.join(profiles_dir, "profiles.yml"), str(sub_profiles_dir)) _run_templater_and_verify_result( dbt_templater, project_dir, "use_dbt_utils.sql", dbt_fluff_config, dbt_project_folder, ) def _run_templater_and_verify_result( dbt_templater, project_dir, fname, dbt_fluff_config, dbt_project_folder, ): path = Path(project_dir) / "models/my_new_project" / fname config = FluffConfig(configs=dbt_fluff_config) templated_file, _ = dbt_templater.process( in_str=path.read_text(), fname=str(path), config=config, ) template_output_folder_path = dbt_project_folder / "templated_output/" fixture_path = _get_fixture_path(template_output_folder_path, fname) assert str(templated_file) == fixture_path.read_text() # Check we can lex the output too. # https://github.com/sqlfluff/sqlfluff/issues/4013 lexer = Lexer(config=config) _, lexing_violations = lexer.lex(templated_file) assert not lexing_violations def _get_fixture_path(template_output_folder_path, fname): fixture_path: Path = template_output_folder_path / fname # Default fixture location dbt_version_specific_fixture_folder = "dbt_utils_0.8.0" # Determine where it would exist. version_specific_path = ( Path(template_output_folder_path) / dbt_version_specific_fixture_folder / fname ) if version_specific_path.is_file(): # Ok, it exists. Use this path instead. fixture_path = version_specific_path return fixture_path @pytest.mark.parametrize( "fnames_input, fnames_expected_sequence", [ [ ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), # c.sql is not present in the original list and should not appear here, # even though b.sql depends on it. This test ensures that "out of scope" # files, e.g. those ignored using ".sqlfluffignore" or in directories # outside what was specified, are not inadvertently processed. ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), ], [ ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "c.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), # c.sql should come before b.sql because b.sql depends on c.sql. # It also comes first overall because ephemeral models come first. 
( Path("models") / "depends_on_ephemeral" / "c.sql", Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), ], ], ) def test__templater_dbt_sequence_files_ephemeral_dependency( project_dir, dbt_templater, fnames_input, fnames_expected_sequence, dbt_fluff_config, ): """Test that dbt templater sequences files based on dependencies.""" result = dbt_templater.sequence_files( [str(Path(project_dir) / fn) for fn in fnames_input], config=FluffConfig(configs=dbt_fluff_config), ) pd = Path(project_dir) expected = [str(pd / fn) for fn in fnames_expected_sequence] assert list(result) == expected @pytest.mark.parametrize( "raw_file,templated_file,result", [ ( "select * from a", """ with dbt__CTE__INTERNAL_test as ( select * from a )select count(*) from dbt__CTE__INTERNAL_test """, # The unwrapper should trim the ends. [ ("literal", slice(0, 15, None), slice(0, 15, None)), ], ) ], ) def test__templater_dbt_slice_file_wrapped_test( raw_file, templated_file, result, dbt_templater, caplog, ): """Test that wrapped queries are sliced safely using _check_for_wrapped().""" def _render_func(in_str) -> str: """Create a dummy render func. Importantly one that does actually allow different content to be added. """ # Find the raw location in the template for the test case. loc = templated_file.find(raw_file) # Replace the new content at the previous position. # NOTE: Doing this allows the tracer logic to do what it needs to do. return templated_file[:loc] + in_str + templated_file[loc + len(raw_file) :] with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): _, resp, _ = dbt_templater.slice_file( raw_file, render_func=_render_func, ) assert resp == result @pytest.mark.parametrize( "fname", [ "tests/test.sql", "models/my_new_project/single_trailing_newline.sql", "models/my_new_project/multiple_trailing_newline.sql", ], ) def test__templater_dbt_templating_test_lex( project_dir, dbt_templater, fname, dbt_fluff_config, ): """Demonstrate the lexer works on both dbt models and dbt tests. Handle any number of newlines. """ path = Path(project_dir) / fname config = FluffConfig(configs=dbt_fluff_config) source_dbt_sql = path.read_text() # Count the newlines. n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n")) print( f"Loaded {path!r} (n_newlines: {n_trailing_newlines}): " f"{source_dbt_sql!r}", ) templated_file, _ = dbt_templater.process( in_str=source_dbt_sql, fname=str(path), config=config, ) lexer = Lexer(config=config) # Test that it successfully lexes. 
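    # (Lexer.lex() returns a (segments, violations) pair; both are discarded
    # here because reaching this line without an exception is the check.
    # Other tests in this file assert the violations element is empty instead.)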
_, _ = lexer.lex(templated_file) assert ( templated_file.source_str == "select a\nfrom table_a" + "\n" * n_trailing_newlines ) assert ( templated_file.templated_str == "select a\nfrom table_a" + "\n" * n_trailing_newlines ) @pytest.mark.parametrize( "path,reason", [ ( "models/my_new_project/disabled_model.sql", "it is disabled", ), ( "macros/echo.sql", "it is a macro", ), ], ) def test__templater_dbt_skips_file( path, reason, dbt_templater, project_dir, dbt_fluff_config, ): """A disabled dbt model should be skipped.""" with pytest.raises(SQLFluffSkipFile, match=reason): dbt_templater.process( in_str="", fname=os.path.join(project_dir, path), config=FluffConfig(configs=dbt_fluff_config), ) def test_dbt_fails_stdin(dbt_templater, dbt_fluff_config): """Reading from stdin is not supported with dbt templater.""" with pytest.raises(SQLFluffUserError): dbt_templater.process( in_str="", fname="stdin", config=FluffConfig(configs=dbt_fluff_config), ) @pytest.mark.parametrize( "fname", [ "use_var.sql", "incremental.sql", "single_trailing_newline.sql", "ST06_test.sql", ], ) def test__dbt_templated_models_do_not_raise_lint_error( project_dir, fname, caplog, dbt_fluff_config, ): """Test that templated dbt models do not raise a linting error.""" linter = Linter(config=FluffConfig(configs=dbt_fluff_config)) # Log rules output. with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"): lnt = linter.lint_path( path=os.path.join(project_dir, "models/my_new_project/", fname) ) for linted_file in lnt.files: # Log the rendered file to facilitate better debugging of the files. print(f"## FILE: {linted_file.path}") print("\n\n## RENDERED FILE:\n\n") print(linted_file.templated_file.templated_str) print("\n\n## PARSED TREE:\n\n") print(linted_file.tree.stringify()) print("\n\n## VIOLATIONS:") for idx, v in enumerate(linted_file.violations): print(f" {idx}:{v.to_dict()}") violations = lnt.check_tuples() assert len(violations) == 0 def _clean_path(glob_expression): """Clear out files matching the provided glob expression.""" for fsp in glob.glob(glob_expression): os.remove(fsp) @pytest.mark.parametrize( "path", ["models/my_new_project/issue_1608.sql", "snapshots/issue_1771.sql"] ) def test__dbt_templated_models_fix_does_not_corrupt_file( project_dir, path, caplog, dbt_fluff_config, ): """Test issues where previously "sqlfluff fix" corrupted the file.""" test_glob = os.path.join(project_dir, os.path.dirname(path), "*FIXED.sql") _clean_path(test_glob) lntr = Linter(config=FluffConfig(configs=dbt_fluff_config)) with caplog.at_level(logging.INFO, logger="sqlfluff.linter"): lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True) try: lnt.persist_changes(fixed_file_suffix="FIXED") with open(os.path.join(project_dir, path + ".after")) as f: comp_buff = f.read() with open(os.path.join(project_dir, path.replace(".sql", "FIXED.sql"))) as f: fixed_buff = f.read() assert fixed_buff == comp_buff finally: _clean_path(test_glob) def test__templater_dbt_templating_absolute_path( project_dir, dbt_templater, dbt_fluff_config, ): """Test that absolute path of input path does not cause RuntimeError.""" try: dbt_templater.process( in_str="", fname=os.path.abspath( os.path.join(project_dir, "models/my_new_project/use_var.sql") ), config=FluffConfig(configs=dbt_fluff_config), ) except Exception as e: pytest.fail(f"Unexpected RuntimeError: {e}") @pytest.mark.parametrize( ("fname", "exception_msg", "dbt_skip_compilation_error", "exception_class"), [ ( "compiler_error.sql", "Compilation Error in model compiler_error " 
"(models/my_new_project/compiler_error.sql)\n " "Unexpected end of template. Jinja was looking for the following tags: " "'endfor' or 'else'.", True, SQLFluffUserError, ), ( "unknown_ref.sql", # https://github.com/sqlfluff/sqlfluff/issues/3849 "Model 'model.my_new_project.unknown_ref' " "(models/my_new_project/unknown_ref.sql) depends on a node named " "'i_do_not_exist' which was not found", True, SQLFluffUserError, ), ( "unknown_macro.sql", # https://github.com/sqlfluff/sqlfluff/issues/3849 "Compilation Error in model unknown_macro " "(models/my_new_project/unknown_macro.sql)\n 'invalid_macro' is " "undefined. This can happen when calling a macro that does not exist.", True, SQLTemplaterError, ), ( "compile_missing_table.sql", # In the test suite we don't get a very helpful error message from dbt # but in live testing, the inclusion of the triggering error sometimes # gives us something much more useful. "because dbt raised a fatal exception during compilation", True, SQLFluffSkipFile, ), pytest.param( "compile_missing_table.sql", "Runtime Error", False, SQLTemplaterError, id="dbt_skip_compilation_error", ), ], ) def test__templater_dbt_handle_exceptions( project_dir, dbt_templater, dbt_fluff_config, dbt_project_folder, fname, exception_msg, dbt_skip_compilation_error, exception_class, ): """Test that exceptions during compilation are returned as violation.""" from dbt.adapters.factory import get_adapter src_fpath = dbt_project_folder / "error_models" / fname target_fpath = os.path.abspath( os.path.join(project_dir, "models/my_new_project/", fname) ) # We move the file that throws an error in and out of the project directory # as dbt throws an error if a node fails to parse while computing the DAG shutil.move(src_fpath, target_fpath) dbt_fluff_config["templater"]["dbt"][ "dbt_skip_compilation_error" ] = dbt_skip_compilation_error try: with pytest.raises(exception_class) as excinfo: dbt_templater.process( in_str="", fname=target_fpath, config=FluffConfig( configs=dbt_fluff_config, overrides={"dialect": "ansi"} ), ) finally: shutil.move(target_fpath, src_fpath) get_adapter(dbt_templater.dbt_config).connections.release() # Debug logging. print("Raised:", excinfo.value) for trace in excinfo.traceback: print(trace) # NB: Replace slashes to deal with different platform paths being returned. if exception_class is SQLTemplaterError: _msg = excinfo.value.desc().replace("\\", "/") else: _msg = str(excinfo.value).replace("\\", "/") assert exception_msg in _msg # Ensure that there's no context parent exception, because they don't pickle well. # https://github.com/sqlfluff/sqlfluff/issues/6037 # We *should* be stripping any inherited exceptions from anything returned here. # Any residual dbt exceptions are a risk for pickling errors. assert not excinfo.value.__context__ assert not excinfo.value.__cause__ # We also ensure that the exception can be pickled and unpickled safely. # Pickling of exceptions happens during parallel operation and so if it can't # be done safely then that will cause bugs. 
pickled_exception = pickle.dumps(excinfo.value) roundtrip_exception = pickle.loads(pickled_exception) assert isinstance(roundtrip_exception, type(excinfo.value)) assert str(roundtrip_exception) == str(excinfo.value) @mock.patch("dbt.adapters.postgres.impl.PostgresAdapter.set_relations_cache") def test__templater_dbt_handle_database_connection_failure( set_relations_cache, project_dir, dbt_templater, dbt_fluff_config, ): """Test the result of a failed database connection.""" from dbt.adapters.factory import get_adapter try: from dbt.adapters.exceptions import ( FailedToConnectError as DbtFailedToConnectException, ) except ImportError: try: from dbt.exceptions import ( FailedToConnectError as DbtFailedToConnectException, ) except ImportError: from dbt.exceptions import ( FailedToConnectException as DbtFailedToConnectException, ) # Clear the adapter cache to force this test to create a new connection. DbtTemplater.adapters.clear() set_relations_cache.side_effect = DbtFailedToConnectException("dummy error") src_fpath = ( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models" "/exception_connect_database.sql" ) target_fpath = os.path.abspath( os.path.join( project_dir, "models/my_new_project/exception_connect_database.sql" ) ) dbt_fluff_config_fail = deepcopy(dbt_fluff_config) dbt_fluff_config_fail["templater"]["dbt"][ "profiles_dir" ] = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail" # We move the file that throws an error in and out of the project directory # as dbt throws an error if a node fails to parse while computing the DAG shutil.move(src_fpath, target_fpath) try: with pytest.raises(SQLTemplaterError) as excinfo: dbt_templater.process( in_str="", fname=target_fpath, config=FluffConfig(configs=dbt_fluff_config), ) finally: shutil.move(target_fpath, src_fpath) get_adapter(dbt_templater.dbt_config).connections.release() # NB: Replace slashes to deal with different platform paths being returned. error_message = excinfo.value.desc().replace("\\", "/") assert "dbt tried to connect to the database" in error_message def test__project_dir_from_env(dbt_templater, project_dir, monkeypatch): """Test possibility to set project_dir from env variable.""" dbt_templater.sqlfluff_config = FluffConfig( configs={ "core": {"dialect": "ansi"}, "templater": {"dbt": {"project_dir": None}}, } ) assert dbt_templater._get_project_dir() == os.path.abspath(os.getcwd()) monkeypatch.setenv("DBT_PROJECT_DIR", project_dir) assert dbt_templater._get_project_dir() == os.path.abspath(project_dir) def test__project_dir_does_not_exist_error(dbt_templater): """Test an error is logged if the given dbt project directory doesn't exist.""" dbt_templater.sqlfluff_config = FluffConfig( configs={ "core": {"dialect": "ansi"}, "templater": {"dbt": {"project_dir": "./non_existing_directory"}}, } ) with fluff_log_catcher(logging.ERROR, "sqlfluff.templater") as caplog: dbt_project_dir = dbt_templater._get_project_dir() assert ( f"dbt_project_dir: {dbt_project_dir} could not be accessed. " "Check it exists." 
) in caplog.text @pytest.mark.parametrize( ("model_path", "var_value"), [ ("models/vars_from_cli.sql", "expected_value"), ("models/vars_from_cli.sql", [1]), ("models/vars_from_cli.sql", {"nested": 1}), ], ) def test__context_in_config_is_loaded( project_dir, dbt_templater, model_path, var_value, dbt_fluff_config, ): """Test that variables inside .sqlfluff are passed to dbt.""" context = {"passed_through_cli": var_value} if var_value else {} config_dict = deepcopy(dbt_fluff_config) config_dict["templater"]["dbt"]["context"] = context config = FluffConfig(config_dict) path = Path(project_dir) / model_path processed, violations = dbt_templater.process( in_str=path.read_text(), fname=str(path), config=config ) assert violations == [] assert str(var_value) in processed.templated_str @pytest.mark.parametrize( ("model_path", "var_value"), [ ("models/vars_from_env.sql", "expected_value"), ], ) def test__context_in_env_is_loaded( project_dir, dbt_templater, model_path, var_value, dbt_fluff_config, ): """Test that variables inside env are passed to dbt.""" os.environ["passed_through_env"] = var_value config = FluffConfig(dbt_fluff_config) path = Path(project_dir) / model_path processed, violations = dbt_templater.process( in_str=path.read_text(), fname=str(path), config=config ) assert violations == [] assert str(var_value) in processed.templated_str def test__dbt_log_suppression(dbt_project_folder): """Test that when we try to parse in JSON format we get JSON. This actually tests that we can successfully suppress unwanted logging from dbt. """ oldcwd = os.getcwd() try: os.chdir(dbt_project_folder) cli_options = [ "--disable-progress-bar", "dbt_project/models/my_new_project/operator_errors.sql", "-f", "json", ] result = invoke_assert_code( ret_code=1, args=[ lint, cli_options, ], ) # the CliRunner isn't isolated from the dbt plugin loading isolated_lint = subprocess.run( ["sqlfluff", "lint"] + cli_options, capture_output=True ) finally: os.chdir(oldcwd) # Check that the full output parses as json parsed = json.loads(result.output) assert isolated_lint.returncode == 1 assert b" Registered adapter:" not in isolated_lint.stdout assert isinstance(parsed, list) assert len(parsed) == 1 first_file = parsed[0] assert isinstance(first_file, dict) # NOTE: Path translation for linux/windows.
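    # For example (illustrative values): a Windows runner reports the path as
    # "dbt_project\\models\\my_new_project\\operator_errors.sql", which the
    # replace("\\", "/") below normalises to the posix-style path expected
    # by the assertion.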
assert ( first_file["filepath"].replace("\\", "/") == "dbt_project/models/my_new_project/operator_errors.sql" ) assert len(first_file["violations"]) == 2 sqlfluff-3.4.2/pyproject.toml000066400000000000000000000217621503426445100162720ustar00rootroot00000000000000[build-system] requires = ["setuptools>=40.8.0", "wheel"] build-backend = "setuptools.build_meta" [project] name = "sqlfluff" version = "3.4.2" description = "The SQL Linter for Humans" readme = {file = "README.md", content-type = "text/markdown"} requires-python = ">=3.9" authors = [ {name = "Alan Cruickshank", email = "alan@designingoverload.com"}, ] license = {file = "LICENSE.md"} classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: SQL", "Topic :: Utilities", "Topic :: Software Development :: Quality Assurance", ] keywords = [ "sqlfluff", "sql", "linter", "formatter", "athena", "bigquery", "clickhouse", "databricks", "db2", "doris", "duckdb", "exasol", "flink", "greenplum", "hive", "impala", "materialize", "mariadb", "mysql", "postgres", "redshift", "snowflake", "soql", "sparksql", "sqlite", "starrocks", "teradata", "trino", "tsql", "vertica", "dbt", ] dependencies = [ # Used for finding os-specific application config dirs "platformdirs", # To get the encoding of files. "chardet", # Click can include breaking changes in minor releases. Make sure to test # well before updating upper bound. "click<=8.3.0", "colorama>=0.3", # Used for diffcover plugin "diff-cover>=2.5.0", "Jinja2", # Used for .sqlfluffignore "pathspec", # We provide a testing library for plugins in sqlfluff.utils.testing "pytest", # We require pyyaml >= 5.1 so that we can preserve the ordering of keys. "pyyaml>=5.1", # The new regex module to allow for more complex pattern matching, # whilst remaining backwards compatible with existing regex use cases. # e.g. capturing repeated groups in nested tsql block comments. # This was introduced in https://github.com/sqlfluff/sqlfluff/pull/2027 # and further details can be found in that PR. "regex", # For returning exceptions from multiprocessing.Pool.map() "tblib", # For parsing pyproject.toml "toml; python_version < '3.11'", # For handling progress bars "tqdm", ] [project.urls] Homepage = "https://www.sqlfluff.com" Documentation = "https://docs.sqlfluff.com" Source = "https://github.com/sqlfluff/sqlfluff" Changes = "https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md" "Issue Tracker" = "https://github.com/sqlfluff/sqlfluff/issues" Twitter = "https://twitter.com/SQLFluff" Chat = "https://github.com/sqlfluff/sqlfluff#sqlfluff-on-slack" [project.scripts] sqlfluff = "sqlfluff.cli.commands:cli" [project.entry-points.diff_cover] sqlfluff = "sqlfluff.diff_quality_plugin" [project.entry-points.sqlfluff] sqlfluff = "sqlfluff.core.plugin.lib" # NOTE: We namespace the rules plugins with `rules`, because some # of them might later collide with other types of plugins. 
In particular # `tsql` may eventually refer to a dialect plugin and `jinja` may refer # to a templater plugin. sqlfluff_rules_capitalisation = "sqlfluff.rules.capitalisation" sqlfluff_rules_aliasing = "sqlfluff.rules.aliasing" sqlfluff_rules_layout = "sqlfluff.rules.layout" sqlfluff_rules_references = "sqlfluff.rules.references" sqlfluff_rules_ambiguous = "sqlfluff.rules.ambiguous" sqlfluff_rules_structure = "sqlfluff.rules.structure" sqlfluff_rules_convention = "sqlfluff.rules.convention" sqlfluff_rules_jinja = "sqlfluff.rules.jinja" sqlfluff_rules_tsql = "sqlfluff.rules.tsql" [tool.sqlfluff_docs] # NOTE: Stable version is used by docs/conf.py stable_version = "3.4.2" [tool.setuptools.package-data] # `default_config.cfg` is loaded by sqlfluff as part of config resolution. # For more details on `py.typed` see https://peps.python.org/pep-0561/ sqlfluff = ["core/default_config.cfg", "py.typed"] [tool.importlinter] root_package = "sqlfluff" [[tool.importlinter.contracts]] name = "Forbid dependencies outside core" type = "forbidden" source_modules = [ "sqlfluff.core", ] forbidden_modules = [ "sqlfluff.api", "sqlfluff.cli", "sqlfluff.dialects", "sqlfluff.rules", "sqlfluff.utils", ] [[tool.importlinter.contracts]] name = "API may not depend on CLI" type = "forbidden" source_modules = [ "sqlfluff.api", ] forbidden_modules = [ "sqlfluff.cli", ] [[tool.importlinter.contracts]] name = "Helper methods must be internally independent" type = "independence" modules = [ "sqlfluff.core.helpers.string", "sqlfluff.core.helpers.slice", "sqlfluff.core.helpers.dict", ] [[tool.importlinter.contracts]] name = "Dependency layers within core" # NOTE: Several modules within core currently have somewhat more # convoluted dependency loops, especially when it comes to type checking. # Those are currently excluded from this work, but might be picked up in # future work to help with better isolation. type = "layers" layers = [ # `linter` references many things, including rules. "sqlfluff.core.linter", # `rules` should be independent from linter, but can reference the others. "sqlfluff.core.rules", # `parser` should be independent of `rules` and `linter`. "sqlfluff.core.parser", # `errors` should be a utility library, which can be referenced by the others. "sqlfluff.core.errors", # `types` should be almost independent (but may use helpers) "sqlfluff.core.types", # `helpers` should be independent and not reference any of the above. "sqlfluff.core.helpers", ] ignore_imports = [ # `errors` references `rules` and `parser`, but only for type checking. # Splitting it up seems overkill for now, so an allowable exception. "sqlfluff.core.errors -> sqlfluff.core.rules", "sqlfluff.core.errors -> sqlfluff.core.parser", # The plugin host needs the BaseRule type for type checking, because one # of the hooks returns rules. It's otherwise not something we can import # at this layer. "sqlfluff.core.plugin.hookspecs -> sqlfluff.core.rules.base", # The formatter needs the LintedFile type for type checking (and the # formatter is imported by lots of other things), and we aren't otherwise # allowed to depend on the linter at this layer.
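  # (Illustrative reading of the exemption below, assuming a typical
  # type-checking import: a line such as
  #     `from sqlfluff.core.linter import LintedFile`
  # inside `sqlfluff/core/formatter.py` would otherwise break the layering
  # contract, so it is explicitly permitted here.)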
"sqlfluff.core.formatter -> sqlfluff.core.linter", ] [tool.mypy] warn_unused_configs = true warn_redundant_casts = true warn_unused_ignores = true strict_equality = true extra_checks = true no_implicit_reexport = true # skip type checking for 3rd party packages for which stubs are not available [[tool.mypy.overrides]] module = "diff_cover.*" ignore_missing_imports = true [[tool.mypy.overrides]] module = "tblib.*" ignore_missing_imports = true [tool.ruff.lint] extend-select = ["I", "D"] # D105: Missing docstring in magic method # D107: Missing docstring in __init__ # D418: Function/ Method decorated with @overload shouldn’t contain a docstring ignore = ["D107", "D105", "D418"] [tool.ruff.lint.isort] # Mark sqlfluff, test and it's plugins as known first party known-first-party = [ "sqlfluff", "sqlfluff_plugin_example", "sqlfluff_templater_dbt", "test", ] [tool.ruff.lint.pydocstyle] convention = "google" [tool.pytest.ini_options] python_files = "*_test.py" testpaths = "test" markers = [ "dbt: Marks tests needing the \"dbt\" plugin (deselect with '-m \"not dbt\"').", "integration: Marks tests outside of the core suite.", "parse_suite: Marks the suite of parsing tests across a range of dialects (part of integration).", "fix_suite: Marks the suite of fixing tests across a range of dialects (part of integration).", "rules_suite: Marks the suite of rules tests. Also known as the yaml tests (part of integration).", ] [tool.doc8] # Ignore auto-generated docs ignore-path = "docs/source/_partials/" [tool.codespell] # The configuration must be kept here to ensure that # `codespell` can be run as a standalone program from the CLI # with the appropriate default options. skip = "*/test/fixtures/*,*/.*,*/pyproject.toml" check-hidden = true quiet-level=2 # ignore-regex = '\\[fnrstv]' builtin = "clear,rare,informal,names" ignore-words-list = "fo,ws,falsy,coo,inout,deque,crate,trough,ro,mange,identifers,statment" # ignore-words = "dev/tools/codespell/codespell-ignore.txt" # exclude-file = "dev/tools/codespell/codespell-lines-ignore.txt" uri-ignore-words-list="crate" # For future reference: it is not currently possible to specify # the standard dictionary and the custom dictionary in the configuration # file # D = "-" # dictionary = "dev/tools/codespell/codespell-dict.txt" sqlfluff-3.4.2/requirements_dev.txt000066400000000000000000000013671503426445100174770ustar00rootroot00000000000000# NOTE: Install with -U to keep all requirements up-to-date. # code linting and formatting # --------------------------- flake8 flake8-docstrings pydocstyle!=6.2.0, !=6.2.1 # See: https://github.com/PyCQA/pydocstyle/issues/618 black>=22.1.0 flake8-black>=0.2.4 ruff import-linter yamllint # documentation checks # -------------------- doc8 Pygments # python testing # -------------- coverage>=6.4 hypothesis pytest pytest-cov pytest-xdist tox # MyPy # ---- # `types-*` dependencies here should be the same as in `.pre-commit-config.yaml`. # If you update these dependencies, make sure to update those too. 
mypy[mypyc] types-toml types-chardet types-appdirs types-colorama types-pyyaml types-regex types-tqdm # util.py dependencies # ---- requests ghapi sqlfluff-3.4.2/src/000077500000000000000000000000001503426445100141355ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/000077500000000000000000000000001503426445100157575ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/__init__.py000066400000000000000000000015431503426445100200730ustar00rootroot00000000000000"""Sqlfluff is a SQL linter for humans.""" import sys from importlib import metadata import pytest # Expose the public API. from sqlfluff.api import fix, lint, list_dialects, list_rules, parse __all__ = ( "lint", "fix", "parse", "list_rules", "list_dialects", ) # Get the current version __version__ = metadata.version("sqlfluff") # Check major python version if sys.version_info[0] < 3: raise Exception("Sqlfluff does not support Python 2. Please upgrade to Python 3.") # Check minor python version elif sys.version_info[1] < 9: raise Exception( "Sqlfluff %s only supports Python 3.9 and beyond. " "Use an earlier version of sqlfluff or a later version of Python" % __version__ ) # Register helper functions to support variable introspection on failure. pytest.register_assert_rewrite("sqlfluff.utils.testing") sqlfluff-3.4.2/src/sqlfluff/__main__.py000066400000000000000000000002131503426445100200450ustar00rootroot00000000000000"""Export cli to __main__ for use like python -m sqlfluff.""" from sqlfluff.cli.commands import cli if __name__ == "__main__": cli() sqlfluff-3.4.2/src/sqlfluff/api/000077500000000000000000000000001503426445100165305ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/api/__init__.py000066400000000000000000000005101503426445100206350ustar00rootroot00000000000000"""Elements which wrap the sqlfluff core library for public use.""" # Expose the simple api from sqlfluff.api.info import list_dialects, list_rules from sqlfluff.api.simple import APIParsingError, fix, lint, parse __all__ = ( "lint", "fix", "parse", "APIParsingError", "list_rules", "list_dialects", ) sqlfluff-3.4.2/src/sqlfluff/api/info.py000066400000000000000000000006671503426445100200460ustar00rootroot00000000000000"""Information API.""" from sqlfluff.core import Linter, dialect_readout from sqlfluff.core.dialects import DialectTuple from sqlfluff.core.linter import RuleTuple def list_rules() -> list[RuleTuple]: """Return a list of available rule tuples.""" linter = Linter() return linter.rule_tuples() def list_dialects() -> list[DialectTuple]: """Return a list of available dialect info.""" return list(dialect_readout()) sqlfluff-3.4.2/src/sqlfluff/api/simple.py000066400000000000000000000171741503426445100204050ustar00rootroot00000000000000"""The simple public API methods.""" from typing import Any, Optional from sqlfluff.core import ( FluffConfig, Linter, SQLBaseError, SQLFluffUserError, dialect_selector, ) from sqlfluff.core.types import ConfigMappingType def get_simple_config( dialect: Optional[str] = None, rules: Optional[list[str]] = None, exclude_rules: Optional[list[str]] = None, config_path: Optional[str] = None, ) -> FluffConfig: """Get a config object from simple API arguments.""" # Create overrides for simple API arguments. overrides: ConfigMappingType = {} if dialect is not None: # Check the requested dialect exists and is valid.
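    # (For example: dialect="postgres" passes this check, while an unknown
    # value such as dialect="postgresql" raises a KeyError which is
    # re-raised below as a SQLFluffUserError.)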
try: dialect_selector(dialect) except SQLFluffUserError as err: # pragma: no cover raise SQLFluffUserError(f"Error loading dialect '{dialect}': {str(err)}") except KeyError: raise SQLFluffUserError(f"Error: Unknown dialect '{dialect}'") overrides["dialect"] = dialect if rules is not None: overrides["rules"] = ",".join(rules) if exclude_rules is not None: overrides["exclude_rules"] = ",".join(exclude_rules) # Instantiate a config object. try: config = FluffConfig.from_root( extra_config_path=config_path, ignore_local_config=True, overrides=overrides, require_dialect=False, ) # If no dialect was specified, set it to ansi. This allows for the legacy # behavior of the simple API to be maintained, where the dialect is not # required to be specified, but defaults to ansi. if not config.get("dialect"): overrides["dialect"] = "ansi" config = FluffConfig.from_root( extra_config_path=config_path, ignore_local_config=True, overrides=overrides, ) return config except SQLFluffUserError as err: # pragma: no cover raise SQLFluffUserError(f"Error loading config: {str(err)}") class APIParsingError(ValueError): """An exception which holds a set of violations.""" def __init__(self, violations: list[SQLBaseError], *args: Any): self.violations = violations msg = f"Found {len(violations)} issues while parsing string." for viol in violations: msg += f"\n{viol!s}" super().__init__(msg, *args) def lint( sql: str, dialect: Optional[str] = None, rules: Optional[list[str]] = None, exclude_rules: Optional[list[str]] = None, config: Optional[FluffConfig] = None, config_path: Optional[str] = None, ) -> list[dict[str, Any]]: """Lint a SQL string. Args: sql (:obj:`str`): The SQL to be linted. dialect (:obj:`Optional[str]`, optional): A reference to the dialect of the SQL to be linted. Defaults to `ansi`. rules (:obj:`Optional[list[str]`, optional): A list of rule references to lint for. Defaults to None. exclude_rules (:obj:`Optional[list[str]`, optional): A list of rule references to avoid linting for. Defaults to None. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. Returns: :obj:`list[dict[str, Any]]` for each violation found. """ cfg = config or get_simple_config( dialect=dialect, rules=rules, exclude_rules=exclude_rules, config_path=config_path, ) linter = Linter(config=cfg) result = linter.lint_string_wrapped(sql) result_records = result.as_records() # Return just the violations for this file return [] if not result_records else result_records[0]["violations"] def fix( sql: str, dialect: Optional[str] = None, rules: Optional[list[str]] = None, exclude_rules: Optional[list[str]] = None, config: Optional[FluffConfig] = None, config_path: Optional[str] = None, fix_even_unparsable: Optional[bool] = None, ) -> str: """Fix a SQL string. Args: sql (:obj:`str`): The SQL to be fixed. dialect (:obj:`Optional[str]`, optional): A reference to the dialect of the SQL to be fixed. Defaults to `ansi`. rules (:obj:`Optional[list[str]`, optional): A subset of rule references to fix for. Defaults to None. exclude_rules (:obj:`Optional[list[str]`, optional): A subset of rule references to avoid fixing for. Defaults to None. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. 
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. fix_even_unparsable (:obj:`bool`, optional): Optional override for the corresponding SQLFluff configuration value. Returns: :obj:`str` for the fixed SQL if possible. """ cfg = config or get_simple_config( dialect=dialect, rules=rules, exclude_rules=exclude_rules, config_path=config_path, ) linter = Linter(config=cfg) result = linter.lint_string_wrapped(sql, fix=True) if fix_even_unparsable is None: fix_even_unparsable = cfg.get("fix_even_unparsable") should_fix = True if not fix_even_unparsable: # If fix_even_unparsable wasn't set, check for templating or parse # errors and suppress fixing if there were any. _, num_filtered_errors = result.count_tmp_prs_errors() if num_filtered_errors > 0: should_fix = False if should_fix: sql = result.paths[0].files[0].fix_string()[0] return sql def parse( sql: str, dialect: Optional[str] = None, config: Optional[FluffConfig] = None, config_path: Optional[str] = None, ) -> dict[str, Any]: """Parse a SQL string. Args: sql (:obj:`str`): The SQL to be parsed. dialect (:obj:`Optional[str]`, optional): A reference to the dialect of the SQL to be parsed. Defaults to `ansi`. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. Returns: :obj:`Dict[str, Any]` JSON containing the parsed structure. Note: In the case of multiple potential variants from the raw source file only the first variant is returned by the simple API. For access to the other variants, use the underlying main API directly. """ cfg = config or get_simple_config( dialect=dialect, config_path=config_path, ) linter = Linter(config=cfg) parsed = linter.parse_string(sql) # If we encounter any parsing errors, raise them in a combined issue. violations = parsed.violations if violations: raise APIParsingError(violations) # Return a JSON representation of the parse tree. # NOTE: For the simple API - only a single variant is returned. root_variant = parsed.root_variant() assert root_variant, "Files parsed without violations must have a valid variant" assert root_variant.tree, "Files parsed without violations must have a valid tree" record = root_variant.tree.as_record(show_raw=True) assert record return record sqlfluff-3.4.2/src/sqlfluff/cli/000077500000000000000000000000001503426445100165265ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/cli/__init__.py000066400000000000000000000001061503426445100206340ustar00rootroot00000000000000"""init py for cli.""" EXIT_SUCCESS = 0 EXIT_FAIL = 1 EXIT_ERROR = 2 sqlfluff-3.4.2/src/sqlfluff/cli/autocomplete.py000066400000000000000000000022321503426445100216000ustar00rootroot00000000000000"""autocompletion commands.""" from sqlfluff import list_dialects # Older versions of click don't have shell completion # so handle for now, as version 8 still fairly recent # See: https://github.com/sqlfluff/sqlfluff/issues/2543 shell_completion_enabled = True try: from click.shell_completion import CompletionItem except ImportError: # pragma: no cover # In older versions don't enable completion. # We don't force newer versions of click however. 
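# (Under this guard - an assumption about typical environments - installing
# e.g. `click<8`, which predates `click.shell_completion`, simply disables
# completion rather than raising an ImportError at import time.)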
# See: https://github.com/sqlfluff/sqlfluff/issues/2543 shell_completion_enabled = False # NOTE: Important that we refer to the "CompletionItem" type # as a string rather than a direct reference so that we don't # get import errors when running with older versions of click. def dialect_shell_complete(ctx, param, incomplete) -> list["CompletionItem"]: """Shell completion for possible dialect names. We use this over click.Choice as we want to internally handle error messages and codes for incorrect/outdated dialects. """ dialect_names = [e.label for e in list_dialects()] return [ CompletionItem(name) for name in dialect_names if name.startswith(incomplete) ] sqlfluff-3.4.2/src/sqlfluff/cli/commands.py000066400000000000000000001474401503426445100207130ustar00rootroot00000000000000"""Contains the CLI.""" import json import logging import os import sys import time from itertools import chain from logging import LogRecord from typing import Callable, Optional import click # To enable colour cross platform import colorama import yaml from tqdm import tqdm from sqlfluff.cli import EXIT_ERROR, EXIT_FAIL, EXIT_SUCCESS from sqlfluff.cli.autocomplete import dialect_shell_complete, shell_completion_enabled from sqlfluff.cli.formatters import OutputStreamFormatter, format_linting_result_header from sqlfluff.cli.helpers import LazySequence, get_package_version from sqlfluff.cli.outputstream import OutputStream, make_output_stream # Import from sqlfluff core. from sqlfluff.core import ( FluffConfig, Linter, SQLFluffUserError, SQLLintError, SQLTemplaterError, dialect_readout, dialect_selector, ) from sqlfluff.core.config import progress_bar_configuration from sqlfluff.core.linter import LintingResult from sqlfluff.core.plugin.host import get_plugin_manager from sqlfluff.core.types import Color, FormatType class StreamHandlerTqdm(logging.StreamHandler): """Modified StreamHandler which takes care of writing within `tqdm` context. It uses `tqdm` write which takes care of conflicting prints with progressbar. Without it, stray artifacts were left behind in DEBUG mode (and similar issues would likely appear elsewhere). """ def emit(self, record: LogRecord) -> None: """Behaves like the original, except it uses `tqdm` to write.""" try: msg = self.format(record) tqdm.write(msg, file=self.stream) self.flush() except Exception: # pragma: no cover self.handleError(record) def set_logging_level( verbosity: int, formatter: OutputStreamFormatter, logger: Optional[logging.Logger] = None, stderr_output: bool = False, ) -> None: """Set up logging for the CLI. We either set up global logging based on the verbosity or, if `logger` is specified, we limit it to a single sqlfluff logger. Verbosity is applied in the same way. Implementation: If `logger` is not specified, the handler is attached to the `sqlfluff` logger. If it is specified then it attaches to the logger in question. In addition if `logger` is specified, then that logger will also not propagate. """ fluff_logger = logging.getLogger("sqlfluff") # Don't propagate logging fluff_logger.propagate = False # Enable colorama colorama.init() # Set up the log handler which is able to print messages without overlapping # with progressbars. handler = StreamHandlerTqdm(stream=sys.stderr if stderr_output else sys.stdout) # NB: the unicode character at the beginning is to squash any badly # tamed ANSI colour statements, and return us to normality. handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s")) # Set up a handler to colour warnings red.
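    # (Illustrative effect, assuming an ANSI-capable terminal: a WARNING
    # record with msg "parsing failure" is rewritten by the filter below so
    # the rendered message is wrapped in red escape codes, while DEBUG and
    # INFO records pass through unchanged.)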
# See: https://docs.python.org/3/library/logging.html#filter-objects def red_log_filter(record: logging.LogRecord) -> bool: if record.levelno >= logging.WARNING: record.msg = f"{formatter.colorize(record.msg, Color.red)} " return True handler.addFilter(red_log_filter) if logger: focus_logger = logging.getLogger(f"sqlfluff.{logger}") focus_logger.addHandler(handler) else: fluff_logger.addHandler(handler) # NB: We treat the parser logger slightly differently because it's noisier. # It's important that we set levels for all each time so # that we don't break tests by changing the granularity # between tests. parser_logger = logging.getLogger("sqlfluff.parser") if verbosity < 3: fluff_logger.setLevel(logging.WARNING) parser_logger.setLevel(logging.NOTSET) elif verbosity == 3: fluff_logger.setLevel(logging.INFO) parser_logger.setLevel(logging.WARNING) elif verbosity == 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.INFO) elif verbosity > 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.DEBUG) class PathAndUserErrorHandler: """Make an API call but with error handling for the CLI.""" def __init__(self, formatter: OutputStreamFormatter) -> None: self.formatter = formatter def __enter__(self) -> "PathAndUserErrorHandler": return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: if exc_type is SQLFluffUserError: click.echo( "\nUser Error: " + self.formatter.colorize( str(exc_val), Color.red, ), err=True, ) sys.exit(EXIT_ERROR) def common_options(f: Callable) -> Callable: """Add common options to commands via a decorator. These are applied to all of the cli commands. """ f = click.version_option()(f) f = click.option( "-v", "--verbose", count=True, default=None, help=( "Verbosity, how detailed should the output be. This is *stackable*, so " "`-vv` is more verbose than `-v`. For the most verbose option try `-vvvv` " "or `-vvvvv`." ), )(f) f = click.option( "-n/ ", "--nocolor/--color", default=None, help="No color - output will be without ANSI color codes.", )(f) return f def core_options(f: Callable) -> Callable: """Add core operation options to commands via a decorator. These are applied to the main (but not all) cli commands like `parse`, `lint` and `fix`. """ # Only enable dialect completion if on version of click # that supports it if shell_completion_enabled: f = click.option( "-d", "--dialect", default=None, help="The dialect of SQL to lint", shell_complete=dialect_shell_complete, )(f) else: # pragma: no cover f = click.option( "-d", "--dialect", default=None, help="The dialect of SQL to lint", )(f) f = click.option( "-t", "--templater", default=None, help="The templater to use (default=jinja)", type=click.Choice( # Use LazySequence so that we don't load templaters until required. LazySequence( lambda: [ templater.name for templater in chain.from_iterable( get_plugin_manager().hook.get_templaters() ) ] ) ), )(f) f = click.option( "-r", "--rules", default=None, help=( "Narrow the search to only specific rules. For example " "specifying `--rules LT01` will only search for rule `LT01` (Unnecessary " "trailing whitespace). Multiple rules can be specified with commas e.g. " "`--rules LT01,LT02` will specify only looking for violations of rule " "`LT01` and rule `LT02`." ), )(f) f = click.option( "-e", "--exclude-rules", default=None, help=( "Exclude specific rules. For example " "specifying `--exclude-rules LT01` will remove rule `LT01` (Unnecessary " "trailing whitespace) from the set of considered rules. 
This could either " "be the allowlist, or the general set if there is no specific allowlist. " "Multiple rules can be specified with commas e.g. " "`--exclude-rules LT01,LT02` will exclude violations of rule " "`LT01` and rule `LT02`." ), )(f) f = click.option( "--config", "extra_config_path", default=None, help=( "Include additional config file. By default the config is generated " "from the standard configuration files described in the documentation. " "This argument allows you to specify an additional configuration file that " "overrides the standard configuration files. N.B. cfg format is required." ), type=click.Path(), )(f) f = click.option( "--ignore-local-config", is_flag=True, help=( "Ignore config files in default search path locations. " "This option allows the user to lint with the default config " "or can be used in conjunction with --config to only " "reference the custom config file." ), )(f) f = click.option( "--encoding", default=None, help=( "Specify encoding to use when reading and writing files. Defaults to " "autodetect." ), )(f) f = click.option( "-i", "--ignore", default=None, help=( "Ignore particular families of errors so that they don't cause a failed " "run. For example `--ignore parsing` would mean that any parsing errors " "are ignored and don't influence the success or fail of a run. " "`--ignore` behaves somewhat like `noqa` comments, except it " "applies globally. Multiple options are possible if comma separated: " "e.g. `--ignore parsing,templating`." ), )(f) f = click.option( "--bench", is_flag=True, help="Set this flag to engage the benchmarking tool output.", )(f) f = click.option( "--logger", type=click.Choice( ["templater", "lexer", "parser", "linter", "rules", "config"], case_sensitive=False, ), help="Choose to limit the logging to one of the loggers.", )(f) f = click.option( "--disable-noqa", is_flag=True, default=None, help="Set this flag to ignore inline noqa comments.", )(f) f = click.option( "--disable-noqa-except", default=None, help="Ignore all but the listed rules inline noqa comments.", )(f) f = click.option( "--library-path", default=None, help=( "Override the `library_path` value from the [sqlfluff:templater:jinja]" " configuration value. Set this to 'none' to disable entirely." " This overrides any values set by users in configuration files or" " inline directives." ), )(f) f = click.option( "--stdin-filename", default=None, help=( "When using stdin as an input, load the configuration as if the contents" " of stdin was in a file in the listed location." " This is useful for some editors that pass file contents from the editor" " that might not match the content on disk." ), type=click.Path(allow_dash=False), )(f) return f def lint_options(f: Callable) -> Callable: """Add lint operation options to commands via a decorator. These are cli commands that do linting, i.e. `lint` and `fix`. """ f = click.option( "-p", "--processes", type=int, default=None, help=( "The number of parallel processes to run. Positive numbers work as " "expected. Zero and negative numbers will work as number_of_cpus - " "number. e.g -1 means all cpus except one. 0 means all cpus." ), )(f) f = click.option( "--disable-progress-bar", is_flag=True, help="Disables progress bars.", )(f) f = click.option( "--persist-timing", default=None, help=( "A filename to persist the timing information for a linting run to " "in csv format for external analysis. 
NOTE: This feature should be " "treated as beta, and the format of the csv file may change in " "future releases without warning." ), )(f) f = click.option( "--warn-unused-ignores", is_flag=True, default=False, help="Warn about unneeded '-- noqa:' comments.", )(f) f = click.option( "--disregard-sqlfluffignores", is_flag=True, help="Perform the operation regardless of .sqlfluffignore configurations", )(f) return f def get_config( extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> FluffConfig: """Get a config object from kwargs.""" plain_output = OutputStreamFormatter.should_produce_plain_output(kwargs["nocolor"]) if kwargs.get("dialect"): try: # We're just making sure it exists at this stage. # It will be fetched properly in the linter. dialect_selector(kwargs["dialect"]) except SQLFluffUserError as err: click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error loading dialect '{kwargs['dialect']}': {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) except KeyError: click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error: Unknown dialect '{kwargs['dialect']}'", color=Color.red, ) ) sys.exit(EXIT_ERROR) library_path = kwargs.pop("library_path", None) if not kwargs.get("warn_unused_ignores", True): # If it's present AND True, then keep it, otherwise remove this so # that we default to the root config. del kwargs["warn_unused_ignores"] # Instantiate a config object (filtering out the nulls) overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None} if library_path is not None: # Check for a null value if library_path.lower() == "none": library_path = None # Set an explicit None value. # Set the global override overrides["library_path"] = library_path try: return FluffConfig.from_root( extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, require_dialect=kwargs.pop("require_dialect", True), ) except SQLFluffUserError as err: # pragma: no cover click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error loading config: {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) def get_linter_and_formatter( cfg: FluffConfig, output_stream: Optional[OutputStream] = None, show_lint_violations: bool = False, ) -> tuple[Linter, OutputStreamFormatter]: """Get a linter object given a config.""" try: # We're just making sure it exists at this stage. # It will be fetched properly in the linter. dialect = cfg.get("dialect") if dialect: dialect_selector(dialect) except KeyError: # pragma: no cover click.echo(f"Error: Unknown dialect '{cfg.get('dialect')}'") sys.exit(EXIT_ERROR) formatter = OutputStreamFormatter( output_stream=output_stream or make_output_stream(cfg), nocolor=cfg.get("nocolor"), verbosity=cfg.get("verbose"), output_line_length=cfg.get("output_line_length"), show_lint_violations=show_lint_violations, ) return Linter(config=cfg, formatter=formatter), formatter @click.group( context_settings={"help_option_names": ["-h", "--help"]}, # NOTE: The code-block directive here looks a little odd in the CLI # but is a good balance between what appears in the CLI and what appears # in the auto generated docs for the CLI by sphinx. epilog="""Examples:\n .. 
code-block:: sh sqlfluff lint --dialect postgres .\n sqlfluff lint --dialect mysql --rules ST05 my_query.sql\n sqlfluff fix --dialect sqlite --rules LT10,ST05 src/queries\n sqlfluff parse --dialect duckdb --templater jinja path/my_query.sql\n\n """, ) @click.version_option() def cli() -> None: """SQLFluff is a modular SQL linter for humans.""" # noqa D403 @cli.command() @common_options def version(**kwargs) -> None: """Show the version of sqlfluff.""" c = get_config(**kwargs, require_dialect=False) if c.get("verbose") > 0: # Instantiate the linter lnt, formatter = get_linter_and_formatter(c) # Dispatch the detailed config from the linter. formatter.dispatch_config(lnt) else: # Otherwise just output the package version. click.echo(get_package_version(), color=c.get("color")) @cli.command() @common_options def rules(**kwargs) -> None: """Show the current rules in use.""" c = get_config(**kwargs, dialect="ansi") lnt, formatter = get_linter_and_formatter(c) try: click.echo(formatter.format_rules(lnt), color=c.get("color")) # No cover for clause covering poorly formatted rules. # Without creating a poorly formed plugin, these are hard to # test. except (SQLFluffUserError, AssertionError) as err: # pragma: no cover click.echo( OutputStreamFormatter.colorize_helper( c.get("color"), f"Error loading rules: {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) @cli.command() @common_options def dialects(**kwargs) -> None: """Show the current dialects available.""" c = get_config(**kwargs, require_dialect=False) _, formatter = get_linter_and_formatter(c) click.echo(formatter.format_dialects(dialect_readout), color=c.get("color")) def dump_file_payload(filename: Optional[str], payload: str) -> None: """Write the output file content to stdout or file.""" # If there's a file specified to write to, write to it. if filename: with open(filename, "w") as out_file: out_file.write(payload) # Otherwise write to stdout else: click.echo(payload) @cli.command() @common_options @core_options @lint_options @click.option( "-f", "--format", "format", default="human", type=click.Choice([ft.value for ft in FormatType], case_sensitive=False), help="What format to return the lint result in (default=human).", ) @click.option( "--write-output", help=( "Optionally provide a filename to write the results to, mostly used in " "tandem with --format. NB: Setting an output file re-enables normal " "stdout logging." ), ) @click.option( "--annotation-level", default="warning", type=click.Choice(["notice", "warning", "failure", "error"], case_sensitive=False), help=( 'When format is set to "github-annotation" or "github-annotation-native", ' 'default annotation level (default="warning"). "failure" and "error" ' "are equivalent. Any rules configured only as warnings will always come " 'through with type "notice" regardless of this option.' ), ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." 
), ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def lint( paths: tuple[str], format: str, write_output: Optional[str], annotation_level: str, nofail: bool, disregard_sqlfluffignores: bool, logger: Optional[logging.Logger] = None, bench: bool = False, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, stdin_filename: Optional[str] = None, **kwargs, ) -> None: """Lint SQL files via passing a list of files or using stdin. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. Linting SQL files: sqlfluff lint path/to/file.sql sqlfluff lint directory/of/sql/files Linting a file via stdin (note the lone '-' character): cat path/to/file.sql | sqlfluff lint - echo 'select col from tbl' | sqlfluff lint - """ config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) non_human_output = (format != FormatType.human.value) or (write_output is not None) file_output = None output_stream = make_output_stream(config, format, write_output) lnt, formatter = get_linter_and_formatter(config, output_stream) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=non_human_output, ) # Output the results as we go if verbose >= 1 and not non_human_output: click.echo(format_linting_result_header()) with PathAndUserErrorHandler(formatter): # add stdin if specified via lone '-' if ("-",) == paths: if stdin_filename: lnt.config = lnt.config.make_child_from_path( stdin_filename, require_dialect=False ) result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin") else: result = lnt.lint_paths( paths, ignore_non_existent_files=False, ignore_files=not disregard_sqlfluffignores, processes=processes, # If we're just linting in the CLI, we don't need to retain the # raw file content. This allows us to reduce memory overhead. retain_files=False, ) # Output the final stats if verbose >= 1 and not non_human_output: click.echo(formatter.format_linting_stats(result, verbose=verbose)) if format == FormatType.json.value: file_output = json.dumps(result.as_records()) elif format == FormatType.yaml.value: file_output = yaml.dump( result.as_records(), sort_keys=False, allow_unicode=True, ) elif format == FormatType.none.value: file_output = "" elif format == FormatType.github_annotation.value: if annotation_level == "error": annotation_level = "failure" github_result = [] for record in result.as_records(): filepath = record["filepath"] for violation in record["violations"]: # NOTE: The output format is designed for this GitHub action: # https://github.com/yuzutech/annotations-action # It is similar, but not identical, to the native GitHub format: # https://docs.github.com/en/rest/reference/checks#annotations-items github_result.append( { "file": filepath, "start_line": violation["start_line_no"], "start_column": violation["start_line_pos"], # NOTE: There should always be a start, there _may_ not be an # end, so in that case we default back to just reusing # the start. 
"end_line": violation.get( "end_line_no", violation["start_line_no"] ), "end_column": violation.get( "end_line_pos", violation["start_line_pos"] ), "title": "SQLFluff", "message": f"{violation['code']}: {violation['description']}", # The annotation_level is configurable, but will only apply # to any SQLFluff rules which have not been downgraded # to warnings using the `warnings` config value. Any which have # been set to warn rather than fail will always be given the # `notice` annotation level in the serialised result. "annotation_level": ( annotation_level if not violation["warning"] else "notice" ), } ) file_output = json.dumps(github_result) elif format == FormatType.github_annotation_native.value: if annotation_level == "failure": annotation_level = "error" github_result_native = [] for record in result.as_records(): filepath = record["filepath"] # Add a group, titled with the filename if record["violations"]: github_result_native.append(f"::group::{filepath}") for violation in record["violations"]: # NOTE: The output format is designed for GitHub action: # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message # The annotation_level is configurable, but will only apply # to any SQLFluff rules which have not been downgraded # to warnings using the `warnings` config value. Any which have # been set to warn rather than fail will always be given the # `notice` annotation level in the serialised result. line = "::notice " if violation["warning"] else f"::{annotation_level} " line += "title=SQLFluff," line += f"file={filepath}," line += f"line={violation['start_line_no']}," line += f"col={violation['start_line_pos']}" if "end_line_no" in violation: line += f",endLine={violation['end_line_no']}" if "end_line_pos" in violation: line += f",endColumn={violation['end_line_pos']}" line += "::" line += f"{violation['code']}: {violation['description']}" if violation["name"]: line += f" [{violation['name']}]" github_result_native.append(line) # Close the group if record["violations"]: github_result_native.append("::endgroup::") file_output = "\n".join(github_result_native) if file_output: dump_file_payload(write_output, file_output) if persist_timing: result.persist_timing_records(persist_timing) output_stream.close() if bench: click.echo("==== overall timings ====") click.echo(formatter.cli_table([("Clock time", result.total_time)])) timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo( formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20) ) if not nofail: if not non_human_output: formatter.completion_message() exit_code = result.stats(EXIT_FAIL, EXIT_SUCCESS)["exit code"] assert isinstance(exit_code, int), "result.stats error code must be integer." sys.exit(exit_code) else: sys.exit(EXIT_SUCCESS) def do_fixes( result: LintingResult, formatter: Optional[OutputStreamFormatter] = None, fixed_file_suffix: str = "", ) -> bool: """Actually do the fixes.""" if formatter and formatter.verbosity >= 0: click.echo("Persisting Changes...") res = result.persist_changes( formatter=formatter, fixed_file_suffix=fixed_file_suffix ) if all(res.values()): if formatter and formatter.verbosity >= 0: click.echo("Done. Please check your files to confirm.") return True # If some failed then return false click.echo( "Done. Some operations failed. Please check your files to confirm." ) # pragma: no cover click.echo( "Some errors cannot be fixed or there is another error blocking it." 
) # pragma: no cover return False # pragma: no cover def _handle_unparsable( fix_even_unparsable: bool, initial_exit_code: int, linting_result: LintingResult, formatter: OutputStreamFormatter, ): """Handles the treatment of files with templating and parsing issues. By default, any files with templating or parsing errors shouldn't have fixes attempted - because we can't guarantee the validity of the fixes. This method returns 1 if there are any files with templating or parse errors after filtering, else 0 (Intended as a process exit code). If `fix_even_unparsable` is set then it just returns whatever the pre-existing exit code was. NOTE: This method mutates the LintingResult so that future use of the object has updated violation counts which can be used for other exit code calculations. """ if fix_even_unparsable: # If we're fixing even when unparsable, don't perform any filtering. return initial_exit_code total_errors, num_filtered_errors = linting_result.count_tmp_prs_errors() linting_result.discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors() formatter.print_out_residual_error_counts( total_errors, num_filtered_errors, force_stderr=True ) return EXIT_FAIL if num_filtered_errors else EXIT_SUCCESS def _stdin_fix( linter: Linter, formatter: OutputStreamFormatter, fix_even_unparsable: bool ) -> None: """Handle fixing from stdin.""" exit_code = EXIT_SUCCESS stdin = sys.stdin.read() result = linter.lint_string_wrapped(stdin, fname="stdin", fix=True) templater_error = result.num_violations(types=SQLTemplaterError) > 0 unfixable_error = result.num_violations(types=SQLLintError, fixable=False) > 0 exit_code = _handle_unparsable(fix_even_unparsable, exit_code, result, formatter) if result.num_violations(types=SQLLintError, fixable=True) > 0: stdout = result.paths[0].files[0].fix_string()[0] else: stdout = stdin if templater_error: click.echo( formatter.colorize( "Fix aborted due to unparsable template variables.", Color.red, ), err=True, ) click.echo( formatter.colorize( "Use '--FIX-EVEN-UNPARSABLE' to attempt to fix the SQL anyway.", Color.red, ), err=True, ) if unfixable_error: click.echo( formatter.colorize("Unfixable violations detected.", Color.red), err=True, ) click.echo(stdout, nl=False) sys.exit(EXIT_FAIL if templater_error or unfixable_error else exit_code) def _paths_fix( linter: Linter, formatter: OutputStreamFormatter, paths, processes, fix_even_unparsable, fixed_suffix, bench, show_lint_violations, check: bool = False, persist_timing: Optional[str] = None, ignore_files: bool = True, ) -> None: """Handle fixing from paths.""" # Lint the paths (applying fixes as we go unless --check is set), outputting as we go. if formatter.verbosity >= 0: click.echo("==== finding fixable violations ====") exit_code = EXIT_SUCCESS with PathAndUserErrorHandler(formatter): result: LintingResult = linter.lint_paths( paths, fix=True, ignore_non_existent_files=False, ignore_files=ignore_files, processes=processes, # If --check is set, then don't apply any fixes until the end. apply_fixes=not check, fixed_file_suffix=fixed_suffix, fix_even_unparsable=fix_even_unparsable, # If --check is not set, then don't retain the files in memory once linted. # NOTE: This should enable us to limit the memory overhead of keeping # a large parsed project in memory unless necessary. retain_files=check, ) exit_code = _handle_unparsable(fix_even_unparsable, exit_code, result, formatter) # NB: We filter to linting violations here, because they're # the only ones which can be potentially fixed.
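    # For example (illustrative record shapes): a templating error carries no
    # "fixes" key at all, an unfixable lint violation carries "fixes": [],
    # and a fixable one carries a non-empty list - only the last of these
    # contributes to num_fixable below.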
violation_records = result.as_records() num_fixable = sum( # Coerce to boolean so that we effectively count the ones which have fixes. bool(v.get("fixes", [])) for rec in violation_records for v in rec["violations"] ) if num_fixable > 0: if check and formatter.verbosity >= 0: click.echo("==== fixing violations ====") click.echo(f"{num_fixable} fixable linting violations found") if check: click.echo( "Are you sure you wish to attempt to fix these? [Y/n] ", nl=False ) c = click.getchar().lower() click.echo("...") if c in ("y", "\r", "\n"): if formatter.verbosity >= 0: click.echo("Attempting fixes...") success = do_fixes( result, formatter, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(EXIT_FAIL) # pragma: no cover else: formatter.completion_message() elif c == "n": click.echo("Aborting...") exit_code = EXIT_FAIL else: # pragma: no cover click.echo("Invalid input, please enter 'Y' or 'N'") click.echo("Aborting...") exit_code = EXIT_FAIL else: if formatter.verbosity >= 0: click.echo("==== no fixable linting violations found ====") formatter.completion_message() num_unfixable = sum(p.num_unfixable_lint_errors for p in result.paths) if num_unfixable > 0 and formatter.verbosity >= 0: click.echo(" [{} unfixable linting violations found]".format(num_unfixable)) exit_code = max(exit_code, EXIT_FAIL) if bench: click.echo("==== overall timings ====") click.echo(formatter.cli_table([("Clock time", result.total_time)])) timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo( formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20) ) if show_lint_violations: click.echo("==== lint for unfixable violations ====") for record in result.as_records(): # Non fixable linting errors _have_ a `fixes` value, but it's an empty list. non_fixable = [ v for v in record["violations"] if v.get("fixes", None) == [] ] click.echo( formatter.format_filename(record["filepath"], success=(not non_fixable)) ) for violation in non_fixable: click.echo(formatter.format_violation(violation)) if persist_timing: result.persist_timing_records(persist_timing) sys.exit(exit_code) @cli.command() @common_options @core_options @lint_options @click.option( "-f", "--force", is_flag=True, help=( "[DEPRECATED - From 3.0 onward this is the default behaviour] " "Apply fixes will also be applied file by file, during the " "linting process, rather than waiting until all files are " "linted before fixing." ), ) @click.option( "--check", is_flag=True, help=( "Analyse all files and ask for confirmation before applying " "any fixes. Fixes will be applied all together at the end of " "the operation." ), ) @click.option( "-q", "--quiet", is_flag=True, help=( "Reduces the amount of output to stdout to a minimal level. " "This is effectively the opposite of -v. NOTE: It will only " "take effect if -f/--force is also set." ), ) @click.option( "-x", "--fixed-suffix", default=None, help="An optional suffix to add to fixed files.", ) @click.option( "--FIX-EVEN-UNPARSABLE", is_flag=True, default=None, help=( "Enables fixing of files that have templating or parse errors. " "Note that the similar-sounding '--ignore' or 'noqa' features merely " "prevent errors from being *displayed*. For safety reasons, the 'fix'" "command will not make any fixes in files that have templating or parse " "errors unless '--FIX-EVEN-UNPARSABLE' is enabled on the command line" "or in the .sqlfluff config file." 
), ) @click.option( "--show-lint-violations", is_flag=True, help="Show lint violations", ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def fix( force: bool, paths: tuple[str], disregard_sqlfluffignores: bool, check: bool = False, bench: bool = False, quiet: bool = False, fixed_suffix: str = "", logger: Optional[logging.Logger] = None, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, show_lint_violations: bool = False, stdin_filename: Optional[str] = None, **kwargs, ) -> None: """Fix SQL files. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths if quiet: if kwargs["verbose"]: click.echo( "ERROR: The --quiet flag can only be used if --verbose is not set.", ) sys.exit(EXIT_ERROR) kwargs["verbose"] = -1 config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) fix_even_unparsable = config.get("fix_even_unparsable") output_stream = make_output_stream( config, None, os.devnull if fixing_stdin else None ) lnt, formatter = get_linter_and_formatter( config, output_stream, show_lint_violations ) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=fixing_stdin, ) if force: click.echo( formatter.colorize( "The -f/--force option is deprecated as it is now the " "default behaviour.", Color.red, ), err=True, ) with PathAndUserErrorHandler(formatter): # handle stdin case. should output formatted sql to stdout and nothing else. if fixing_stdin: if stdin_filename: lnt.config = lnt.config.make_child_from_path( stdin_filename, require_dialect=False ) _stdin_fix(lnt, formatter, fix_even_unparsable) else: _paths_fix( lnt, formatter, paths, processes, fix_even_unparsable, fixed_suffix, bench, show_lint_violations, check=check, persist_timing=persist_timing, ignore_files=not disregard_sqlfluffignores, ) @cli.command(name="format") @common_options @core_options @lint_options @click.option( "-x", "--fixed-suffix", default=None, help="An optional suffix to add to fixed files.", ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def cli_format( paths: tuple[str], disregard_sqlfluffignores: bool, bench: bool = False, fixed_suffix: str = "", logger: Optional[logging.Logger] = None, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, stdin_filename: Optional[str] = None, **kwargs, ) -> None: """Autoformat SQL files. This effectively force applies `sqlfluff fix` with a known subset of fairly stable rules. Enabled rules are ignored, but rule exclusions (via CLI) or config are still respected. PATH is the path to a sql file or directory to lint. 
This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths if kwargs.get("rules"): click.echo( "Specifying rules is not supported for sqlfluff format.", ) sys.exit(EXIT_ERROR) # Override rules for sqlfluff format kwargs["rules"] = ( # All of the capitalisation rules "capitalisation," # All of the layout rules "layout," # Safe rules from other groups "ambiguous.union," "convention.not_equal," "convention.coalesce," "convention.select_trailing_comma," "convention.is_null," "jinja.padding," "structure.distinct," ) config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) output_stream = make_output_stream( config, None, os.devnull if fixing_stdin else None ) lnt, formatter = get_linter_and_formatter(config, output_stream) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=fixing_stdin, ) with PathAndUserErrorHandler(formatter): # handle stdin case. should output formatted sql to stdout and nothing else. if fixing_stdin: if stdin_filename: lnt.config = lnt.config.make_child_from_path( stdin_filename, require_dialect=False ) _stdin_fix(lnt, formatter, fix_even_unparsable=False) else: _paths_fix( lnt, formatter, paths, processes, fix_even_unparsable=False, fixed_suffix=fixed_suffix, bench=bench, show_lint_violations=False, persist_timing=persist_timing, ignore_files=not disregard_sqlfluffignores, ) def quoted_presenter(dumper, data): """Re-presenter which always double quotes string values needing escapes.""" if "\n" in data or "\t" in data or "'" in data: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"') else: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="") @cli.command() @common_options @core_options @click.argument("path", nargs=1, type=click.Path(allow_dash=True)) @click.option( "-c", "--code-only", is_flag=True, help="Output only the code elements of the parse tree.", ) @click.option( "-m", "--include-meta", is_flag=True, help=( "Include meta segments (indents, dedents and placeholders) in the output. " "This only applies when outputting json or yaml." ), ) @click.option( "-f", "--format", default=FormatType.human.value, type=click.Choice( [ FormatType.human.value, FormatType.json.value, FormatType.yaml.value, FormatType.none.value, ], case_sensitive=False, ), help="What format to return the parse result in.", ) @click.option( "--write-output", help=( "Optionally provide a filename to write the results to, mostly used in " "tandem with --format. NB: Setting an output file re-enables normal " "stdout logging." ), ) @click.option( "--parse-statistics", is_flag=True, help=( "Set this flag to enabled detailed debugging readout " "on the use of terminators in the parser." ), ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." 
), ) def parse( path: str, code_only: bool, include_meta: bool, format: str, write_output: Optional[str], bench: bool, nofail: bool, logger: Optional[logging.Logger] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, parse_statistics: bool = False, stdin_filename: Optional[str] = None, **kwargs, ) -> None: """Parse SQL files and just spit out the result. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ c = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) # We don't want anything else to be logged if we want json or yaml output # unless we're writing to a file. non_human_output = (format != FormatType.human.value) or (write_output is not None) output_stream = make_output_stream(c, format, write_output) lnt, formatter = get_linter_and_formatter(c, output_stream) verbose = c.get("verbose") progress_bar_configuration.disable_progress_bar = True formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=non_human_output, ) t0 = time.monotonic() # handle stdin if specified via lone '-' with PathAndUserErrorHandler(formatter): if "-" == path: file_config = lnt.config if stdin_filename: file_config = file_config.make_child_from_path( stdin_filename, require_dialect=False ) parsed_strings = [ lnt.parse_string( sys.stdin.read(), "stdin", config=file_config, parse_statistics=parse_statistics, ), ] else: # A single path must be specified for this command parsed_strings = list( lnt.parse_path( path=path, parse_statistics=parse_statistics, ) ) total_time = time.monotonic() - t0 violations_count = 0 # iterative print for human readout if format == FormatType.human.value: violations_count = formatter.print_out_violations_and_timing( output_stream, bench, code_only, total_time, verbose, parsed_strings ) else: parsed_strings_dict = [] for parsed_string in parsed_strings: # TODO: Multiple variants aren't yet supported here in the non-human # output of the parse command. root_variant = parsed_string.root_variant() # Updating violation count ensures the correct return code below. violations_count += len(parsed_string.violations) if root_variant: assert root_variant.tree segments = root_variant.tree.as_record( code_only=code_only, show_raw=True, include_meta=include_meta ) else: # Parsing failed - return null for segments. segments = None parsed_strings_dict.append( {"filepath": parsed_string.fname, "segments": segments} ) if format == FormatType.yaml.value: # For yaml dumping always dump double quoted strings if they contain # tabs or newlines. yaml.add_representer(str, quoted_presenter) file_output = yaml.dump( parsed_strings_dict, sort_keys=False, allow_unicode=True, ) elif format == FormatType.json.value: file_output = json.dumps(parsed_strings_dict) elif format == FormatType.none.value: file_output = "" # Dump the output to stdout or to file as appropriate. 
dump_file_payload(write_output, file_output) if violations_count > 0 and not nofail: sys.exit(EXIT_FAIL) # pragma: no cover else: sys.exit(EXIT_SUCCESS) @cli.command() @common_options @core_options @click.argument("path", nargs=1, type=click.Path(allow_dash=True)) def render( path: str, bench: bool, logger: Optional[logging.Logger] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> None: """Render SQL files and just spit out the result. PATH is the path to a sql file. This should be either a single file ('path/to/file.sql') or a single ('-') character to indicate reading from *stdin*. """ c = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) # We don't want anything else to be logged if we want json or yaml output # unless we're writing to a file. output_stream = make_output_stream(c, None, None) lnt, formatter = get_linter_and_formatter(c, output_stream) verbose = c.get("verbose") progress_bar_configuration.disable_progress_bar = True formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=False, ) # handle stdin if specified via lone '-' with PathAndUserErrorHandler(formatter): if "-" == path: raw_sql = sys.stdin.read() fname = "stdin" file_config = lnt.config else: raw_sql, file_config, _ = lnt.load_raw_file_and_config(path, lnt.config) fname = path # Get file specific config file_config.process_raw_file_for_config(raw_sql, fname) rendered = lnt.render_string(raw_sql, fname, file_config, "utf8") if rendered.templater_violations: for v in rendered.templater_violations: click.echo(formatter.format_violation(v)) sys.exit(EXIT_FAIL) else: _num_variants = len(rendered.templated_variants) if _num_variants > 1: click.echo( formatter.colorize( f"SQLFluff rendered {_num_variants} variants of this file", Color.blue, ) ) for idx, variant in enumerate(rendered.templated_variants): click.echo( formatter.colorize( f"Variant {idx + 1}:", Color.blue, ) ) click.echo(variant) else: # No preamble if there's only one. click.echo(rendered.templated_variants[0]) sys.exit(EXIT_SUCCESS) # This "__main__" handler allows invoking SQLFluff using "python -m", which # simplifies the use of cProfile, e.g.: # python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql if __name__ == "__main__": cli.main(sys.argv[1:]) # pragma: no cover sqlfluff-3.4.2/src/sqlfluff/cli/formatters.py000066400000000000000000000637541503426445100213040ustar00rootroot00000000000000"""Defines the formatters for the CLI.""" import os import sys from io import StringIO from typing import Optional, Union import click from colorama import Style from sqlfluff.cli import EXIT_FAIL, EXIT_SUCCESS from sqlfluff.cli.helpers import ( get_package_version, get_python_implementation, get_python_version, pad_line, wrap_field, ) from sqlfluff.cli.outputstream import OutputStream from sqlfluff.core import FluffConfig, Linter, SQLBaseError, TimingSummary from sqlfluff.core.linter import FormatterInterface, LintedFile, ParsedString from sqlfluff.core.types import Color def split_string_on_spaces(s: str, line_length: int = 100) -> list[str]: """Split a string into lines based on whitespace. For short strings the functionality is trivial. >>> split_string_on_spaces("abc") ['abc'] For longer sections it will split at an appropriate point. >>> split_string_on_spaces("abc def ghi", line_length=7) ['abc def', 'ghi'] After splitting, multi-space sections should be intact.
>>> split_string_on_spaces("a ' ' b c d e f", line_length=11) ["a ' ' b c", 'd e f'] """ line_buff = [] str_buff = "" # NOTE: We *specify* the single space split, so that on reconstruction # we can accurately represent multi space strings. for token in s.split(" "): # Can we put this token on this line without going over? if str_buff: if len(str_buff) + len(token) > line_length: line_buff.append(str_buff) str_buff = token else: str_buff += " " + token else: # In the case that the buffer is already empty, add it without checking, # otherwise there might be things that we might never. str_buff = token # If we have left over buff, add it in if str_buff: line_buff.append(str_buff) return line_buff def format_linting_result_header() -> str: """Format the header of a linting result output.""" text_buffer = StringIO() text_buffer.write("==== readout ====\n") return text_buffer.getvalue() class OutputStreamFormatter(FormatterInterface): """Formatter which writes to an OutputStream. On instantiation, this formatter accepts a function to dispatch messages. Each public method accepts an object or data in a common format, with this class handling the formatting and output. This class is designed to be subclassed if we eventually want to provide other methods of surfacing output. Args: output_stream: Output is sent here verbosity: Specifies how verbose output should be filter_empty: If True, empty messages will not be dispatched output_line_length: Maximum line length """ def __init__( self, output_stream: OutputStream, nocolor: bool, verbosity: int = 0, filter_empty: bool = True, output_line_length: int = 80, show_lint_violations: bool = False, ): self._output_stream = output_stream self.plain_output = self.should_produce_plain_output(nocolor) self.verbosity = verbosity self._filter_empty = filter_empty self.output_line_length = output_line_length self.show_lint_violations = show_lint_violations @staticmethod def should_produce_plain_output(nocolor: bool) -> bool: """Returns True if text output should be plain (not colored).""" # If `--color` is specified (nocolor is False), we ignore `NO_COLOR` env_nocolor = bool(os.getenv("NO_COLOR")) and nocolor is not False return nocolor or not sys.stdout.isatty() or env_nocolor def _dispatch(self, s: str) -> None: """Dispatch a string to the callback. This method is designed as a point for subclassing. 
""" # The strip here is to filter out any empty messages if (not self._filter_empty) or s.strip(" \n\t"): self._output_stream.write(s) def _format_config(self, linter: Linter) -> str: """Format the config of a `Linter`.""" text_buffer = StringIO() # Only show version information if verbosity is high enough if self.verbosity > 0: text_buffer.write("==== sqlfluff ====\n") config_content = [ ("sqlfluff", get_package_version()), ("python", get_python_version()), ("implementation", get_python_implementation()), ("verbosity", self.verbosity), ] if linter.dialect: config_content.append(("dialect", linter.dialect.name)) config_content += linter.templater.config_pairs() text_buffer.write( self.cli_table(config_content, col_width=30, max_label_width=15) ) text_buffer.write("\n") if linter.config.get("rule_allowlist"): text_buffer.write( self.cli_table( [("rules", ", ".join(linter.config.get("rule_allowlist")))], col_width=41, ) ) if self.verbosity > 1: text_buffer.write("\n== Raw Config:\n") text_buffer.write(self.format_config_vals(linter.config.iter_vals())) return text_buffer.getvalue() def dispatch_config(self, linter: Linter) -> None: """Dispatch configuration output appropriately.""" self._dispatch(self._format_config(linter)) def dispatch_persist_filename(self, filename: str, result: str) -> None: """Dispatch filenames during a persist operation.""" # Only show the skip records at higher levels of verbosity if self.verbosity >= 2 or result != "SKIP": self._dispatch(self.format_filename(filename=filename, success=result)) def _format_path(self, path: str) -> str: """Format paths.""" return f"=== [ path: {self.colorize(path, Color.light)} ] ===\n" def dispatch_path(self, path: str) -> None: """Dispatch paths for display.""" if self.verbosity > 0: self._dispatch(self._format_path(path)) def dispatch_template_header( self, fname: str, linter_config: FluffConfig, file_config: Optional[FluffConfig] ) -> None: """Dispatch the header displayed before templating.""" if self.verbosity > 1: self._dispatch(self.format_filename(filename=fname, success="TEMPLATING")) # This is where we output config diffs if they exist. if file_config: # Only output config diffs if there is a config to diff to. 
config_diff = file_config.diff_to(linter_config) if config_diff: # pragma: no cover self._dispatch(" Config Diff:") self._dispatch( self.format_config_vals( linter_config.iter_vals(cfg=config_diff) ) ) def dispatch_parse_header(self, fname: str) -> None: """Dispatch the header displayed before parsing.""" if self.verbosity > 1: self._dispatch(self.format_filename(filename=fname, success="PARSING")) def dispatch_lint_header(self, fname: str, rules: list[str]) -> None: """Dispatch the header displayed before linting.""" if self.verbosity > 1: self._dispatch( self.format_filename( filename=fname, success=f"LINTING ({', '.join(rules)})" ) ) def dispatch_compilation_header(self, templater: str, message: str) -> None: """Dispatch the header displayed before linting.""" self._dispatch( f"=== [{self.colorize(templater, Color.light)}] {message}" ) # pragma: no cover def dispatch_processing_header(self, processes: int) -> None: """Dispatch the header displayed before linting.""" if self.verbosity > 0: self._dispatch( # pragma: no cover f"{self.colorize('effective configured processes: ', Color.light)} " f"{processes}" ) def dispatch_dialect_warning(self, dialect: str) -> None: """Dispatch a warning for dialects.""" self._dispatch(self.format_dialect_warning(dialect)) # pragma: no cover def _format_file_violations( self, fname: str, violations: list[SQLBaseError] ) -> str: """Format a set of violations in a `LintingResult`.""" text_buffer = StringIO() # Success is based on there being no fails, but we still # want to show the results if there are warnings (even # if no fails). fails = sum( int(not violation.ignore and not violation.warning) for violation in violations ) warns = sum(int(violation.warning) for violation in violations) show = fails + warns > 0 # Only print the filename if it's either a failure or verbosity > 0 if self.verbosity > 0 or show: text_buffer.write(self.format_filename(fname, success=fails == 0)) text_buffer.write("\n") # If we have violations, print them if show: # sort by position in file (using line number and position) s = sorted(violations, key=lambda v: (v.line_no, v.line_pos)) for violation in s: text_buffer.write( self.format_violation( violation, max_line_length=self.output_line_length ) ) text_buffer.write("\n") str_buffer = text_buffer.getvalue() # Remove the trailing newline if there is one if len(str_buffer) > 0 and str_buffer[-1] == "\n": str_buffer = str_buffer[:-1] return str_buffer def dispatch_file_violations( self, fname: str, linted_file: LintedFile, only_fixable: bool, warn_unused_ignores: bool, ) -> None: """Dispatch any violations found in a file.""" if self.verbosity < 0: return s = self._format_file_violations( fname, linted_file.get_violations( fixable=( True if bool(only_fixable and not self.show_lint_violations) else None ), filter_warning=False, warn_unused_ignores=warn_unused_ignores, ), ) self._dispatch(s) def colorize(self, s: str, color: Optional[Color] = None) -> str: """Optionally use ANSI colour codes to colour a string.""" return self.colorize_helper(self.plain_output, s, color) @staticmethod def colorize_helper( plain_output: bool, s: str, color: Optional[Color] = None ) -> str: """Static version of colorize() method.""" if not color or plain_output: return s else: return f"{color.value}{s}{Style.RESET_ALL}" def cli_table_row( self, fields: list[tuple[str, str]], col_width, max_label_width=10, sep_char=": ", divider_char=" ", label_color=Color.light, val_align="right", ) -> str: """Make a row of a CLI table, using wrapped values.""" # Do
some intel first cols = len(fields) last_col_idx = cols - 1 wrapped_fields = [ wrap_field( field[0], field[1], width=col_width, max_label_width=max_label_width, sep_char=sep_char, ) for field in fields ] max_lines = max(fld["lines"] for fld in wrapped_fields) last_line_idx = max_lines - 1 # Make some text buff = StringIO() for line_idx in range(max_lines): for col_idx in range(cols): # Assume we pad labels left and values right fld = wrapped_fields[col_idx] ll = fld["label_list"] vl = fld["val_list"] buff.write( self.colorize( pad_line( ll[line_idx] if line_idx < len(ll) else "", width=fld["label_width"], ), color=label_color, ) ) if line_idx == 0: buff.write(sep_char) else: buff.write(" " * len(sep_char)) buff.write( pad_line( vl[line_idx] if line_idx < len(vl) else "", width=fld["val_width"], align=val_align, ) ) if col_idx != last_col_idx: buff.write(divider_char) elif line_idx != last_line_idx: buff.write("\n") return buff.getvalue() def cli_table( self, fields, col_width=20, cols=2, divider_char=" ", sep_char=": ", label_color=Color.light, float_format="{0:.2f}", max_label_width=10, val_align="right", ) -> str: """Make a crude ascii table. Assume that `fields` is an iterable of (label, value) pairs. """ # First format all the values into strings formatted_fields = [] for label, value in fields: label = str(label) if isinstance(value, float): value = float_format.format(value) else: value = str(value) formatted_fields.append((label, value)) # Set up a buffer to hold the whole table buff = StringIO() while len(formatted_fields) > 0: row_buff: list[tuple[str, str]] = [] while len(row_buff) < cols and len(formatted_fields) > 0: row_buff.append(formatted_fields.pop(0)) buff.write( self.cli_table_row( row_buff, col_width=col_width, max_label_width=max_label_width, sep_char=sep_char, divider_char=divider_char, label_color=label_color, val_align=val_align, ) ) if len(formatted_fields) > 0: buff.write("\n") return buff.getvalue() def format_filename( self, filename: str, success: Union[str, bool] = False, success_text: str = "PASS", ) -> str: """Format filenames.""" if isinstance(success, str): status_string = success else: status_string = success_text if success else "FAIL" if status_string in ("PASS", "FIXED", success_text): status_string = self.colorize(status_string, Color.green) elif status_string in ("FAIL", "ERROR"): status_string = self.colorize(status_string, Color.red) return f"== [{self.colorize(filename, Color.light)}] {status_string}" def format_violation( self, violation: Union[SQLBaseError, dict], max_line_length: int = 90, ) -> str: """Format a violation. NOTE: This method accepts both SQLBaseError objects and the serialised dict representation. If the former is passed, then the conversion is done within the method so we can work with a common representation. """ if isinstance(violation, dict): v_dict: dict = violation elif isinstance(violation, SQLBaseError): v_dict = violation.to_dict() else: # pragma: no cover raise ValueError(f"Unexpected violation format: {violation}") desc: str = v_dict["description"] code: str = v_dict["code"] name: str = v_dict["name"] line_no: int = v_dict["start_line_no"] line_pos: int = v_dict["start_line_pos"] warning: bool = v_dict["warning"] line_elem = "   -" if line_no is None else f"{line_no:4d}" pos_elem = "   -" if line_pos is None else f"{line_pos:4d}" if warning: desc = "WARNING: " + desc # pragma: no cover # If the rule has a name, add that to the description.
if name: desc += f" [{self.colorize(name, Color.light)}]" split_desc = split_string_on_spaces(desc, line_length=max_line_length - 25) out_buff = "" # Grey out the violation if we're ignoring or warning it. section_color: Color if warning: section_color = Color.light else: section_color = Color.blue for idx, line in enumerate(split_desc): if idx == 0: rule_code = code.rjust(4) if "PRS" in rule_code: section_color = Color.red out_buff += self.colorize( f"L:{line_elem} | P:{pos_elem} | {rule_code} | ", section_color, ) else: out_buff += ( "\n" + (" " * 23) + self.colorize( "| ", section_color, ) ) out_buff += line return out_buff def format_linting_stats(self, result, verbose=0) -> str: """Format a set of stats given a `LintingResult`.""" text_buffer = StringIO() all_stats = result.stats(EXIT_FAIL, EXIT_SUCCESS) text_buffer.write("==== summary ====\n") if verbose >= 2: output_fields = [ "files", "violations", "clean files", "unclean files", "avg per file", "unclean rate", "status", ] special_formats = {"unclean rate": "{0:.0%}"} else: output_fields = ["violations", "status"] special_formats = {} # Generate content tuples, applying special formats for some fields summary_content = [ ( key, ( special_formats[key].format(all_stats[key]) if key in special_formats else all_stats[key] ), ) for key in output_fields ] # Render it all as a table text_buffer.write(self.cli_table(summary_content, max_label_width=14)) return text_buffer.getvalue() def format_config_vals(self, config_vals) -> str: """Format an iterable of config values from a config object.""" text_buffer = StringIO() for i, k, v in config_vals: val = "" if v is None else str(v) text_buffer.write( (" " * i) + self.colorize(pad_line(str(k) + ":", 20, "left"), color=Color.light) + pad_line(val, 20, "left") + "\n" ) return text_buffer.getvalue() def _format_rule_description(self, rule) -> str: """Format individual rule. This is a helper function in .format_rules(). """ if rule.name: name = self.colorize(rule.name, Color.blue) description = f"[{name}] {rule.description}" else: description = rule.description if rule.groups: groups = self.colorize(", ".join(rule.groups), Color.light) description += f"\ngroups: {groups}" if rule.aliases: aliases = self.colorize(", ".join(rule.aliases), Color.light) description += f" aliases: {aliases}" return description def format_rules(self, linter: Linter, verbose: int = 0) -> str: """Format a set of rules given a `Linter`.""" text_buffer = StringIO() text_buffer.write("==== sqlfluff - rules ====\n") text_buffer.write( self.cli_table( [ ( t.code, self._format_rule_description(t), ) for t in linter.rule_tuples() ], col_width=80, cols=1, label_color=Color.blue, val_align="left", ) ) return text_buffer.getvalue() def format_dialects(self, dialect_readout, verbose=0) -> str: """Format the dialects yielded by `dialect_readout`.""" text_buffer = StringIO() text_buffer.write("==== sqlfluff - dialects ====\n") readouts = [ ( dialect.label, f"{dialect.name} dialect [inherits from '{dialect.inherits_from}']", ) for dialect in dialect_readout() ] text_buffer.write( self.cli_table( readouts, col_width=60, cols=1, label_color=Color.blue, val_align="right", ) ) return text_buffer.getvalue() def format_dialect_warning(self, dialect: str) -> str: """Output a warning for parsing errors.""" return self.colorize( ( "WARNING: Parsing errors found and dialect is set to " f"'{dialect}'. Have you configured your dialect correctly?"
), Color.light, ) def print_out_residual_error_counts( self, total_errors: int, num_filtered_errors: int, force_stderr: bool = False ) -> None: """Output the residual error totals for the file. Args: total_errors (int): The total number of templating & parsing errors. num_filtered_errors (int): The number of templating & parsing errors which remain after any noqa and filters applied. force_stderr (bool): Whether to force the output onto stderr. By default the output is on stdout if there are no errors, otherwise stderr. """ if total_errors and not self.show_lint_violations: click.echo( message=self.colorize( f" [{total_errors} templating/parsing errors found]", Color.red ), color=self.plain_output, err=True, ) if num_filtered_errors < total_errors: color = Color.red if num_filtered_errors else Color.green click.echo( message=self.colorize( f" [{num_filtered_errors} templating/parsing errors " f'remaining after "ignore" & "warning"]', color=color, ), color=not self.plain_output, err=force_stderr or num_filtered_errors > 0, ) def print_out_violations_and_timing( self, output_stream: OutputStream, bench: bool, code_only: bool, total_time: float, verbose: int, parsed_strings: list[ParsedString], ) -> int: """Used by human formatting during the `sqlfluff parse` command.""" violations_count = 0 timing = TimingSummary() for parsed_string in parsed_strings: timing.add(parsed_string.time_dict) num_variants = len(parsed_string.parsed_variants) root_variant = parsed_string.root_variant() if not root_variant: # TODO: Make this prettier output_stream.write( self.colorize("...Failed to Parse...", Color.red) ) # pragma: no cover elif num_variants == 1: # Backward compatible single parse assert root_variant.tree output_stream.write(root_variant.tree.stringify(code_only=code_only)) else: # Multi variant parse setup. 
output_stream.write( self.colorize( f"SQLFluff parsed {num_variants} variants of this file", Color.blue, ) ) for idx, variant in enumerate(parsed_string.parsed_variants): output_stream.write( self.colorize( f"Variant {idx + 1}:", Color.blue, ) ) if variant.tree: output_stream.write(variant.tree.stringify(code_only=code_only)) else: # pragma: no cover output_stream.write( self.colorize("...Failed to Parse...", Color.red) ) violations = parsed_string.violations violations_count += len(violations) if violations: output_stream.write("==== parsing violations ====") # pragma: no cover for v in violations: output_stream.write(self.format_violation(v)) # pragma: no cover if violations: output_stream.write( self.format_dialect_warning(parsed_string.config.get("dialect")) ) if verbose >= 2: output_stream.write("==== timings ====") output_stream.write(self.cli_table(parsed_string.time_dict.items())) if verbose >= 2 or bench: output_stream.write("==== overall timings ====") output_stream.write(self.cli_table([("Clock time", total_time)])) timing_summary = timing.summary() for step in timing_summary: output_stream.write(f"=== {step} ===") output_stream.write(self.cli_table(timing_summary[step].items())) return violations_count def completion_message(self) -> None: """Prints message when SQLFluff is finished.""" click.echo(f"All Finished{'' if self.plain_output else ' 📜 🎉'}!") sqlfluff-3.4.2/src/sqlfluff/cli/helpers.py000066400000000000000000000051401503426445100205420ustar00rootroot00000000000000"""CLI helper utilities.""" import sys import textwrap from collections import abc from functools import cached_property from typing import Any, Callable from sqlfluff import __version__ as pkg_version def get_python_version() -> str: """Get the current python version as a string.""" return "{0[0]}.{0[1]}.{0[2]}".format(sys.version_info) def get_python_implementation() -> str: """Get the current python implementation as a string. This is useful if testing in pypy or similar. """ return sys.implementation.name def get_package_version() -> str: """Get the current version of the sqlfluff package.""" return pkg_version def wrap_elem(s: str, width: int) -> list[str]: """Wrap a string into a list of strings all less than the given width.""" return textwrap.wrap(s, width=width) def wrap_field( label: str, val: str, width: int, max_label_width: int = 10, sep_char: str = ": " ) -> dict[str, Any]: """Wrap a field (label, val). Returns: A dict of {label_list, val_list, sep_char, lines} """ if len(label) > max_label_width: label_list = wrap_elem(label, width=max_label_width) label_width = max(len(line) for line in label_list) else: label_width = len(label) label_list = [label] max_val_width = width - len(sep_char) - label_width val_list = [] for v in val.split("\n"): val_list.extend(wrap_elem(v, width=max_val_width)) return dict( label_list=label_list, val_list=val_list, sep_char=sep_char, lines=max(len(label_list), len(val_list)), label_width=label_width, val_width=max_val_width, ) def pad_line(s: str, width: int, align: str = "left") -> str: """Pad a string with a given alignment to a specific width with spaces.""" gap = width - len(s) if gap <= 0: return s elif align == "left": return s + (" " * gap) elif align == "right": return (" " * gap) + s else: raise ValueError(f"Unknown alignment: {align}") # pragma: no cover class LazySequence(abc.Sequence): """A Sequence which only populates on the first access.
This is useful for being able to define sequences within the click cli decorators, but which don't trigger their contents until first accessed. """ def __init__(self, getter: Callable[[], abc.Sequence]): self._getter = getter @cached_property def _sequence(self) -> abc.Sequence: return self._getter() def __getitem__(self, key): return self._sequence[key] def __len__(self): return len(self._sequence) sqlfluff-3.4.2/src/sqlfluff/cli/outputstream.py000066400000000000000000000043751503426445100216630ustar00rootroot00000000000000"""Classes for managing linter output, used with OutputStreamFormatter.""" import abc import os from typing import Any, Optional import click from tqdm import tqdm from sqlfluff.core import FluffConfig from sqlfluff.core.types import FormatType class OutputStream(abc.ABC): """Base class for linter output stream.""" def __init__(self, config: FluffConfig, context: Any = None) -> None: self.config = config def write(self, message: str) -> None: """Write message to output.""" raise NotImplementedError # pragma: no cover def close(self) -> None: """Close output stream.""" pass class TqdmOutput(OutputStream): """Outputs to stdout, coordinates to avoid conflict with tqdm. The progress bar may conflict with extra printing. Nothing very serious happens then, except that a progress bar line may be left printed (not removed). Using `external_write_mode` suspends tqdm output for the duration of the write. """ def __init__(self, config: FluffConfig) -> None: super().__init__(config) def write(self, message: str) -> None: """Write message to stdout.""" with tqdm.external_write_mode(): click.echo(message=message, color=self.config.get("color")) class FileOutput(OutputStream): """Outputs to a specified file.""" def __init__(self, config: FluffConfig, output_path: str) -> None: super().__init__(config) self.file = open(output_path, "w") def write(self, message: str) -> None: """Write message to output_path.""" print(message, file=self.file) def close(self) -> None: """Close output file.""" self.file.close() def make_output_stream( config: FluffConfig, format: Optional[str] = None, output_path: Optional[str] = None, ) -> OutputStream: """Create and return appropriate OutputStream instance.""" if format is None or format == FormatType.human.value: if not output_path: # Human-format output to stdout. return TqdmOutput(config) else: # Human-format output to a file. return FileOutput(config, output_path) else: # Discard human output as not required return FileOutput(config, os.devnull) sqlfluff-3.4.2/src/sqlfluff/core/000077500000000000000000000000001503426445100167075ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/__init__.py000066400000000000000000000030651503426445100210240ustar00rootroot00000000000000"""The core elements of sqlfluff.""" import tblib.pickling_support # Config objects from sqlfluff.core.config import FluffConfig # Dialect introspection from sqlfluff.core.dialects import dialect_readout, dialect_selector # All of the errors.
from sqlfluff.core.errors import ( SQLBaseError, SQLFluffUserError, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterError, ) # Public classes from sqlfluff.core.linter import Linter from sqlfluff.core.parser import Lexer, Parser # Timing objects from sqlfluff.core.timing import TimingSummary __all__ = ( "FluffConfig", "Linter", "Lexer", "Parser", "dialect_selector", "dialect_readout", "SQLBaseError", "SQLTemplaterError", "SQLLexError", "SQLParseError", "SQLLintError", "SQLFluffUserError", "TimingSummary", ) # This is for "sqlfluff lint" and "sqlfluff fix" multiprocessing (--processes) # support. If an exception (i.e. runtime error) occurs in a worker process, we # want to return the traceback to the main process and report it there, as part # of the normal output. However, anything returned from a multiprocessing.Pool # worker must be serializable using "pickle". By default, Python traceback # objects cannot be pickled. The tblib package addresses this limitation; we # simply need to install it before creating the worker pool. See these links for # additional context: # https://pypi.org/project/tblib/ # https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process tblib.pickling_support.install() sqlfluff-3.4.2/src/sqlfluff/core/config/000077500000000000000000000000001503426445100201545ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/config/__init__.py000066400000000000000000000050141503426445100222650ustar00rootroot00000000000000"""Init file for the config module. This holds all the methods and classes for configuration. """ from typing import Optional from sqlfluff.core.config.file import ( load_config_file_as_dict, load_config_string_as_dict, ) from sqlfluff.core.config.fluffconfig import FluffConfig from sqlfluff.core.config.loader import ( ConfigLoader, load_config_at_path, load_config_file, load_config_resource, load_config_string, load_config_up_to_path, ) __all__ = ( "FluffConfig", "ConfigLoader", "load_config_file", "load_config_resource", "load_config_string", "load_config_at_path", "load_config_up_to_path", "progress_bar_configuration", "clear_config_caches", ) def clear_config_caches() -> None: """Clear any of the cached config methods. This is primarily used during testing where the cache may be rendered unreliable by files being moved around while setting up tests. Some of the cached methods rely on *filename* caching, and so we may break one of the assumptions of the caching routines (that files aren't modified while SQLFluff is running) during the test suite. That means we need to clear the cache during those times to get reliable results. NOTE: You may not notice those results when running tests individually locally as they may only be visible when running the whole test suite. """ load_config_file_as_dict.cache_clear() load_config_at_path.cache_clear() load_config_string_as_dict.cache_clear() class ProgressBarConfiguration: """Singleton-esque progress bar configuration. It's expected to be set during startup with parameters coming from command-line parameters, then to be utilized as just ``` from sqlfluff.core.config import progress_bar_configuration is_progressbar_disabled = progress_bar_configuration.disable_progress_bar ``` """ _disable_progress_bar: Optional[bool] = True @property def disable_progress_bar(self) -> Optional[bool]: # noqa: D102 return self._disable_progress_bar @disable_progress_bar.setter def disable_progress_bar(self, value: Optional[bool]) -> None: """`disable_progress_bar` setter.
`True` means that the progress bar should always be hidden; `False` falls back to `None`, which is an automatic mode. From tqdm documentation: 'If set to None, disable on non-TTY.' """ self._disable_progress_bar = value or None progress_bar_configuration = ProgressBarConfiguration() sqlfluff-3.4.2/src/sqlfluff/core/config/file.py000066400000000000000000000107721503426445100214530ustar00rootroot00000000000000"""Lower level routines for config file loading and caching. Functions in this module load config from *individual* files and resources. While some are cached, they are cached on the basis of not processing individual files more than once. For the cached functions it is strongly recommended to make sure they are copied before any edits happen to them, as those edits may propagate back up into the cache. Typically the results are passed to `nested_combine` either immediately, or eventually after returning, which should negate this effect. """ import os.path from functools import cache from typing import Optional from sqlfluff.core.config.ini import load_ini_string from sqlfluff.core.config.toml import load_toml_file_config from sqlfluff.core.config.validate import validate_config_dict from sqlfluff.core.helpers.string import ( split_comma_separated_string, ) from sqlfluff.core.types import ConfigMappingType COMMA_SEPARATED_PATH_KEYS = ( "load_macros_from_path", "loader_search_path", "exclude_macros_from_path", ) RESOLVE_PATH_SUFFIXES = ("_path", "_dir") def _load_raw_file_as_dict(filepath: str) -> ConfigMappingType: """Loads the raw dict object from file without interpolation.""" filename = os.path.basename(filepath) if filename == "pyproject.toml": return load_toml_file_config(filepath) # If it's not a pyproject file, assume that it's an ini file. with open(filepath, mode="r") as file: return load_ini_string(file.read()) def _resolve_path(filepath: str, val: str) -> str: """Try to resolve a path found in a config value.""" # Make the referenced path. ref_path = os.path.join(os.path.dirname(filepath), val) # Check if it exists, and if it does, replace the value with the path. return ref_path if os.path.exists(ref_path) else val def _resolve_paths_in_config( config: ConfigMappingType, filepath: str, logging_reference: Optional[str] = None ) -> None: """Attempt to resolve any paths found in the config file. NOTE: This method is recursive to crawl the whole config object, and also mutates the underlying config object rather than returning it. """ log_filename: str = logging_reference or filepath for key, val in config.items(): # If it's a dict, recurse. if isinstance(val, dict): _resolve_paths_in_config(val, filepath, logging_reference=logging_reference) # If it's a potential multi-path, split, resolve and join if key.lower() in COMMA_SEPARATED_PATH_KEYS: assert isinstance( val, str ), f"Value for {key} in {log_filename} must be a string not {type(val)}." paths = split_comma_separated_string(val) config[key] = ",".join(_resolve_path(filepath, path) for path in paths) # If it's a single path key, resolve it. elif key.lower().endswith(RESOLVE_PATH_SUFFIXES): assert isinstance( val, str ), f"Value for {key} in {log_filename} must be a string not {type(val)}." config[key] = _resolve_path(filepath, val) @cache def load_config_file_as_dict(filepath: str) -> ConfigMappingType: """Load the given config file into a dict and validate. This method is cached to mitigate being called multiple times. This doesn't manage the combination of config files within a nested structure, that happens further up the stack.
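A minimal illustrative usage (the path shown here is hypothetical)::

    from sqlfluff.core.config.file import load_config_file_as_dict

    # The result is cached, so copy it before mutating. Note that dict()
    # is only a shallow copy; edits to nested sections would need a
    # deeper copy to avoid propagating back up into the cache.
    config = dict(load_config_file_as_dict("/repo/.sqlfluff"))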
""" raw_config = _load_raw_file_as_dict(filepath) # The raw loaded files have some path interpolation which is necessary. _resolve_paths_in_config(raw_config, filepath) # Validate validate_config_dict(raw_config, filepath) # Return dict object (which will be cached) return raw_config @cache def load_config_string_as_dict( config_string: str, working_path: str, logging_reference: str ) -> ConfigMappingType: """Load the given config string and validate. This method is cached to mitigate being called multiple times. This doesn't manage the combination of config files within a nested structure, that happens further up the stack. The working path is necessary to resolve any paths in the config file. """ raw_config = load_ini_string(config_string) # The raw loaded files have some path interpolation which is necessary. _resolve_paths_in_config( raw_config, working_path, logging_reference=logging_reference ) # Validate validate_config_dict(raw_config, logging_reference) # Return dict object (which will be cached) return raw_config sqlfluff-3.4.2/src/sqlfluff/core/config/fluffconfig.py000066400000000000000000000761171503426445100230320ustar00rootroot00000000000000"""Module for loading config.""" from __future__ import annotations import logging from collections.abc import Iterable from copy import copy, deepcopy from itertools import chain from typing import TYPE_CHECKING, Any, Optional, Union import pluggy from sqlfluff.core.config.ini import coerce_value from sqlfluff.core.config.loader import load_config_string, load_config_up_to_path from sqlfluff.core.config.validate import validate_config_dict from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.dict import ( dict_diff, iter_records_from_nested_dict, nested_combine, records_to_nested_dict, ) from sqlfluff.core.helpers.string import ( split_colon_separated_string, split_comma_separated_string, ) from sqlfluff.core.plugin.host import get_plugin_manager from sqlfluff.core.types import ConfigMappingType, ConfigValueOrListType if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.templaters.base import RawTemplater # Instantiate the config logger config_logger = logging.getLogger("sqlfluff.config") class FluffConfig: """The persistent object for internal methods to access configuration. This class is designed to be instantiated once for each file and then be reused by each part of the process. For multiple files in the same path, a parent object will be created for the each path and then variants of it are created *for each file*. The object itself contains the references to any long lived objects which might be used by multiple parts of the codebase such as the dialect and the templater (both of which can be resource intensive to load & instantiate), which allows (for example), multiple files to reuse the same instance of the relevant dialect. It is also designed to pickle well for use in parallel operations. Args: configs (ConfigMappingType, optional): A nested dict of config values from which to construct the config. extra_config_path (str, optional): An optional additional path to load config files from. These are loaded last if found and take precedence over any pre-existing config values. Note that when provided directly to the class, this path is not loaded for the class in question (it's assumed that has already been done, and the results are incorporated in the `configs` argument), but it *is* passed onward to child config instances, which will use it. 
ignore_local_config (bool, optional, defaults to False): If set to True, this skips loading configuration from the user home directory (``~``) or ``appdir`` path. overrides (ConfigMappingType, optional): An additional set of configs to merge into the ``core`` section of the config object at the end. These values take precedence over all other provided values and are inherited by child configs. For example, override values provided in the CLI use this method to apply to all files in a linting operation. Note that this mapping dict *only* applies to the ``core`` section and so cannot be used for all values. plugin_manager (PluginManager, optional): Optional pre-loaded plugin manager. Generally users should not need to provide this, as the class will fetch its own if not provided. This argument is used when creating new class instances to avoid reloading the manager. .. note:: Methods for accessing internal properties on the config are not particularly standardised as the project currently assumes that few other tools are using this interface directly. If you or your project would like more formally supported methods for access to the config object, raise an issue on GitHub with the kind of things you'd like to achieve. """ private_vals = "rule_denylist", "rule_allowlist", "dialect_obj", "templater_obj" def __init__( self, configs: Optional[ConfigMappingType] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[ConfigMappingType] = None, plugin_manager: Optional[pluggy.PluginManager] = None, # Ideally a dialect should be set when config is read but sometimes # it might only be set in nested .sqlfluff config files, so allow it # to be not required. require_dialect: bool = True, ) -> None: self._extra_config_path = ( extra_config_path # We only store this for child configs ) self._ignore_local_config = ( ignore_local_config # We only store this for child configs ) # If overrides are provided, validate them early. if overrides: overrides = {"core": overrides} validate_config_dict(overrides, "") # Stash overrides so we can pass them to child configs core_overrides = overrides["core"] if overrides else None assert isinstance(core_overrides, dict) or core_overrides is None self._overrides = core_overrides # Fetch a fresh plugin manager if we weren't provided with one self._plugin_manager = plugin_manager or get_plugin_manager() defaults = nested_combine(*self._plugin_manager.hook.load_default_config()) # If any existing configs are provided, validate them: if configs: validate_config_dict(configs, "") self._configs = nested_combine( defaults, configs or {"core": {}}, overrides or {} ) # Some configs require special treatment self._configs["core"]["color"] = ( False if self._configs["core"].get("nocolor", False) else None ) # Handle inputs which are potentially comma separated strings self._handle_comma_separated_values() # Dialect and Template selection.
_dialect = self._configs["core"]["dialect"] assert _dialect is None or isinstance(_dialect, str) self._initialise_dialect(_dialect, require_dialect) self._configs["core"]["templater_obj"] = self.get_templater() def _handle_comma_separated_values(self) -> None: for in_key, out_key in [ ("ignore", "ignore"), ("warnings", "warnings"), ("rules", "rule_allowlist"), ("exclude_rules", "rule_denylist"), ]: in_value = self._configs["core"].get(in_key, None) if in_value: assert not isinstance(in_value, dict) self._configs["core"][out_key] = split_comma_separated_string(in_value) else: self._configs["core"][out_key] = [] def _initialise_dialect( self, dialect: Optional[str], require_dialect: bool = True ) -> None: # NB: We import here to avoid a circular references. from sqlfluff.core.dialects import dialect_selector if dialect is not None: self._configs["core"]["dialect_obj"] = dialect_selector(dialect) elif require_dialect: self.verify_dialect_specified() def verify_dialect_specified(self) -> None: """Check if the config specifies a dialect, raising an error if not. Raises: SQLFluffUserError: If dialect config value is unset. The content of the error contains user-facing instructions on what dialects are available and how to set the dialect. """ if self._configs["core"].get("dialect", None) is None: # Get list of available dialects for the error message. We must # import here rather than at file scope in order to avoid a circular # import. from sqlfluff.core.dialects import dialect_readout raise SQLFluffUserError( "No dialect was specified. You must configure a dialect or " "specify one on the command line using --dialect after the " "command. Available dialects:\n" f"{', '.join([d.label for d in dialect_readout()])}" ) def __getstate__(self) -> dict[str, Any]: # Copy the object's state from self.__dict__ which contains # all our instance attributes. Always use the dict.copy() # method to avoid modifying the original state. state = self.__dict__.copy() # Remove the unpicklable entries. del state["_plugin_manager"] # The dbt templater doesn't pickle well, but isn't required # within threaded operations. If it was, it could easily be # rehydrated within the thread. For rules which want to determine # the type of a templater in their context, use # `get_templater_class()` instead, which avoids instantiating # a new templater instance. # NOTE: It's important that we do this on a copy so that we # don't disturb the original object if it's still in use. state["_configs"] = state["_configs"].copy() state["_configs"]["core"] = state["_configs"]["core"].copy() state["_configs"]["core"]["templater_obj"] = None return state def __setstate__(self, state: dict[str, Any]) -> None: # pragma: no cover # Restore instance attributes self.__dict__.update(state) # NOTE: Rather than rehydrating the previous plugin manager, we # fetch a fresh one. self._plugin_manager = get_plugin_manager() # NOTE: Likewise we don't reinstate the "templater_obj" config value # which should also only be used in the main thread rather than child # processes. def copy(self) -> FluffConfig: """Create a copy of this ``FluffConfig``. Copies created using this method can safely be modified without those changes propagating back up to the object which was originally copied. Returns: :obj:`FluffConfig`: A shallow copy of this config object but with a deep copy of the internal ``_configs`` dict. 
""" configs_attribute_copy = deepcopy(self._configs) config_copy = copy(self) config_copy._configs = configs_attribute_copy # During the initial `.copy()`, we use the same `__reduce__()` method # which is used during pickling. The `templater_obj` doesn't pickle # well so is normally removed, but it's ok for us to just pass across # the original object here as we're in the same process. configs_attribute_copy["core"]["templater_obj"] = self._configs["core"][ "templater_obj" ] return config_copy @classmethod def from_root( cls, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[ConfigMappingType] = None, require_dialect: bool = True, ) -> FluffConfig: """Loads a config object based on the root directory. Args: extra_config_path (str, optional): An optional additional path to load config files from. These are loaded last if found and take precedence over any pre-existing config values. ignore_local_config (bool, optional, defaults to False): If set to True, this skips loading configuration from the user home directory (``~``) or ``appdir`` path. overrides (ConfigMappingType, optional): A additional set of configs to merge into the config object at the end. These values take precedence over all other provided values and are inherited by child configs. For example, override values provided in the CLI use this method to apply to all files in a linting operation. require_dialect (bool, optional, default is True): When True an error will be raise if the dialect config value is unset. Returns: :obj:`FluffConfig`: The loaded config object. """ configs = load_config_up_to_path( path=".", extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, ) return cls( configs=configs, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, require_dialect=require_dialect, ) @classmethod def from_string( cls, config_string: str, overrides: Optional[ConfigMappingType] = None, ) -> FluffConfig: """Loads a config object from a single config string. Args: config_string (str): The config string, assumed to be in ``ini`` format (like a ``.sqlfluff`` file). overrides (ConfigMappingType, optional): A additional set of configs to merge into the config object at the end. These values take precedence over all other provided values and are inherited by child configs. For example, override values provided in the CLI use this method to apply to all files in a linting operation. Returns: :obj:`FluffConfig`: The loaded config object. """ return cls( configs=load_config_string(config_string), overrides=overrides, ) @classmethod def from_strings( cls, *config_strings: str, overrides: Optional[ConfigMappingType] = None, ) -> FluffConfig: """Loads a config object given a series of nested config strings. Args: *config_strings (str): An iterable of config strings, assumed to be in ``ini`` format (like a ``.sqlfluff`` file). overrides (ConfigMappingType, optional): A additional set of configs to merge into the config object at the end. These values take precedence over all other provided values and are inherited by child configs. For example, override values provided in the CLI use this method to apply to all files in a linting operation. Returns: :obj:`FluffConfig`: The loaded config object. Config strings are incorporated from first to last, treating the first element as the "root" config, and then later config strings will take precedence over any earlier values. 
""" config_state: ConfigMappingType = {} for config_string in config_strings: config_state = load_config_string(config_string, configs=config_state) return cls( configs=config_state, overrides=overrides, ) @classmethod def from_path( cls, path: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[ConfigMappingType] = None, plugin_manager: Optional[pluggy.PluginManager] = None, require_dialect: bool = True, ) -> FluffConfig: """Loads a config object given a particular path. Args: path (str): The target path to load config files from. Files found between the working path and this path are also loaded and nested with files closest to this target path taking precedence. extra_config_path (str, optional): An optional additional path to load config files from. These are loaded last if found and take precedence over any pre-existing config values. ignore_local_config (bool, optional, defaults to False): If set to True, this skips loading configuration from the user home directory (``~``) or ``appdir`` path. overrides (ConfigMappingType, optional): A additional set of configs to merge into the ``core`` section of the config object at the end. These values take precedence over all other provided values and are inherited by child configs. Note that this mapping dict *only* applies to the ``core`` section and so cannot be used for all values. plugin_manager (PluginManager, optional): Optional pre-loaded config manager. Generally users should not need to provide this, as the class will fetch it's own if not provided. This argument is used when creating new class instances to avoid reloading the manager. require_dialect (bool, optional, default is True): When True an error will be raise if the dialect config value is unset. Returns: :obj:`FluffConfig`: The loaded config object. """ configs = load_config_up_to_path( path=path, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, ) return cls( configs=configs, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, plugin_manager=plugin_manager, require_dialect=require_dialect, ) @classmethod def from_kwargs( cls, dialect: Optional[str] = None, rules: Optional[list[str]] = None, exclude_rules: Optional[list[str]] = None, require_dialect: bool = True, ) -> FluffConfig: """Instantiate a config from a subset of common options. Args: dialect (str, optional): The name of the dialect to use. rules (list of str, optional): A list of rules to include. Rule specifiers can be codes, names, groups or aliases. If not set, defaults to all rules. exclude_rules (list of str, optional): A list of rules to exclude. Rule specifiers can be codes, names, groups or aliases. If not set, does not exclude any rules. require_dialect (bool, optional, default is True): When True an error will be raise if the dialect config value is unset. Returns: :obj:`FluffConfig`: The loaded config object. This is a convenience method for the ways that the public classes like Linter(), Parser() and Lexer() allow a subset of attributes to be set directly rather than requiring a pre-made `FluffConfig`. 
""" overrides: ConfigMappingType = {} if dialect: overrides["dialect"] = dialect if rules: # Make a comma separated string to pass in as override overrides["rules"] = ",".join(rules) if exclude_rules: # Make a comma separated string to pass in as override overrides["exclude_rules"] = ",".join(exclude_rules) return cls(overrides=overrides, require_dialect=require_dialect) def get_templater_class(self) -> type[RawTemplater]: """Get the configured templater class. .. note:: This is mostly useful to call directly when rules want to determine the *type* of a templater without (in particular to work out if it's a derivative of the jinja templater), without needing to instantiate a full templater. Instantiated templaters don't pickle well, so aren't automatically passed around between threads/processes. """ templater_lookup: dict[str, type[RawTemplater]] = { templater.name: templater for templater in chain.from_iterable( self._plugin_manager.hook.get_templaters() ) } # Fetch the config value. templater_name = self._configs["core"].get("templater", "") assert isinstance( templater_name, str ), f"Config value `templater` expected to be a string. Not: {templater_name!r}" try: cls = templater_lookup[templater_name] # Return class. Do not instantiate yet. That happens in `get_templater()` # for situations which require it. return cls except KeyError: if templater_name == "dbt": # pragma: no cover config_logger.warning( "Starting in sqlfluff version 0.7.0 the dbt templater is " "distributed as a separate python package. Please pip install " "sqlfluff-templater-dbt to use it." ) raise SQLFluffUserError( "Requested templater {!r} which is not currently available. Try one of " "{}".format(templater_name, ", ".join(templater_lookup.keys())) ) def get_templater(self, **kwargs: Any) -> RawTemplater: """Instantiate the configured templater.""" return self.get_templater_class()(**kwargs) def make_child_from_path( self, path: str, require_dialect: bool = True ) -> FluffConfig: """Make a child config at a path but pass on overrides and extra_config_path. Args: path (str): The path to load the new config object from, inheriting the content of the calling `FluffConfig` as base values. require_dialect (bool, optional, default is True): When True an error will be raise if the dialect config value is unset. Returns: :obj:`FluffConfig`: A new config object which copies the current config object, but overriding any values set by config values loaded from the given path. """ return self.from_path( path, extra_config_path=self._extra_config_path, ignore_local_config=self._ignore_local_config, overrides=self._overrides, plugin_manager=self._plugin_manager, require_dialect=require_dialect, ) def diff_to(self, other: FluffConfig) -> ConfigMappingType: """Compare this config to another. This is primarily used in the CLI logs to indicate to the user what values have been changed for each file compared to the root config for the project. Args: other (:obj:`FluffConfig`): Another config object to compare against. We will return keys from *this* object that are not in `other` or are different to those in `other`. Returns: :obj:`dict`: A filtered dict of items in this config that are not in the other or are different to the other. """ # We ignore some objects which are not meaningful in the comparison # e.g. dialect_obj, which is generated on the fly. 
return dict_diff(self._configs, other._configs, ignore=["dialect_obj"]) def get( self, val: str, section: Union[str, Iterable[str]] = "core", default: Any = None ) -> Any: """Get a particular value from the config. Args: val (str): The name of the config value to get. section (str or iterable of str, optional): The "path" to the config value. For values in the main ``[sqlfluff]`` section of the config, which are stored in the ``core`` section of the config, this can be omitted. default: The value to return if the config value was not found. If no default is provided, then ``None`` will be returned if no value was found. The following examples show how to fetch various default values: >>> FluffConfig(overrides={"dialect": "ansi"}).get("dialect") 'ansi' >>> config = FluffConfig(overrides={"dialect": "ansi"}) >>> config.get("tab_space_size", section="indentation") 4 >>> FluffConfig(overrides={"dialect": "ansi"}).get( ... "capitalisation_policy", ... section=["rules", "capitalisation.keywords"] ... ) 'consistent' """ section_dict = self.get_section(section) if section_dict is None: return default return section_dict.get(val, default) def get_section(self, section: Union[str, Iterable[str]]) -> Any: """Return a whole section of config as a dict. If the element found at the address is a value and not a section, it is still returned and so this can be used as a more advanced form of the basic `get` method. Args: section: An iterable or string. If it's a string we load that root section. If it's an iterable of strings, then we treat it as a path within the dictionary structure. """ if isinstance(section, str): return self._configs.get(section, None) else: # Try iterating buff = self._configs for sec in section: temp = buff.get(sec, None) if temp is None: return None buff = temp return buff def set_value(self, config_path: Iterable[str], val: Any) -> None: """Set a value at a given path. Args: config_path: An iterable of strings. Each should be one of the elements which is colon delimited in a standard config file. val: The value to set at the given path. >>> cfg = FluffConfig(overrides={"dialect": "ansi"}) >>> cfg.set_value(["dialect"], "postgres") >>> cfg.get("dialect") 'postgres' >>> cfg = FluffConfig(overrides={"dialect": "ansi"}) >>> cfg.set_value(["indentation", "tab_space_size"], 2) >>> cfg.get("tab_space_size", section="indentation") 2 """ # Make the path a list so we can index on it config_path = list(config_path) # Coerce the value into something more useful. config_val = coerce_value(val) # Sort out core if not there if len(config_path) == 1: # pragma: no cover TODO? config_path = ["core"] + config_path # Current section: dict_buff = [self._configs] for elem in config_path[:-1]: dict_buff.append(dict_buff[-1].get(elem, {})) # Set the value dict_buff[-1][config_path[-1]] = config_val # Rebuild the config for elem in reversed(config_path[:-1]): dict_elem = dict_buff.pop() dict_buff[-1][elem] = dict_elem self._configs = dict_buff[0] def iter_vals( self, cfg: Optional[ConfigMappingType] = None ) -> Iterable[tuple[int, str, ConfigValueOrListType]]: """Return an iterable of tuples representing config keys and values. Args: cfg (optional): An optional config mapping to format instead. If not provided, we use the internal config object of the `FluffConfig`. This is primarily to enable formatting of config objects in the CLI. We show values before dicts; the tuple contains an indent value so we know what level of the dict we're in. Dict labels will be returned as a blank value before their content. 
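As a minimal illustrative sketch, using a small hand-built mapping rather than a full config: >>> cfg = FluffConfig(overrides={"dialect": "ansi"}) >>> list(cfg.iter_vals({"a": 1, "b": {"c": 2}})) [(0, 'a', 1), (0, 'b', ''), (1, 'c', 2)] 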
""" cfg = cfg or self._configs # Get keys and sort keys = sorted(cfg.keys()) # First iterate values (alphabetically): for k in keys: value = cfg[k] if ( not isinstance(value, dict) and value is not None and k not in self.private_vals ): yield (0, k, value) # Then iterate dicts (alphabetically (but `core` comes first if it exists)) for k in keys: value = cfg[k] if isinstance(value, dict): # First yield the dict label yield (0, k, "") # Then yield its content for idnt, key, val in self.iter_vals(cfg=value): yield (idnt + 1, key, val) def process_inline_config(self, config_line: str, fname: str) -> None: """Process an inline config command and update self. Args: config_line (str): The inline config section to be processed. This should usually begin with ``-- sqlfluff:``. fname (str): The name of the current file being processed. This is used purely for logging purposes in the case that an invalid config string is provided so that any error messages can reference the file with the issue. >>> cfg = FluffConfig(overrides={"dialect": "ansi"}) >>> cfg.process_inline_config( ... "-- sqlfluff:dialect:postgres", ... "test.sql" ... ) >>> cfg.get("dialect") 'postgres' """ # Strip preceding comment marks if config_line.startswith("--"): config_line = config_line[2:].strip() # Strip preceding sqlfluff line. if not config_line.startswith("sqlfluff:"): # pragma: no cover config_logger.warning( "Unable to process inline config statement: %r", config_line ) return config_line = config_line[9:].strip() config_key, config_value = split_colon_separated_string(config_line) # Move to core section if appropriate if len(config_key) == 1: config_key = ("core",) + config_key # Coerce data types config_record = (config_key, coerce_value(config_value)) # Convert to dict & validate config_dict: ConfigMappingType = records_to_nested_dict([config_record]) validate_config_dict(config_dict, f"inline config in {fname}") config_val = list(iter_records_from_nested_dict(config_dict))[0] # Set the value self.set_value(config_key, config_value) # If the config is for dialect, initialise the dialect. if config_val[0] == ("core", "dialect"): dialect_value = config_val[1] assert isinstance(dialect_value, str) self._initialise_dialect(dialect_value) def process_raw_file_for_config(self, raw_str: str, fname: str) -> None: """Process a full raw file for inline config and update self. Args: raw_str (str): The full SQL script to evaluate for inline configs. fname (str): The name of the current file being processed. This is used purely for logging purposes in the case that an invalid config string is provided so that any error messages can reference the file with the issue. >>> cfg = FluffConfig(overrides={"dialect": "ansi"}) >>> cfg.process_raw_file_for_config( ... "-- sqlfluff:dialect:postgres", ... "test.sql" ... ) >>> cfg.get("dialect") 'postgres' """ # Scan the raw file for config commands. for raw_line in raw_str.splitlines(): # With or without a space. if raw_line.startswith(("-- sqlfluff", "--sqlfluff")): # Found a in-file config command self.process_inline_config(raw_line, fname) # Deal with potential list-like inputs. self._handle_comma_separated_values() sqlfluff-3.4.2/src/sqlfluff/core/config/ini.py000066400000000000000000000056541503426445100213170ustar00rootroot00000000000000"""Methods for loading config files with an ini-style format. This includes `.sqlfluff` and `tox.ini` files. 
""" import configparser from sqlfluff.core.helpers.dict import NestedDictRecord, records_to_nested_dict from sqlfluff.core.types import ConfigMappingType, ConfigValueType def coerce_value(val: str) -> ConfigValueType: """Try to coerce to a more specific type.""" # Try to coerce it to a more specific type, # otherwise just make it a string. v: ConfigValueType try: v = int(val) except ValueError: try: v = float(val) except ValueError: cleaned_val = val.strip().lower() if cleaned_val == "true": v = True elif cleaned_val == "false": v = False elif cleaned_val == "none": v = None else: v = val return v def load_ini_string(cfg_content: str) -> ConfigMappingType: """Read an ini-style config string. This would include loading a `.sqlfluff` file. Notes: - We rename the root `sqlfluff` section, to `core` so that it's in line with other config files. - The `configparser` reads everything as strings, but this method will attempt to find better types for values based on their content. - Path resolution isn't done here, that all happens later. - Unlike most cfg file readers, SQLFluff is case-sensitive in how it reads config files. This is to ensure we support the case sensitivity of jinja. """ # If the string is empty, no need to parse it. if not cfg_content: return {} # Disable interpolation so we can load macros config = configparser.ConfigParser(delimiters="=", interpolation=None) # NB: We want to be case sensitive in how we read from files, # because jinja is also case sensitive. To do this we override # the optionxform attribute. config.optionxform = lambda option: option # type: ignore # Read the content. config.read_string(cfg_content) # Build up a buffer of config values. config_buffer: list[NestedDictRecord[ConfigValueType]] = [] for k in config.sections(): if k == "sqlfluff": key: tuple[str, ...] = ("core",) elif k.startswith("sqlfluff:"): # Return a tuple of nested values key = tuple(k[len("sqlfluff:") :].split(":")) else: # pragma: no cover # if it doesn't start with sqlfluff, then ignore this # section. It's not relevant to sqlfluff. continue for name, val in config.items(section=k): # Try to coerce it to a more specific type, # otherwise just make it a string. v = coerce_value(val) # Add the name to the end of the key config_buffer.append((key + (name,), v)) # Compress that buffer into a dictionary. return records_to_nested_dict(config_buffer) sqlfluff-3.4.2/src/sqlfluff/core/config/loader.py000066400000000000000000000333571503426445100220070ustar00rootroot00000000000000"""Config loading methods and helpers. This is designed to house the main functions which are exposed by the overall config module. There is some caching in this module, which is designed around caching the configuration loaded at *specific paths* rather than the individual file caching in the `file` module. 
""" from __future__ import annotations import logging import os import os.path import sys from functools import cache from importlib.resources import files from pathlib import Path from typing import ( Optional, ) import platformdirs import platformdirs.macos import platformdirs.unix from sqlfluff.core.config.file import ( load_config_file_as_dict, load_config_string_as_dict, ) from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.dict import nested_combine from sqlfluff.core.helpers.file import iter_intermediate_paths from sqlfluff.core.types import ConfigMappingType # Instantiate the config logger config_logger = logging.getLogger("sqlfluff.config") global_loader = None """:obj:`ConfigLoader`: A variable to hold the single module loader when loaded. We define a global loader, so that between calls to load config, we can still cache appropriately """ def _get_user_config_dir_path(sys_platform: str) -> str: """Get the user config dir for this system. Args: sys_platform (str): The result of ``sys.platform()``. Provided as an argument here for ease of testing. In normal usage it should only be called with ``sys.platform()``. This argument only applies to switching between linux and macos. Win32 detection still uses the underlying ``sys.platform()`` methods. """ appname = "sqlfluff" appauthor = "sqlfluff" # First try the default SQLFluff specific cross-platform config path. cross_platform_path = os.path.expanduser("~/.config/sqlfluff") if os.path.exists(cross_platform_path): return cross_platform_path # Then try the platform specific paths, for MacOS, we check # the unix variant first to preferentially use the XDG config path if set. # https://github.com/sqlfluff/sqlfluff/issues/889 if sys_platform == "darwin": unix_config_path = platformdirs.unix.Unix( appname=appname, appauthor=appauthor ).user_config_dir if os.path.exists(os.path.expanduser(unix_config_path)): return unix_config_path # Technically we could just delegate to the generic `user_config_dir` # method, but for testing it's convenient to explicitly call the macos # methods here. return platformdirs.macos.MacOS( appname=appname, appauthor=appauthor ).user_config_dir # NOTE: We could delegate to the generic `user_config_dir` method here, # but for testing it's convenient to explicitly call the linux methods. elif sys_platform == "linux": return platformdirs.unix.Unix( appname=appname, appauthor=appauthor ).user_config_dir # Defer to the self-detecting paths. # NOTE: On Windows this means that the `sys_platform` argument is not # applied. return platformdirs.user_config_dir(appname, appauthor) def load_config_file( file_dir: str, file_name: str, configs: Optional[ConfigMappingType] = None ) -> ConfigMappingType: """Load a config file from the filesystem. Args: file_dir (str): The path to the location of file to be loaded. This should be a reference to the directory *only* and not include the filename itself. Any paths in the loaded file are resolved relative to this location. file_name (str): The filename of the file to be loaded. If the filename is ``pyproject.toml`` then the file is loaded in ``toml`` format, but otherwise is assumed to be in ``ini`` format (as per ``.sqlfluff``). configs (ConfigMappingType, optional): A base set of configs to merge the loaded configs onto. If not provided, the result will contain only the values loaded from the string. Returns: :obj:`ConfigMappingType`: A nested dictionary of config values. 
""" file_path = os.path.join(file_dir, file_name) raw_config = load_config_file_as_dict(file_path) # We always run `nested_combine()` because it has the side effect # of making a copy of the objects provided. This prevents us # from editing items which also sit within the cache. return nested_combine(configs or {}, raw_config) def load_config_resource(package: str, file_name: str) -> ConfigMappingType: """Load a config resource from a python package. Args: package (str): The name of the python package to load the resource from. file_name (str): The filename of the file to be loaded. If the filename is ``pyproject.toml`` then the file is loaded in ``toml`` format, but otherwise is assumed to be in ``ini`` format (as per ``.sqlfluff``). Returns: :obj:`ConfigMappingType`: A nested dictionary of config values. This is primarily used when loading configuration bundled with a SQLFluff plugin, or to load the default config for SQLFluff itself. By loading config from the package directly we avoid some of the path resolution which otherwise occurs. This is also more compatible with ``mypyc`` because it avoids the use of the ``__file__`` attribute to find the default config. Any paths found in the loaded config are resolved relative to ``os.getcwd()``. For more information about resource loading, see the docs for importlib: https://docs.python.org/3/library/importlib.resources.html """ config_string = files(package).joinpath(file_name).read_text() # NOTE: load_config_string_as_dict is cached. return load_config_string_as_dict( config_string, os.getcwd(), logging_reference=f"", ) def load_config_string( config_string: str, configs: Optional[ConfigMappingType] = None, working_path: Optional[str] = None, ) -> ConfigMappingType: """Load a config from a string in ini format. Args: config_string (str): The raw config file as a string. The content is assumed to be in the the ``.ini`` format of a ``.sqlfluff`` file (i.e. not in ``.toml`` format). configs (ConfigMappingType, optional): A base set of configs to merge the loaded configs onto. If not provided, the result will contain only the values loaded from the string. working_path (str, optional): The working path to use for the resolution of any paths specified in the config. If not provided then ``os.getcwd()`` is used as a default. Returns: :obj:`ConfigMappingType`: A nested dictionary of config values. """ filepath = working_path or os.getcwd() raw_config = load_config_string_as_dict( config_string, filepath, logging_reference="" ) # We always run `nested_combine()` because it has the side effect # of making a copy of the objects provided. This prevents us # from editing items which also sit within the cache. return nested_combine(configs or {}, raw_config) @cache def load_config_at_path(path: str) -> ConfigMappingType: """Load config files at a given path. Args: path (str): The directory to search for config files. Returns: :obj:`ConfigMappingType`: A nested dictionary of config values. This function will search for all valid config files at the given path, load any found and combine them into a config mapping. If multiple valid files are found, they are resolved in priority order, where ``pyproject.toml`` is given the highest precedence, followed by ``.sqlfluff``, ``pep8.ini``, ``tox.ini`` and finally ``setup.cfg``. By accepting only a path string, we enable efficient caching of results, such that configuration can be reused between files without reloading the information from disk. """ # The potential filenames we would look for at this path. 
# NB: later entries in this list overwrite earlier ones. filename_options = [ "setup.cfg", "tox.ini", "pep8.ini", ".sqlfluff", "pyproject.toml", ] configs: ConfigMappingType = {} if os.path.isdir(path): p = path else: p = os.path.dirname(path) d = os.listdir(os.path.expanduser(p)) # iterate this way round to make sure things overwrite in the right direction. # NOTE: The `configs` variable is passed back in at each stage. for fname in filename_options: if fname in d: configs = load_config_file(p, fname, configs=configs) return configs def _load_user_appdir_config() -> ConfigMappingType: """Load the config from the user's OS specific appdir config directory.""" user_config_dir_path = _get_user_config_dir_path(sys.platform) if os.path.exists(user_config_dir_path): return load_config_at_path(user_config_dir_path) else: return {} def load_config_up_to_path( path: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, ) -> ConfigMappingType: """Loads a selection of config files from both the path and its parent paths. Args: path (str): The directory which is the target of the search. Config files in subdirectories will not be loaded by this method, but valid config files between this path and the current working path will. extra_config_path (str, optional): An additional path to load config from. This path is not used in iterating through intermediate paths, and is loaded last (taking the highest precedence in combining the loaded configs). ignore_local_config (bool, optional, defaults to False): If set to True, this skips loading configuration from the user home directory (``~``) or ``appdir`` path. Returns: :obj:`ConfigMappingType`: A nested dictionary of config values. We layer each of the configs on top of each other, starting with any home or user configs (e.g. in ``appdir`` or home (``~``)), then any local project configuration and then any explicitly specified config paths. """ # 1) AppDir & Home config if not ignore_local_config: user_appdir_config = _load_user_appdir_config() user_config = load_config_at_path(os.path.expanduser("~")) else: user_config, user_appdir_config = {}, {} # 2) Local project config parent_config_stack = [] config_stack = [] if not ignore_local_config: # Finding all paths between here and the home # directory. We could start at the root of the filesystem, # but depending on the user's setup, this might result in # permissions errors. parent_config_paths = list( iter_intermediate_paths( Path(path).absolute(), Path(os.path.expanduser("~")) ) ) # Stripping off the home directory and the current working # directory, since they are both covered by other code # here. parent_config_paths = parent_config_paths[1:-1] parent_config_stack = [ load_config_at_path(str(p.resolve())) for p in list(parent_config_paths) ] # Resolve paths to ensure caching is accurate. config_paths = iter_intermediate_paths(Path(path).absolute(), Path.cwd()) config_stack = [load_config_at_path(str(p.resolve())) for p in config_paths] # 3) Extra config paths. # When calling `load_config_file_as_dict` we resolve the path first so that caching # is more efficient. extra_config = {} if extra_config_path: try: extra_config = load_config_file_as_dict( str(Path(extra_config_path).resolve()) ) except FileNotFoundError: raise SQLFluffUserError( f"Extra config path '{extra_config_path}' does not exist." ) return nested_combine( user_appdir_config, user_config, *parent_config_stack, *config_stack, extra_config, ) class ConfigLoader: """The class for loading config files. 
NOTE: Deprecated class maintained because it was in our example plugin for a long while. Remove once this warning has been live for an appropriate amount of time. """ def __init__(self) -> None: # pragma: no cover config_logger.warning( "ConfigLoader is deprecated, and no longer necessary. " "Please update your plugin to use the config loading functions directly " "to remove this message." ) @classmethod def get_global(cls) -> ConfigLoader: # pragma: no cover """Get the singleton loader.""" config_logger.warning( "ConfigLoader.get_global() is deprecated, and no longer necessary. " "Please update your plugin to use the config loading functions directly " "to remove this message." ) return cls() def load_config_resource( self, package: str, file_name: str ) -> ConfigMappingType: # pragma: no cover """Load a config resource. NOTE: Deprecated classmethod maintained because it was in our example plugin for a long while. Remove once this warning has been live for an appropriate amount of time. """ config_logger.warning( "ConfigLoader.load_config_resource() is deprecated. Please update " "your plugin to call sqlfluff.core.config.loader.load_config_resource() " "directly to remove this message." ) return load_config_resource(package, file_name) sqlfluff-3.4.2/src/sqlfluff/core/config/removed.py000066400000000000000000000231551503426445100221750ustar00rootroot00000000000000"""Records of deprecated and removed config variables.""" import logging from dataclasses import dataclass from typing import Callable, Optional, Union from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.dict import ( NestedStringDict, nested_dict_get, nested_dict_set, records_to_nested_dict, ) from sqlfluff.core.types import ConfigMappingType, ConfigValueOrListType # Instantiate the config logger config_logger = logging.getLogger("sqlfluff.config") @dataclass class _RemovedConfig: old_path: tuple[str, ...] warning: str new_path: Optional[tuple[str, ...]] = None translation_func: Optional[ Callable[[ConfigValueOrListType], ConfigValueOrListType] ] = None @property def formatted_old_key(self) -> str: """Format the old key in a way similar to a config file.""" return ":".join(self.old_path) @property def formatted_new_key(self) -> str: """Format the new key (assuming it exists) in a way similar to a config file.""" assert ( self.new_path ), "`formatted_new_key` can only be called if a `new_path` is set." return ":".join(self.new_path) RemovedConfigMapType = dict[str, Union[_RemovedConfig, "RemovedConfigMapType"]] REMOVED_CONFIGS = [ _RemovedConfig( ("rules", "L003", "hanging_indents"), ( "Hanging indents are no longer supported in SQLFluff " "from version 2.0.0 onwards. See " "https://docs.sqlfluff.com/en/stable/perma/hanging_indents.html" ), ), _RemovedConfig( ("rules", "max_line_length"), ( "The max_line_length config has moved " "from sqlfluff:rules to the root sqlfluff level." ), ("max_line_length",), (lambda x: x), ), _RemovedConfig( ("rules", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L002", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L003", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." 
), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L004", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L016", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "indent_unit"), ( "The indent_unit config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "indent_unit"), (lambda x: x), ), _RemovedConfig( ("rules", "LT03", "operator_new_lines"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":binary_operator)." ), ("layout", "type", "binary_operator", "line_position"), (lambda x: "trailing" if x == "before" else "leading"), ), _RemovedConfig( ("rules", "comma_style"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":comma)." ), ("layout", "type", "comma", "line_position"), (lambda x: x), ), # LT04 used to have a more specific version of the same config itself. _RemovedConfig( ("rules", "LT04", "comma_style"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":comma)." ), ("layout", "type", "comma", "line_position"), (lambda x: x), ), _RemovedConfig( ("rules", "L003", "lint_templated_tokens"), "No longer used.", ), _RemovedConfig( ("core", "recurse"), "Removed as unused in production and unnecessary for debugging.", ), _RemovedConfig( ("rules", "references.quoting", "force_enable"), "No longer used. The dialects which used to block this rule no longer do.", ), ] # Actually make a dict which matches the structure. REMOVED_CONFIG_MAP = records_to_nested_dict( (removed_config.old_path, removed_config) for removed_config in REMOVED_CONFIGS ) def validate_config_dict_for_removed( config: ConfigMappingType, logging_reference: str, removed_config: NestedStringDict[_RemovedConfig] = REMOVED_CONFIG_MAP, root_config_ref: Optional[ConfigMappingType] = None, ) -> None: """Validates a config dict against removed values. Where a value can be updated or translated, it mutates the config object. In general the `removed_config` & `root_config_ref` arguments are present only to enable recursion and shouldn't be necessary for general use of this function. """ # If no root ref provided, then assume it's the config provided. # NOTE: During recursion, this should be set explicitly. root_config_ref = root_config_ref or config # Iterate through a copy of the config keys, so we can safely mutate # the underlying dict. for key in list(config.keys()): # Is there a removed config to compare to? if key not in removed_config: continue removed_value = removed_config[key] # If it's a section, recurse if isinstance(removed_value, dict): config_section = config[key] assert isinstance( config_section, dict ), f"Expected `{key}` to be a section not a value." validate_config_dict_for_removed( config_section, logging_reference=logging_reference, removed_config=removed_value, root_config_ref=root_config_ref, ) # If that validation resulted in an empty dict, also remove # the reference in this layer. if not config_section: del config[key] continue # Otherwise handle it directly. 
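# At this point `removed_value` must be a leaf `_RemovedConfig` record # (the nested-section case was handled by the recursion above), which # the assertion below makes explicit. 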
assert isinstance(removed_value, _RemovedConfig) # If there isn't a mapping option, just raise an error if not (removed_value.translation_func and removed_value.new_path): raise SQLFluffUserError( f"Config file {logging_reference!r} set an outdated config " f"value {removed_value.formatted_old_key}." f"\n\n{removed_value.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/perma/" "configuration.html for more details." ) # Otherwise perform the translation. # First, check whether we have already set the new path. try: # Try and fetch a value at the new path. # NOTE: We don't actually handle the return value. nested_dict_get(root_config_ref, removed_value.new_path) # Raise a warning. config_logger.warning( f"\nWARNING: Config file {logging_reference} set a deprecated " f"config value `{removed_value.formatted_old_key}` (which can be " "migrated) but ALSO set the value it would be migrated to. The new " f"value (`{removed_value.formatted_new_key}`) takes precedence. " "Please update your configuration to remove this warning. " f"\n\n{removed_value.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/perma/" "configuration.html for more details.\n" ) # Remove the corresponding value from the dict object as invalid. del config[key] continue except KeyError: pass # If we haven't already set the new path then mutate and warn. old_value = config[key] assert not isinstance( old_value, dict ), f"Expected `{key}` to be a value not a section." new_value = removed_value.translation_func(old_value) # NOTE: At the stage of emitting this warning, we may not yet # have set up red logging because we haven't yet loaded the config # file. For that reason, this error message has a bit more padding. config_logger.warning( f"\nWARNING: Config file {logging_reference} set a deprecated config " f"value `{removed_value.formatted_old_key}`. This will be " "removed in a later release. This has been mapped to " f"`{removed_value.formatted_new_key}` set to a value of " f"{new_value!r} for this run. " "Please update your configuration to remove this warning. " f"\n\n{removed_value.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/perma/" "configuration.html for more details.\n" ) # Write the new value and delete the old nested_dict_set(root_config_ref, removed_value.new_path, new_value) del config[key] sqlfluff-3.4.2/src/sqlfluff/core/config/toml.py000066400000000000000000000056451503426445100215100ustar00rootroot00000000000000"""Methods for loading config from pyproject.toml files.""" import sys from typing import Any, TypeVar if sys.version_info >= (3, 11): import tomllib else: # pragma: no cover import toml as tomllib from sqlfluff.core.helpers.dict import ( NestedDictRecord, iter_records_from_nested_dict, records_to_nested_dict, ) from sqlfluff.core.types import ConfigMappingType T = TypeVar("T") def _condense_rule_record(record: NestedDictRecord[T]) -> NestedDictRecord[T]: """Helper function to condense the rule section of a toml config.""" key, value = record if len(key) > 2: key = (".".join(key[:-1]), key[-1]) return key, value def _validate_structure(raw_config: dict[str, Any]) -> ConfigMappingType: """Helper function to narrow types for use by SQLFluff. This is a recursive function on any dict keys found. """ validated_config: ConfigMappingType = {} for key, value in raw_config.items(): if isinstance(value, dict): validated_config[key] = _validate_structure(value) elif isinstance(value, list): # Coerce all list items to strings, to be in line # with the behaviour of ini configs. 
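# e.g. an illustrative toml value of `[1, 2]` is loaded here as # `["1", "2"]`, matching what an ini-style file would have provided. 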
validated_config[key] = [str(item) for item in value] elif isinstance(value, (str, int, float, bool)) or value is None: validated_config[key] = value else: # pragma: no cover # Whatever we found, make it into a string. # This is very unlikely to happen and is more for completeness. validated_config[key] = str(value) return validated_config def load_toml_file_config(filepath: str) -> ConfigMappingType: """Read the SQLFluff config section of a pyproject.toml file. We don't need to change any key names here, because the root section of the toml file format is `tool.sqlfluff.core`. NOTE: Toml files are always encoded in UTF-8. That is a necessary part of the toml spec: https://toml.io/en/v1.0.0 """ with open(filepath, mode="r", encoding="utf-8") as file: toml_dict = tomllib.loads(file.read()) config_dict = _validate_structure(toml_dict.get("tool", {}).get("sqlfluff", {})) # NOTE: For the "rules" section of the sqlfluff config, # rule names are often qualified with a dot ".". In the # toml scenario this can get interpreted as a nested # section, and we resolve that edge case here. if "rules" not in config_dict: # No rules section, so no need to resolve. return config_dict rules_section = config_dict["rules"] assert isinstance(rules_section, dict), ( "Expected to find section in `rules` section of config, " f"but instead found {rules_section}" ) # Condense the rules section. config_dict["rules"] = records_to_nested_dict( _condense_rule_record(record) for record in iter_records_from_nested_dict(rules_section) ) return config_dict sqlfluff-3.4.2/src/sqlfluff/core/config/validate.py000066400000000000000000000072101503426445100223170ustar00rootroot00000000000000"""Methods for validating config dicts.""" from sqlfluff.core.config.removed import validate_config_dict_for_removed from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.types import ConfigMappingType ALLOWABLE_LAYOUT_CONFIG_KEYS = ( "spacing_before", "spacing_after", "spacing_within", "line_position", "align_within", "align_scope", "keyword_line_position", "keyword_line_position_exclusions", ) def _validate_layout_config(config: ConfigMappingType, logging_reference: str) -> None: """Validate the layout config section of the config. We check for valid key values and for the depth of the structure. NOTE: For now we don't check that the "type" is a valid one to reference, or that the values are valid. For the values, these are likely to be rejected by the layout routines at runtime. The last risk area is validating that the type is a valid one, but that should be handled in the same way as the above. """ layout_section = config.get("layout", {}) if not layout_section: return None preamble = f"Config file {logging_reference!r} set an invalid `layout` option. " reference = ( "See https://docs.sqlfluff.com/en/stable/perma/layout.html" "#configuring-layout for more details." ) if not isinstance(layout_section, dict): raise SQLFluffUserError( preamble + f"Found value {layout_section!r} instead of a valid layout section. " + reference ) # The sections within layout can only be "type" (currently). non_type_keys = set(layout_section.keys()) - {"type"} type_section = layout_section.get("type", {}) if non_type_keys or not type_section or not isinstance(type_section, dict): raise SQLFluffUserError( preamble + "Only sections of the form `sqlfluff:layout:type:...` are valid. 
" + reference ) for layout_type, layout_section in type_section.items(): if not isinstance(layout_section, dict): raise SQLFluffUserError( preamble + f"Layout config for {layout_type!r} is invalid. Expected a section. " + reference ) invalid_keys = set(layout_section.keys()) - set(ALLOWABLE_LAYOUT_CONFIG_KEYS) if invalid_keys: raise SQLFluffUserError( preamble + f"Layout config for type {layout_type!r} is invalid. " + f"Found the following invalid keys: {invalid_keys}. " + reference ) for key in ALLOWABLE_LAYOUT_CONFIG_KEYS: if key in layout_section: if isinstance(layout_section[key], dict): raise SQLFluffUserError( preamble + f"Layout config for type {layout_type!r} is invalid. " + "Found the an unexpected section rather than " + f"value for {key}. " + reference ) def validate_config_dict(config: ConfigMappingType, logging_reference: str) -> None: """Validate a config dict. Currently we validate: - Removed and deprecated values. - Layout configuration structure. Using this method ensures that any later validation will also be applied. NOTE: Some of these method may mutate the config object where they are able to correct issues. """ # Validate the config for any removed values validate_config_dict_for_removed(config, logging_reference) # Validate layout section _validate_layout_config(config, logging_reference) sqlfluff-3.4.2/src/sqlfluff/core/default_config.cfg000066400000000000000000000324221503426445100223440ustar00rootroot00000000000000[sqlfluff] # verbose is an integer (0-2) indicating the level of log output verbose = 0 # Turn off color formatting of output nocolor = None # Supported dialects https://docs.sqlfluff.com/en/stable/perma/dialects.html # Or run 'sqlfluff dialects' dialect = None # One of [raw|jinja|python|placeholder] templater = jinja # Comma separated list of rules to check, default to all rules = all # Comma separated list of rules to exclude, or None exclude_rules = None # Below controls SQLFluff output, see max_line_length for SQL output output_line_length = 80 # Number of passes to run before admitting defeat runaway_limit = 10 # Ignore errors by category (one or more of the following, separated by commas: lexing,linting,parsing,templating) ignore = None # Warn only for rule codes (one of more rule codes, separated by commas: e.g. LT01,LT02) # Also works for templating and parsing errors by using TMP or PRS warnings = None # Whether to warn about unneeded '-- noqa:' comments. warn_unused_ignores = False # Ignore linting errors found within sections of code coming directly from # templated code (e.g. from within Jinja curly braces. Note that it does not # ignore errors from literal code found within template loops. ignore_templated_areas = True # can either be autodetect or a valid encoding e.g. utf-8, utf-8-sig encoding = autodetect # Ignore inline overrides (e.g. to test if still required) disable_noqa = False # Ignore inline overrides except those listed. # This take priority over `disabled_noqa`. disable_noqa_except = None # Comma separated list of file extensions to lint # NB: This config will only apply in the root folder sql_file_exts = .sql,.sql.j2,.dml,.ddl # Allow fix to run on files, even if they contain parsing errors # Note altering this is NOT RECOMMENDED as can corrupt SQL fix_even_unparsable = False # Very large files can make the parser effectively hang. # The more efficient check is the _byte_ limit check which # is enabled by default. The previous _character_ limit check # is still present for backward compatibility. 
This will be # removed in a future version. # Set either to 0 to disable. large_file_skip_char_limit = 0 large_file_skip_byte_limit = 20000 # CPU processes to use while linting. # If positive, just implies number of processes. # If negative or zero, implies number_of_cpus - specified_number. # e.g. -1 means use all processors but one. 0 means all cpus. processes = 1 # Max line length is set by default to be in line with the dbt style guide. # https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md # Set to zero or negative to disable checks. max_line_length = 80 # NOTE: Templater Variant rendering should currently be considered EXPERIMENTAL. # Only set `render_variant_limit` to more than 1 if you know what you're doing! # Implementation of this will also depend on your templater. render_variant_limit = 1 [sqlfluff:indentation] # See https://docs.sqlfluff.com/en/stable/perma/indent_locations.html indent_unit = space tab_space_size = 4 indented_joins = False indented_ctes = False indented_using_on = True indented_on_contents = True indented_then = True indented_then_contents = True allow_implicit_indents = False template_blocks_indent = True # This is a comma separated list of elements to skip # indentation edits to. skip_indentation_in = script_content # If comments are found at the end of long lines, we default to moving # them to the line _before_ their current location as the convention is # that a comment precedes the line it describes. However if you prefer # comments moved _after_, this configuration setting can be set to "after". trailing_comments = before # To exclude comment lines from indentation entirely set this to "True". ignore_comment_lines = False # Layout configuration # See https://docs.sqlfluff.com/en/stable/perma/layout_spacing.html [sqlfluff:layout:type:comma] spacing_before = touch line_position = trailing [sqlfluff:layout:type:binary_operator] spacing_within = touch line_position = leading [sqlfluff:layout:type:column_path_operator] spacing_within = touch line_position = leading [sqlfluff:layout:type:statement_terminator] spacing_before = touch line_position = trailing [sqlfluff:layout:type:end_of_file] spacing_before = touch [sqlfluff:layout:type:set_operator] line_position = alone:strict [sqlfluff:layout:type:start_bracket] spacing_after = touch [sqlfluff:layout:type:end_bracket] spacing_before = touch [sqlfluff:layout:type:start_square_bracket] spacing_after = touch [sqlfluff:layout:type:end_square_bracket] spacing_before = touch [sqlfluff:layout:type:start_angle_bracket] spacing_after = touch [sqlfluff:layout:type:end_angle_bracket] spacing_before = touch [sqlfluff:layout:type:casting_operator] spacing_before = touch spacing_after = touch:inline [sqlfluff:layout:type:slice] spacing_before = touch spacing_after = touch [sqlfluff:layout:type:dot] spacing_before = touch spacing_after = touch [sqlfluff:layout:type:comparison_operator] spacing_within = touch line_position = leading [sqlfluff:layout:type:assignment_operator] spacing_within = touch line_position = leading [sqlfluff:layout:type:object_reference] spacing_within = touch:inline [sqlfluff:layout:type:numeric_literal] spacing_within = touch:inline [sqlfluff:layout:type:sign_indicator] spacing_after = touch:inline [sqlfluff:layout:type:tilde] spacing_after = touch:inline [sqlfluff:layout:type:function_name] spacing_within = touch:inline [sqlfluff:layout:type:function_contents] spacing_before = touch:inline [sqlfluff:layout:type:function_parameter_list] spacing_before = touch:inline 
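# As an illustrative (hypothetical) example of overriding one of these # layout sections, a user's own `.sqlfluff` could set leading commas with: # [sqlfluff:layout:type:comma] # line_position = leading 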
[sqlfluff:layout:type:array_type] spacing_within = touch:inline [sqlfluff:layout:type:typed_array_literal] spacing_within = touch [sqlfluff:layout:type:sized_array_type] spacing_within = touch [sqlfluff:layout:type:struct_type] spacing_within = touch:inline [sqlfluff:layout:type:bracketed_arguments] spacing_before = touch:inline [sqlfluff:layout:type:match_condition] spacing_within = touch:inline [sqlfluff:layout:type:typed_struct_literal] spacing_within = touch [sqlfluff:layout:type:semi_structured_expression] spacing_within = touch:inline spacing_before = touch:inline [sqlfluff:layout:type:array_accessor] spacing_before = touch:inline [sqlfluff:layout:type:colon] spacing_before = touch [sqlfluff:layout:type:colon_delimiter] spacing_before = touch spacing_after = touch [sqlfluff:layout:type:path_segment] spacing_within = touch [sqlfluff:layout:type:sql_conf_option] spacing_within = touch [sqlfluff:layout:type:sqlcmd_operator] # NOTE: This is the spacing between the operator and the colon spacing_before = touch [sqlfluff:layout:type:slash] spacing_before = any spacing_after = any [sqlfluff:layout:type:comment] spacing_before = any spacing_after = any [sqlfluff:layout:type:pattern_expression] # Snowflake pattern expressions shouldn't have their spacing changed. spacing_within = any [sqlfluff:layout:type:placeholder] # Placeholders exist "outside" the rendered SQL syntax # so we shouldn't enforce any particular spacing around # them. spacing_before = any spacing_after = any [sqlfluff:layout:type:common_table_expression] # The definition part of a CTE should fit on one line where possible. # Users who regularly define column names in their CTEs # may wish to relax this config to just `single`. spacing_within = single:inline # By setting a selection of clauses to "alone", we hint to the reflow # algorithm that in the case of a long single line statement, the # first place to add newlines would be around these clauses. # Setting this to "alone:strict" would always _force_ line breaks # around them even if the line isn't too long. [sqlfluff:layout:type:select_clause] line_position = alone [sqlfluff:layout:type:where_clause] line_position = alone keyword_line_position = leading [sqlfluff:layout:type:from_clause] line_position = alone [sqlfluff:layout:type:join_clause] line_position = alone [sqlfluff:layout:type:groupby_clause] line_position = alone keyword_line_position = leading [sqlfluff:layout:type:orderby_clause] # NOTE: Order by clauses appear in many places other than in a select # clause. To avoid unexpected behaviour we use `leading` in this # case rather than `alone`. line_position = leading keyword_line_position = leading keyword_line_position_exclusions = window_specification, aggregate_order_by, withingroup_clause [sqlfluff:layout:type:having_clause] line_position = alone keyword_line_position = leading [sqlfluff:layout:type:limit_clause] line_position = alone # Template loop tokens shouldn't dictate spacing around them. 
[sqlfluff:layout:type:template_loop] spacing_before = any spacing_after = any [sqlfluff:templater] unwrap_wrapped_queries = True [sqlfluff:templater:jinja] apply_dbt_builtins = True # Some rules can be configured directly from the config common to other rules [sqlfluff:rules] allow_scalar = True single_table_references = consistent unquoted_identifiers_policy = all [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.identifiers] # Unquoted identifiers extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.functions] # Function names extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.literals] # Null & Boolean Literals capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.types] # Data Types extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:ambiguous.join] # Fully qualify JOIN clause fully_qualify_join_types = inner [sqlfluff:rules:ambiguous.column_references] # GROUP BY/ORDER BY column references group_by_and_order_by_style = consistent [sqlfluff:rules:aliasing.table] # Aliasing preference for tables aliasing = explicit [sqlfluff:rules:aliasing.column] # Aliasing preference for columns aliasing = explicit [sqlfluff:rules:aliasing.unused] alias_case_check = dialect [sqlfluff:rules:aliasing.length] min_alias_length = None max_alias_length = None [sqlfluff:rules:aliasing.forbid] # Avoid table aliases in from clauses and join conditions. # Disabled by default for all dialects unless explicitly enabled. # We suggest instead using aliasing.length (AL06) in most cases. force_enable = False [sqlfluff:rules:convention.not_equal] # Consistent usage of preferred "not equal to" comparison preferred_not_equal_style = consistent [sqlfluff:rules:convention.select_trailing_comma] # Trailing commas select_clause_trailing_comma = forbid [sqlfluff:rules:convention.count_rows] # Consistent syntax to count all rows prefer_count_1 = False prefer_count_0 = False [sqlfluff:rules:convention.terminator] # Semi-colon formatting approach multiline_newline = False require_final_semicolon = False [sqlfluff:rules:convention.blocked_words] # Comma separated list of blocked words that should not be used blocked_words = None blocked_regex = None match_source = False [sqlfluff:rules:convention.quoted_literals] # Consistent usage of preferred quotes for quoted literals preferred_quoted_literal_style = consistent # Disabled for dialects that do not support single and double quotes for quoted literals (e.g. Postgres) force_enable = False [sqlfluff:rules:convention.casting_style] # SQL type casting preferred_type_casting_style = consistent [sqlfluff:rules:references.from] # References must be in FROM clause # Disabled for some dialects (e.g. 
bigquery) force_enable = False [sqlfluff:rules:references.qualification] # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None subqueries_ignore_external_references = False [sqlfluff:rules:references.consistent] # References must be consistently used # Disabled for some dialects (e.g. bigquery) force_enable = False [sqlfluff:rules:references.keywords] # Keywords should not be used as identifiers. unquoted_identifiers_policy = aliases quoted_identifiers_policy = none # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:references.special_chars] # Special characters in identifiers unquoted_identifiers_policy = all quoted_identifiers_policy = all allow_space_in_identifier = False additional_allowed_characters = None ignore_words = None ignore_words_regex = None [sqlfluff:rules:references.quoting] # Policy on quoted and unquoted identifiers prefer_quoted_identifiers = False prefer_quoted_keywords = False ignore_words = None ignore_words_regex = None case_sensitive = True [sqlfluff:rules:layout.long_lines] # Line length ignore_comment_lines = False ignore_comment_clauses = False [sqlfluff:rules:layout.newlines] maximum_empty_lines_between_statements = 2 maximum_empty_lines_inside_statements = 1 [sqlfluff:rules:layout.select_targets] wildcard_policy = single [sqlfluff:rules:structure.subquery] # By default, allow subqueries in from clauses, but not join clauses forbid_subquery_in = join [sqlfluff:rules:structure.join_condition_order] preferred_first_table_in_join_clause = earlier sqlfluff-3.4.2/src/sqlfluff/core/dialects/000077500000000000000000000000001503426445100204775ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/dialects/__init__.py000066400000000000000000000101341503426445100226070ustar00rootroot00000000000000"""Contains SQL Dialects. Note that individual dialects are only imported as needed at runtime. This avoids circular references. To enable this, any modules outside of .dialects cannot import dialects directly. They should import `dialect_selector` and use that to fetch dialects. Within .dialects, each dialect is free to depend on other dialects as required. Any dependent dialects will be loaded as needed. """ from collections.abc import Iterator from importlib import import_module from typing import NamedTuple # Eventually it would be good to dynamically discover dialects # from any module beginning with "dialect_" within this folder. 
from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.errors import SQLFluffUserError _dialect_lookup = { "ansi": ("dialect_ansi", "ansi_dialect"), "athena": ("dialect_athena", "athena_dialect"), "bigquery": ("dialect_bigquery", "bigquery_dialect"), "clickhouse": ("dialect_clickhouse", "clickhouse_dialect"), "databricks": ("dialect_databricks", "databricks_dialect"), "db2": ("dialect_db2", "db2_dialect"), "doris": ("dialect_doris", "doris_dialect"), "duckdb": ("dialect_duckdb", "duckdb_dialect"), "exasol": ("dialect_exasol", "exasol_dialect"), "flink": ("dialect_flink", "flink_dialect"), "greenplum": ("dialect_greenplum", "greenplum_dialect"), "hive": ("dialect_hive", "hive_dialect"), "impala": ("dialect_impala", "impala_dialect"), "materialize": ("dialect_materialize", "materialize_dialect"), "mariadb": ("dialect_mariadb", "mariadb_dialect"), "mysql": ("dialect_mysql", "mysql_dialect"), "oracle": ("dialect_oracle", "oracle_dialect"), "postgres": ("dialect_postgres", "postgres_dialect"), "redshift": ("dialect_redshift", "redshift_dialect"), "snowflake": ("dialect_snowflake", "snowflake_dialect"), "soql": ("dialect_soql", "soql_dialect"), "sparksql": ("dialect_sparksql", "sparksql_dialect"), "sqlite": ("dialect_sqlite", "sqlite_dialect"), "starrocks": ("dialect_starrocks", "starrocks_dialect"), "teradata": ("dialect_teradata", "teradata_dialect"), "trino": ("dialect_trino", "trino_dialect"), "tsql": ("dialect_tsql", "tsql_dialect"), "vertica": ("dialect_vertica", "vertica_dialect"), } _legacy_dialects = { "exasol_fs": ( "As of 0.7.0 the 'exasol_fs' dialect has been combined with " "the 'exasol' dialect, and is no longer a standalone dialect. " "Please use the 'exasol' dialect instead." ), "spark3": ( "The 'spark3' dialect has been renamed to sparksql. " "Please use the 'sparksql' dialect instead." ), } def load_raw_dialect(label: str, base_module: str = "sqlfluff.dialects") -> Dialect: """Dynamically load a dialect.""" if label in _legacy_dialects: raise SQLFluffUserError(_legacy_dialects[label]) elif label not in _dialect_lookup: raise KeyError("Unknown dialect") module_name, name = _dialect_lookup[label] module = import_module(f"{base_module}.{module_name}") result: Dialect = getattr(module, name) result.add_update_segments({k: getattr(module, k) for k in dir(module)}) return result class DialectTuple(NamedTuple): """Dialect Tuple object for describing dialects.""" label: str name: str inherits_from: str docstring: str def dialect_readout() -> Iterator[DialectTuple]: """Generate a readout of available dialects.""" for dialect_label in sorted(_dialect_lookup): dialect = load_raw_dialect(dialect_label) yield DialectTuple( label=dialect_label, name=dialect.formatted_name, inherits_from=dialect.inherits_from or "nothing", docstring=dialect.docstring, ) def dialect_selector(s: str) -> Dialect: """Return a dialect given its name.""" dialect = load_raw_dialect(s) # Expand any callable references at this point. # NOTE: The result of .expand() is a new class. 
return dialect.expand() __all__ = [ "Dialect", "DialectTuple", "SQLFluffUserError", "load_raw_dialect", "dialect_readout", "dialect_selector", ] sqlfluff-3.4.2/src/sqlfluff/core/dialects/base.py000066400000000000000000000374011503426445100217700ustar00rootroot00000000000000"""Defines the base dialect class.""" import sys from typing import Any, Optional, Union, cast from sqlfluff.core.parser import ( BaseSegment, KeywordSegment, SegmentGenerator, StringParser, ) from sqlfluff.core.parser.grammar.base import BaseGrammar, Nothing from sqlfluff.core.parser.lexer import LexerType from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.types import BracketPairTuple, DialectElementType class Dialect: """Serves as the basis for runtime resolution of Grammar. Args: name (:obj:`str`): The name of the dialect, used for lookup. lexer_matchers (iterable of :obj:`StringLexer`): A structure defining the lexing config for this dialect. """ def __init__( self, name: str, root_segment_name: str, lexer_matchers: Optional[list[LexerType]] = None, library: Optional[dict[str, DialectElementType]] = None, sets: Optional[dict[str, set[Union[str, BracketPairTuple]]]] = None, inherits_from: Optional[str] = None, formatted_name: Optional[str] = None, docstring: Optional[str] = None, ) -> None: self._library = library or {} self.name = name self.lexer_matchers = lexer_matchers self.expanded = False self._sets = sets or {} self.inherits_from = inherits_from self.root_segment_name = root_segment_name # Attributes for documentation self.formatted_name: str = formatted_name or name self.docstring = docstring or f"The dialect for {self.formatted_name}." def __repr__(self) -> str: # pragma: no cover return f"<Dialect: {self.name}>" def expand(self) -> "Dialect": """Expand any callable references to concrete ones. This must be called before using the dialect, but allows more flexible definitions to happen at runtime. NOTE: This method returns a copy of the current dialect so that we don't pollute the original dialect or run into dependency issues. Returns: :obj:`Dialect`: a copy of the given dialect but with expanded references. """ # Are we already expanded? if self.expanded: # pragma: no cover raise ValueError("Attempted to re-expand an already expanded dialect.") expanded_copy = self.copy_as(name=self.name) # Expand any callable elements of the dialect. for key in expanded_copy._library: seg_gen = expanded_copy._library[key] if isinstance(seg_gen, SegmentGenerator): # If the element is callable, call it passing the current # dialect and store the result in its place. # Use the .replace() method for its error handling. expanded_copy.replace(**{key: seg_gen.expand(expanded_copy)}) # Expand any keyword sets. for keyword_set in [ "unreserved_keywords", "reserved_keywords", ]: # e.g. reserved_keywords, (JOIN, ...) # Make sure the values are available as KeywordSegments keyword_sets = expanded_copy.sets(keyword_set) for kw in keyword_sets: n = kw.capitalize() + "KeywordSegment" if n not in expanded_copy._library: expanded_copy._library[n] = StringParser(kw.lower(), KeywordSegment) expanded_copy.expanded = True return expanded_copy def sets(self, label: str) -> set[str]: """Allows access to sets belonging to this dialect. These sets belong to the dialect and are copied for sub dialects. These are used in combination with late-bound dialect objects to create some of the bulk-produced rules. """ assert label not in ( "bracket_pairs", "angle_bracket_pairs", ), f"Use `bracket_sets` to retrieve {label} set." 
if label not in self._sets: self._sets[label] = set() return cast(set[str], self._sets[label]) def bracket_sets(self, label: str) -> set[BracketPairTuple]: """Allows access to bracket sets belonging to this dialect.""" assert label in ( "bracket_pairs", "angle_bracket_pairs", ), "Invalid bracket set. Consider using `sets` instead." if label not in self._sets: self._sets[label] = set() return cast(set[BracketPairTuple], self._sets[label]) def update_keywords_set_from_multiline_string( self, set_label: str, values: str ) -> None: """Special function to update a keywords set from a multi-line string.""" self.sets(set_label).update( [n.strip().upper() for n in values.strip().split("\n")] ) def copy_as( self, name: str, formatted_name: Optional[str] = None, docstring: Optional[str] = None, ) -> "Dialect": """Copy this dialect and create a new one with a different name. This is the primary method for inheritance, after which the `replace` method can be used to override particular rules. """ # Are we already expanded? if self.expanded: # pragma: no cover # If we copy an already expanded dialect then any SegmentGenerators # won't respond. This is most likely a mistake. raise ValueError("Attempted to copy an already expanded dialect.") # Copy sets if they are passed, so they can be mutated independently new_sets = {} for label in self._sets: new_sets[label] = self._sets[label].copy() assert self.lexer_matchers return self.__class__( name=name, library=self._library.copy(), lexer_matchers=self.lexer_matchers.copy(), sets=new_sets, inherits_from=self.name, root_segment_name=self.root_segment_name, # NOTE: We don't inherit the documentation fields. formatted_name=formatted_name, docstring=docstring, ) def add(self, **kwargs: DialectElementType) -> None: """Add a segment to the dialect directly. This is the alternative to the decorator route, most useful for segments defined using `make`. Segments are passed in as kwargs. e.g. dialect.add(SomeSegment=StringParser("blah", KeywordSegment)) Note that multiple segments can be added in the same call as this method will iterate through the kwargs. """ for n in kwargs: if n in self._library: # pragma: no cover raise ValueError(f"{n!r} is already registered in {self!r}") self._library[n] = kwargs[n] def replace(self, **kwargs: DialectElementType) -> None: """Override a segment on the dialect directly. Usage is very similar to add, but elements specified must already exist. """ for n in kwargs: if n not in self._library: # pragma: no cover raise ValueError(f"{n!r} is not already registered in {self!r}") replacement = kwargs[n] # If trying to replace with same, just skip. if self._library[n] is replacement: continue # Check for replacement with a new but identical class. # This would be a sign of redundant definitions in the dialect. elif self._library[n] == replacement: raise ValueError( f"Attempted unnecessary identical redefinition of {n!r} in {self!r}" ) # pragma: no cover # To replace a segment, the replacement must either be a # subclass of the original, *or* it must have the same # public methods and/or fields as it. # NOTE: Other replacements aren't validated. subclass = False if isinstance(self._library[n], type) and not isinstance( # NOTE: The exception here is we _are_ allowed to replace a # segment with a `Nothing()` grammar, which shows that a segment # has been disabled. 
replacement, Nothing, ): assert isinstance( replacement, type ), f"Cannot replace {n!r} with {replacement}" old_seg = cast(type["BaseSegment"], self._library[n]) new_seg = cast(type["BaseSegment"], replacement) assert issubclass(old_seg, BaseSegment) assert issubclass(new_seg, BaseSegment) subclass = issubclass(new_seg, old_seg) if not subclass: if old_seg.type != new_seg.type: raise ValueError( # pragma: no cover f"Cannot replace {n!r} because 'type' property does not " f"match: {new_seg.type} != {old_seg.type}" ) base_dir = set(dir(self._library[n])) cls_dir = set(dir(new_seg)) missing = { n for n in base_dir.difference(cls_dir) if not n.startswith("_") } if missing: raise ValueError( # pragma: no cover f"Cannot replace {n!r} because it's not a subclass and " f"is missing these from base: {', '.join(missing)}" ) self._library[n] = replacement def add_update_segments(self, module_dct: dict[str, Any]) -> None: """Scans module dictionary, adding or replacing segment definitions.""" for k, v in module_dct.items(): if isinstance(v, type) and issubclass(v, BaseSegment): if k not in self._library: self.add(**{k: v}) else: non_seg_v = cast(Union[Matchable, SegmentGenerator], v) self.replace(**{k: non_seg_v}) def get_grammar(self, name: str) -> BaseGrammar: """Allow access to grammars pre-expansion. This is typically for dialect inheritance. This method also validates that the result is a grammar. """ if name not in self._library: # pragma: no cover raise ValueError(f"Element {name} not found in dialect.") grammar = self._library[name] if not isinstance(grammar, BaseGrammar): # pragma: no cover raise TypeError( f"Attempted to fetch non grammar [{name}] with get_grammar." ) return grammar def get_segment(self, name: str) -> type["BaseSegment"]: """Allow access to segments pre-expansion. This is typically for dialect inheritance. This method also validates that the result is a segment. """ if name not in self._library: # pragma: no cover raise ValueError(f"Element {name} not found in dialect.") segment = cast(type["BaseSegment"], self._library[name]) if issubclass(segment, BaseSegment): return segment else: # pragma: no cover raise TypeError( f"Attempted to fetch non segment [{name}] " f"with get_segment - type{type(segment)}" ) def ref(self, name: str) -> Matchable: """Return an object which acts as a late binding reference to the element named. NB: This requires the dialect to be expanded, and only returns Matchables as a result. """ if not self.expanded: # pragma: no cover raise RuntimeError("Dialect must be expanded before use.") if name in self._library: res = self._library[name] if res: assert not isinstance(res, SegmentGenerator) return res else: # pragma: no cover raise ValueError( "Unexpected Null response while fetching {!r} from {}".format( name, self.name ) ) elif name.endswith("KeywordSegment"): # pragma: no cover keyword = name[0:-14] keyword_tip = ( "\n\nThe syntax in the query is not (yet?) supported. Try to" " narrow down your query to a minimal, reproducible case and" " raise an issue on GitHub.\n\n" "Or, even better, see this guide on how to help contribute" " keyword and/or dialect updates:\n" "https://docs.sqlfluff.com/en/stable/perma/contribute_dialect_keywords.html" # noqa E501 ) # Keyword errors are common so avoid printing the whole, scary, # traceback as not that useful and confusing to people. 
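            # NOTE: `sys.tracebacklimit = 0` is process-global state; it
            # affects how *all* subsequent tracebacks are displayed, not
            # just the RuntimeError raised immediately below.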
            sys.tracebacklimit = 0
            raise RuntimeError(
                "Grammar refers to the "
                "{!r} keyword which was not found in the {} dialect.{}".format(
                    keyword.upper(), self.name, keyword_tip
                )
            )
        else:  # pragma: no cover
            raise RuntimeError(
                "Grammar refers to "
                "{!r} which was not found in the {} dialect.".format(name, self.name)
            )

    def set_lexer_matchers(self, lexer_matchers: list[LexerType]) -> None:
        """Set the lexer struct for the dialect.

        This is what is used for base dialects. For derived dialects
        (which don't exist yet) the assumption is that we'll introduce
        some kind of *patch* function which could be used to mutate
        an existing `lexer_matchers`.
        """
        self.lexer_matchers = lexer_matchers

    def get_lexer_matchers(self) -> list[LexerType]:
        """Fetch the lexer struct for this dialect."""
        if self.lexer_matchers:
            return self.lexer_matchers
        else:  # pragma: no cover
            raise ValueError(f"Lexing struct has not been set for dialect {self}")

    def patch_lexer_matchers(self, lexer_patch: list[LexerType]) -> None:
        """Patch an existing lexer struct.

        Used to edit the lexer of a sub-dialect.
        """
        buff = []
        if not self.lexer_matchers:  # pragma: no cover
            raise ValueError("Lexer struct must be defined before it can be patched!")
        # Make a new data struct for lookups
        patch_dict = {elem.name: elem for elem in lexer_patch}
        for elem in self.lexer_matchers:
            if elem.name in patch_dict:
                buff.append(patch_dict[elem.name])
            else:
                buff.append(elem)
        # Overwrite with the buffer once we're done
        self.lexer_matchers = buff

    def insert_lexer_matchers(self, lexer_patch: list[LexerType], before: str) -> None:
        """Insert new records into an existing lexer struct.

        Used to edit the lexer of a sub-dialect. The patch is inserted
        *before* whichever element is named in `before`.
        """
        buff = []
        found = False
        if not self.lexer_matchers:  # pragma: no cover
            raise ValueError("Lexer struct must be defined before it can be patched!")
        for elem in self.lexer_matchers:
            if elem.name == before:
                found = True
                for patch in lexer_patch:
                    buff.append(patch)
                buff.append(elem)
            else:
                buff.append(elem)
        if not found:  # pragma: no cover
            raise ValueError(
                f"Lexer struct insert before '{before}' failed because the "
                "tag was never found."
            )
        # Overwrite with the buffer once we're done
        self.lexer_matchers = buff

    def get_root_segment(self) -> Union[type[BaseSegment], Matchable]:
        """Get the root segment of the dialect."""
        return self.ref(self.root_segment_name)
sqlfluff-3.4.2/src/sqlfluff/core/dialects/common.py000066400000000000000000000012231503426445100223370ustar00rootroot00000000000000"""Common classes for dialects to use."""

from typing import NamedTuple, Optional

from sqlfluff.core.parser import BaseSegment


class AliasInfo(NamedTuple):
    """Details about a table alias."""

    ref_str: str  # Name given to the alias
    segment: Optional[BaseSegment]  # Identifier segment containing the name
    aliased: bool
    from_expression_element: BaseSegment
    alias_expression: Optional[BaseSegment]
    object_reference: Optional[BaseSegment]


class ColumnAliasInfo(NamedTuple):
    """Details about a column alias."""

    alias_identifier_name: str
    aliased_segment: BaseSegment
    column_reference_segments: list[BaseSegment]
sqlfluff-3.4.2/src/sqlfluff/core/errors.py000066400000000000000000000304151503426445100206000ustar00rootroot00000000000000"""Errors - these are closely linked to what used to be called violations.

NOTE: The BaseException class, which ValueError inherits from, defines
a custom __reduce__() method for pickling and unpickling exceptions.
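A pickle round-trip should preserve an error's content. As a minimal
illustrative sketch (constructing the base class directly, which library
code rarely does):

>>> import pickle
>>> err = SQLBaseError("example error", line_no=1, line_pos=2)
>>> pickle.loads(pickle.dumps(err)) == err
True
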
For the SQLBaseError, and its dependent classes, we define properties of
these exceptions which don't work well with that method, which is why we
redefine __reduce__() on each of these classes. Given the circumstances in
which they are called, they don't show up on coverage tracking.

https://stackoverflow.com/questions/49715881/how-to-pickle-inherited-exceptions
"""

from typing import TYPE_CHECKING, Any, Optional, Union, cast

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.parser import BaseSegment, PositionMarker
    from sqlfluff.core.rules import BaseRule, LintFix

CheckTuple = tuple[str, int, int]

SerializedObject = dict[str, Union[str, int, bool, list["SerializedObject"]]]


def _extract_position(segment: Optional["BaseSegment"]) -> dict[str, int]:
    """If a segment is present and is a literal, return its source position."""
    if segment:
        position = segment.pos_marker
        assert position
        if position.is_literal():
            return position.to_source_dict()
    # An empty location is an indicator of not being able to accurately
    # represent the location.
    return {}  # pragma: no cover


class SQLBaseError(ValueError):
    """Base Error Class for all violations."""

    _code: Optional[str] = None
    _name: Optional[str] = None
    _identifier = "base"
    _warning = False  # The default value for `warning`

    def __init__(
        self,
        description: Optional[str] = None,
        pos: Optional["PositionMarker"] = None,
        line_no: int = 0,
        line_pos: int = 0,
        ignore: bool = False,
        fatal: bool = False,
        warning: Optional[bool] = None,
    ) -> None:
        self.fatal = fatal
        self.ignore = ignore
        self.warning: bool = warning if warning is not None else self._warning
        self.description = description
        if pos:
            self.line_no, self.line_pos = pos.source_position()
        else:
            self.line_no = line_no
            self.line_pos = line_pos
        super().__init__(self.desc())

    def __eq__(self, other: Any) -> bool:
        """Errors compare equal if they are the same type and same content."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __reduce__(
        self,
    ) -> tuple[type["SQLBaseError"], tuple[Any, ...]]:
        """Prepare the SQLBaseError for pickling."""
        return type(self), (
            self.description,
            None,
            self.line_no,
            self.line_pos,
            self.ignore,
            self.fatal,
            self.warning,
        )

    @property
    def fixable(self) -> bool:
        """Should this error be considered fixable?"""
        return False

    def rule_code(self) -> str:
        """Fetch the code of the rule which caused this error."""
        return self._code or "????"

    def rule_name(self) -> str:
        """Fetch the name of the rule which caused this error."""
        return self._name or "????"

    def desc(self) -> str:
        """Fetch a description of this violation."""
        if self.description:
            return self.description
        return self.__class__.__name__  # pragma: no cover

    def to_dict(self) -> SerializedObject:
        """Return a dict of properties.

        This is useful in the API for outputting violations.
        """
        return {
            "start_line_no": self.line_no,
            "start_line_pos": self.line_pos,
            "code": self.rule_code(),
            "description": self.desc(),
            "name": getattr(self, "rule").name if hasattr(self, "rule") else "",
            "warning": self.warning,
        }

    def check_tuple(self) -> CheckTuple:
        """Get a tuple representing this error.
Mostly for testing.""" return ( self.rule_code(), self.line_no, self.line_pos, ) def source_signature(self) -> tuple[Any, ...]: """Return hashable source signature for deduplication.""" return (self.check_tuple(), self.desc()) def ignore_if_in(self, ignore_iterable: list[str]) -> None: """Ignore this violation if it matches the iterable.""" if self._identifier in ignore_iterable: self.ignore = True def warning_if_in(self, warning_iterable: list[str]) -> None: """Warning only for this violation if it matches the iterable. Designed for rule codes so works with L001, LL0X but also TMP or PRS for templating and parsing errors. Args: warning_iterable (list[str]): A list of strings representing the warning codes to check. Returns: None """ if self.rule_code() in warning_iterable or self.rule_name() in warning_iterable: self.warning = True class SQLTemplaterError(SQLBaseError): """An error which occurred during templating. Args: pos (:obj:`PosMarker`, optional): The position which the error occurred at. """ _code = "TMP" _identifier = "templating" class SQLFluffSkipFile(RuntimeError): """An error returned from a templater to skip a file.""" pass class SQLLexError(SQLBaseError): """An error which occurred during lexing. Args: pos (:obj:`PosMarker`, optional): The position which the error occurred at. """ _code = "LXR" _identifier = "lexing" class SQLParseError(SQLBaseError): """An error which occurred during parsing. Args: segment (:obj:`BaseSegment`, optional): The segment which is relevant for the failure in parsing. This is likely to be a subclass of `BaseSegment` rather than the parent class itself. This is mostly used for logging and for referencing position. """ _code = "PRS" _identifier = "parsing" def __init__( self, description: Optional[str] = None, segment: Optional["BaseSegment"] = None, line_no: int = 0, line_pos: int = 0, ignore: bool = False, fatal: bool = False, warning: Optional[bool] = None, ) -> None: # Store the segment on creation - we might need it later self.segment = segment super().__init__( description=description, pos=segment.pos_marker if segment else None, line_no=line_no, line_pos=line_pos, ignore=ignore, fatal=fatal, warning=warning, ) def __reduce__( self, ) -> tuple[type["SQLParseError"], tuple[Any, ...]]: """Prepare the SQLParseError for pickling.""" return type(self), ( self.description, self.segment, self.line_no, self.line_pos, self.ignore, self.fatal, self.warning, ) def to_dict(self) -> SerializedObject: """Return a dict of properties. This is useful in the API for outputting violations. For parsing errors we additionally add the length of the unparsable segment. """ _base_dict = super().to_dict() _base_dict.update( **_extract_position(self.segment), ) return _base_dict class SQLLintError(SQLBaseError): """An error which occurred during linting. In particular we reference the rule here to do extended logging based on the rule in question which caused the fail. Args: segment (:obj:`BaseSegment`, optional): The segment which is relevant for the failure in parsing. This is likely to be a subclass of `BaseSegment` rather than the parent class itself. This is mostly used for logging and for referencing position. 
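        rule (:obj:`BaseRule`): The rule which raised this error. Used for
            extended logging, and to fetch the rule code and name.
        fixes (:obj:`list` of :obj:`LintFix`, optional): Any fixes which
            would resolve this error. If any are present, the error is
            considered fixable.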
""" _identifier = "linting" def __init__( self, description: str, segment: "BaseSegment", rule: "BaseRule", fixes: Optional[list["LintFix"]] = None, ignore: bool = False, fatal: bool = False, warning: Optional[bool] = None, ) -> None: self.segment = segment self.rule = rule self.fixes = fixes or [] super().__init__( description=description, pos=segment.pos_marker if segment else None, ignore=ignore, fatal=fatal, warning=warning, ) def __reduce__( self, ) -> tuple[type["SQLLintError"], tuple[Any, ...]]: """Prepare the SQLLintError for pickling.""" return type(self), ( self.description, self.segment, self.rule, self.fixes, self.ignore, self.fatal, self.warning, ) def to_dict(self) -> SerializedObject: """Return a dict of properties. This is useful in the API for outputting violations. For linting errors we additionally add details of any fixes. """ _base_dict = super().to_dict() _base_dict.update( fixes=[fix.to_dict() for fix in self.fixes], **_extract_position(self.segment), ) # Edge case: If the base error doesn't have an end position # but we only have one fix and it _does_. Then use use that in the # overall fix. _fixes = cast(list[SerializedObject], _base_dict.get("fixes", [])) if "end_line_pos" not in _base_dict and len(_fixes) == 1: _fix = _fixes[0] # If the mandatory keys match... if ( _fix["start_line_no"] == _base_dict["start_line_no"] and _fix["start_line_pos"] == _base_dict["start_line_pos"] ): # ...then hoist all the optional ones from the fix. for key in [ "start_file_pos", "end_line_no", "end_line_pos", "end_file_pos", ]: _base_dict[key] = _fix[key] return _base_dict @property def fixable(self) -> bool: """Should this error be considered fixable?""" if self.fixes: return True return False def rule_code(self) -> str: """Fetch the code of the rule which cause this error.""" return self.rule.code def rule_name(self) -> str: """Fetch the name of the rule which cause this error.""" return self.rule.name def source_signature(self) -> tuple[Any, ...]: """Return hashable source signature for deduplication. For linting errors we need to dedupe on more than just location and description, we also need to check the edits potentially made, both in the templated file but also in the source. """ fix_raws = tuple( tuple(e.raw for e in f.edit) if f.edit else None for f in self.fixes ) _source_fixes: list[tuple[str, int, int]] = [] for fix in self.fixes: if not fix.edit: continue for edit in fix.edit: for source_edit in edit.source_fixes: # NOTE: It's important that we don't dedupe on the # templated slice for the source fix, because that will # be different for different locations in any loop. _source_fixes.append( ( source_edit.edit, source_edit.source_slice.start, source_edit.source_slice.stop, ) ) return (self.check_tuple(), self.description, fix_raws, tuple(_source_fixes)) def __repr__(self) -> str: return "".format( self.rule_code(), (self.line_no, self.line_pos), len(self.fixes), self.description, ) class SQLUnusedNoQaWarning(SQLBaseError): """A warning about an unused noqa directive.""" _code = "NOQA" _identifier = "noqa" _warning = True class SQLFluffUserError(ValueError): """An error which should be fed back to the user.""" sqlfluff-3.4.2/src/sqlfluff/core/formatter.py000066400000000000000000000046471503426445100212770ustar00rootroot00000000000000"""Defines the formatter interface which can be used by the CLI. The linter module provides an optional formatter input which effectively allows callbacks at various points of the linting process. 
This is primarily to allow printed output at various points by the CLI, but could also be used for logging our other processes looking to report back as the linting process continues. In this module we only define the interface. Any modules wishing to use the interface should override with their own implementation. """ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional from sqlfluff.core.types import Color if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.config import FluffConfig from sqlfluff.core.linter import LintedFile class FormatterInterface(ABC): """Generic formatter interface.""" @abstractmethod def dispatch_persist_filename(self, filename: str, result: str) -> None: """Called after a formatted file as been persisted to disk.""" ... @abstractmethod def dispatch_lint_header(self, fname: str, rules: list[str]) -> None: """Dispatch the header displayed before linting.""" ... @abstractmethod def dispatch_file_violations( self, fname: str, linted_file: "LintedFile", only_fixable: bool, warn_unused_ignores: bool, ) -> None: """Dispatch any violations found in a file.""" ... @abstractmethod def dispatch_dialect_warning(self, dialect: str) -> None: """Dispatch a warning for dialects.""" ... @abstractmethod def dispatch_template_header( self, fname: str, linter_config: "FluffConfig", file_config: Optional["FluffConfig"], ) -> None: """Dispatch the header displayed before templating.""" ... @abstractmethod def dispatch_parse_header(self, fname: str) -> None: """Dispatch the header displayed before parsing.""" ... @abstractmethod def dispatch_processing_header(self, processes: int) -> None: """Dispatch the header displayed before linting.""" ... @abstractmethod def dispatch_path(self, path: str) -> None: """Dispatch paths for display.""" ... @abstractmethod def colorize(self, s: str, color: Optional[Color] = None) -> str: """Optionally use ANSI colour codes to colour a string.""" ... sqlfluff-3.4.2/src/sqlfluff/core/helpers/000077500000000000000000000000001503426445100203515ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/helpers/__init__.py000066400000000000000000000005571503426445100224710ustar00rootroot00000000000000"""Helper methods for other classes. This module should have no dependencies on other parts of the SQLFluff package and each should also be free of dependencies on each other. This is to ensure each of these methods can be used anywhere within the project without fear of dependency issues. Methods are organised by the datatype they are designed to help with. """ sqlfluff-3.4.2/src/sqlfluff/core/helpers/dict.py000066400000000000000000000231551503426445100216540ustar00rootroot00000000000000"""Dict helpers, mostly used in config routines.""" from collections.abc import Iterable, Iterator, Sequence from copy import deepcopy from typing import Optional, TypeVar, Union, cast T = TypeVar("T") NestedStringDict = dict[str, Union[T, "NestedStringDict[T]"]] """Nested dict, with keys as strings. All values of the dict are either values of the given type variable T, or are themselves dicts with the same nested properties. Variables of this type are used regularly in configuration methods and classes. """ NestedDictRecord = tuple[tuple[str, ...], T] """Tuple form record of a setting in a NestedStringDict. The tuple of strings in the first element is the "address" in the NestedStringDict with the value as the second element on the tuple. 
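For example, the record ``(("foo", "bar"), "baz")`` corresponds to the value
``"baz"`` stored at ``{"foo": {"bar": "baz"}}`` in the nested form.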
""" def nested_combine(*dicts: NestedStringDict[T]) -> NestedStringDict[T]: """Combine an iterable of dictionaries. Each dictionary is combined into a result dictionary. For each key in the first dictionary, it will be overwritten by any same-named key in any later dictionaries in the iterable. If the element at that key is a dictionary, rather than just overwriting we use the same function to combine those dictionaries. Args: *dicts: An iterable of dictionaries to be combined. Returns: `dict`: A combined dictionary from the input dictionaries. NOTE: This method has the added side effect of copying all the dict objects within it. This effectively means that it can provide a layer of isolation. A simple example: >>> nested_combine({"a": {"b": "c"}}, {"a": {"d": "e"}}) {'a': {'b': 'c', 'd': 'e'}} Keys overwrite left to right: >>> nested_combine({"a": {"b": "c"}}, {"a": {"b": "e"}}) {'a': {'b': 'e'}} """ r: NestedStringDict[T] = {} for d in dicts: for k in d: if k in r and isinstance(r[k], dict): if isinstance(d[k], dict): # NOTE: The cast functions here are to appease mypy which doesn't # pick up on the `isinstance` calls above. r[k] = nested_combine( cast(NestedStringDict[T], r[k]), cast(NestedStringDict[T], d[k]) ) else: # pragma: no cover raise ValueError( "Key {!r} is a dict in one config but not another! PANIC: " "{!r}".format(k, d[k]) ) else: # In normal operation, these nested dicts should only contain # immutable objects like strings, or contain lists or dicts # which are simple to copy. We use deep copy to make sure that # and dicts or lists within the value are also copied. This should # also protect in future in case more exotic objects get added to # the dict. r[k] = deepcopy(d[k]) return r def dict_diff( left: NestedStringDict[T], right: NestedStringDict[T], ignore: Optional[list[str]] = None, ) -> NestedStringDict[T]: """Work out the difference between two dictionaries. Returns a dictionary which represents elements in the `left` dictionary which aren't in the `right` or are different to those in the `right`. If the element is a dictionary, we recursively look for differences in those dictionaries, likewise only returning the differing elements. NOTE: If an element is in the `right` but not in the `left` at all (i.e. an element has been *removed*) then it will not show up in the comparison. Args: left (:obj:`dict`): The object containing the *new* elements which will be compared against the other. right (:obj:`dict`): The object to compare against. ignore (:obj:`list` of `str`, optional): Keys to ignore. Returns: `dict`: A dictionary representing the difference. Basic functionality shown, especially returning the left as: >>> dict_diff({"a": "b", "c": "d"}, {"a": "b", "c": "e"}) {'c': 'd'} Ignoring works on a key basis: >>> dict_diff({"a": "b"}, {"a": "c"}) {'a': 'b'} >>> dict_diff({"a": "b"}, {"a": "c"}, ["a"]) {} """ buff: NestedStringDict[T] = {} for k in left: if ignore and k in ignore: continue # Is the key there at all? if k not in right: buff[k] = left[k] # Is the content the same? elif left[k] == right[k]: continue # If it's not the same but both are dicts, then compare elif isinstance(left[k], dict) and isinstance(right[k], dict): diff = dict_diff( cast(NestedStringDict[T], left[k]), cast(NestedStringDict[T], right[k]), ignore=ignore, ) # Only include the difference if non-null. 
if diff: buff[k] = diff # It's just different else: buff[k] = left[k] return buff def records_to_nested_dict( records: Iterable[NestedDictRecord[T]], ) -> NestedStringDict[T]: """Reconstruct records into a dict. >>> records_to_nested_dict( ... [(("foo", "bar", "baz"), "a"), (("foo", "bar", "biz"), "b")] ... ) {'foo': {'bar': {'baz': 'a', 'biz': 'b'}}} """ result: NestedStringDict[T] = {} for key, val in records: ref: NestedStringDict[T] = result for step in key[:-1]: # If the subsection isn't there, make it. if step not in ref: ref[step] = {} # Then step into it. subsection = ref[step] assert isinstance(subsection, dict) ref = subsection ref[key[-1]] = val return result def iter_records_from_nested_dict( nested_dict: NestedStringDict[T], ) -> Iterator[NestedDictRecord[T]]: """Walk a config dict and get config elements. >>> list( ... iter_records_from_nested_dict( ... {"foo":{"bar":{"baz": "a", "biz": "b"}}} ... ) ... ) [(('foo', 'bar', 'baz'), 'a'), (('foo', 'bar', 'biz'), 'b')] """ for key, val in nested_dict.items(): if isinstance(val, dict): for partial_key, sub_val in iter_records_from_nested_dict(val): yield (key,) + partial_key, sub_val else: yield (key,), val def nested_dict_get( dict_obj: NestedStringDict[T], keys: Sequence[str], key_index: int = 0 ) -> Union[T, NestedStringDict[T]]: """Perform a lookup in a nested dict object. Lookups are performed by iterating keys. >>> nested_dict_get( ... {"a": {"b": "c"}}, ("a", "b") ... ) 'c' Lookups may return sections of nested dicts. >>> nested_dict_get( ... {"a": {"b": "c"}}, ("a",) ... ) {'b': 'c'} Raises `KeyError` if any keys are not found. >>> nested_dict_get( ... {"a": {"b": "c"}}, ("p", "q") ... ) Traceback (most recent call last): ... KeyError: "'p' not found in nested dict lookup" Raises `KeyError` we run out of dicts before keys are exhausted. >>> nested_dict_get( ... {"a": {"b": "d"}}, ("a", "b", "c") ... ) Traceback (most recent call last): ... KeyError: "'b' found non dict value, but there are more keys to iterate: ('c',)" """ assert keys, "Nested dict lookup called without keys." assert key_index < len(keys), "Key exhaustion on nested dict lookup" next_key = keys[key_index] if next_key not in dict_obj: raise KeyError(f"{next_key!r} not found in nested dict lookup") next_value = dict_obj[next_key] # Are we all the way through the keys? if key_index + 1 == len(keys): # NOTE: Could be a section or a value. return next_value # If we're not all the way through the keys, go deeper if we can. if not isinstance(next_value, dict): raise KeyError( f"{next_key!r} found non dict value, but there are more keys to " f"iterate: {keys[key_index + 1 :]}" ) return nested_dict_get(next_value, keys, key_index=key_index + 1) def nested_dict_set( dict_obj: NestedStringDict[T], keys: Sequence[str], value: Union[T, NestedStringDict[T]], key_index: int = 0, ) -> None: """Set a value in a nested dict object. Lookups are performed by iterating keys. >>> d = {"a": {"b": "c"}} >>> nested_dict_set(d, ("a", "b"), "d") >>> d {'a': {'b': 'd'}} Values may set dicts. >>> d = {"a": {"b": "c"}} >>> nested_dict_set(d, ("a", "b"), {"d": "e"}) >>> d {'a': {'b': {'d': 'e'}}} Any keys not found will be created. >>> d = {"a": {"b": "c"}} >>> nested_dict_set(d, ("p", "q"), "r") >>> d {'a': {'b': 'c'}, 'p': {'q': 'r'}} Values may be overwritten with sub keys. >>> d = {"a": {"b": "c"}} >>> nested_dict_set(d, ("a", "b", "d"), "e") >>> d {'a': {'b': {'d': 'e'}}} """ assert keys, "Nested dict lookup called without keys." 
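    # `key_index` tracks how deep into `keys` the recursion has progressed;
    # each recursive call below advances it by one.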
assert key_index < len(keys), "Key exhaustion on nested dict lookup" next_key = keys[key_index] # Create an empty dictionary if key not found. if next_key not in dict_obj: dict_obj[next_key] = {} # Overwrite the value to a dict if the existing value isn't one. elif not isinstance(dict_obj[next_key], dict): dict_obj[next_key] = {} next_value = dict_obj[next_key] assert isinstance(next_value, dict) # Do we have more keys to set? # If we do, recurse: if key_index + 1 < len(keys): nested_dict_set(next_value, keys=keys, value=value, key_index=key_index + 1) # If we don't, then just set the value: else: dict_obj[next_key] = value sqlfluff-3.4.2/src/sqlfluff/core/helpers/file.py000066400000000000000000000063641503426445100216530ustar00rootroot00000000000000"""File Helpers for the parser module.""" import os.path from collections.abc import Iterator from pathlib import Path from typing import Optional import chardet def get_encoding(fname: str, config_encoding: str = "autodetect") -> str: """Get the encoding of the file (autodetect).""" if config_encoding != "autodetect": return config_encoding with open(fname, "rb") as f: data = f.read() return chardet.detect(data)["encoding"] def iter_intermediate_paths(inner_path: Path, outer_path: Path) -> Iterator[Path]: """Iterate paths between two given paths. If the `inner_path` is a subdirectory of the `outer_path` then all steps in between the two are yielded as Path objects, from outer to inner including the two at each end. If not, then the just the `outer_path` and `inner_path` are returned (in that order). NOTE: The current behaviour is not currently precisely as described above. Instead, we look for the lowest *common path* between the inner and outer paths. This is a superset of the originally intended logic, but is convenient until we have a good solution for the dbt templater project root path. * If there is not common path, the outer path and inner path are yielded *only*. * If there is a common path, then that common path is yielded first, and then paths leading up to the inner path are yielded. Unless the inner path is a subdirectory of the outer path, the *outer path is not yielded*. In both scenarios, the inner path is always the last path to be yielded. """ inner_path = inner_path.absolute() outer_path = outer_path.absolute() # If we've been passed a file and not a directory, # then go straight to the directory. # NOTE: We only check this for the inner path. if not inner_path.is_dir(): inner_path = inner_path.parent common_path: Optional[Path] try: common_path = Path(os.path.commonpath([inner_path, outer_path])).absolute() except ValueError: # Getting a value error means that we're likely on a windows system # and have been provided a `inner_path` and `outer_path` which are # in different drives. In this situation, there's no shared path, # so just yield the given path. common_path = None # NOTE: I think the following logic here isn't correct. It is too expansive # in the search locations for config files. Correcting that without access # to the root project location for a dbt project and therefore allowing a # a more accurate search is not feasible. In future that path should somehow # be made available here. if not common_path: yield outer_path.resolve() else: # we have a sub path! 
We can load nested paths path_to_visit = common_path while path_to_visit != inner_path: yield path_to_visit.resolve() next_path_to_visit = ( path_to_visit / inner_path.relative_to(path_to_visit).parts[0] ) if next_path_to_visit == path_to_visit: # pragma: no cover # we're not making progress... # [prevent infinite loop] break path_to_visit = next_path_to_visit yield inner_path.resolve() sqlfluff-3.4.2/src/sqlfluff/core/helpers/slice.py000066400000000000000000000025451503426445100220300ustar00rootroot00000000000000"""Helpers for handling slices.""" def to_tuple(s: slice) -> tuple[int, int]: """Convert a slice into a tuple of (start, stop).""" assert s.start is not None and s.stop is not None return (s.start, s.stop) def slice_length(s: slice) -> int: """Get the length of a slice.""" length: int = s.stop - s.start return length def is_zero_slice(s: slice) -> bool: """Return true if this is a zero slice.""" is_zero: bool = s.stop == s.start return is_zero def zero_slice(i: int) -> slice: """Construct a zero slice from a single integer.""" return slice(i, i) def offset_slice(start: int, offset: int) -> slice: """Construct a slice from a start and offset.""" return slice(start, start + offset) def slice_overlaps(s1: slice, s2: slice) -> bool: """Check whether two slices overlap. NOTE: This is designed only for use with *closed* and *positive* slices. """ assert s1.start is not None, f"{s1} is not closed" assert s1.stop is not None, f"{s1} is not closed" assert s2.start is not None, f"{s2} is not closed" assert s2.stop is not None, f"{s2} is not closed" assert s1.start <= s1.stop, f"{s1} is not positive" assert s2.start <= s2.stop, f"{s2} is not positive" if s2.start >= s1.stop: return False if s1.start >= s2.stop: return False return True sqlfluff-3.4.2/src/sqlfluff/core/helpers/string.py000066400000000000000000000075551503426445100222450ustar00rootroot00000000000000"""String Helpers for the parser module.""" from collections.abc import Iterator from typing import Union def curtail_string(s: str, length: int = 20) -> str: """Trim a string nicely to length.""" if len(s) > length: return s[:length] + "..." else: return s def findall(substr: str, in_str: str) -> Iterator[int]: """Yields all the positions sbstr within in_str. https://stackoverflow.com/questions/4664850/how-to-find-all-occurrences-of-a-substring """ # Return nothing if one of the inputs is trivial if not substr or not in_str: return idx = in_str.find(substr) while idx != -1: yield idx idx = in_str.find(substr, idx + 1) def split_colon_separated_string(in_str: str) -> tuple[tuple[str, ...], str]: r"""Converts a colon separated string. The final value in the string is handled separately the other others. >>> split_colon_separated_string("a:b") (('a',), 'b') >>> split_colon_separated_string("a:b:c") (('a', 'b'), 'c') >>> split_colon_separated_string("a:b:c:d") (('a', 'b', 'c'), 'd') >>> split_colon_separated_string("a") ((), 'a') NOTE: This also includes some heuristics for legit values containing colon. 
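    These heuristics (implemented in `should_split_on_colon` below) keep
    Windows-style drive paths and JSON literals intact as the final value: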
    >>> split_colon_separated_string("foo:bar:C:\\Users")
    (('foo', 'bar'), 'C:\\Users')
    >>> split_colon_separated_string('foo:bar:{"k":"v"}')
    (('foo', 'bar'), '{"k":"v"}')
    >>> split_colon_separated_string('foo:bar:[{"k":"v"}]')
    (('foo', 'bar'), '[{"k":"v"}]')
    """
    config_path: list[str] = []
    leftover = in_str
    while ":" in leftover:
        element, _, value = leftover.partition(":")
        element = element.strip()
        value = value.strip()
        config_path.append(element)
        leftover = value
        if not should_split_on_colon(value):
            break

    # last part - actual value
    config_path.append(leftover)
    return tuple(config_path[:-1]), config_path[-1]


def should_split_on_colon(value: str) -> bool:
    """Heuristic for legit values containing a colon."""
    if len(value) >= 3 and value[1] == ":" and value[2] == "\\":
        # Likely a Windows path
        return False
    if len(value) >= 2 and (value[0] == "[" and value[-1] == "]"):
        # Likely a JSON array
        return False
    if len(value) >= 2 and (value[0] == "{" and value[-1] == "}"):
        # Likely a JSON object
        return False

    return True


def split_comma_separated_string(raw: Union[str, list[str]]) -> list[str]:
    """Converts comma separated string to List, stripping whitespace."""
    if isinstance(raw, str):
        return [s.strip() for s in raw.split(",") if s.strip()]
    assert isinstance(raw, list)
    return raw


def get_trailing_whitespace_from_string(in_str: str) -> str:
    r"""Returns the trailing whitespace from a string.

    Designed to work with source strings of placeholders.

    >>> get_trailing_whitespace_from_string("")
    ''
    >>> get_trailing_whitespace_from_string("foo")
    ''
    >>> get_trailing_whitespace_from_string(" ")
    ' '
    >>> get_trailing_whitespace_from_string(" foo ")
    ' '
    >>> get_trailing_whitespace_from_string("foo\n")
    '\n'
    >>> get_trailing_whitespace_from_string("bar \t \n \r ")
    ' \t \n \r '
    """
    whitespace_chars = " \t\r\n"
    if not in_str or in_str[-1] not in whitespace_chars:
        return ""  # No whitespace
    for i in range(1, len(in_str)):
        if in_str[-(i + 1)] not in whitespace_chars:
            # NOTE: The partial whitespace case is included as
            # future-proofing. In testing it appears it is never
            # required, and so only covered in the doctests above.
            # doctest coverage isn't included in the overall coverage
            # check and so the line below is excluded.
            return in_str[-i:]  # pragma: no cover
    else:
        return in_str  # All whitespace
sqlfluff-3.4.2/src/sqlfluff/core/linter/000077500000000000000000000000001503426445100202045ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/linter/__init__.py000066400000000000000000000007511503426445100223200ustar00rootroot00000000000000"""Linter class and helper classes."""

from sqlfluff.core.formatter import FormatterInterface
from sqlfluff.core.linter.common import ParsedString, RenderedFile, RuleTuple
from sqlfluff.core.linter.linted_file import LintedFile
from sqlfluff.core.linter.linter import Linter
from sqlfluff.core.linter.linting_result import LintingResult

__all__ = (
    "FormatterInterface",
    "RuleTuple",
    "ParsedString",
    "LintedFile",
    "LintingResult",
    "Linter",
    "RenderedFile",
)
sqlfluff-3.4.2/src/sqlfluff/core/linter/common.py000066400000000000000000000114001503426445100220420ustar00rootroot00000000000000"""Defines small container classes to hold intermediate results during linting."""

from typing import Any, NamedTuple, Optional, Union

from sqlfluff.core.config import FluffConfig
from sqlfluff.core.errors import (
    SQLBaseError,
    SQLLexError,
    SQLParseError,
    SQLTemplaterError,
)
from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.core.templaters import TemplatedFile


class RuleTuple(NamedTuple):
    """Rule Tuple object for describing rules."""

    code: str
    name: str
    description: str
    groups: tuple[str, ...]
    aliases: tuple[str, ...]


class RenderedFile(NamedTuple):
    """An object to store the result of a templated file/string.

    This is notable as it's the intermediate state between what happens
    in the main process and the child processes when running in parallel mode.
    """

    templated_variants: list[TemplatedFile]
    templater_violations: list[SQLTemplaterError]
    config: FluffConfig
    time_dict: dict[str, float]
    fname: str
    encoding: str
    source_str: str


class ParsedVariant(NamedTuple):
    """An object to store the result of parsing a single TemplatedFile.

    Args:
        templated_file (:obj:`TemplatedFile`): Containing the details
            of the templated file. If templating fails, this will be `None`.
        tree (:obj:`BaseSegment`): The segment structure representing
            the parsed file. If parsing fails due to an unrecoverable
            violation then this will be `None`.
        lexing_violations (:obj:`list` of :obj:`SQLLexError`): Any
            violations raised during the lexing phase.
        parsing_violations (:obj:`list` of :obj:`SQLParseError`): Any
            violations raised during the parsing phase.
    """

    templated_file: TemplatedFile
    tree: Optional[BaseSegment]
    lexing_violations: list[SQLLexError]
    parsing_violations: list[SQLParseError]

    def violations(self) -> list[Union[SQLLexError, SQLParseError]]:
        """Returns the combined lexing and parsing violations for this variant."""
        return [*self.lexing_violations, *self.parsing_violations]


class ParsedString(NamedTuple):
    """An object to store the result of parsing a string.

    Args:
        parsed_variants (:obj:`list` of :obj:`ParsedVariant`): The
            parsed variants of this file. Empty if parsing or templating failed.
        templating_violations (:obj:`list` of :obj:`SQLTemplaterError`): Any
            violations raised during the templating phase. Any violations
            raised during lexing or parsing can be found in the
            `parsed_variants`, or accessed using the `.violations()`
            method which combines all the violations.
        time_dict (:obj:`dict`): Contains timings for how long each step
            took in the process.
        config (:obj:`FluffConfig`): The active config for this file,
            including any parsed in-file directives.
        fname (str): The name of the file.
Used mostly for user feedback. source_str (str): The raw content of the source file. """ parsed_variants: list[ParsedVariant] templating_violations: list[SQLTemplaterError] time_dict: dict[str, Any] config: FluffConfig fname: str source_str: str @property def violations(self) -> list[SQLBaseError]: """Returns the combination of violations for this variant. NOTE: This is implemented as a property for backward compatibility. """ return [ *self.templating_violations, *(v for variant in self.parsed_variants for v in variant.violations()), ] def root_variant(self) -> Optional[ParsedVariant]: """Returns the root variant if successfully parsed, otherwise None.""" if not self.parsed_variants: # In the case of a fatal templating error, there will be no valid # variants. Return None. return None root_variant = self.parsed_variants[0] if not root_variant.tree: # In the case of a parsing fail, there will be a variant, but it will # have failed to parse and so will have a null tree. Count this as # an inappropriate variant to return, so return None. return None return root_variant @property def tree(self) -> BaseSegment: """Return the main variant tree. NOTE: This method is primarily for testing convenience and therefore asserts that parsing has been successful. If this isn't appropriate for the given use case, then don't use this property. """ assert self.parsed_variants, "No successfully parsed variants." root_variant = self.parsed_variants[0] assert root_variant.tree, "Root variant not successfully parsed." return root_variant.tree sqlfluff-3.4.2/src/sqlfluff/core/linter/discovery.py000066400000000000000000000271361503426445100225760ustar00rootroot00000000000000"""Discovery methods for sql files. The main public method here is `paths_from_path` which takes potentially ambiguous paths and file input and resolves them into specific file references. The method also processes the `.sqlfluffignore` functionality in the process. """ import logging import os from collections.abc import Iterable, Iterator, Sequence from pathlib import Path from typing import Callable, Optional import pathspec from sqlfluff.core.config.file import load_config_file_as_dict from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.file import iter_intermediate_paths # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") WalkableType = Iterable[tuple[str, Optional[list[str]], list[str]]] IgnoreSpecRecord = tuple[str, str, pathspec.PathSpec] IgnoreSpecRecords = list[IgnoreSpecRecord] def _check_ignore_specs( absolute_filepath: str, ignore_specs: IgnoreSpecRecords ) -> Optional[str]: """Check a filepath against the loaded ignore files. Returns: The path of an ignorefile if found, None otherwise. """ for dirname, filename, spec in ignore_specs: if spec.match_file(os.path.relpath(absolute_filepath, dirname)): return os.path.join(dirname, filename) return None def _load_specs_from_lines( lines: Iterable[str], logging_reference: str ) -> pathspec.PathSpec: """Load the ignore spec from an iterable of lines. Raises SQLFluffUserError if unparsable for any reason. """ try: return pathspec.PathSpec.from_lines("gitwildmatch", lines) except Exception: _error_msg = f"Error parsing ignore patterns in {logging_reference}" # If the iterable is a Sequence type, then include the patterns. 
if isinstance(lines, Sequence): _error_msg += f": {lines}" raise SQLFluffUserError(_error_msg) def _load_ignorefile(dirpath: str, filename: str) -> IgnoreSpecRecord: """Load a sqlfluffignore file, returning the parsed spec.""" filepath = os.path.join(dirpath, filename) with open(filepath, mode="r") as f: spec = _load_specs_from_lines(f, filepath) return dirpath, filename, spec def _load_configfile(dirpath: str, filename: str) -> Optional[IgnoreSpecRecord]: """Load ignore specs from a standard config file. This function leverages the caching used in the config module to ensure that anything loaded here, can be reused later. Those functions also handle the difference between toml and ini based config files. """ filepath = os.path.join(dirpath, filename) # Use normalised path to ensure reliable caching. config_dict = load_config_file_as_dict(Path(filepath).resolve()) ignore_section = config_dict.get("core", {}) if not isinstance(ignore_section, dict): return None # pragma: no cover patterns = ignore_section.get("ignore_paths", []) # If it's already a list, then we don't need to edit `patterns`, # but if it's not then we either split a string into a list and # then process it, or if there's nothing in the patterns list # (or the pattern input is invalid by not being something other # than a string or list) then we assume there's no ignore pattern # to process and just return None. if isinstance(patterns, str): patterns = patterns.split(",") elif not patterns or not isinstance(patterns, list): return None # By reaching here, we think there is a valid set of ignore patterns # to process. spec = _load_specs_from_lines(patterns, filepath) return dirpath, filename, spec ignore_file_loaders: dict[str, Callable[[str, str], Optional[IgnoreSpecRecord]]] = { ".sqlfluffignore": _load_ignorefile, "pyproject.toml": _load_configfile, ".sqlfluff": _load_configfile, } def _iter_config_files( target_path: Path, working_path: Path, ) -> Iterator[tuple[str, str]]: """Iterate through paths looking for valid config files.""" for search_path in iter_intermediate_paths(target_path.absolute(), working_path): for _filename in ignore_file_loaders: filepath = os.path.join(search_path, _filename) if os.path.isfile(filepath): # Yield if a config file with this name exists at this path. yield str(search_path), _filename def _match_file_extension(filepath: str, valid_extensions: Sequence[str]) -> bool: """Match file path against extensions. Assumes that valid_extensions is already all lowercase. Returns: True if the file has an extension in `valid_extensions`. """ filepath = filepath.lower() return any(filepath.endswith(ext) for ext in valid_extensions) def _process_exact_path( path: str, working_path: str, lower_file_exts: tuple[str, ...], outer_ignore_specs: IgnoreSpecRecords, ) -> list[str]: """Handle exact paths being passed to paths_from_path. If it's got the right extension and it's not ignored, then we just return the normalised version of the path. If it's not the right extension, return nothing, and if it's ignored then return nothing, but include a warning for the user. """ # Does it have a relevant extension? If not, just return an empty list. if not _match_file_extension(path, lower_file_exts): return [] # It's an exact file. We only need to handle the outer ignore files. # There won't be any "inner" ignores because an exact file doesn't create # any sub paths. 
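    # As an illustrative sketch (hypothetical paths): given "queries/q1.sql"
    # and an outer ignore file matching "*.sql", we return [] and warn below;
    # with no matching ignore pattern we return ["queries/q1.sql"] normalised.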
abs_fpath = os.path.abspath(path) ignore_file = _check_ignore_specs(abs_fpath, outer_ignore_specs) if not ignore_file: # If not ignored, just return the file. return [os.path.normpath(path)] ignore_rel_path = os.path.relpath(ignore_file, working_path) linter_logger.warning( f"Exact file path {path} was given but it was " f"ignored by an ignore pattern set in {ignore_rel_path}, " "re-run with `--disregard-sqlfluffignores` to not process " "ignore files." ) # Return no match, because the file is ignored. return [] def _iter_files_in_path( path: str, ignore_files: bool, outer_ignore_specs: IgnoreSpecRecords, lower_file_exts: tuple[str, ...], ) -> Iterator[str]: """Handle directory paths being passed to paths_from_path. We're going to walk the path progressively, processing ignore files as we go. Those ignore files that we find (inner ignore files) only apply within the folder they are found, whereas the ignore files from outside the path (the outer ignore files) will always apply, so we handle them separately. """ inner_ignore_specs: IgnoreSpecRecords = [] ignore_filename_set = frozenset(ignore_file_loaders.keys()) for dirname, subdirs, filenames in os.walk(path, topdown=True): # Before adding new ignore specs, remove any which are no longer relevant # as indicated by us no longer being in a subdirectory of them. # NOTE: Slice so we can modify as we go. for inner_dirname, inner_file, inner_spec in inner_ignore_specs[:]: if not ( dirname == inner_dirname or dirname.startswith(os.path.abspath(inner_dirname) + os.sep) ): inner_ignore_specs.remove((inner_dirname, inner_file, inner_spec)) # Then look for any ignore files in the path (if ignoring files), add them # to the inner buffer if found. if ignore_files: for ignore_file in set(filenames) & ignore_filename_set: ignore_spec = ignore_file_loaders[ignore_file](dirname, ignore_file) if ignore_spec: inner_ignore_specs.append(ignore_spec) # Then prune any subdirectories which are ignored (by modifying `subdirs`) # https://docs.python.org/3/library/os.html#os.walk for subdir in subdirs[:]: # slice it so that we can modify it in the process. # NOTE: The "*" in this next section is a bit of a hack, but pathspec # doesn't like matching _directories_ directly, but if we instead match # `directory/*` we get the same effect. absolute_path = os.path.abspath(os.path.join(dirname, subdir, "*")) if _check_ignore_specs( absolute_path, outer_ignore_specs ) or _check_ignore_specs(absolute_path, inner_ignore_specs): subdirs.remove(subdir) continue # Then look for any relevant sql files in the path. for filename in filenames: relative_path = os.path.join(dirname, filename) absolute_path = os.path.abspath(relative_path) # Check file extension is relevant if not _match_file_extension(filename, lower_file_exts): continue # Check not ignored by outer & inner ignore specs if _check_ignore_specs(absolute_path, outer_ignore_specs): continue if _check_ignore_specs(absolute_path, inner_ignore_specs): continue # If we get here, it's one we want. Yield it. yield os.path.normpath(relative_path) def paths_from_path( path: str, ignore_non_existent_files: bool = False, ignore_files: bool = True, working_path: str = os.getcwd(), target_file_exts: Sequence[str] = (".sql",), ) -> list[str]: """Return a set of sql file paths from a potentially more ambiguous path string. Here we also deal with the any ignore files file if present, whether as raw ignore files (`.sqlfluffignore`) or embedded in more general config files like `.sqlfluff` or `pyproject.toml`. 
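    As an illustrative sketch (hypothetical layout): calling
    ``paths_from_path("models")`` where ``models/staging/stg_users.sql``
    exists on disk would return ``["models/staging/stg_users.sql"]``,
    unless an ignore file excludes it.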
Only files within the path provided are returned, *however* the search area for ignore files is wider. They can both be within the provided path, and also between the working path and the given path. NOTE: In the situation that the given path is *not* a subdirectory of the working path, the current behaviour is to search for the *lowest common path* of the two. This might be counterintuitive, but supports an appropriate solution for the dbt templater without having to additionally pass the project root path. """ if not os.path.exists(path): if ignore_non_existent_files: return [] else: raise SQLFluffUserError( f"Specified path does not exist. Check it/they exist(s): {path}." ) lower_file_exts = tuple(ext.lower() for ext in target_file_exts) # First load any ignore files from outside the path. # These will be applied to every file within the path, because we know that # they're in a parent folder. outer_ignore_specs: IgnoreSpecRecords = [] # Only load them if we're using ignore files. NOTE: That if `ignore_files` # is False, we keep the routines for _checking_ we just never load the # files in the first place. if ignore_files: for ignore_path, ignore_file in _iter_config_files( Path(path).absolute(), Path(working_path) if isinstance(working_path, str) else working_path, ): ignore_spec = ignore_file_loaders[ignore_file](ignore_path, ignore_file) if ignore_spec: outer_ignore_specs.append(ignore_spec) # Handle being passed an exact file first. if os.path.isfile(path): return _process_exact_path( path, working_path, lower_file_exts, outer_ignore_specs ) # Otherwise, it's not an exact path and we're going to walk the path # progressively, processing ignore files as we go. return sorted( _iter_files_in_path(path, ignore_files, outer_ignore_specs, lower_file_exts) ) sqlfluff-3.4.2/src/sqlfluff/core/linter/fix.py000066400000000000000000000335751503426445100213610ustar00rootroot00000000000000"""Helper classes & methods for applying fixes to segments.""" import logging from collections import defaultdict from dataclasses import dataclass, field from typing import TYPE_CHECKING, Optional from sqlfluff.core.parser import BaseSegment, SourceFix from sqlfluff.core.rules.fix import LintFix if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects import Dialect linter_logger = logging.getLogger("sqlfluff.linter") @dataclass class AnchorEditInfo: """For a given fix anchor, count of the fix edit types and fixes for it.""" delete: int = field(default=0) replace: int = field(default=0) create_before: int = field(default=0) create_after: int = field(default=0) fixes: list["LintFix"] = field(default_factory=list) source_fixes: list[SourceFix] = field(default_factory=list) # First fix of edit_type "replace" in "fixes" _first_replace: Optional["LintFix"] = field(default=None) def add(self, fix: "LintFix") -> None: """Adds the fix and updates stats. We also allow potentially multiple source fixes on the same anchor by condensing them together here. """ if fix in self.fixes: # Deduplicate fixes in case it's already in there. return if fix.is_just_source_edit(): assert fix.edit # is_just_source_edit confirms there will be a list # so we can hint that to mypy. self.source_fixes += fix.edit[0].source_fixes # is there already a replace? if self._first_replace: assert self._first_replace.edit # is_just_source_edit confirms there will be a list # and that's the only way to get into _first_replace # if it's populated so we can hint that to mypy. 
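                # Condense this fix's source fixes onto the existing
                # "replace" fix, so that only one replacement is tracked
                # for this anchor.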
                linter_logger.info(
                    "Multiple edits detected, condensing %s onto %s",
                    fix,
                    self._first_replace,
                )
                self._first_replace.edit[0] = self._first_replace.edit[0].edit(
                    source_fixes=self.source_fixes
                )
                linter_logger.info("Condensed fix: %s", self._first_replace)
                # Return without otherwise adding in this fix.
                return

        self.fixes.append(fix)
        if fix.edit_type == "replace" and not self._first_replace:
            self._first_replace = fix
        setattr(self, fix.edit_type, getattr(self, fix.edit_type) + 1)

    @property
    def total(self) -> int:
        """Returns total count of fixes."""
        return len(self.fixes)

    @property
    def is_valid(self) -> bool:
        """Returns True if valid combination of fixes for anchor.

        Cases:
        * 0-1 fixes of any type: Valid
        * 2 fixes: Valid if and only if types are create_before and create_after
        """
        if self.total <= 1:
            # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this
            # function probably won't be called if there are 0 fixes, but 0 is
            # valid; it simply means "no fixes to apply".
            return True
        if self.total == 2:
            # This is only OK for this special case. We allow this because
            # the intent is clear (i.e. no conflict): Insert something *before*
            # the segment and something else *after* the segment.
            return self.create_before == 1 and self.create_after == 1
        # Definitely bad if > 2.
        return False  # pragma: no cover


def compute_anchor_edit_info(fixes: list["LintFix"]) -> dict[int, AnchorEditInfo]:
    """Group and count fixes by anchor, return dictionary."""
    anchor_info = defaultdict(AnchorEditInfo)  # type: ignore
    for fix in fixes:
        # :TRICKY: Use segment uuid as the dictionary key since
        # different segments may compare as equal.
        anchor_id = fix.anchor.uuid
        anchor_info[anchor_id].add(fix)
    return dict(anchor_info)


def apply_fixes(
    segment: BaseSegment,
    dialect: "Dialect",
    rule_code: str,
    fixes: dict[int, AnchorEditInfo],
    fix_even_unparsable: bool = False,
) -> tuple["BaseSegment", list["BaseSegment"], list["BaseSegment"], bool]:
    """Apply a dictionary of fixes to this segment.

    Used to apply fixes found in linting. If a segment remains unchanged
    then the original is returned, but if any changes are made to it, or
    any of its child segments, then it returns a copy rather than mutating
    the original.

    Most fixes are usually applied when this method is called on their parent
    segment, this is because that's where we can insert or move segments
    relative to the anchor specified in the fix. This has the implication
    that if the method is called on a `RawSegment`, then no changes will
    be applied, because a `RawSegment` never has child segments.

    After fixing, it calls `validate_segment_with_reparse` on the segment to
    check that the segment still parses after any changes are made. The
    result of this is returned as a boolean in the last element of the
    return tuple. As the function recurses, if an inner element doesn't
    parse after fixing, then the outer segment will also be checked, and if
    found to parse successfully then the method returns `True` as valid.
    This is because sometimes the fixes change the structure enough that a
    wider reparse is necessary.

    Because of this validity checking, any unparsable sections are assumed
    unfixable (because we won't know if we're corrupting the SQL). The
    method will therefore return early without applying any fixes if the
    segment it's called on is unparsable (because we already know that the
    validation check will fail). If `fix_even_unparsable` is True, then we
    will still apply fixes to unparsable sections, but will do so *without
    validation*.
That means that the final element of the return value will always return `True`, so that we don't interrupt the validity checking of any outer (parsable) sections. """ if not fixes or segment.is_raw(): return segment, [], [], True seg_buffer = [] before = [] after = [] fixes_applied: list[LintFix] = [] requires_validate = False for seg in segment.segments: # Look for uuid match. # This handles potential positioning ambiguity. anchor_info: Optional[AnchorEditInfo] = fixes.pop(seg.uuid, None) if anchor_info is None: # No fix matches here, just add the segment and move on. seg_buffer.append(seg) continue # Otherwise there is a fix match. seg_fixes = anchor_info.fixes if ( len(seg_fixes) == 2 and seg_fixes[0].edit_type == "create_after" ): # pragma: no cover # Must be create_before & create_after. Swap so the # "before" comes first. seg_fixes.reverse() for f in anchor_info.fixes: assert f.anchor.uuid == seg.uuid fixes_applied.append(f) linter_logger.debug( "Matched fix for %s against segment: %s -> %s", rule_code, f, seg, ) # Deletes are easy. if f.edit_type == "delete": # We're just getting rid of this segment. requires_validate = True # NOTE: We don't add the segment in this case. continue # Otherwise it must be a replace or a create. assert f.edit_type in ( "replace", "create_before", "create_after", ), f"Unexpected edit_type: {f.edit_type!r} in {f!r}" if f.edit_type == "create_after" and len(anchor_info.fixes) == 1: # in the case of a creation after that is not part # of a create_before/create_after pair, also add # this segment before the edit. seg_buffer.append(seg) # We're doing a replacement (it could be a single # segment or an iterable) assert f.edit, f"Edit {f.edit_type!r} requires `edit`." consumed_pos = False for s in f.edit: seg_buffer.append(s) # If one of them has the same raw representation # then the first that matches gets to take the # original position marker. if f.edit_type == "replace" and s.raw == seg.raw and not consumed_pos: seg_buffer[-1].pos_marker = seg.pos_marker consumed_pos = True # If we're just editing a segment AND keeping the type the # same then no need to validate. Otherwise we should # trigger a validation (e.g. for creations or # multi-replace). if not ( f.edit_type == "replace" and len(f.edit) == 1 and f.edit[0].class_types == seg.class_types ): requires_validate = True if f.edit_type == "create_before": # in the case of a creation before, also add this # segment on the end seg_buffer.append(seg) # Invalidate any caches segment.invalidate_caches() # If any fixes applied, do an intermediate reposition. When applying # fixes to children and then trying to reposition them, that recursion # may rely on the parent having already populated positions for any # of the fixes applied there first. This ensures those segments have # working positions to work with. if fixes_applied: assert segment.pos_marker seg_buffer = list( segment._position_segments(tuple(seg_buffer), parent_pos=segment.pos_marker) ) # Then recurse (i.e. deal with the children) (Requeueing) seg_queue = seg_buffer seg_buffer = [] for seg in seg_queue: s, pre, post, validated = apply_fixes(seg, dialect, rule_code, fixes) # 'before' and 'after' will usually be empty. Only used when # lower-level fixes left 'seg' with non-code (usually # whitespace) segments as the first or last children. This is # generally not allowed (see the can_start_end_non_code field), # and these segments need to be "bubbled up" the tree. 
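        # e.g. if fixing a child left it as [<whitespace>, <code>], the
        # leading whitespace comes back in `pre` and is re-attached at
        # this level instead.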
        seg_buffer += pre + [s] + post
        # If we fail to validate a child segment, make sure to validate this
        # segment.
        if not validated:
            requires_validate = True

    # Most correct whitespace positioning will have already been handled
    # _however_, the exception is `replace` edits which match start or
    # end with whitespace. We also need to handle any leading or trailing
    # whitespace ejected from any fixes applied to child segments.
    # Here we handle those by checking the start and end of the resulting
    # segment sequence for whitespace.
    # If we're left with any non-code at either end, trim it off and pass it
    # up to the parent segment for handling.
    if not segment.can_start_end_non_code:
        _idx = 0
        for _idx in range(0, len(seg_buffer)):
            if segment._is_code_or_meta(seg_buffer[_idx]):
                break
        before = seg_buffer[:_idx]
        seg_buffer = seg_buffer[_idx:]

        _idx = len(seg_buffer)
        for _idx in range(len(seg_buffer), 0, -1):
            if segment._is_code_or_meta(seg_buffer[_idx - 1]):
                break
        after = seg_buffer[_idx:]
        seg_buffer = seg_buffer[:_idx]

    # Reform into a new segment
    assert segment.pos_marker
    try:
        new_seg = segment.__class__(
            # Realign the segments within
            segments=segment._position_segments(
                tuple(seg_buffer), parent_pos=segment.pos_marker
            ),
            pos_marker=segment.pos_marker,
            # Pass through any additional kwargs
            **{k: getattr(segment, k) for k in segment.additional_kwargs},
        )
    except AssertionError as err:  # pragma: no cover
        # An AssertionError on creating a new segment is likely a whitespace
        # check fail. If possible add information about the fixes we tried to
        # apply, before re-raising.
        # NOTE: only available in python 3.11+.
        if hasattr(err, "add_note"):
            err.add_note(f" After applying fixes: {fixes_applied}.")
        raise err

    # Handle any necessary validation.
    if requires_validate:
        # Was it already unparsable?
        if "unparsable" in segment.descendant_type_set | segment.class_types:
            if fix_even_unparsable:
                # If we're fixing even unparsable sections, there's no point trying
                # to validate, it will always fail. We may still want to validate
                # other sections of the file though, so we should just declare *this*
                # part of the file to be all good.
                validated = True
            else:
                # It was already unparsable, but we're being asked to validate.
                # Don't apply any fixes from within this region and just return the
                # original segment.
                return segment, [], [], True
        # Otherwise only validate if there's a match_grammar. Otherwise we may get
        # strange results (for example with the BracketedSegment).
        elif hasattr(new_seg, "match_grammar"):
            validated = new_seg.validate_segment_with_reparse(dialect)
    else:
        validated = not requires_validate
    # Return the new segment and any non-code that needs to bubble up
    # the tree.
    # NOTE: We pass on whether this segment has been validated. It's
    # very possible that our parsing here may fail depending on the
    # type of segment that has been replaced, but if not we rely on
    # a parent segment still being valid. If we get all the way up
    # to the root and it's still not valid - that's a problem.
    return new_seg, before, after, validated
sqlfluff-3.4.2/src/sqlfluff/core/linter/linted_dir.py000066400000000000000000000227651503426445100227060ustar00rootroot00000000000000"""Defines the LintedDir class.
This stores the idea of a collection of linted files at a single start path """ from collections.abc import Iterable from typing import Optional, TypedDict, Union from sqlfluff.core.errors import ( CheckTuple, SerializedObject, SQLBaseError, SQLLintError, ) from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.linter.linted_file import TMP_PRS_ERROR_TYPES, LintedFile from sqlfluff.core.parser.segments.base import BaseSegment class LintingRecord(TypedDict): """A class to store the linted file statistics.""" filepath: str violations: list[SerializedObject] # Things like file length statistics: dict[str, int] # Raw timings, in seconds, for both rules and steps timings: dict[str, float] class LintedDir: """A class to store the idea of a collection of linted files at a single start path. A LintedDir may contain files in subdirectories, but they all share a common root. Importantly, this class also abstracts away from the given LintedFile object and allows us to either _keep_ those objects for later use, or extract the results from them and allow the original object to be discarded and save memory overhead if not required. """ def __init__(self, path: str, retain_files: bool = True) -> None: self.files: list[LintedFile] = [] self.path: str = path self.retain_files: bool = retain_files # Records self._records: list[LintingRecord] = [] # Stats self._num_files: int = 0 self._num_clean: int = 0 self._num_unclean: int = 0 self._num_violations: int = 0 self.num_unfiltered_tmp_prs_errors: int = 0 self._unfiltered_tmp_prs_errors_map: dict[str, int] = {} self.num_tmp_prs_errors: int = 0 self.num_unfixable_lint_errors: int = 0 # Timing self.step_timings: list[dict[str, float]] = [] self.rule_timings: list[tuple[str, str, float]] = [] def add(self, file: LintedFile) -> None: """Add a file to this path. This function _always_ updates the metadata tracking, but may or may not persist the `file` object itself depending on the `retain_files` argument given on instantiation. """ # Generate serialised violations. violation_records = sorted( # Keep the warnings (v.to_dict() for v in file.get_violations(filter_warning=False)), # The tuple allows sorting by line number, then position, then code key=lambda v: (v["start_line_no"], v["start_line_pos"], v["code"]), ) record: LintingRecord = { "filepath": file.path, "violations": violation_records, "statistics": { "source_chars": ( len(file.templated_file.source_str) if file.templated_file else 0 ), "templated_chars": ( len(file.templated_file.templated_str) if file.templated_file else 0 ), # These are all the segments in the tree "segments": ( file.tree.count_segments(raw_only=False) if file.tree else 0 ), # These are just the "leaf" nodes of the tree "raw_segments": ( file.tree.count_segments(raw_only=True) if file.tree else 0 ), }, "timings": {}, } if file.timings: record["timings"] = { # linting, parsing, templating etc... **file.timings.step_timings, # individual rule timings, by code. 
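                # Merged, this gives a flat mapping, e.g. (illustrative values
                # only): {"templating": 0.01, "parsing": 0.05, "LT01": 0.003}.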
**file.timings.get_rule_timing_dict(), } self._records.append(record) # Update the stats self._num_files += 1 if file.is_clean(): self._num_clean += 1 else: self._num_unclean += 1 self._num_violations += file.num_violations() _unfiltered_tmp_prs_errors = file.num_violations( types=TMP_PRS_ERROR_TYPES, filter_ignore=False, filter_warning=False, ) self.num_unfiltered_tmp_prs_errors += _unfiltered_tmp_prs_errors self._unfiltered_tmp_prs_errors_map[file.path] = _unfiltered_tmp_prs_errors self.num_tmp_prs_errors += file.num_violations( types=TMP_PRS_ERROR_TYPES, ) self.num_unfixable_lint_errors += file.num_violations( types=SQLLintError, fixable=False, ) # Append timings if present if file.timings: self.step_timings.append(file.timings.step_timings) self.rule_timings.extend(file.timings.rule_timings) # Finally, if set to persist files, do that. if self.retain_files: self.files.append(file) def check_tuples( self, raise_on_non_linting_violations: bool = True ) -> list[CheckTuple]: """Compress all the tuples into one list. NB: This is a little crude, as you can't tell which file the violations are from. Good for testing though. For more control use `check_tuples_by_path`. """ return [ check_tuple for file in self.files for check_tuple in file.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) ] def check_tuples_by_path( self, raise_on_non_linting_violations: bool = True ) -> dict[str, list[CheckTuple]]: """Fetch all check_tuples from all contained `LintedDir` objects. Returns: A dict, with lists of tuples grouped by path. """ assert ( self.retain_files ), "cannot `check_tuples_by_path()` without `retain_files`" return { file.path: file.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) for file in self.files } def num_violations( self, types: Optional[Union[type[SQLBaseError], Iterable[type[SQLBaseError]]]] = None, fixable: Optional[bool] = None, ) -> int: """Count the number of violations in the path.""" return sum( file.num_violations(types=types, fixable=fixable) for file in self.files ) def get_violations( self, rules: Optional[Union[str, tuple[str, ...]]] = None ) -> list[SQLBaseError]: """Return a list of violations in the path.""" return [v for file in self.files for v in file.get_violations(rules=rules)] def as_records(self) -> list[LintingRecord]: """Return the result as a list of dictionaries. Each record contains a key specifying the filepath, and a list of violations. This method is useful for serialization as all objects will be builtin python types (ints, strs). """ return self._records def stats(self) -> dict[str, int]: """Return a dict containing linting stats about this path.""" return { "files": self._num_files, "clean": self._num_clean, "unclean": self._num_unclean, "violations": self._num_violations, } def persist_changes( self, formatter: Optional[FormatterInterface] = None, fixed_file_suffix: str = "", ) -> dict[str, Union[bool, str]]: """Persist changes to files in the given path. This also logs the output as we go using the formatter if present. 
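
        The return value maps each file path to its persist result, e.g.
        (illustrative path only): {"models/my_model.sql": True}.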
""" assert self.retain_files, "cannot `persist_changes()` without `retain_files`" # Run all the fixes for all the files and return a dict buffer: dict[str, Union[bool, str]] = {} for file in self.files: buffer[file.path] = file.persist_tree( suffix=fixed_file_suffix, formatter=formatter ) return buffer def discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors(self) -> None: """Discard lint fixes for files with templating or parse errors.""" if self.num_unfiltered_tmp_prs_errors: # Filter serialised versions if present. for record in self._records: if self._unfiltered_tmp_prs_errors_map[record["filepath"]]: for v_dict in record["violations"]: if v_dict.get("fixes", []): # We're changing a violating with fixes, to one without, # so we need to increment the cache value. self.num_unfixable_lint_errors += 1 v_dict["fixes"] = [] # Filter the full versions if present. for linted_file in self.files: if self._unfiltered_tmp_prs_errors_map[linted_file.path]: for violation in linted_file.violations: if isinstance(violation, SQLLintError): violation.fixes = [] @property def tree(self) -> Optional[BaseSegment]: """A convenience method for when there is only one file and we want the tree.""" assert self.retain_files, ".tree() cannot be called if `retain_files` is False." assert ( len(self.files) == 1 ), ".tree() cannot be called when a LintedDir contains more than one file." assert ( self.files ), "LintedDir has no parsed files. There is probably a parsing error." return self.files[0].tree sqlfluff-3.4.2/src/sqlfluff/core/linter/linted_file.py000066400000000000000000000425001503426445100230350ustar00rootroot00000000000000"""Defines the LintedFile class. This holds linting results for a single file, and also contains all of the routines to apply fixes to that file post linting. """ import logging import os import shutil import stat import tempfile from collections import defaultdict from collections.abc import Iterable from dataclasses import dataclass from typing import NamedTuple, Optional, Union from sqlfluff.core.errors import ( CheckTuple, SQLBaseError, SQLLintError, SQLParseError, SQLTemplaterError, ) from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.linter.patch import FixPatch, generate_source_patches # Classes needed only for type checking from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.rules.noqa import IgnoreMask from sqlfluff.core.templaters import RawFileSlice, TemplatedFile # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") TMP_PRS_ERROR_TYPES = (SQLTemplaterError, SQLParseError) @dataclass class FileTimings: """A dataclass for holding the timings information for a file.""" step_timings: dict[str, float] # NOTE: Because rules may run more than once for any # given file we record each run and then we can post # process this as we wish later. rule_timings: list[tuple[str, str, float]] def __repr__(self) -> str: # pragma: no cover return "" def get_rule_timing_dict(self) -> dict[str, float]: """Generate a summary to total time in each rule. This is primarily for csv export. 
""" total_times: dict[str, float] = defaultdict(float) for code, _, time in self.rule_timings: total_times[code] += time # Return as plain dict return dict(total_times.items()) class LintedFile(NamedTuple): """A class to store the idea of a linted file.""" path: str violations: list[SQLBaseError] timings: Optional[FileTimings] tree: Optional[BaseSegment] ignore_mask: Optional[IgnoreMask] templated_file: Optional[TemplatedFile] encoding: str def check_tuples( self, raise_on_non_linting_violations: bool = True ) -> list[CheckTuple]: """Make a list of check_tuples. This assumes that all the violations found are linting violations. If they don't then this function raises that error. """ vs: list[CheckTuple] = [] for v in self.get_violations(): if isinstance(v, SQLLintError): vs.append(v.check_tuple()) elif raise_on_non_linting_violations: raise v return vs @staticmethod def deduplicate_in_source_space( violations: list[SQLBaseError], ) -> list[SQLBaseError]: """Removes duplicates in the source space. This is useful for templated files with loops, where we'll get a violation for each pass around the loop, but the user only cares about it once and we're only going to fix it once. By filtering them early we get a more a more helpful CLI output *and* and more efficient fixing routine (by handling fewer fixes). """ new_violations = [] dedupe_buffer = set() for v in violations: signature = v.source_signature() if signature not in dedupe_buffer: new_violations.append(v) dedupe_buffer.add(signature) else: linter_logger.debug("Removing duplicate source violation: %r", v) # Sort on return so that if any are out of order, they're now ordered # appropriately. This happens most often when linting multiple variants. return sorted(new_violations, key=lambda v: (v.line_no, v.line_pos)) def get_violations( self, rules: Optional[Union[str, tuple[str, ...]]] = None, types: Optional[Union[type[SQLBaseError], Iterable[type[SQLBaseError]]]] = None, filter_ignore: bool = True, filter_warning: bool = True, warn_unused_ignores: bool = False, fixable: Optional[bool] = None, ) -> list[SQLBaseError]: """Get a list of violations, respecting filters and ignore options. Optionally now with filters. """ violations = self.violations # Filter types if types: # If it's a singular type, make it a single item in a tuple # otherwise coerce to tuple normally so that we can use it with # isinstance. if isinstance(types, type) and issubclass(types, SQLBaseError): types = (types,) else: types = tuple(types) # pragma: no cover TODO? violations = [v for v in violations if isinstance(v, types)] # Filter rules if rules: if isinstance(rules, str): rules = (rules,) else: rules = tuple(rules) violations = [v for v in violations if v.rule_code() in rules] # Filter fixable if fixable is not None: # Assume that fixable is true or false if not None. # Fatal errors should always come through, regardless. 
violations = [v for v in violations if v.fixable is fixable or v.fatal] # Filter ignorable violations if filter_ignore: violations = [v for v in violations if not v.ignore] # Ignore any rules in the ignore mask if self.ignore_mask: violations = self.ignore_mask.ignore_masked_violations(violations) # Filter warning violations if filter_warning: violations = [v for v in violations if not v.warning] # Add warnings for unneeded noqa if applicable if warn_unused_ignores and not filter_warning and self.ignore_mask: violations += self.ignore_mask.generate_warnings_for_unused() return violations def num_violations( self, types: Optional[Union[type[SQLBaseError], Iterable[type[SQLBaseError]]]] = None, filter_ignore: bool = True, filter_warning: bool = True, fixable: Optional[bool] = None, ) -> int: """Count the number of violations. Optionally now with filters. """ violations = self.get_violations( types=types, filter_ignore=filter_ignore, filter_warning=filter_warning, fixable=fixable, ) return len(violations) def is_clean(self) -> bool: """Return True if there are no ignorable violations.""" return not any(self.get_violations(filter_ignore=True)) def fix_string(self) -> tuple[str, bool]: """Obtain the changes to a path as a string. We use the source mapping features of TemplatedFile to generate a list of "patches" which cover the non templated parts of the file and refer back to the locations in the original file. NB: This is MUCH FASTER than the original approach using difflib in pre 0.4.0. There is an important distinction here between Slices and Segments. A Slice is a portion of a file which is determined by the templater based on which portions of the source file are templated or not, and therefore before Lexing and so is completely dialect agnostic. A Segment is determined by the Lexer from portions of strings after templating. """ assert self.templated_file, "Fixing a string requires successful templating." linter_logger.debug("Original Tree: %r", self.templated_file.templated_str) assert self.tree, "Fixing a string requires successful parsing." linter_logger.debug("Fixed Tree: %r", self.tree.raw) # The sliced file is contiguous in the TEMPLATED space. # NB: It has gaps and repeats in the source space. # It's also not the FIXED file either. linter_logger.debug("### Templated File.") for idx, file_slice in enumerate(self.templated_file.sliced_file): t_str = self.templated_file.templated_str[file_slice.templated_slice] s_str = self.templated_file.source_str[file_slice.source_slice] if t_str == s_str: linter_logger.debug( " File slice: %s %r [invariant]", idx, file_slice ) else: linter_logger.debug(" File slice: %s %r", idx, file_slice) linter_logger.debug(" \t\t\ttemplated: %r\tsource: %r", t_str, s_str) original_source = self.templated_file.source_str # Generate patches from the fixed tree. In the process we sort # and deduplicate them so that the resultant list is in the # the right order for the source file without any duplicates. filtered_source_patches = generate_source_patches( self.tree, self.templated_file ) linter_logger.debug("Filtered source patches:") for idx, patch in enumerate(filtered_source_patches): linter_logger.debug(" %s: %s", idx, patch) # Any Template tags in the source file are off limits, unless # we're explicitly fixing the source file. source_only_slices = self.templated_file.source_only_slices() linter_logger.debug("Source-only slices: %s", source_only_slices) # We now slice up the file using the patches and any source only slices. 
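        # (Illustrative sketch: a single patch covering source characters
        # 10-15 of a 30 character file yields the regions [0:10], [10:15]
        # and [15:30].)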
        # This gives us regions to apply changes to.
        slice_buff = self._slice_source_file_using_patches(
            filtered_source_patches, source_only_slices, self.templated_file.source_str
        )

        linter_logger.debug("Final slice buffer: %s", slice_buff)

        # Iterate through the patches, building up the new string.
        fixed_source_string = self._build_up_fixed_source_string(
            slice_buff, filtered_source_patches, self.templated_file.source_str
        )

        # The success metric here is whether anything ACTUALLY changed.
        return fixed_source_string, fixed_source_string != original_source

    @staticmethod
    def _slice_source_file_using_patches(
        source_patches: list[FixPatch],
        source_only_slices: list[RawFileSlice],
        raw_source_string: str,
    ) -> list[slice]:
        """Use patches to safely slice up the file before fixing.

        This uses source only slices to avoid overwriting sections
        of templated code in the source file (when we don't want to).

        We assume that the source patches have already been
        sorted and deduplicated. Sorting is important. If the
        patches aren't sorted then this function will miss chunks.
        If there are overlaps or duplicates then this function
        may produce strange results.
        """
        # We now slice up the file using the patches and any source only slices.
        # This gives us regions to apply changes to.
        slice_buff = []
        source_idx = 0
        for patch in source_patches:
            # Are there templated slices at or before the start of this patch?
            # TODO: We'll need explicit handling for template fixes here, because
            # they ARE source only slices. If we can get handling to work properly
            # here then this is the last hurdle and it will flow through
            # smoothly from here.
            while (
                source_only_slices
                and source_only_slices[0].source_idx < patch.source_slice.start
            ):
                next_so_slice = source_only_slices.pop(0).source_slice()
                # Add a pre-slice before the next templated slice if needed.
                if next_so_slice.start > source_idx:
                    slice_buff.append(slice(source_idx, next_so_slice.start))
                # Add the templated slice.
                slice_buff.append(next_so_slice)
                source_idx = next_so_slice.stop
            # Does this patch cover the next source-only slice directly?
            if (
                source_only_slices
                and patch.source_slice == source_only_slices[0].source_slice()
            ):
                linter_logger.info(
                    "Removing next source only slice from the stack because it "
                    "covers the same area of source file as the current patch: %s %s",
                    source_only_slices[0],
                    patch,
                )
                # If it does, remove it so that we don't duplicate it.
                source_only_slices.pop(0)
            # Is there a gap between current position and this patch?
            if patch.source_slice.start > source_idx:
                # Add a slice up to this patch.
                slice_buff.append(slice(source_idx, patch.source_slice.start))
            # Is this patch covering an area we've already covered?
            if patch.source_slice.start < source_idx:  # pragma: no cover
                # NOTE: This shouldn't happen. With more detailed templating
                # this shouldn't happen - but in the off-chance that this does
                # happen - then this code path remains.
                linter_logger.info(
                    "Skipping overlapping patch at Index %s, Patch: %s",
                    source_idx,
                    patch,
                )
                # Ignore the patch for now...
                continue
            # Add this patch.
            slice_buff.append(patch.source_slice)
            source_idx = patch.source_slice.stop
        # Add a tail slice.
        if source_idx < len(raw_source_string):
            slice_buff.append(slice(source_idx, len(raw_source_string)))
        return slice_buff

    @staticmethod
    def _build_up_fixed_source_string(
        source_file_slices: list[slice],
        source_patches: list[FixPatch],
        raw_source_string: str,
    ) -> str:
        """Use patches and raw file to fix the source file.

        This assumes that patches and slices have already been coordinated
        (i.e. produced together by `_slice_source_file_using_patches`).
        If they haven't then this will fail because we rely on patches
        having a corresponding slice of exactly the right file in the list
        of file slices.
        """
        # Iterate through the patches, building up the new string.
        str_buff = ""
        for source_slice in source_file_slices:
            # Is it one in the patch buffer?
            for patch in source_patches:
                if patch.source_slice == source_slice:
                    # Use the patched version
                    linter_logger.debug(
                        "%-30s %s %r > %r",
                        f"Appending {patch.patch_category} Patch:",
                        patch.source_slice,
                        patch.source_str,
                        patch.fixed_raw,
                    )
                    str_buff += patch.fixed_raw
                    break
            else:
                # Use the raw string
                linter_logger.debug(
                    "Appending Raw: %s %r",
                    source_slice,
                    raw_source_string[source_slice],
                )
                str_buff += raw_source_string[source_slice]
        return str_buff

    def persist_tree(
        self, suffix: str = "", formatter: Optional[FormatterInterface] = None
    ) -> bool:
        """Persist changes to the given path."""
        if self.num_violations(fixable=True) > 0:
            write_buff, success = self.fix_string()

            if success:
                fname = self.path
                # If there is a suffix specified, then use it.
                if suffix:
                    root, ext = os.path.splitext(fname)
                    fname = root + suffix + ext
                self._safe_create_replace_file(
                    self.path, fname, write_buff, self.encoding
                )
                result_label = "FIXED"
            else:  # pragma: no cover
                result_label = "FAIL"
        else:
            result_label = "SKIP"
            success = True

        if formatter:
            formatter.dispatch_persist_filename(filename=self.path, result=result_label)

        return success

    @staticmethod
    def _safe_create_replace_file(
        input_path: str, output_path: str, write_buff: str, encoding: str
    ) -> None:
        # Write to a temporary file first, so in case of encoding or other
        # issues, we don't delete or corrupt the user's existing file.

        # Get file mode (i.e. permissions) on existing file. We'll preserve the
        # same permissions on the output file.
        mode = None
        try:
            status = os.stat(input_path)
        except FileNotFoundError:
            pass
        else:
            if stat.S_ISREG(status.st_mode):
                mode = stat.S_IMODE(status.st_mode)
        dirname, basename = os.path.split(output_path)
        with tempfile.NamedTemporaryFile(
            mode="w",
            encoding=encoding,
            newline="",  # NOTE: No newline conversion. Write as read.
            prefix=basename,
            dir=dirname,
            suffix=os.path.splitext(output_path)[1],
            delete=False,
        ) as tmp:
            tmp.file.write(write_buff)
            tmp.flush()
            os.fsync(tmp.fileno())
        # Once the temp file is safely written, replace the existing file.
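        # NOTE: `tempfile.NamedTemporaryFile` creates the file with restrictive
        # permissions, hence re-applying the original mode (captured above)
        # before moving it into place.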
if mode is not None: os.chmod(tmp.name, mode) shutil.move(tmp.name, output_path) sqlfluff-3.4.2/src/sqlfluff/core/linter/linter.py000066400000000000000000001367221503426445100220660ustar00rootroot00000000000000"""Defines the linter class.""" import fnmatch import logging import os import time from collections.abc import Iterator, Sequence from typing import TYPE_CHECKING, Optional, cast import regex from tqdm import tqdm from sqlfluff.core.config import FluffConfig, progress_bar_configuration from sqlfluff.core.errors import ( SQLBaseError, SQLFluffSkipFile, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterError, ) from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.helpers.file import get_encoding from sqlfluff.core.linter.common import ( ParsedString, ParsedVariant, RenderedFile, RuleTuple, ) from sqlfluff.core.linter.discovery import paths_from_path from sqlfluff.core.linter.fix import apply_fixes, compute_anchor_edit_info from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linted_file import ( TMP_PRS_ERROR_TYPES, FileTimings, LintedFile, ) from sqlfluff.core.linter.linting_result import LintingResult from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix from sqlfluff.core.rules import BaseRule, RulePack, get_ruleset from sqlfluff.core.rules.fix import LintFix from sqlfluff.core.rules.noqa import IgnoreMask if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects import Dialect from sqlfluff.core.parser.segments.meta import MetaSegment from sqlfluff.core.templaters import RawTemplater, TemplatedFile RuleTimingsType = list[tuple[str, str, float]] # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class Linter: """The interface class to interact with the linter.""" # Default to allowing process parallelism allow_process_parallelism = True def __init__( self, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, dialect: Optional[str] = None, rules: Optional[list[str]] = None, user_rules: Optional[list[type[BaseRule]]] = None, exclude_rules: Optional[list[str]] = None, ) -> None: if config and (dialect or rules or exclude_rules): raise ValueError( # pragma: no cover "Linter does not support setting both `config` and any of " "`dialect`, `rules` or `exclude_rules`. The latter are " "provided as convenience methods to avoid needing to " "set the `config` object. If using `config`, please " "provide all the other values within that object." ) # Use the provided config or create one from the kwargs. self.config = config or FluffConfig.from_kwargs( dialect=dialect, rules=rules, exclude_rules=exclude_rules, # Don't require a dialect to be provided yet. Defer this until we # are actually linting something, since the directory we are linting # from may provide additional configuration, including a dialect. 
require_dialect=False, ) # Get the dialect and templater self.dialect: "Dialect" = cast("Dialect", self.config.get("dialect_obj")) self.templater: "RawTemplater" = cast( "RawTemplater", self.config.get("templater_obj") ) # Store the formatter for output self.formatter = formatter # Store references to user rule classes self.user_rules = user_rules or [] def get_rulepack(self, config: Optional[FluffConfig] = None) -> RulePack: """Get hold of a set of rules.""" rs = get_ruleset() # Register any user rules for rule in self.user_rules: rs.register(rule) cfg = config or self.config return rs.get_rulepack(config=cfg) def rule_tuples(self) -> list[RuleTuple]: """A simple pass through to access the rule tuples of the rule set.""" rs = self.get_rulepack() return [ RuleTuple(rule.code, rule.name, rule.description, rule.groups, rule.aliases) for rule in rs.rules ] # #### Static methods # These are the building blocks of the linting process. @staticmethod def load_raw_file_and_config( fname: str, root_config: FluffConfig ) -> tuple[str, FluffConfig, str]: """Load a raw file and the associated config.""" file_config = root_config.make_child_from_path(fname) config_encoding: str = file_config.get("encoding", default="autodetect") encoding = get_encoding(fname=fname, config_encoding=config_encoding) # Check file size before loading. limit = file_config.get("large_file_skip_byte_limit") if limit: # make sure the limit becomes an integer try: limit = int(limit) except ValueError: raise ValueError( f""" large_file_skip_byte_limit parameter from config cannot be converted to integer, current value {limit}, type {type(limit)} """ ) except TypeError: raise TypeError( f""" failed to get large_file_skip_byte_limit parameter from config, or it is of invalid type {type(limit)} """ ) # Get the file size file_size = os.path.getsize(fname) if file_size > limit: raise SQLFluffSkipFile( f"Length of file {fname!r} is {file_size} bytes which is over " f"the limit of {limit} bytes. Skipping to avoid parser lock. " "Users can increase this limit in their config by setting the " "'large_file_skip_byte_limit' value, or disable by setting it " "to zero." ) with open(fname, encoding=encoding, errors="backslashreplace") as target_file: raw_file = target_file.read() # Scan the raw file for config commands. file_config.process_raw_file_for_config(raw_file, fname) # Return the raw file and config return raw_file, file_config, encoding @staticmethod def _normalise_newlines(string: str) -> str: """Normalise newlines to unix-style line endings.""" return regex.sub(r"\r\n|\r", "\n", string) @staticmethod def _lex_templated_file( templated_file: "TemplatedFile", config: FluffConfig ) -> tuple[Optional[Sequence[BaseSegment]], list[SQLLexError]]: """Lex a templated file.""" violations = [] linter_logger.info("LEXING RAW (%s)", templated_file.fname) # Get the lexer lexer = Lexer(config=config) # Lex the file and log any problems try: segments, lex_vs = lexer.lex(templated_file) # NOTE: There will always be segments, even if it's # just an end of file marker. assert segments, "The token sequence should never be empty." # We might just get the violations as a list violations += lex_vs linter_logger.info("Lexed segments: %s", [seg.raw for seg in segments]) except SQLLexError as err: # pragma: no cover linter_logger.info("LEXING FAILED! (%s): %s", templated_file.fname, err) violations.append(err) return None, violations # Check that we've got sensible indentation from the lexer. # We might need to suppress if it's a complicated file. 
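        # (For illustration: setting `template_blocks_indent = force` in config
        # skips the indent balance check below, while other truthy values
        # enable it.)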
templating_blocks_indent = config.get("template_blocks_indent", "indentation") if isinstance(templating_blocks_indent, str): force_block_indent = templating_blocks_indent.lower().strip() == "force" else: force_block_indent = False templating_blocks_indent = bool(templating_blocks_indent) # If we're forcing it through we don't check. if templating_blocks_indent and not force_block_indent: indent_balance = sum(getattr(elem, "indent_val", 0) for elem in segments) if indent_balance != 0: # pragma: no cover linter_logger.debug( "Indent balance test failed for %r. Template indents will not be " "linted for this file.", templated_file.fname, ) # Don't enable the templating blocks. templating_blocks_indent = False # The file will have been lexed without config, so check all indents # are enabled. new_segments = [] for segment in segments: if segment.is_meta: meta_segment = cast("MetaSegment", segment) if meta_segment.indent_val != 0: # Don't allow it if we're not linting templating block indents. if not templating_blocks_indent: continue # pragma: no cover new_segments.append(segment) # Return new buffer return new_segments, violations @staticmethod def _parse_tokens( tokens: Sequence[BaseSegment], config: FluffConfig, fname: Optional[str] = None, parse_statistics: bool = False, ) -> tuple[Optional[BaseSegment], list[SQLParseError]]: parser = Parser(config=config) violations = [] # Parse the file and log any problems try: parsed: Optional[BaseSegment] = parser.parse( # Regardless of how the sequence was passed in, we should # coerce it to a tuple here, before we head deeper into # the parsing process. tuple(tokens), fname=fname, parse_statistics=parse_statistics, ) except SQLParseError as err: linter_logger.info("PARSING FAILED! : %s", err) violations.append(err) return None, violations if parsed is None: # pragma: no cover return None, violations linter_logger.info("\n###\n#\n# {}\n#\n###".format("Parsed Tree:")) linter_logger.info("\n" + parsed.stringify()) # We may succeed parsing, but still have unparsable segments. Extract them # here. for unparsable in parsed.iter_unparsables(): # No exception has been raised explicitly, but we still create one here # so that we can use the common interface assert unparsable.pos_marker violations.append( SQLParseError( "Line {0[0]}, Position {0[1]}: Found unparsable section: " "{1!r}".format( unparsable.pos_marker.working_loc, ( unparsable.raw if len(unparsable.raw) < 40 else unparsable.raw[:40] + "..." ), ), segment=unparsable, ) ) linter_logger.info("Found unparsable segment...") linter_logger.info(unparsable.stringify()) return parsed, violations @staticmethod def remove_templated_errors( linting_errors: list[SQLBaseError], ) -> list[SQLBaseError]: """Filter a list of lint errors, removing those from the templated slices.""" # Filter out any linting errors in templated sections if relevant. result: list[SQLBaseError] = [] for e in linting_errors: if isinstance(e, SQLLintError): assert e.segment.pos_marker if ( # Is it in a literal section? e.segment.pos_marker.is_literal() # Is it a rule that is designed to work on templated sections? or e.rule.targets_templated ): result.append(e) else: # If it's another type, just keep it. (E.g. SQLParseError from # malformed "noqa" comment). result.append(e) return result @staticmethod def _report_conflicting_fixes_same_anchor(message: str) -> None: # pragma: no cover # This function exists primarily in order to let us monkeypatch it at # runtime (replacing it with a function that raises an exception). 
linter_logger.critical(message) @staticmethod def _warn_unfixable(code: str) -> None: linter_logger.warning( f"One fix for {code} not applied, it would re-cause the same error." ) # ### Class Methods # These compose the base static methods into useful recipes. @classmethod def parse_rendered( cls, rendered: RenderedFile, parse_statistics: bool = False, ) -> ParsedString: """Parse a rendered file.""" tokens: Optional[Sequence[BaseSegment]] parsed_variants: list[ParsedVariant] = [] _lexing_time = 0.0 _parsing_time = 0.0 for idx, variant in enumerate(rendered.templated_variants): t0 = time.monotonic() linter_logger.info("Parse Rendered. Lexing Variant %s", idx) tokens, lex_errors = cls._lex_templated_file(variant, rendered.config) t1 = time.monotonic() linter_logger.info("Parse Rendered. Parsing Variant %s", idx) if tokens: parsed, parse_errors = cls._parse_tokens( tokens, rendered.config, fname=rendered.fname, parse_statistics=parse_statistics, ) else: # pragma: no cover parsed = None parse_errors = [] _lt = t1 - t0 _pt = time.monotonic() - t1 linter_logger.info( "Parse Rendered. Variant %s. Lex in %s. Parse in %s.", idx, _lt, _pt ) parsed_variants.append( ParsedVariant( variant, parsed, lex_errors, parse_errors, ) ) _lexing_time += _lt _parsing_time += _pt time_dict = { **rendered.time_dict, "lexing": _lexing_time, "parsing": _parsing_time, } return ParsedString( parsed_variants=parsed_variants, templating_violations=rendered.templater_violations, time_dict=time_dict, config=rendered.config, fname=rendered.fname, source_str=rendered.source_str, ) @classmethod def lint_fix_parsed( cls, tree: BaseSegment, config: FluffConfig, rule_pack: RulePack, fix: bool = False, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, formatter: Optional[FormatterInterface] = None, ) -> tuple[BaseSegment, list[SQLBaseError], Optional[IgnoreMask], RuleTimingsType]: """Lint and optionally fix a tree object.""" # Keep track of the linting errors on the very first linter pass. The # list of issues output by "lint" and "fix" only includes issues present # in the initial SQL code, EXCLUDING any issues that may be created by # the fixes themselves. initial_linting_errors = [] # A placeholder for the fixes we had on the previous loop last_fixes: Optional[list[LintFix]] = None # Keep a set of previous versions to catch infinite loops. previous_versions: set[tuple[str, tuple["SourceFix", ...]]] = {(tree.raw, ())} # Keep a buffer for recording rule timings. rule_timings: RuleTimingsType = [] # If we are fixing then we want to loop up to the runaway_limit, otherwise just # once for linting. loop_limit = config.get("runaway_limit") if fix else 1 # Dispatch the output for the lint header if formatter: formatter.dispatch_lint_header( fname or "", sorted(rule_pack.codes()) ) # Look for comment segments which might indicate lines to ignore. disable_noqa_except: Optional[str] = config.get("disable_noqa_except") if not config.get("disable_noqa") or disable_noqa_except: allowed_rules_ref_map = cls.allowed_rule_ref_map( rule_pack.reference_map, disable_noqa_except ) ignore_mask, ivs = IgnoreMask.from_tree(tree, allowed_rules_ref_map) initial_linting_errors += ivs else: ignore_mask = None save_tree = tree # There are two phases of rule running. # 1. The main loop is for most rules. These rules are assumed to # interact and cause a cascade of fixes requiring multiple passes. # These are run the `runaway_limit` number of times (default 10). # 2. 
The post loop is for post-processing rules, not expected to trigger # any downstream rules, e.g. capitalization fixes. They are run on the # first loop and then twice at the end (once to fix, and once again to # check result of fixes), but not in the intervening loops. phases = ["main"] if fix: phases.append("post") for phase in phases: if len(phases) > 1: rules_this_phase = [ rule for rule in rule_pack.rules if rule.lint_phase == phase ] else: rules_this_phase = rule_pack.rules for loop in range(loop_limit if phase == "main" else 2): def is_first_linter_pass() -> bool: return phase == phases[0] and loop == 0 # Additional newlines are to assist in scanning linting loops # during debugging. linter_logger.info( f"\n\nEntering linter phase {phase}, loop {loop + 1}/{loop_limit}\n" ) changed = False if is_first_linter_pass(): # In order to compute initial_linting_errors correctly, need # to run all rules on the first loop of the main phase. rules_this_phase = rule_pack.rules progress_bar_crawler = tqdm( rules_this_phase, desc="lint by rules", leave=False, disable=progress_bar_configuration.disable_progress_bar, ) for crawler in progress_bar_crawler: # Performance: After first loop pass, skip rules that don't # do fixes. Any results returned won't be seen by the user # anyway (linting errors ADDED by rules changing SQL, are # not reported back to the user - only initial linting errors), # so there's absolutely no reason to run them. if ( fix and not is_first_linter_pass() and not crawler.is_fix_compatible ): continue progress_bar_crawler.set_description(f"rule {crawler.code}") t0 = time.monotonic() # fixes should be a dict {} with keys edit, delete, create # delete is just a list of segments to delete # edit and create are list of tuples. The first element is # the "anchor", the segment to look for either to edit or to # insert BEFORE. The second is the element to insert or create. linting_errors, _, fixes, _ = crawler.crawl( tree, dialect=config.get("dialect_obj"), fix=fix, templated_file=templated_file, ignore_mask=ignore_mask, fname=fname, config=config, ) if is_first_linter_pass(): initial_linting_errors += linting_errors if fix and fixes: linter_logger.info(f"Applying Fixes [{crawler.code}]: {fixes}") # Do some sanity checks on the fixes before applying. anchor_info = compute_anchor_edit_info(fixes) if any( not info.is_valid for info in anchor_info.values() ): # pragma: no cover message = ( f"Rule {crawler.code} returned conflicting " "fixes with the same anchor. This is only " "supported for create_before+create_after, so " "the fixes will not be applied. " ) for uuid, info in anchor_info.items(): if not info.is_valid: message += f"\n{uuid}:" for _fix in info.fixes: message += f"\n {_fix}" cls._report_conflicting_fixes_same_anchor(message) for lint_result in linting_errors: lint_result.fixes = [] elif fixes == last_fixes: # If we generate the same fixes two times in a row, # that means we're in a loop, and we want to stop. # (Fixes should address issues, hence different # and/or fewer fixes next time.) # This is most likely because fixes could not be safely # applied last time, so we should stop gracefully. linter_logger.debug( f"Fixes generated for {crawler.code} are the same as " "the previous pass. Assuming that we cannot apply them " "safely. Passing gracefully." ) else: # This is the happy path. We have fixes, now we want to # apply them. 
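                        # Keep a reference to these fixes: if the next pass
                        # produces an identical set, the `fixes == last_fixes`
                        # branch above detects the loop and stops gracefully.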
last_fixes = fixes new_tree, _, _, _valid = apply_fixes( tree, config.get("dialect_obj"), crawler.code, anchor_info, fix_even_unparsable=config.get("fix_even_unparsable"), ) # Check for infinite loops. We use a combination of the # fixed templated file and the list of source fixes to # apply. loop_check_tuple = ( new_tree.raw, tuple(new_tree.source_fixes), ) # Was anything actually applied? If not, then the fixes we # had cannot be safely applied and we should stop trying. if loop_check_tuple == (tree.raw, tuple(tree.source_fixes)): linter_logger.debug( f"Fixes for {crawler.code} could not be safely be " "applied. Likely due to initially unparsable file." ) elif not _valid: # The fixes result in an invalid file. Don't apply # the fix and skip onward. Show a warning. linter_logger.warning( f"Fixes for {crawler.code} not applied, as it " "would result in an unparsable file. Please " "report this as a bug with a minimal query " "which demonstrates this warning." ) elif loop_check_tuple not in previous_versions: # We've not seen this version of the file so # far. Continue. tree = new_tree previous_versions.add(loop_check_tuple) changed = True continue else: # Applying these fixes took us back to a state # which we've seen before. We're in a loop, so # we want to stop. cls._warn_unfixable(crawler.code) # Record rule timing rule_timings.append( (crawler.code, crawler.name, time.monotonic() - t0) ) if fix and not changed: # We did not change the file. Either the file is clean (no # fixes), or any fixes which are present will take us back # to a previous state. linter_logger.info( f"Fix loop complete for {phase} phase. Stability " f"achieved after {loop}/{loop_limit} loops." ) break else: if fix: # The linter loop hit the limit before reaching a stable point # (i.e. free of lint errors). If this happens, it's usually # because one or more rules produced fixes which did not address # the original issue **or** created new issues. linter_logger.warning( f"Loop limit on fixes reached [{loop_limit}]." ) # Discard any fixes for the linting errors, since they caused a # loop. IMPORTANT: By doing this, we are telling SQLFluff that # these linting errors are "unfixable". This is important, # because when "sqlfluff fix" encounters unfixable lint errors, # it exits with a "failure" exit code, which is exactly what we # want in this situation. (Reason: Although this is more of an # internal SQLFluff issue, users deserve to know about it, # because it means their file(s) weren't fixed. for violation in initial_linting_errors: if isinstance(violation, SQLLintError): violation.fixes = [] # Return the original parse tree, before any fixes were applied. # Reason: When the linter hits the loop limit, the file is often # messy, e.g. some of the fixes were applied repeatedly, possibly # other weird things. We don't want the user to see this junk! 
return save_tree, initial_linting_errors, ignore_mask, rule_timings if config.get("ignore_templated_areas", default=True): initial_linting_errors = cls.remove_templated_errors(initial_linting_errors) linter_logger.info("\n###\n#\n# {}\n#\n###".format("Fixed Tree:")) linter_logger.info("\n" + tree.stringify()) return tree, initial_linting_errors, ignore_mask, rule_timings @classmethod def lint_parsed( cls, parsed: ParsedString, rule_pack: RulePack, fix: bool = False, formatter: Optional[FormatterInterface] = None, encoding: str = "utf8", ) -> LintedFile: """Lint a ParsedString and return a LintedFile.""" violations = parsed.violations time_dict = parsed.time_dict tree: Optional[BaseSegment] = None templated_file: Optional[TemplatedFile] = None t0 = time.monotonic() # First identify the root variant. That's the first variant # that successfully parsed. root_variant: Optional[ParsedVariant] = None for variant in parsed.parsed_variants: if variant.tree: root_variant = variant break else: linter_logger.info( "lint_parsed found no valid root variant for %s", parsed.fname ) # If there is a root variant, handle that first. if root_variant: linter_logger.info("lint_parsed - linting root variant (%s)", parsed.fname) assert root_variant.tree # We just checked this. ( fixed_tree, initial_linting_errors, ignore_mask, rule_timings, ) = cls.lint_fix_parsed( root_variant.tree, config=parsed.config, rule_pack=rule_pack, fix=fix, fname=parsed.fname, templated_file=variant.templated_file, formatter=formatter, ) # Set legacy variables for now # TODO: Revise this templated_file = variant.templated_file tree = fixed_tree # We're only going to return the *initial* errors, rather # than any generated during the fixing cycle. violations += initial_linting_errors # Attempt to lint other variants if they exist. # TODO: Revise whether this is sensible... for idx, alternate_variant in enumerate(parsed.parsed_variants): if alternate_variant is variant or not alternate_variant.tree: continue linter_logger.info("lint_parsed - linting alt variant (%s)", idx) ( _, # Fixed Tree alt_linting_errors, _, # Ignore Mask _, # Timings ) = cls.lint_fix_parsed( alternate_variant.tree, config=parsed.config, rule_pack=rule_pack, fix=fix, fname=parsed.fname, templated_file=alternate_variant.templated_file, formatter=formatter, ) violations += alt_linting_errors # If no root variant, we should still apply ignores to any parsing # or templating fails. else: rule_timings = [] disable_noqa_except: Optional[str] = parsed.config.get( "disable_noqa_except" ) if parsed.config.get("disable_noqa") and not disable_noqa_except: # NOTE: This path is only accessible if there is no valid `tree` # which implies that there was a fatal templating fail. Even an # unparsable file will still have a valid tree. ignore_mask = None else: # Templating and/or parsing have failed. Look for "noqa" # comments (the normal path for identifying these comments # requires access to the parse tree, and because of the failure, # we don't have a parse tree). 
allowed_rules_ref_map = cls.allowed_rule_ref_map( rule_pack.reference_map, disable_noqa_except ) ignore_mask, ignore_violations = IgnoreMask.from_source( parsed.source_str, [ lm for lm in parsed.config.get("dialect_obj").lexer_matchers if lm.name == "inline_comment" ][0], allowed_rules_ref_map, ) violations += ignore_violations # Update the timing dict time_dict["linting"] = time.monotonic() - t0 # We process the ignore config here if appropriate for violation in violations: violation.ignore_if_in(parsed.config.get("ignore")) violation.warning_if_in(parsed.config.get("warnings")) linted_file = LintedFile( parsed.fname, # Deduplicate violations LintedFile.deduplicate_in_source_space(violations), FileTimings(time_dict, rule_timings), tree, ignore_mask=ignore_mask, templated_file=templated_file, encoding=encoding, ) # This is the main command line output from linting. if formatter: formatter.dispatch_file_violations( parsed.fname, linted_file, only_fixable=fix, warn_unused_ignores=parsed.config.get("warn_unused_ignores"), ) # Safety flag for unset dialects if linted_file.get_violations( fixable=True if fix else None, types=SQLParseError ): if formatter: # pragma: no cover TODO? formatter.dispatch_dialect_warning( # The dialect property is the string, not the dialect object cast(str, parsed.config.get("dialect")) ) return linted_file @classmethod def allowed_rule_ref_map( cls, reference_map: dict[str, set[str]], disable_noqa_except: Optional[str] ) -> dict[str, set[str]]: """Generate a noqa rule reference map.""" # disable_noqa_except is not set, return the entire map. if not disable_noqa_except: return reference_map output_map = reference_map # Add the special rules so they can be excluded for `disable_noqa_except` usage for special_rule in ["PRS", "LXR", "TMP"]: output_map[special_rule] = {special_rule} # Expand glob usage of rules unexpanded_rules = tuple(r.strip() for r in disable_noqa_except.split(",")) noqa_set = set() for r in unexpanded_rules: for x in fnmatch.filter(output_map.keys(), r): noqa_set |= output_map.get(x, set()) # Return a new map with only the excluded rules return {k: v.intersection(noqa_set) for k, v in output_map.items()} @classmethod def lint_rendered( cls, rendered: RenderedFile, rule_pack: RulePack, fix: bool = False, formatter: Optional[FormatterInterface] = None, ) -> LintedFile: """Take a RenderedFile and return a LintedFile.""" parsed = cls.parse_rendered(rendered) return cls.lint_parsed( parsed, rule_pack=rule_pack, fix=fix, formatter=formatter, encoding=rendered.encoding, ) # ### Instance Methods # These are tied to a specific instance and so are not necessarily # safe to use in parallel operations. def render_string( self, in_str: str, fname: str, config: FluffConfig, encoding: str ) -> RenderedFile: """Template the file.""" linter_logger.info("Rendering String [%s] (%s)", self.templater.name, fname) # Start the templating timer t0 = time.monotonic() # Newlines are normalised to unix-style line endings (\n). # The motivation is that Jinja normalises newlines during templating and # we want consistent mapping between the raw and templated slices. in_str = self._normalise_newlines(in_str) # Since Linter.__init__() does not require a dialect to be specified, # check for one now. (We're processing a string, not a file, so we're # not going to pick up a .sqlfluff or other config file to provide a # missing dialect at this point.) 
config.verify_dialect_specified() if not config.get("templater_obj") == self.templater: linter_logger.warning( f"Attempt to set templater to {config.get('templater_obj').name} " f"failed. Using {self.templater.name} templater. Templater cannot " "be set in a .sqlfluff file in a subdirectory of the current " "working directory. It can be set in a .sqlfluff in the current " "working directory. See Nesting section of the docs for more " "details." ) variant_limit = config.get("render_variant_limit") templated_variants: list[TemplatedFile] = [] templater_violations: list[SQLTemplaterError] = [] try: for variant, templater_errs in self.templater.process_with_variants( in_str=in_str, fname=fname, config=config, formatter=self.formatter ): if variant: templated_variants.append(variant) # NOTE: We could very easily end up with duplicate errors between # different variants and this code doesn't currently do any # deduplication between them. That will be resolved in further # testing. # TODO: Resolve potential duplicate templater violations between # variants before we enable jinja variant linting by default. templater_violations += templater_errs if len(templated_variants) >= variant_limit: # Stop if we hit the limit. break except SQLTemplaterError as templater_err: # Fatal templating error. Capture it and don't generate a variant. templater_violations.append(templater_err) except SQLFluffSkipFile as skip_file_err: # pragma: no cover linter_logger.warning(str(skip_file_err)) if not templated_variants: linter_logger.info("TEMPLATING FAILED: %s", templater_violations) linter_logger.info("Rendered %s variants", len(templated_variants)) # Record time time_dict = {"templating": time.monotonic() - t0} return RenderedFile( templated_variants, templater_violations, config, time_dict, fname, encoding, in_str, ) def render_file(self, fname: str, root_config: FluffConfig) -> RenderedFile: """Load and render a file with relevant config.""" # Load the raw file. raw_file, config, encoding = self.load_raw_file_and_config(fname, root_config) # Render the file return self.render_string(raw_file, fname, config, encoding) def parse_string( self, in_str: str, fname: str = "", config: Optional[FluffConfig] = None, encoding: str = "utf-8", parse_statistics: bool = False, ) -> ParsedString: """Parse a string.""" violations: list[SQLBaseError] = [] # Dispatch the output for the template header (including the config diff) if self.formatter: self.formatter.dispatch_template_header(fname, self.config, config) # Just use the local config from here: config = (config or self.config).copy() # Scan the raw file for config commands. 
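        # (e.g. an inline directive comment such as
        # "-- sqlfluff:dialect:postgres", shown here purely as an example.)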
config.process_raw_file_for_config(in_str, fname) rendered = self.render_string(in_str, fname, config, encoding) violations += rendered.templater_violations # Dispatch the output for the parse header if self.formatter: self.formatter.dispatch_parse_header(fname) return self.parse_rendered(rendered, parse_statistics=parse_statistics) def fix( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, ) -> tuple[BaseSegment, list[SQLBaseError]]: """Return the fixed tree and violations from lintfix when we're fixing.""" config = config or self.config rule_pack = self.get_rulepack(config=config) fixed_tree, violations, _, _ = self.lint_fix_parsed( tree, config, rule_pack, fix=True, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return fixed_tree, violations def lint( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, ) -> list[SQLBaseError]: """Return just the violations from lintfix when we're only linting.""" config = config or self.config rule_pack = self.get_rulepack(config=config) _, violations, _, _ = self.lint_fix_parsed( tree, config, rule_pack, fix=False, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return violations def lint_string( self, in_str: str = "", fname: str = "", fix: bool = False, config: Optional[FluffConfig] = None, encoding: str = "utf8", ) -> LintedFile: """Lint a string. Returns: :obj:`LintedFile`: an object representing that linted file. """ # Sort out config, defaulting to the built in config if no override config = config or self.config # Parse the string. parsed = self.parse_string( in_str=in_str, fname=fname, config=config, ) # Get rules as appropriate rule_pack = self.get_rulepack(config=config) # Lint the file and return the LintedFile return self.lint_parsed( parsed, rule_pack, fix=fix, formatter=self.formatter, encoding=encoding, ) def lint_string_wrapped( self, string: str, fname: str = "", fix: bool = False, ) -> LintingResult: """Lint strings directly.""" result = LintingResult() linted_path = LintedDir(fname) linted_path.add(self.lint_string(string, fname=fname, fix=fix)) result.add(linted_path) result.stop_timer() return result def lint_path( self, path: str, fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: Optional[int] = None, ) -> LintedDir: """Lint a path.""" return self.lint_paths( (path,), fix, ignore_non_existent_files, ignore_files, processes ).paths[0] def lint_paths( self, paths: tuple[str, ...], fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: Optional[int] = None, apply_fixes: bool = False, fixed_file_suffix: str = "", fix_even_unparsable: bool = False, retain_files: bool = True, ) -> LintingResult: """Lint an iterable of paths.""" # If no paths specified - assume local if not paths: # pragma: no cover paths = (os.getcwd(),) # Set up the result to hold what we get back result = LintingResult() expanded_paths: list[str] = [] expanded_path_to_linted_dir = {} sql_exts = self.config.get("sql_file_exts", default=".sql").lower().split(",") for path in paths: linted_dir = LintedDir(path, retain_files=retain_files) result.add(linted_dir) for fname in paths_from_path( path, ignore_non_existent_files=ignore_non_existent_files, ignore_files=ignore_files, target_file_exts=sql_exts, ): expanded_paths.append(fname) 
expanded_path_to_linted_dir[fname] = linted_dir
        files_count = len(expanded_paths)

        if processes is None:
            processes = self.config.get("processes", default=1)
        assert processes is not None
        # Hard set processes to 1 if only 1 file is queued.
        # The overhead will never be worth it with one file.
        if files_count == 1:
            processes = 1

        # to avoid circular import
        from sqlfluff.core.linter.runner import get_runner

        runner, effective_processes = get_runner(
            self,
            self.config,
            processes=processes,
            allow_process_parallelism=self.allow_process_parallelism,
        )

        if self.formatter and effective_processes != 1:
            self.formatter.dispatch_processing_header(effective_processes)

        # Show files progress bar only when there is more than one.
        first_path = expanded_paths[0] if expanded_paths else ""
        progress_bar_files = tqdm(
            total=files_count,
            desc=f"file {first_path}",
            leave=False,
            disable=files_count <= 1
            or progress_bar_configuration.disable_progress_bar,
        )

        for i, linted_file in enumerate(runner.run(expanded_paths, fix), start=1):
            linted_dir = expanded_path_to_linted_dir[linted_file.path]
            linted_dir.add(linted_file)
            # If any fatal errors, then stop iteration.
            if any(v.fatal for v in linted_file.violations):  # pragma: no cover
                linter_logger.error("Fatal linting error. Halting further linting.")
                break

            # If we're applying fixes, then do that here.
            if apply_fixes:
                num_tmp_prs_errors = linted_file.num_violations(
                    types=TMP_PRS_ERROR_TYPES,
                    filter_ignore=False,
                    filter_warning=False,
                )
                if fix_even_unparsable or num_tmp_prs_errors == 0:
                    linted_file.persist_tree(
                        suffix=fixed_file_suffix, formatter=self.formatter
                    )

            # Progress bar for files is rendered only when there is more than one file.
            # Additionally, as it's updated after each loop, we need to get the file
            # name from the next loop. This is why `enumerate` starts with `1` and
            # there is `i < len` to not exceed the files list length.
            progress_bar_files.update(n=1)
            if i < len(expanded_paths):
                progress_bar_files.set_description(f"file {expanded_paths[i]}")

        result.stop_timer()
        return result

    def parse_path(
        self,
        path: str,
        parse_statistics: bool = False,
    ) -> Iterator[ParsedString]:
        """Parse a path of sql files.

        NB: This is a generator which will yield the result of each file
        within the path iteratively.
        """
        sql_exts = self.config.get("sql_file_exts", default=".sql").lower().split(",")
        for fname in paths_from_path(
            path,
            target_file_exts=sql_exts,
        ):
            if self.formatter:
                self.formatter.dispatch_path(path)
            # Load the file with the config and yield the result.
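            # NOTE: Files which raise SQLFluffSkipFile on load (e.g. files
            # which can't be loaded cleanly or are deliberately skipped) are
            # warned about and then excluded, rather than failing the run.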
try: raw_file, config, encoding = self.load_raw_file_and_config( fname, self.config ) except SQLFluffSkipFile as s: linter_logger.warning(str(s)) continue yield self.parse_string( raw_file, fname=fname, config=config, encoding=encoding, parse_statistics=parse_statistics, ) sqlfluff-3.4.2/src/sqlfluff/core/linter/linting_result.py000066400000000000000000000204771503426445100236320ustar00rootroot00000000000000"""Defines the linter class.""" import csv import time from collections.abc import Iterable, Mapping from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union from sqlfluff.core.errors import CheckTuple, SQLBaseError from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.linter.linted_dir import LintedDir, LintingRecord from sqlfluff.core.timing import RuleTimingSummary, TimingSummary if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments.base import BaseSegment def sum_dicts(d1: Mapping[str, int], d2: Mapping[str, int]) -> dict[str, int]: """Take the keys of two dictionaries and add their values.""" keys = set(d1.keys()) | set(d2.keys()) return {key: d1.get(key, 0) + d2.get(key, 0) for key in keys} T = TypeVar("T") def combine_dicts(*d: dict[str, T]) -> dict[str, T]: """Take any set of dictionaries and combine them.""" dict_buffer: dict[str, T] = {} for dct in d: dict_buffer.update(dct) return dict_buffer class LintingResult: """A class to represent the result of a linting operation. Notably this might be a collection of paths, all with multiple potential files within them. """ def __init__(self) -> None: self.paths: list[LintedDir] = [] self._start_time: float = time.monotonic() self.total_time: float = 0.0 def add(self, path: LintedDir) -> None: """Add a new `LintedDir` to this result.""" self.paths.append(path) def stop_timer(self) -> None: """Stop the linting timer.""" self.total_time = time.monotonic() - self._start_time def check_tuples( self, raise_on_non_linting_violations: bool = True ) -> list[CheckTuple]: """Fetch all check_tuples from all contained `LintedDir` objects. Returns: A list of check tuples. """ return [ t for path in self.paths for t in path.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) ] def check_tuples_by_path(self) -> dict[str, list[CheckTuple]]: """Fetch all check_tuples from all contained `LintedDir` objects. Returns: A dict, with lists of tuples grouped by path. """ buff: dict[str, list[CheckTuple]] = {} for path in self.paths: buff.update(path.check_tuples_by_path()) return buff def num_violations( self, types: Optional[Union[type[SQLBaseError], Iterable[type[SQLBaseError]]]] = None, fixable: Optional[bool] = None, ) -> int: """Count the number of violations in the result.""" return sum( path.num_violations(types=types, fixable=fixable) for path in self.paths ) def get_violations( self, rules: Optional[Union[str, tuple[str, ...]]] = None ) -> list[SQLBaseError]: """Return a list of violations in the result.""" return [v for path in self.paths for v in path.get_violations(rules=rules)] def stats( self, fail_code: int, success_code: int ) -> dict[str, Union[int, float, str]]: """Return a stats dictionary of this result.""" # Add up all the counts for each file. # NOTE: Having a more strictly typed dict for the counts also helps with # typing later in this method. counts: dict[str, int] = dict(files=0, clean=0, unclean=0, violations=0) for path in self.paths: counts = sum_dicts(path.stats(), counts) # Set up the overall dictionary. 
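        # As well as the raw counts we add some derived metrics (the average
        # number of violations per file and the proportion of unclean files),
        # plus an exit code and status label determined by whether any
        # violations were found at all.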
all_stats: dict[str, Union[int, float, str]] = {} all_stats.update(counts) if counts["files"] > 0: all_stats["avg per file"] = counts["violations"] * 1.0 / counts["files"] all_stats["unclean rate"] = counts["unclean"] * 1.0 / counts["files"] else: all_stats["avg per file"] = 0 all_stats["unclean rate"] = 0 all_stats["clean files"] = all_stats["clean"] all_stats["unclean files"] = all_stats["unclean"] all_stats["exit code"] = fail_code if counts["violations"] > 0 else success_code all_stats["status"] = "FAIL" if counts["violations"] > 0 else "PASS" return all_stats def timing_summary(self) -> dict[str, dict[str, Any]]: """Return a timing summary.""" timing = TimingSummary() rules_timing = RuleTimingSummary() for dir in self.paths: # Add timings from cached values. # NOTE: This is so we don't rely on having the raw file objects any more. for t in dir.step_timings: timing.add(t) rules_timing.add(dir.rule_timings) return {**timing.summary(), **rules_timing.summary()} def persist_timing_records(self, filename: str) -> None: """Persist the timing records as a csv for external analysis.""" meta_fields = [ "path", "source_chars", "templated_chars", "segments", "raw_segments", ] timing_fields = ["templating", "lexing", "parsing", "linting"] # Iterate through all the files to get rule timing information so # we know what headings we're going to need. rule_codes: set[str] = set() for path in self.paths: for record in path.as_records(): if "timings" not in record: # pragma: no cover continue rule_codes.update(record["timings"].keys()) rule_codes -= set(timing_fields) with open(filename, "w", newline="") as f: writer = csv.DictWriter( # Metadata first, then step timings and then _sorted_ rule codes. f, fieldnames=meta_fields + timing_fields + sorted(rule_codes), ) # Write the header writer.writeheader() for path in self.paths: for record in path.as_records(): if "timings" not in record: # pragma: no cover continue writer.writerow( { "path": record["filepath"], **record["statistics"], # character and segment lengths. **record["timings"], # step and rule timings. } ) def as_records(self) -> list[LintingRecord]: """Return the result as a list of dictionaries. Each record contains a key specifying the filepath, and a list of violations. This method is useful for serialization as all objects will be builtin python types (ints, strs). """ return sorted( (record for linted_dir in self.paths for record in linted_dir.as_records()), # Sort records by filename key=lambda record: record["filepath"], ) def persist_changes( self, formatter: Optional[FormatterInterface], fixed_file_suffix: str = "" ) -> dict[str, Union[bool, str]]: """Run all the fixes for all the files and return a dict.""" return combine_dicts( *( path.persist_changes( formatter=formatter, fixed_file_suffix=fixed_file_suffix ) for path in self.paths ) ) @property def tree(self) -> Optional["BaseSegment"]: # pragma: no cover """A convenience method for when there is only one file and we want the tree.""" if len(self.paths) > 1: raise ValueError( ".tree() cannot be called when a LintingResult contains more than one " "path." 
) return self.paths[0].tree def count_tmp_prs_errors(self) -> tuple[int, int]: """Count templating or parse errors before and after filtering.""" total_errors = sum(path.num_unfiltered_tmp_prs_errors for path in self.paths) num_filtered_errors = sum(path.num_tmp_prs_errors for path in self.paths) return total_errors, num_filtered_errors def discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors(self) -> None: """Discard lint fixes for files with templating or parse errors.""" for path in self.paths: path.discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors() sqlfluff-3.4.2/src/sqlfluff/core/linter/patch.py000066400000000000000000000321741503426445100216640ustar00rootroot00000000000000"""Helpers for generating patches to fix files.""" import logging from collections.abc import Iterator from dataclasses import dataclass from typing import Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.templaters import TemplatedFile linter_logger = logging.getLogger("sqlfluff.linter") @dataclass class FixPatch: """An edit patch for a source file.""" templated_slice: slice fixed_raw: str # The patch category, functions mostly for debugging and explanation # than for function. It allows traceability of *why* this patch was # generated. It has no significance for processing. patch_category: str source_slice: slice templated_str: str source_str: str def dedupe_tuple(self) -> tuple[slice, str]: """Generate a tuple of this fix for deduping.""" return (self.source_slice, self.fixed_raw) def _iter_source_fix_patches( segment: BaseSegment, templated_file: TemplatedFile ) -> Iterator[FixPatch]: """Yield any source patches as fixes now. NOTE: This yields source fixes for the segment and any of its children, so it's important to call it at the right point in the recursion to avoid yielding duplicates. """ for source_fix in segment.source_fixes: yield FixPatch( source_fix.templated_slice, source_fix.edit, patch_category="source", source_slice=source_fix.source_slice, templated_str=templated_file.templated_str[source_fix.templated_slice], source_str=templated_file.source_str[source_fix.source_slice], ) def _iter_templated_patches( segment: BaseSegment, templated_file: TemplatedFile ) -> Iterator[FixPatch]: """Iterate through the segments generating fix patches. The patches are generated in TEMPLATED space. This is important so that we defer dealing with any loops until later. At this stage everything *should* happen in templated order. Occasionally we have an insertion around a placeholder, so we also return a hint to deal with that. """ # Does it match? If so we can ignore it. assert segment.pos_marker templated_raw = templated_file.templated_str[segment.pos_marker.templated_slice] matches = segment.raw == templated_raw if matches: # First yield any source fixes yield from _iter_source_fix_patches(segment, templated_file) # Then return. return # If we're here, the segment doesn't match the original. linter_logger.debug( "# Changed Segment Found: %s at %s: Original: [%r] Fixed: [%r]", type(segment).__name__, segment.pos_marker.templated_slice, templated_raw, segment.raw, ) # If it's all literal, then we don't need to recurse. 
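    # A "literal" position marker means this stretch of the templated file
    # maps one-to-one onto the source file (it contains no templated code),
    # so the change can be expressed directly as a single patch.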
if segment.pos_marker.is_literal(): # First yield any source fixes yield from _iter_source_fix_patches(segment, templated_file) # Then yield the position in the source file and the patch yield FixPatch( source_slice=segment.pos_marker.source_slice, templated_slice=segment.pos_marker.templated_slice, patch_category="literal", fixed_raw=segment.raw, templated_str=templated_file.templated_str[ segment.pos_marker.templated_slice ], source_str=templated_file.source_str[segment.pos_marker.source_slice], ) # Can we go deeper? elif not segment.segments: # It's not literal, but it's also a raw segment. If we're going # to yield a change, we would have done it from the parent, so # we just abort from here. return # pragma: no cover TODO? else: # This segment isn't a literal, but has changed, we need to go deeper. # If there's an end of file segment or indent, ignore them just for the # purposes of patch iteration. # NOTE: This doesn't mutate the underlying `self.segments`. segments = segment.segments while segments and segments[-1].is_type("end_of_file", "indent"): segments = segments[:-1] # Iterate through the child segments source_idx = segment.pos_marker.source_slice.start templated_idx = segment.pos_marker.templated_slice.start insert_buff = "" first_segment_pos: Optional[PositionMarker] = None for seg in segments: # First check for insertions. # At this stage, everything should have a position. assert seg.pos_marker # We know it's an insertion if it has length but not in the templated # file. if seg.raw and seg.pos_marker.is_point(): # Add it to the insertion buffer if it has length: if seg.raw: insert_buff += seg.raw # We want to capture the first position where we have a point. first_segment_pos = first_segment_pos or seg.pos_marker linter_logger.debug( "Appending insertion buffer. %r @idx: %s", insert_buff, templated_idx, ) continue # If we get here, then we know it's an original. Check for deletions at # the point before this segment (vs the TEMPLATED). # Deletions in this sense could also mean source consumption. start_diff = seg.pos_marker.templated_slice.start - templated_idx # Check to see whether there's a discontinuity before the current # segment if start_diff > 0 or insert_buff: # If we have an insert buffer, then it's an edit, otherwise a # deletion. # For the start of the next segment, we need the position of the # first raw, not the pos marker of the whole thing. That accounts # better for loops. first_segment_pos = first_segment_pos or seg.pos_marker yield FixPatch( # Whether the source slice is zero depends on the start_diff. # A non-zero start diff implies a deletion, or more likely # a consumed element of the source. We can use the tracking # markers from the last segment to recreate where this element # should be inserted in both source and template. # The slices must never go backwards so the end of the slice must # be greater than or equal to the start. source_slice=slice( source_idx, max(first_segment_pos.source_slice.start, source_idx), ), templated_slice=slice( templated_idx, max(first_segment_pos.templated_slice.start, templated_idx), ), patch_category="mid_point", fixed_raw=insert_buff, templated_str="", source_str="", ) # Reset the first position so we can move the pointer forward. first_segment_pos = None insert_buff = "" # Now we deal with any changes *within* the segment itself. yield from _iter_templated_patches(seg, templated_file=templated_file) # Once we've dealt with any patches from the segment, update # our position markers. 
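            # Advancing both pointers in lockstep is what allows the logic
            # after this loop to detect any *trailing* discontinuity (i.e. a
            # deletion or insertion at the end of the segment).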
source_idx = seg.pos_marker.source_slice.stop templated_idx = seg.pos_marker.templated_slice.stop # After the loop, we check whether there's a trailing deletion # or insert. Also valid if we still have an insertion buffer here. end_diff = segment.pos_marker.templated_slice.stop - templated_idx if end_diff or insert_buff: source_slice = slice( source_idx, segment.pos_marker.source_slice.stop, ) templated_slice = slice( templated_idx, segment.pos_marker.templated_slice.stop, ) # We determine the source_slice directly rather than # inferring it so that we can be very specific that # we ensure that fixes adjacent to source-only slices # (e.g. {% endif %}) are placed appropriately relative # to source-only slices. yield FixPatch( source_slice=source_slice, templated_slice=templated_slice, patch_category="end_point", fixed_raw=insert_buff, templated_str=templated_file.templated_str[templated_slice], source_str=templated_file.source_str[source_slice], ) def _log_hints(patch: FixPatch, templated_file: TemplatedFile) -> None: """Log hints for debugging during patch generation.""" max_log_length = 10 if patch.templated_slice.start >= max_log_length: pre_hint = templated_file.templated_str[ patch.templated_slice.start - max_log_length : patch.templated_slice.start ] else: pre_hint = templated_file.templated_str[: patch.templated_slice.start] if patch.templated_slice.stop + max_log_length < len(templated_file.templated_str): post_hint = templated_file.templated_str[ patch.templated_slice.stop : patch.templated_slice.stop + max_log_length ] else: post_hint = templated_file.templated_str[patch.templated_slice.stop :] linter_logger.debug(" Templated Hint: ...%r <> %r...", pre_hint, post_hint) def generate_source_patches( tree: BaseSegment, templated_file: TemplatedFile ) -> list[FixPatch]: """Use the fixed tree to generate source patches. Importantly here we deduplicate and sort the patches from their position in the templated file into their intended order in the source file. Any source fixes are generated in `_iter_templated_patches` and included alongside any standard fixes. That means we treat them the same here. """ # Iterate patches, filtering and translating as we go: linter_logger.debug("### Beginning Patch Iteration.") filtered_source_patches = [] dedupe_buffer = [] # We use enumerate so that we get an index for each patch. This is entirely # so when debugging logs we can find a given patch again! for idx, patch in enumerate( _iter_templated_patches(tree, templated_file=templated_file) ): linter_logger.debug(" %s Yielded patch: %s", idx, patch) _log_hints(patch, templated_file) # Check for duplicates if patch.dedupe_tuple() in dedupe_buffer: linter_logger.info( " - Skipping. Source space Duplicate: %s", patch.dedupe_tuple(), ) continue # We now evaluate patches in the source-space for whether they overlap # or disrupt any templated sections unless designed to do so. # NOTE: We rely here on the patches being generated in order. # Get the affected raw slices. 
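        # Each raw slice carries a `slice_type` (e.g. "literal", "templated"
        # or "block_start"); the mix of types spanned by a patch is what
        # determines below whether it's safe to apply.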
local_raw_slices = templated_file.raw_slices_spanning_source_slice( patch.source_slice ) local_type_list = [slc.slice_type for slc in local_raw_slices] # Deal with the easy cases of 1) New code at end 2) only literals if not local_type_list or set(local_type_list) == {"literal"}: linter_logger.info( " * Keeping patch on new or literal-only section.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) # Handle the easy case of an explicit source fix elif patch.patch_category == "source": linter_logger.info( " * Keeping explicit source fix patch.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) # Is it a zero length patch. elif ( patch.source_slice.start == patch.source_slice.stop and patch.source_slice.start == local_raw_slices[0].source_idx ): linter_logger.info( " * Keeping insertion patch on slice boundary.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) else: # pragma: no cover # We've got a situation where the ends of our patch need to be # more carefully mapped. This used to happen with greedy template # element matching, but should now never happen. In the event that # it does, we'll warn but carry on. linter_logger.warning( "Skipping edit patch on uncertain templated section [%s], " "Please report this warning on GitHub along with the query " "that produced it.", (patch.patch_category, patch.source_slice), ) continue # Sort the patches before building up the file. return sorted(filtered_source_patches, key=lambda x: x.source_slice.start) sqlfluff-3.4.2/src/sqlfluff/core/linter/runner.py000066400000000000000000000270661503426445100221020ustar00rootroot00000000000000"""Implements runner classes used internally by the Linter class. Implements various runner types for SQLFluff: - Serial - Parallel - Multiprocess - Multithread (used only by automated tests) """ import bdb import functools import logging import multiprocessing import multiprocessing.dummy import multiprocessing.pool import signal import sys import traceback from abc import ABC, abstractmethod from collections.abc import Iterable, Iterator from types import TracebackType from typing import Callable, Optional, Union from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.core.linter import LintedFile, RenderedFile from sqlfluff.core.plugin.host import is_main_process linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") PartialLintCallable = Callable[[], LintedFile] class BaseRunner(ABC): """Base runner class.""" def __init__( self, linter: Linter, config: FluffConfig, ) -> None: self.linter = linter self.config = config pass_formatter = True def iter_rendered(self, fnames: list[str]) -> Iterator[tuple[str, RenderedFile]]: """Iterate through rendered files ready for linting.""" for fname in self.linter.templater.sequence_files( fnames, config=self.config, formatter=self.linter.formatter ): try: yield fname, self.linter.render_file(fname, self.config) except SQLFluffSkipFile as s: linter_logger.warning(str(s)) def iter_partials( self, fnames: list[str], fix: bool = False, ) -> Iterator[tuple[str, PartialLintCallable]]: """Iterate through partials for linted files. Generates filenames and objects which return LintedFiles. 
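        Each partial is a zero-argument callable which, when invoked,
        performs the lint (via ``lint_rendered``) and returns a
        ``LintedFile``. Deferring the work in this way allows the same
        generator to feed either the sequential runner or a process or
        thread pool.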
""" for fname, rendered in self.iter_rendered(fnames): # Generate a fresh ruleset rule_pack = self.linter.get_rulepack(config=rendered.config) yield ( fname, functools.partial( self.linter.lint_rendered, rendered, rule_pack, fix, # Formatters may or may not be passed. They don't pickle # nicely so aren't appropriate in a multiprocessing world. self.linter.formatter if self.pass_formatter else None, ), ) @abstractmethod def run(self, fnames: list[str], fix: bool) -> Iterator[LintedFile]: """Run linting on the specified list of files.""" ... @classmethod def _init_global(cls) -> None: """Initializes any global state. May be overridden by subclasses to apply global configuration, initialize logger state in child processes, etc. """ pass @staticmethod def _handle_lint_path_exception(fname: Optional[str], e: BaseException) -> None: if isinstance(e, IOError): # IOErrors are caught in commands.py, so propagate it raise (e) # pragma: no cover linter_logger.warning( f"""Unable to lint {fname} due to an internal error. \ Please report this as an issue with your query's contents and stacktrace below! To hide this warning, add the failing file to .sqlfluffignore {traceback.format_exc()}""", ) class SequentialRunner(BaseRunner): """Simple runner that does sequential processing.""" def run(self, fnames: list[str], fix: bool) -> Iterator[LintedFile]: """Sequential implementation.""" for fname, partial in self.iter_partials(fnames, fix=fix): try: yield partial() except (bdb.BdbQuit, KeyboardInterrupt): # pragma: no cover raise except Exception as e: self._handle_lint_path_exception(fname, e) class ParallelRunner(BaseRunner): """Base class for parallel runner implementations (process or thread).""" POOL_TYPE: Callable[..., multiprocessing.pool.Pool] # Don't pass the formatter in a parallel world, they # don't pickle well. pass_formatter = False def __init__(self, linter: Linter, config: FluffConfig, processes: int) -> None: super().__init__(linter, config) self.processes = processes def run(self, fnames: list[str], fix: bool) -> Iterator[LintedFile]: """Parallel implementation. Note that the partials are generated one at a time then passed directly into the pool as they're ready. This means the main thread can do the IO work while passing the parsing and linting work out to the threads. """ with self._create_pool( self.processes, self._init_global, ) as pool: try: for lint_result in self._map( pool, self._apply, self.iter_partials(fnames, fix=fix), ): if isinstance(lint_result, DelayedException): try: lint_result.reraise() except Exception as e: self._handle_lint_path_exception(lint_result.fname, e) else: # It's a LintedDir. if self.linter.formatter: self.linter.formatter.dispatch_file_violations( lint_result.path, lint_result, only_fixable=fix, warn_unused_ignores=self.linter.config.get( "warn_unused_ignores" ), ) yield lint_result except KeyboardInterrupt: # pragma: no cover # On keyboard interrupt (Ctrl-C), terminate the workers. # Notify the user we've received the signal and are cleaning up, # in case it takes awhile. print("Received keyboard interrupt. Cleaning up and shutting down...") pool.terminate() @staticmethod def _apply( partial_tuple: tuple[str, PartialLintCallable], ) -> Union["DelayedException", LintedFile]: """Shim function used in parallel mode.""" # Unpack the tuple and ditch the filename in this case. fname, partial = partial_tuple try: return partial() # Capture any exceptions and return as delayed exception to handle # in the main thread. 
except Exception as e:
            return DelayedException(e, fname=fname)

    @classmethod
    def _init_global(cls) -> None:  # pragma: no cover
        """For the parallel runners, indicate that we're not in the main thread."""
        is_main_process.set(False)
        super()._init_global()

    @classmethod
    def _create_pool(
        cls, processes: int, initializer: Callable[[], None]
    ) -> multiprocessing.pool.Pool:
        return cls.POOL_TYPE(processes=processes, initializer=initializer)

    @classmethod
    @abstractmethod
    def _map(
        cls,
        pool: multiprocessing.pool.Pool,
        func: Callable[
            [tuple[str, PartialLintCallable]], Union["DelayedException", LintedFile]
        ],
        iterable: Iterable[tuple[str, PartialLintCallable]],
    ) -> Iterable[Union["DelayedException", LintedFile]]:  # pragma: no cover
        """Class-specific map method.

        NOTE: Must be overridden by an implementation.
        """
        ...


class MultiProcessRunner(ParallelRunner):
    """Runner that does parallel processing using multiple processes."""

    # NOTE: Python 3.13 deprecates calling `Pool` without first setting
    # the context. The default was already "spawn" for MacOS and Windows
    # but was previously "fork" for other Linux platforms. From python
    # 3.14 onwards, the default will not be "fork" anymore.
    # In testing we've found no significant difference between "fork"
    # and "spawn", and so settle on "spawn" for all operating systems.
    # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    POOL_TYPE = multiprocessing.get_context("spawn").Pool

    @classmethod
    def _init_global(cls) -> None:  # pragma: no cover
        super()._init_global()
        # Disable signal handling in the child processes to let the parent
        # control all KeyboardInterrupt handling (Control C). This is
        # necessary in order for keyboard interrupts to exit quickly and
        # cleanly. Adapted from this post:
        # https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    @classmethod
    def _map(
        cls,
        pool: multiprocessing.pool.Pool,
        func: Callable[
            [tuple[str, PartialLintCallable]], Union["DelayedException", LintedFile]
        ],
        iterable: Iterable[tuple[str, PartialLintCallable]],
    ) -> Iterable[Union["DelayedException", LintedFile]]:
        """Map using imap unordered.

        We use this so we can iterate through results as they arrive,
        and while other files are still being processed.
        """
        return pool.imap_unordered(func=func, iterable=iterable)


class MultiThreadRunner(ParallelRunner):
    """Runner that does parallel processing using multiple threads.

    Used only by automated tests.
    """

    POOL_TYPE = multiprocessing.dummy.Pool

    @classmethod
    def _map(
        cls,
        pool: multiprocessing.pool.Pool,
        func: Callable[
            [tuple[str, PartialLintCallable]], Union["DelayedException", LintedFile]
        ],
        iterable: Iterable[tuple[str, PartialLintCallable]],
    ) -> Iterable[Union["DelayedException", LintedFile]]:
        """Map using imap.

        We use this so we can iterate through results as they arrive,
        and while other files are still being processed.
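        Unlike the process runner (which uses ``imap_unordered``), ``imap``
        preserves the order of the inputs in the results, which helps keep
        output deterministic for the automated tests which use this runner.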
""" return pool.imap(func=func, iterable=iterable) class DelayedException(Exception): """Multiprocessing process pool uses this to propagate exceptions.""" def __init__(self, ee: BaseException, fname: Optional[str] = None): self.ee = ee self.tb: Optional[TracebackType] _, _, self.tb = sys.exc_info() self.fname = fname super().__init__(str(ee)) def reraise(self) -> None: """Reraise the encapsulated exception.""" raise self.ee.with_traceback(self.tb) def get_runner( linter: Linter, config: FluffConfig, processes: int, allow_process_parallelism: bool = True, ) -> tuple[BaseRunner, int]: """Generate a runner instance based on parallel and system configuration. The processes argument can be positive or negative. - If positive, the integer is interpreted as the number of processes. - If negative or zero, the integer is interpreted as number_of_cpus - processes. e.g. -1 = all cpus but one. 0 = all cpus 1 = 1 cpu """ if processes <= 0: processes = max(multiprocessing.cpu_count() + processes, 1) if processes > 1: # Process parallelism isn't really supported during testing # so this flag allows us to fall back to a threaded runner # in those cases. if allow_process_parallelism: return MultiProcessRunner(linter, config, processes=processes), processes else: return MultiThreadRunner(linter, config, processes=processes), processes else: return SequentialRunner(linter, config), processes sqlfluff-3.4.2/src/sqlfluff/core/parser/000077500000000000000000000000001503426445100202035ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/parser/__init__.py000066400000000000000000000042611503426445100223170ustar00rootroot00000000000000"""init file for the parser.""" from sqlfluff.core.parser.grammar import ( AnyNumberOf, AnySetOf, Anything, Bracketed, Conditional, Delimited, Nothing, OneOf, OptionallyBracketed, OptionallyDelimited, Ref, Sequence, ) from sqlfluff.core.parser.lexer import Lexer, RegexLexer, StringLexer from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.parser import Parser from sqlfluff.core.parser.parsers import ( MultiStringParser, RegexParser, StringParser, TypedParser, ) from sqlfluff.core.parser.segments import ( BaseFileSegment, BaseSegment, BinaryOperatorSegment, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, Dedent, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralKeywordSegment, LiteralSegment, NewlineSegment, RawSegment, SegmentGenerator, SourceFix, SymbolSegment, UnlexableSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.types import ParseMode __all__ = ( "BaseSegment", "SourceFix", "BaseFileSegment", "BracketedSegment", "RawSegment", "CodeSegment", "UnlexableSegment", "CommentSegment", "WhitespaceSegment", "NewlineSegment", "KeywordSegment", "SymbolSegment", "IdentifierSegment", "LiteralSegment", "LiteralKeywordSegment", "BinaryOperatorSegment", "CompositeBinaryOperatorSegment", "ComparisonOperatorSegment", "CompositeComparisonOperatorSegment", "WordSegment", "Indent", "Dedent", "ImplicitIndent", "SegmentGenerator", "Sequence", "OneOf", "Delimited", "Bracketed", "AnyNumberOf", "AnySetOf", "Ref", "Anything", "Nothing", "OptionallyBracketed", "OptionallyDelimited", "Conditional", "StringParser", "MultiStringParser", "TypedParser", "RegexParser", "PositionMarker", "Lexer", "StringLexer", "RegexLexer", "Parser", "Matchable", "ParseMode", ) 
sqlfluff-3.4.2/src/sqlfluff/core/parser/context.py000066400000000000000000000320331503426445100222420ustar00rootroot00000000000000"""The parser context. This mirrors some of the same design of the flask context manager. https://flask.palletsprojects.com/en/1.1.x/ The context acts as a way of keeping track of state, references to common configuration and dialects, logging and also the parse and match depth of the current operation. """ import logging import uuid from collections import defaultdict from collections.abc import Iterator, Sequence from contextlib import contextmanager from typing import TYPE_CHECKING, Any, NoReturn, Optional from tqdm import tqdm from sqlfluff.core.config import progress_bar_configuration if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.config import FluffConfig from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable # Get the parser logger parser_logger = logging.getLogger("sqlfluff.parser") class ParseContext: """Object to handle the context at hand during parsing. Holds two tiers of references. 1. Persistent config, like references to the dialect or the current verbosity and logger. 2. Stack config, like the parse and match depth. The manipulation of the stack config is done using a context manager and layered config objects inside the context. NOTE: We use context managers here to avoid _copying_ the context, just to mutate it safely. This is significantly more performant than the copy operation, but does require some care to use properly. When fetching elements from the context, we first look at the top level stack config object and the persistent config values (stored as attributes of the ParseContext itself). """ def __init__( self, dialect: "Dialect", indentation_config: Optional[dict[str, Any]] = None, ) -> None: """Initialize a new instance of the class. Args: dialect (Dialect): The dialect used for parsing. indentation_config (Optional[dict[str, Any]], optional): The indentation configuration used by Indent and Dedent to control the intended indentation of certain features. Defaults to None. """ self.dialect = dialect # Indentation config is used by Indent and Dedent and used to control # the intended indentation of certain features. Specifically it is # used in the Conditional grammar. self.indentation_config = indentation_config or {} # This is the logger that child objects will latch onto. self.logger = parser_logger # A uuid for this parse context to enable cache invalidation self.uuid = uuid.uuid4() # A dict for parse caching. This is reset for each file, # but persists for the duration of an individual file parse. self._parse_cache: dict[tuple[Any, ...], "MatchResult"] = {} # A dictionary for keeping track of some statistics on parsing # for performance optimisation. # Focused around BaseGrammar._longest_trimmed_match(). # Initialise only with "next_counts", the rest will be int # and are dealt with in .increment(). self.parse_stats: dict[str, Any] = {"next_counts": defaultdict(int)} # The following attributes are only accessible via a copy # and not in the init method. # NOTE: We default to the name `File` which is not # particularly informative, does indicate the root segment. self.match_segment: str = "File" self._match_stack: list[str] = [] self._parse_stack: list[str] = [] self.match_depth = 0 self.parse_depth = 0 # self.terminators is a tuple to afford some level of isolation # and protection from edits to outside the context. 
This introduces # a little more overhead than a list, but we manage this by only # copying it when necessary. # NOTE: Includes inherited parent terminators. self.terminators: tuple["Matchable", ...] = () # Value for holding a reference to the progress bar. self._tqdm: Optional[tqdm[NoReturn]] = None # Variable to store whether we're tracking progress. When looking # ahead to terminators or suchlike, we set this to False so as not # to confuse the progress bar. self.track_progress = True # The current character, to store where the progress bar is at. self._current_char = 0 @classmethod def from_config(cls, config: "FluffConfig") -> "ParseContext": """Construct a `ParseContext` from a `FluffConfig`. Args: config (FluffConfig): The configuration object. Returns: ParseContext: The constructed ParseContext object. """ indentation_config = config.get_section("indentation") or {} try: indentation_config = {k: bool(v) for k, v in indentation_config.items()} except TypeError: # pragma: no cover raise TypeError( "One of the configuration keys in the `indentation` section is not " "True or False: {!r}".format(indentation_config) ) return cls( dialect=config.get("dialect_obj"), indentation_config=indentation_config, ) def _set_terminators( self, clear_terminators: bool = False, push_terminators: Optional[Sequence["Matchable"]] = None, ) -> tuple[int, tuple["Matchable", ...]]: """Set the terminators used in the class. This private method sets the terminators used in the class. If `clear_terminators` is True and the existing terminators are not already cleared, the method clears the terminators. If `push_terminators` is provided, the method appends them to the existing terminators if they are not already present. Args: clear_terminators (bool, optional): A flag indicating whether to clear the existing terminators. Defaults to False. push_terminators (Optional[Sequence["Matchable"]], optional): A sequence of `Matchable` objects to be added as terminators. Defaults to None. Returns: tuple[int, tuple["Matchable", ...]]: A tuple containing the number of terminators appended and the original terminators. """ _appended = 0 # Retain a reference to the original terminators. _terminators = self.terminators # Note: only need to reset if clear _and not already clear_. if clear_terminators and self.terminators: # NOTE: It's really important that we .copy() on the way in, because # we don't know what else has a reference to the input list, and # we rely a lot in this code on having full control over the # list of terminators. self.terminators = tuple(push_terminators) if push_terminators else () elif push_terminators: # Yes, inefficient for now. for terminator in push_terminators: if terminator not in self.terminators: self.terminators += (terminator,) _appended += 1 return _appended, _terminators def _reset_terminators( self, appended: int, terminators: tuple["Matchable", ...], clear_terminators: bool = False, ) -> None: """Reset the terminators attribute of the class. This method is used to reset the terminators attribute of the class. If the clear_terminators parameter is True, the terminators attribute is set to the provided terminators. If the clear_terminators parameter is False and the appended parameter is non-zero, the terminators attribute is trimmed to its original length minus the value of the appended parameter. Args: appended (int): The number of terminators that were appended. terminators (tuple["Matchable", ...]): The original terminators. 
clear_terminators (bool, optional): If True, clear the terminators attribute completely. Defaults to False. """ # If we totally reset them, just reinstate the old object. if clear_terminators: self.terminators = terminators # If we didn't, then trim any added ones. # NOTE: Because we dedupe, just because we had push_terminators # doesn't mean any of them actually got added here - we only trim # the number that actually got appended. elif appended: # Trim back to original length. self.terminators = self.terminators[:-appended] @contextmanager def deeper_match( self, name: str, clear_terminators: bool = False, push_terminators: Optional[Sequence["Matchable"]] = None, track_progress: Optional[bool] = None, ) -> Iterator["ParseContext"]: """Increment match depth. Args: name (:obj:`str`): Name of segment we are starting to parse. NOTE: This value is entirely used for tracking and logging purposes. clear_terminators (:obj:`bool`, optional): Whether to force clear any inherited terminators. This is useful in structures like brackets, where outer terminators shouldn't apply while within. Terminators are stashed until we return back out of this context. push_terminators (:obj:`Sequence` of :obj:`Matchable`): Additional terminators to add to the environment while in this context. track_progress (:obj:`bool`, optional): Whether to pause progress tracking for deeper matches. This avoids having the linting progress bar jump forward when performing greedy matches on terminators. """ self._match_stack.append(self.match_segment) self.match_segment = name self.match_depth += 1 _append, _terms = self._set_terminators(clear_terminators, push_terminators) _track_progress = self.track_progress if track_progress is False: self.track_progress = False elif track_progress is True: # pragma: no cover # We can't go from False to True. Raise an issue if not. assert self.track_progress is True, "Cannot set tracking from False to True" try: yield self finally: self._reset_terminators( _append, _terms, clear_terminators=clear_terminators ) self.match_depth -= 1 # Reset back to old name self.match_segment = self._match_stack.pop() # Reset back to old progress tracking. self.track_progress = _track_progress @contextmanager def progress_bar(self, last_char: int) -> Iterator["ParseContext"]: """Set up the progress bar (if it's not already set up). Args: last_char (:obj:`int`): The templated character position of the final segment in the sequence. This is usually populated from the end of `templated_slice` on the final segment. We require this on initialising the progress bar so that we know how far there is to go as we track progress through the file. """ assert not self._tqdm, "Attempted to re-initialise progressbar." self._tqdm = tqdm( # Progress is character by character in the *templated* file. total=last_char, desc="parsing", miniters=1, mininterval=0.2, disable=progress_bar_configuration.disable_progress_bar, leave=False, ) try: yield self finally: self._tqdm.close() def update_progress(self, char_idx: int) -> None: """Update the progress bar if configured. If progress isn't configured, we do nothing. If `track_progress` is false we do nothing. 
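        Progress is measured in characters through the *templated* file and
        only ever moves forward; updates which would move it backwards are
        ignored.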
""" if not self._tqdm or not self.track_progress: return None if char_idx <= self._current_char: return None self._tqdm.update(char_idx - self._current_char) self._current_char = char_idx return None def stack(self) -> tuple[tuple[str, ...], tuple[str, ...]]: # pragma: no cover """Return stacks as a tuples so that it can't be edited.""" return tuple(self._parse_stack), tuple(self._match_stack) def check_parse_cache( self, loc_key: tuple[Any, ...], matcher_key: str ) -> Optional["MatchResult"]: """Check against the parse cache for a pre-existing match. If no match is found in the cache, this returns None. """ return self._parse_cache.get((loc_key, matcher_key)) def put_parse_cache( self, loc_key: tuple[Any, ...], matcher_key: str, match: "MatchResult" ) -> None: """Store a match in the cache for later retrieval.""" self._parse_cache[(loc_key, matcher_key)] = match sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/000077500000000000000000000000001503426445100216315ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/__init__.py000066400000000000000000000012301503426445100237360ustar00rootroot00000000000000"""Definitions of grammars.""" from sqlfluff.core.parser.grammar.anyof import ( AnyNumberOf, AnySetOf, OneOf, OptionallyBracketed, ) from sqlfluff.core.parser.grammar.base import Anything, Nothing, Ref from sqlfluff.core.parser.grammar.conditional import Conditional from sqlfluff.core.parser.grammar.delimited import Delimited, OptionallyDelimited from sqlfluff.core.parser.grammar.sequence import Bracketed, Sequence __all__ = ( "Ref", "Anything", "Nothing", "AnyNumberOf", "AnySetOf", "OneOf", "OptionallyBracketed", "OptionallyDelimited", "Delimited", "Sequence", "Bracketed", "Conditional", ) sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/anyof.py000066400000000000000000000271701503426445100233260ustar00rootroot00000000000000"""AnyNumberOf, OneOf, OptionallyBracketed & AnySetOf.""" from collections.abc import Sequence as SequenceType from typing import Optional, Union, cast from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import ( BaseGrammar, cached_method_for_parse_context, ) from sqlfluff.core.parser.grammar.sequence import Bracketed, Sequence from sqlfluff.core.parser.match_algorithms import ( longest_match, skip_start_index_forward_to_code, trim_to_terminator, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, UnparsableSegment from sqlfluff.core.parser.types import ParseMode, SimpleHintType def _parse_mode_match_result( segments: SequenceType[BaseSegment], current_match: MatchResult, max_idx: int, parse_mode: ParseMode, ) -> MatchResult: """A helper function for the return values of AnyNumberOf. This method creates UnparsableSegments as appropriate depending on the parse mode and return values. """ # If we're being strict, just return. if parse_mode == ParseMode.STRICT: return current_match # Nothing in unmatched anyway? 
_stop_idx = current_match.matched_slice.stop
    if _stop_idx == max_idx or all(not s.is_code for s in segments[_stop_idx:max_idx]):
        return current_match

    _trim_idx = skip_start_index_forward_to_code(segments, _stop_idx)

    # Create an unmatched segment
    _expected = "Nothing else"
    if len(segments) > max_idx:
        _expected += f" before {segments[max_idx].raw!r}"

    unmatched_match = MatchResult(
        matched_slice=slice(_trim_idx, max_idx),
        matched_class=UnparsableSegment,
        segment_kwargs={"expected": _expected},
    )

    return current_match.append(unmatched_match)


class AnyNumberOf(BaseGrammar):
    """A more configurable version of OneOf."""

    supported_parse_modes = {
        ParseMode.STRICT,
        ParseMode.GREEDY,
    }

    def __init__(
        self,
        *args: Union[Matchable, str],
        max_times: Optional[int] = None,
        min_times: int = 0,
        max_times_per_element: Optional[int] = None,
        exclude: Optional[Matchable] = None,
        terminators: SequenceType[Union[Matchable, str]] = (),
        reset_terminators: bool = False,
        allow_gaps: bool = True,
        optional: bool = False,
        parse_mode: ParseMode = ParseMode.STRICT,
    ) -> None:
        self.max_times = max_times
        self.min_times = min_times
        self.max_times_per_element = max_times_per_element
        # Any patterns to _prevent_ a match.
        self.exclude = exclude
        super().__init__(
            *args,
            allow_gaps=allow_gaps,
            optional=optional,
            terminators=terminators,
            reset_terminators=reset_terminators,
            parse_mode=parse_mode,
        )

    @cached_method_for_parse_context
    def simple(
        self, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None
    ) -> SimpleHintType:
        """Does this matcher support an uppercase hash matching route?

        AnyNumberOf does provide this, as long as *all* the elements *also* do.
        """
        option_simples: list[SimpleHintType] = [
            opt.simple(parse_context=parse_context, crumbs=crumbs)
            for opt in self._elements
        ]
        if any(elem is None for elem in option_simples):
            return None
        # We now know that there are no Nones.
        simple_buff = cast(list[tuple[frozenset[str], frozenset[str]]], option_simples)
        # Combine the lists
        simple_raws = [simple[0] for simple in simple_buff if simple[0]]
        simple_types = [simple[1] for simple in simple_buff if simple[1]]
        return (
            frozenset.union(*simple_raws) if simple_raws else frozenset(),
            frozenset.union(*simple_types) if simple_types else frozenset(),
        )

    def is_optional(self) -> bool:
        """Return whether this element is optional.

        This is mostly set in the init method, but also in this case,
        if min_times is zero then this is also optional.
        """
        return self.optional or self.min_times == 0

    def match(
        self,
        segments: SequenceType["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match against any of the elements a relevant number of times.

        If it matches multiple, it returns the longest, and if any are the same
        length it returns the first (unless we explicitly just match first).
        """
        if self.exclude:
            with parse_context.deeper_match(
                name=self.__class__.__name__ + "-Exclude"
            ) as ctx:
                if self.exclude.match(segments, idx, ctx):
                    return MatchResult.empty_at(idx)

        n_matches = 0
        # Keep track of the number of times each option has been matched.
        option_counter = {elem.cache_key(): 0 for elem in self._elements}
        # Keep track of how far we've got.
        matched_idx = idx
        # The working index is to cover non-code elements which aren't
        # claimed yet, but we should conditionally claim if the next
        # match is successful.
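        # For example, whitespace trailing a successful match is only claimed
        # once a *following* element also matches; otherwise it's left for an
        # outer grammar to deal with.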
working_idx = idx matched = MatchResult.empty_at(idx) max_idx = len(segments) # What is the limit if self.parse_mode == ParseMode.GREEDY: max_idx = trim_to_terminator( segments, idx, terminators=( # Only pass through the context terminators if not resetting. self.terminators if self.reset_terminators else [*self.terminators, *parse_context.terminators] ), parse_context=parse_context, ) while True: if n_matches >= self.min_times: if ( # Either nothing left to match... matched_idx >= max_idx # ...Or we've matched as many times as allowed. or (self.max_times and n_matches >= self.max_times) ): # NOTE: For OneOf, this is the matched return path. return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # Is there nothing left to match? if matched_idx >= max_idx: # Return unsuccessful as we didn't meet the hurdle. # The positive exhausted return is above. return MatchResult.empty_at(idx) with parse_context.deeper_match( name=self.__class__.__name__, clear_terminators=self.reset_terminators, push_terminators=self.terminators, ) as ctx: match, matched_option = longest_match( # TODO: Resolve re-slice limit hack segments[:max_idx], self._elements, working_idx, ctx, ) # Did we fail to match? if not match: # If we haven't already met the hurdle rate, act as though # not match at all. if n_matches < self.min_times: matched = MatchResult.empty_at(idx) return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # Otherwise we have a new clean match. assert match assert matched_option # Update counts of each option in case we've hit limits. matched_key = matched_option.cache_key() if matched_option.cache_key() in option_counter: option_counter[matched_key] += 1 # Check if we have matched an option too many times. if ( self.max_times_per_element and option_counter[matched_key] > self.max_times_per_element ): # Return the match so far, without the most recent match. return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # If we haven't hit limits then consume and move on. matched = matched.append(match) matched_idx = matched.matched_slice.stop working_idx = matched_idx if self.allow_gaps: working_idx = skip_start_index_forward_to_code(segments, matched_idx) parse_context.update_progress(matched_idx) n_matches += 1 # Continue around the loop... class OneOf(AnyNumberOf): """Match any of the elements given once. If it matches multiple, it returns the longest, and if any are the same length it returns the first (unless we explicitly just match first). """ def __init__( self, *args: Union[Matchable, str], exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( *args, max_times=1, min_times=1, exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, parse_mode=parse_mode, ) class OptionallyBracketed(OneOf): """Hybrid of Bracketed and Sequence: allows brackets but they aren't required. NOTE: This class is greedy on brackets so if they *can* be claimed, then they will be. 
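    For example (illustratively), `OptionallyBracketed(Ref("ExpressionSegment"))`
    would match both a bare expression and the same expression wrapped in
    brackets, claiming the brackets whenever they're present.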
""" def __init__( self, *args: Union[Matchable, str], exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( Bracketed(*args), # In the case that there is only one argument, no sequence is required. args[0] if len(args) == 1 else Sequence(*args), exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, optional=optional, parse_mode=parse_mode, ) class AnySetOf(AnyNumberOf): """Match any number of the elements but each element can only be matched once.""" def __init__( self, *args: Union[Matchable, str], max_times: Optional[int] = None, min_times: int = 0, exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( *args, max_times_per_element=1, max_times=max_times, min_times=min_times, exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, parse_mode=parse_mode, ) sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/base.py000066400000000000000000000471041503426445100231230ustar00rootroot00000000000000"""Base grammar, Ref, Anything and Nothing.""" import copy from collections.abc import Sequence from typing import ( TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union, ) from uuid import UUID, uuid4 from sqlfluff.core.helpers.string import curtail_string from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_algorithms import greedy_match from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.parser.types import ParseMode, SimpleHintType if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects.base import Dialect def cached_method_for_parse_context( func: Callable[[Any, ParseContext, Optional[tuple[str]]], SimpleHintType], ) -> Callable[..., SimpleHintType]: """A decorator to cache the output of this method for a given parse context. This cache automatically invalidates if the uuid of the parse context changes. The value is store in the __dict__ attribute of the class against a key unique to that function. """ cache_key = "__cache_" + func.__name__ def wrapped_method( self: Any, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None ) -> SimpleHintType: """Cache the output of the method against a given parse context. Note: kwargs are not taken into account in the caching, but for the current use case of dependency loop debugging that's ok. """ try: cache_tuple: tuple[UUID, SimpleHintType] = self.__dict__[cache_key] # Is the value for the current context? if cache_tuple[0] == parse_context.uuid: # If so return it. return cache_tuple[1] except KeyError: # Failed to find an item in the cache. pass # If we're here, we either didn't find a match in the cache or it # wasn't valid. Generate a new value, cache it and return result = func(self, parse_context, crumbs) self.__dict__[cache_key] = (parse_context.uuid, result) return result return wrapped_method T = TypeVar("T", bound="BaseGrammar") class BaseGrammar(Matchable): """Grammars are a way of composing match statements. Any grammar must implement the `match` function. Segments can also be passed to most grammars. 
Segments implement `match` as a classmethod. Grammars implement
    it as an instance method.
    """

    is_meta = False
    equality_kwargs: tuple[str, ...] = ("_elements", "optional", "allow_gaps")
    # All grammars are assumed to support STRICT mode by default.
    # If they wish to support other modes, they should declare
    # it by overriding this attribute.
    supported_parse_modes: set[ParseMode] = {ParseMode.STRICT}

    @staticmethod
    def _resolve_ref(elem: Union[str, Matchable]) -> Matchable:
        """Resolve potential string references to things we can match against."""
        if isinstance(elem, str):
            return Ref.keyword(elem)
        elif isinstance(elem, Matchable):
            # NOTE: BaseSegment types are an instance of Matchable.
            return elem
        raise TypeError(
            "Grammar element [{!r}] of unexpected type [{}] was "
            "found.".format(elem, type(elem))  # pragma: no cover
        )

    def __init__(
        self,
        *args: Union[Matchable, str],
        allow_gaps: bool = True,
        optional: bool = False,
        terminators: Sequence[Union[Matchable, str]] = (),
        reset_terminators: bool = False,
        parse_mode: ParseMode = ParseMode.STRICT,
    ) -> None:
        """Deal with kwargs common to all grammars.

        Args:
            *args: Any number of elements which become the subjects of this
                grammar. Optionally these elements may also be string
                references to elements rather than the Matchable elements
                themselves.
            allow_gaps (:obj:`bool`, optional): Does this instance of the
                grammar allow gaps between the elements it matches? This
                may be exhibited slightly differently in each grammar. See
                that grammar for details. Defaults `True`.
            optional (:obj:`bool`, optional): In the context of a sequence,
                is this grammar *optional*, i.e. can it be skipped if no
                match is found. Outside of a Sequence, this option does nothing.
                Defaults `False`.
            terminators (Sequence of :obj:`str` or Matchable): Matchable objects
                which can terminate the grammar early. These are also used in some
                parse modes to dictate how many segments to claim when handling
                unparsable sections. Items passed as :obj:`str` are assumed to
                refer to keywords and so will be passed to `Ref.keyword()` to
                be resolved. Terminators are also added to the parse context
                during deeper matching of child elements.
            reset_terminators (:obj:`bool`, default `False`): Controls whether
                any inherited terminators from outer grammars should be cleared
                before matching child elements. Situations where this might be
                appropriate are within bracketed expressions, where outer
                terminators should be temporarily ignored.
            parse_mode (:obj:`ParseMode`): Defines how eager the grammar should
                be in claiming unmatched segments. By default, grammars usually
                only claim what they can match, but by setting this to something
                more eager, grammars can control how unparsable sections are
                treated to give the user more granular feedback on what can
                (and what *cannot*) be parsed.
        """
        # We provide a common interface for any grammar that allows positional
        # elements. If *any* of the elements are a string and not a grammar,
        # then this is a shortcut to the Ref.keyword grammar by default.
        self._elements: list[Matchable] = [self._resolve_ref(e) for e in args]

        # Now we deal with the standard kwargs
        self.allow_gaps = allow_gaps
        self.optional: bool = optional

        # The intent here is that if we match something, and then the _next_
        # item is one of these, we can safely conclude it's a "total" match.
        # In those cases, we return early without considering more options.
self.terminators: Sequence[Matchable] = [ self._resolve_ref(t) for t in terminators ] self.reset_terminators = reset_terminators assert parse_mode in self.supported_parse_modes, ( f"{self.__class__.__name__} does not support {parse_mode} " f"(only {self.supported_parse_modes})" ) self.parse_mode = parse_mode # Generate a cache key self._cache_key = uuid4().hex def cache_key(self) -> str: """Get the cache key for this grammar. For grammars these are unique per-instance. """ return self._cache_key def is_optional(self) -> bool: """Return whether this segment is optional. The optional attribute is set in the __init__ method. """ return self.optional @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None ) -> SimpleHintType: """Does this matcher support a lowercase hash matching route?""" return None def __str__(self) -> str: # pragma: no cover TODO? """Return a string representation of the object.""" return repr(self) def __repr__(self) -> str: """Return a string representation suitable for debugging.""" return "<{}: [{}]>".format( self.__class__.__name__, curtail_string( ", ".join(curtail_string(repr(elem), 40) for elem in self._elements), 100, ), ) def __eq__(self, other: Any) -> bool: """Two grammars are equal if their elements and types are equal. NOTE: We use the equality_kwargs tuple on the class to define other kwargs which should also be checked so that things like "optional" is also taken into account in considering equality. """ return type(self) is type(other) and all( getattr(self, k, None) == getattr(other, k, None) for k in self.equality_kwargs ) def __ne__(self, other: Any) -> bool: # pragma: no cover """Two grammars are not equal if their elements or types are not equal. NOTE: This is to handle some conflicts with dunder methods in mypyc. """ return not self.__eq__(other) def copy( self: T, insert: Optional[list[Matchable]] = None, at: Optional[int] = None, before: Optional[Matchable] = None, remove: Optional[list[Matchable]] = None, terminators: list[Union[str, Matchable]] = [], replace_terminators: bool = False, # NOTE: Optionally allow other kwargs to be provided to this # method for type compatibility. Any provided won't be used. **kwargs: Any, ) -> T: """Create a copy of this grammar, optionally with differences. This is mainly used in dialect inheritance. Args: insert (:obj:`list`, optional): Matchable elements to insert. This is inserted pre-expansion so can include unexpanded elements as normal. at (:obj:`int`, optional): The position in the elements to insert the item. Defaults to `None` which means insert at the end of the elements. before (optional): An alternative to _at_ to determine the position of an insertion. Using this inserts the elements immediately before the position of this element. Note that this is not an _index_ but an element to look for (i.e. a Segment or Grammar which will be compared with other elements for equality). remove (:obj:`list`, optional): A list of individual elements to remove from a grammar. Removal is done *after* insertion so that order is preserved. Elements are searched for individually. terminators (:obj:`list` of :obj:`str` or Matchable): New terminators to add to the existing ones. Whether they replace or append is controlled by `append_terminators`. :obj:`str` objects will be interpreted as keywords and passed to `Ref.keyword()`. 
replace_terminators (:obj:`bool`, default False): When `True` we replace the existing terminators from the copied grammar, otherwise we just append. **kwargs: Optional additional values may be passed to this method for inherited classes, but if unused they will raise an `AssertionError`. """ assert not kwargs, f"Unexpected kwargs to .copy(): {kwargs}" # Copy only the *grammar* elements. The rest comes through # as is because they should just be classes rather than # instances. new_elems = [ elem.copy() if isinstance(elem, BaseGrammar) else elem for elem in self._elements ] if insert: if at is not None and before is not None: # pragma: no cover raise ValueError( "Cannot specify `at` and `before` in BaseGrammar.copy()." ) if before is not None: try: idx = new_elems.index(before) except ValueError: # pragma: no cover raise ValueError( "Could not insert {} in copy of {}. {} not Found.".format( insert, self, before ) ) new_elems = new_elems[:idx] + insert + new_elems[idx:] elif at is None: new_elems = new_elems + insert else: new_elems = new_elems[:at] + insert + new_elems[at:] if remove: for elem in remove: try: new_elems.remove(elem) except ValueError: # pragma: no cover raise ValueError( "Could not remove {} from copy of {}. Not Found.".format( elem, self ) ) new_grammar = copy.copy(self) new_grammar._elements = new_elems if replace_terminators: # pragma: no cover # Override (NOTE: Not currently used). new_grammar.terminators = [self._resolve_ref(t) for t in terminators] else: # NOTE: This is also safe in the case that neither `terminators` or # `replace_terminators` are set. In that case, nothing will change. new_grammar.terminators = [ *new_grammar.terminators, *(self._resolve_ref(t) for t in terminators), ] return new_grammar class Ref(BaseGrammar): """A kind of meta-grammar that references other grammars by name at runtime.""" equality_kwargs: tuple[str, ...] = ("_ref", "optional", "allow_gaps") def __init__( self, *args: str, exclude: Optional[Matchable] = None, terminators: Sequence[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, ) -> None: # For Ref, there should only be one arg. assert len(args) == 1, ( "Ref grammar can only deal with precisely one element for now. Instead " f"found {args!r}" ) assert isinstance(args[0], str), f"Ref must be string. Found {args}." self._ref = args[0] # Any patterns to _prevent_ a match. self.exclude = exclude super().__init__( # NOTE: Don't pass on any args (we've already handled it with self._ref) allow_gaps=allow_gaps, optional=optional, # Terminators don't take effect directly within this grammar, but # the Ref grammar is an effective place to manage the terminators # inherited via the context. terminators=terminators, reset_terminators=reset_terminators, ) @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None ) -> SimpleHintType: """Does this matcher support a uppercase hash matching route? A ref is simple, if the thing it references is simple. """ if crumbs and self._ref in crumbs: # pragma: no cover loop = " -> ".join(crumbs) raise RecursionError(f"Self referential grammar detected: {loop}") return self._get_elem(dialect=parse_context.dialect).simple( parse_context=parse_context, crumbs=(crumbs or ()) + (self._ref,), ) def _get_elem(self, dialect: "Dialect") -> Matchable: """Get the actual object we're referencing.""" if dialect: # Use the dialect to retrieve the grammar it refers to. 
            return dialect.ref(self._ref)
        else:  # pragma: no cover
            raise ReferenceError("No Dialect has been provided to Ref grammar!")

    def __repr__(self) -> str:
        """Return a string representation of the 'Ref' object."""
        return "<Ref: {}{}>".format(
            repr(self._ref), " [opt]" if self.is_optional() else ""
        )

    def match(
        self,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match a list of segments against this segment.

        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.

        Args:
            segments (tuple[BaseSegment, ...]): The sequence of segments
                to match against.
            idx (int): Index of the element in the sequence.
            parse_context (ParseContext): The parse context.

        Returns:
            MatchResult: The result of the matching process.
        """
        elem = self._get_elem(dialect=parse_context.dialect)

        # First, if we have an *exclude* option, we should check that,
        # which would prevent the rest of this grammar from matching.
        if self.exclude:
            with parse_context.deeper_match(
                name=self._ref + "-Exclude",
                clear_terminators=self.reset_terminators,
                push_terminators=self.terminators,
            ) as ctx:
                if self.exclude.match(segments, idx, ctx):
                    return MatchResult.empty_at(idx)

        # Match against that. NB We're not incrementing the match_depth here.
        # References shouldn't really count as a depth of match.
        with parse_context.deeper_match(
            name=self._ref,
            clear_terminators=self.reset_terminators,
            push_terminators=self.terminators,
        ) as ctx:
            return elem.match(segments, idx, ctx)

    @classmethod
    def keyword(cls, keyword: str, optional: bool = False) -> BaseGrammar:
        """Generate a reference to a keyword by name.

        This function is entirely syntactic sugar, and designed
        for more readable dialects.

        Ref.keyword('select') == Ref('SelectKeywordSegment')

        Args:
            keyword (str): The name of the keyword.
            optional (bool, optional): Whether the keyword is optional or
                not. Defaults to False.

        Returns:
            BaseGrammar: An instance of the BaseGrammar class.
        """
        name = keyword.capitalize() + "KeywordSegment"
        return cls(name, optional=optional)


class Anything(BaseGrammar):
    """Matches anything."""

    def match(
        self,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Matches... Anything.

        Most useful in match grammars, where a later parse grammar
        will work out what's inside.

        NOTE: This grammar does still only match as far as any inherited
        terminators, if they exist.
        """
        terminators = [*self.terminators]
        if not self.reset_terminators:
            # Only add context terminators if we're not resetting.
            terminators.extend(parse_context.terminators)
        if not terminators:
            return MatchResult(slice(idx, len(segments)))
        return greedy_match(
            segments,
            idx,
            parse_context,
            terminators,
            # Using the nested match option means that we can match
            # any bracketed sections we find to persist the structure
            # even if this grammar is permissive on the meaning.
            # This preserves backward compatibility with older
            # parsing behaviour.
            nested_match=True,
        )


class Nothing(BaseGrammar):
    """Matches nothing.

    Useful for placeholders which might be overwritten by other dialects.
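
    A sketch of that override pattern (the grammar name here is
    illustrative, not taken from a real dialect):

    .. code-block:: python

        # The base dialect declares a placeholder which matches nothing...
        ansi_dialect.add(SomeClauseGrammar=Nothing())
        # ...and a child dialect swaps in a real definition.
        child_dialect.replace(SomeClauseGrammar=Ref("SelectStatementSegment"))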
""" def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Always return a failed (empty) match.""" return MatchResult.empty_at(idx) sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/conditional.py000066400000000000000000000102111503426445100245010ustar00rootroot00000000000000"""Conditional Grammar.""" from collections.abc import Sequence from typing import Union from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import BaseGrammar from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment, Indent class Conditional(BaseGrammar): """A grammar which is conditional on the parse context. | NOTE: The Conditional grammar is assumed to be operating | within a Sequence grammar, and some of the functionality | may not function within a different context. Args: *args: A meta segment which is instantiated conditionally upon the rules set. config_type: The area of the config that is used when evaluating the status of the given rules. rules: A set of `rule=boolean` pairs, which are evaluated when understanding whether conditions are met for this grammar to be enabled. Example: .. code-block:: python Conditional(Dedent, config_type="indent", indented_joins=False) This effectively says that if `indented_joins` in the "indent" section of the current config is set to `True`, then this grammar will allow a `Dedent` segment to be matched here. If `indented_joins` is set to `False`, it will be as though there was no `Dedent` in this sequence. | NOTE: While the Conditional grammar is set up to allow different | sources of configuration, it relies on configuration keys being | available within the ParseContext. Practically speaking only the | "indentation" keys are currently set up. """ def __init__( self, meta: type[Indent], config_type: str = "indentation", **rules: Union[str, bool], ): """Initialize a new instance of the class. This method initializes an instance of the class with the provided arguments. Args: meta (type[Indent]): The meta argument. config_type (str, optional): The config_type argument. Defaults to "indentation". **rules (Union[str, bool]): The rules argument. """ assert issubclass( meta, Indent ), "Conditional is only designed to work with Indent/Dedent segments." self._meta = meta if not config_type: # pragma: no cover raise ValueError("Conditional config_type must be set.") elif config_type not in ("indentation"): # pragma: no cover raise ValueError( "Only 'indentation' is supported as a Conditional config_type." ) if not rules: # pragma: no cover raise ValueError("Conditional requires rules to be set.") self._config_type = config_type self._config_rules = rules super().__init__() def is_enabled(self, parse_context: ParseContext) -> bool: """Evaluate conditionals and return whether enabled.""" # NOTE: Because only "indentation" is the only current config_type # supported, this code is much simpler that would be required in # future if multiple options are available. if self._config_type != "indentation": # pragma: no cover raise ValueError( "Only 'indentation' is supported as a Conditional config_type." ) config_section = parse_context.indentation_config # If any rules fail, return no match. for rule, val in self._config_rules.items(): # Assume False if not set. conf_val = config_section.get(rule, False) # Coerce to boolean. 
if val != bool(conf_val): return False return True def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """If enabled, return a single insert of the new segment.""" if not self.is_enabled(parse_context): return MatchResult.empty_at(idx) return MatchResult( matched_slice=slice(idx, idx), insert_segments=((idx, self._meta),) ) sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/delimited.py000066400000000000000000000173671503426445100241610ustar00rootroot00000000000000"""Definitions for Grammar.""" from collections.abc import Sequence from typing import Optional, Union from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Ref from sqlfluff.core.parser.grammar.anyof import OneOf from sqlfluff.core.parser.grammar.noncode import NonCodeMatcher from sqlfluff.core.parser.match_algorithms import ( longest_match, skip_start_index_forward_to_code, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment class Delimited(OneOf): """Match an arbitrary number of elements separated by a delimiter. Note that if there are multiple elements passed in that they will be treated as different options of what can be delimited, rather than a sequence. """ equality_kwargs: tuple[str, ...] = ( "_elements", "optional", "allow_gaps", "delimiter", "allow_trailing", "terminator", "min_delimiters", ) optional_delimiter: bool = False def __init__( self, *args: Union[Matchable, str], delimiter: Union[Matchable, str] = Ref("CommaSegment"), allow_trailing: bool = False, terminators: Sequence[Union[Matchable, str]] = (), reset_terminators: bool = False, min_delimiters: int = 0, bracket_pairs_set: str = "bracket_pairs", allow_gaps: bool = True, optional: bool = False, ) -> None: """Initialize the class object with the provided arguments. Args: *args (Union[Matchable, str]): Options for elements between delimiters. This is treated as a set of options rather than a sequence. delimiter (Union[Matchable, str], optional): Delimiter used for parsing. Defaults to Ref("CommaSegment"). allow_trailing (bool, optional): Flag indicating whether trailing delimiters are allowed. Defaults to False. terminators (Sequence[Union[Matchable, str]], optional): Sequence of terminators used to match the end of a segment. Defaults to (). reset_terminators (bool, optional): Flag indicating whether terminators should be reset. Defaults to False. min_delimiters (Optional[int], optional): Minimum number of delimiters to match. Defaults to None. bracket_pairs_set (str, optional): Name of the bracket pairs set. Defaults to "bracket_pairs". allow_gaps (bool, optional): Flag indicating whether gaps between segments are allowed. Defaults to True. optional (bool, optional): Flag indicating whether the segment is optional. Defaults to False. """ if delimiter is None: # pragma: no cover raise ValueError("Delimited grammars require a `delimiter`") self.bracket_pairs_set = bracket_pairs_set self.delimiter = self._resolve_ref(delimiter) self.allow_trailing = allow_trailing # Setting min delimiters means we have to match at least this number self.min_delimiters = min_delimiters super().__init__( *args, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, ) def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match delimited sequences. 
To achieve this we flip flop between looking for content and looking for delimiters. Individual elements of this grammar are treated as _options_ not as a _sequence_. """ delimiters = 0 seeking_delimiter = False max_idx = len(segments) working_idx = idx working_match = MatchResult.empty_at(idx) delimiter_match: Optional[MatchResult] = None delimiter_matchers = [self.delimiter] # NOTE: If the configured delimiter is in `parse_context.terminators` then # treat is _only_ as a delimiter and not as a terminator. This happens # frequently during nested comma expressions. terminator_matchers = [ *self.terminators, *(t for t in parse_context.terminators if t not in delimiter_matchers), ] # If gaps aren't allowed, a gap (or non-code segment), acts like a terminator. if not self.allow_gaps: terminator_matchers.append(NonCodeMatcher()) while True: # If we're past the start and allowed gaps, work forward # through any gaps. if self.allow_gaps and working_idx > idx: working_idx = skip_start_index_forward_to_code(segments, working_idx) # Do we have anything left to match on? if working_idx >= max_idx: break # Check whether there is a terminator before checking for content with parse_context.deeper_match(name="Delimited-Term") as ctx: match, _ = longest_match( segments=segments, matchers=terminator_matchers, idx=working_idx, parse_context=ctx, ) if match: break # Then match for content/delimiter as appropriate. _push_terminators = [] if delimiter_matchers and not seeking_delimiter: _push_terminators = delimiter_matchers with parse_context.deeper_match( name="Delimited", push_terminators=_push_terminators ) as ctx: match, _ = longest_match( segments=segments, matchers=( delimiter_matchers if seeking_delimiter else self._elements ), idx=working_idx, parse_context=ctx, ) if not match: if seeking_delimiter and self.optional_delimiter: # Failed to match a delimiter, but it's optional, so loop again. seeking_delimiter = False continue else: # Failed to match next element, stop here. break # Otherwise we _did_ match. Handle it. if seeking_delimiter: # It's a delimiter delimiter_match = match else: # It's content. Add both the last delimiter and the content to the # working match. if delimiter_match: # NOTE: This should happen on every loop _except_ the first. delimiters += 1 working_match = working_match.append(delimiter_match) working_match = working_match.append(match) # Prep for going back around the loop... working_idx = match.matched_slice.stop seeking_delimiter = not seeking_delimiter parse_context.update_progress(working_idx) if self.allow_trailing and delimiter_match and not seeking_delimiter: delimiters += 1 working_match = working_match.append(delimiter_match) if delimiters < self.min_delimiters: return MatchResult.empty_at(idx) return working_match class OptionallyDelimited(Delimited): """Match a number of elements optionally separated by a delimiter. Note that if there are multiple elements passed in that they will be treated as different options of what can be delimited, rather than a sequence. """ optional_delimiter: bool = True sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/noncode.py000066400000000000000000000035261503426445100236360ustar00rootroot00000000000000"""A non-code matcher. This is a stub of a grammar, intended for use entirely as a terminator or similar alongside other matchers. 
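
For example, the `Delimited` grammar uses it as an extra terminator when
gaps aren't allowed (a snippet from `delimited.py` above):

.. code-block:: python

    if not self.allow_gaps:
        terminator_matchers.append(NonCodeMatcher())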
""" from collections.abc import Sequence from typing import Optional from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.parser.types import SimpleHintType class NonCodeMatcher(Matchable): """An object which behaves like a matcher to match non-code.""" def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> SimpleHintType: """This element doesn't work with simple.""" return None def is_optional(self) -> bool: # pragma: no cover """Not optional. NOTE: The NonCodeMatcher is only normally only used as a terminator or other special instance matcher. As such the `.simple()` method is unlikely to be used. """ return False def cache_key(self) -> str: """Get the cache key for the matcher. NOTE: In this case, this class is a bit of a singleton and so we don't need a unique UUID in the same way as other classes. """ return "non-code-matcher" def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match any starting non-code segments.""" matched_idx = idx for matched_idx in range(idx, len(segments)): if segments[matched_idx].is_code: break if matched_idx > idx: return MatchResult(matched_slice=slice(idx, matched_idx)) # Otherwise return no match return MatchResult.empty_at(idx) sqlfluff-3.4.2/src/sqlfluff/core/parser/grammar/sequence.py000066400000000000000000000611401503426445100240150ustar00rootroot00000000000000"""Sequence and Bracketed Grammars.""" # NOTE: We rename the typing.Sequence here so it doesn't collide # with the grammar class that we're defining. from collections.abc import Sequence as SequenceType from os import getenv from typing import Optional, Union, cast from sqlfluff.core.errors import SQLParseError from sqlfluff.core.helpers.slice import is_zero_slice from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import ( BaseGrammar, cached_method_for_parse_context, ) from sqlfluff.core.parser.grammar.conditional import Conditional from sqlfluff.core.parser.match_algorithms import ( skip_start_index_forward_to_code, skip_stop_index_backward_to_code, trim_to_terminator, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import ( BaseSegment, BracketedSegment, Dedent, Indent, MetaSegment, TemplateSegment, UnparsableSegment, ) from sqlfluff.core.parser.types import ParseMode, SimpleHintType def _flush_metas( pre_nc_idx: int, post_nc_idx: int, meta_buffer: SequenceType[type["MetaSegment"]], segments: SequenceType[BaseSegment], ) -> tuple[tuple[int, type[MetaSegment]], ...]: """Position any new meta segments relative to the non code section. It's important that we position the new meta segments appropriately around any templated sections and any whitespace so that indentation behaviour works as expected. There are four valid locations (which may overlap). 1. Before any non-code 2. Before the first block templated section (if it's a block opener). 3. After the last block templated section (if it's a block closer). 4. After any non code. If all the metas have a positive indent value then they should go in position 1 or 3, otherwise we're in position 2 or 4. Within each of those scenarios it depends on whether an appropriate block end exists. 
""" if all(m.indent_val >= 0 for m in meta_buffer): for _idx in range(post_nc_idx, pre_nc_idx, -1): if segments[_idx - 1].is_type("placeholder"): _seg = cast(TemplateSegment, segments[_idx - 1]) if _seg.block_type == "block_end": meta_idx = _idx else: meta_idx = pre_nc_idx break else: meta_idx = pre_nc_idx else: for _idx in range(pre_nc_idx, post_nc_idx): if segments[_idx].is_type("placeholder"): _seg = cast(TemplateSegment, segments[_idx]) if _seg.block_type == "block_start": meta_idx = _idx else: meta_idx = post_nc_idx break else: meta_idx = post_nc_idx return tuple((meta_idx, meta) for meta in meta_buffer) class Sequence(BaseGrammar): """Match a specific sequence of elements.""" supported_parse_modes = { ParseMode.STRICT, ParseMode.GREEDY, ParseMode.GREEDY_ONCE_STARTED, } test_env = getenv("SQLFLUFF_TESTENV", "") @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None ) -> SimpleHintType: """Does this matcher support a uppercase hash matching route? Sequence does provide this, as long as the *first* non-optional element does, *AND* and optional elements which preceded it also do. """ simple_raws: set[str] = set() simple_types: set[str] = set() for opt in self._elements: simple = opt.simple(parse_context=parse_context, crumbs=crumbs) if not simple: return None simple_raws.update(simple[0]) simple_types.update(simple[1]) if not opt.is_optional(): # We found our first non-optional element! return frozenset(simple_raws), frozenset(simple_types) # If *all* elements are optional AND simple, I guess it's also simple. return frozenset(simple_raws), frozenset(simple_types) def match( self, segments: SequenceType["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match a specific sequence of elements. When returning incomplete matches in one of the greedy parse modes, we don't return any new meta segments (whether from conditionals or otherwise). This is because we meta segments (typically indents) may only make sense in the context of a full sequence, as their corresponding pair may be later (and yet unrendered). Partial matches should however still return the matched (mutated) versions of any segments which _have_ been processed to provide better feedback to the user. """ start_idx = idx # Where did we start matched_idx = idx # Where have we got to max_idx = len(segments) # What is the limit insert_segments: tuple[tuple[int, type[MetaSegment]], ...] = () child_matches: tuple[MatchResult, ...] = () first_match = True # Metas with a negative indent value come AFTER # the whitespace. Positive or neutral come BEFORE. # HOWEVER: If one is already there, we must preserve # the order. This forced ordering is fine if there's # a positive followed by a negative in the sequence, # but if by design a positive arrives *after* a # negative then we should insert it after the positive # instead. # https://github.com/sqlfluff/sqlfluff/issues/3836 meta_buffer = [] if self.parse_mode == ParseMode.GREEDY: # In the GREEDY mode, we first look ahead to find a terminator # before matching any code. max_idx = trim_to_terminator( segments, idx, terminators=[*self.terminators, *parse_context.terminators], parse_context=parse_context, ) # Iterate elements for elem in self._elements: # 1. Handle any metas or conditionals. # We do this first so that it's the same whether we've run # out of segments or not. # If it's a conditional, evaluate it. 
# In both cases, we don't actually add them as inserts yet # because their position will depend on what types we accrue. if isinstance(elem, Conditional): # A conditional grammar will only ever return insertions. # If it's not enabled it returns an empty match. # NOTE: No deeper match here, it seemed unnecessary. _match = elem.match(segments, matched_idx, parse_context) # Rather than taking them as a match at this location, we # requeue them for addition later. for _, submatch in _match.insert_segments: meta_buffer.append(submatch) continue # If it's a raw meta, just add it to our list. elif isinstance(elem, type) and issubclass(elem, Indent): meta_buffer.append(elem) continue # 2. Match Segments. # At this point we know there are segments left to match # on and that the current element isn't a meta or conditional. _idx = matched_idx # TODO: Need test cases to cover overmatching non code properly # especially around optional elements. if self.allow_gaps: # First, if we're allowing gaps, consume any non-code. # NOTE: This won't consume from the end of a sequence # because this happens only in the run up to matching # another element. This is as designed. _idx = skip_start_index_forward_to_code(segments, matched_idx, max_idx) # Have we prematurely run out of segments? if _idx >= max_idx: # If the current element is optional, carry on. if elem.is_optional(): continue # Otherwise we have a problem. We've already consumed # any metas, optionals and conditionals. # This is a failed match because we couldn't complete # the sequence. if ( # In a strict mode, running out a segments to match # on means that we don't match anything. self.parse_mode == ParseMode.STRICT # If nothing has been matched _anyway_ then just bail out. or matched_idx == start_idx ): return MatchResult.empty_at(idx) # On any of the other modes (GREEDY or GREEDY_ONCE_STARTED) # we've effectively already claimed the segments, we've # just failed to match. In which case it's unparsable. insert_segments += tuple((matched_idx, meta) for meta in meta_buffer) return MatchResult( matched_slice=slice(start_idx, matched_idx), insert_segments=insert_segments, child_matches=child_matches, ).wrap( UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} after {segments[matched_idx - 1]}. Found nothing." ) }, ) # Match the current element against the current position. with parse_context.deeper_match(name=f"Sequence-@{idx}") as ctx: # HACK: Segment slicing hack to limit elem_match = elem.match(segments[:max_idx], _idx, ctx) # Did we fail to match? (totally or un-cleanly) if not elem_match: # If we can't match an element, we should ascertain whether it's # required. If so then fine, move on, but otherwise we should # crash out without a match. We have not matched the sequence. if elem.is_optional(): # Pass this one and move onto the next element. continue if self.parse_mode == ParseMode.STRICT: # In a strict mode, failing to match an element means that # we don't match anything. return MatchResult.empty_at(idx) if ( self.parse_mode == ParseMode.GREEDY_ONCE_STARTED and matched_idx == start_idx ): # If it's only greedy once started, and we haven't matched # anything yet, then we also don't match anything. return MatchResult.empty_at(idx) # On any of the other modes (GREEDY or GREEDY_ONCE_STARTED) # we've effectively already claimed the segments, we've # just failed to match. In which case it's unparsable. 
# Handle the simple case where we haven't even started the # sequence yet first: if matched_idx == start_idx: return MatchResult( matched_slice=slice(start_idx, max_idx), matched_class=UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} to start sequence. Found {segments[_idx]}" ) }, ) # Then handle the case of a partial match. _start_idx = skip_start_index_forward_to_code( segments, matched_idx, max_idx ) return MatchResult( # NOTE: We use the already matched segments in the # return value so that if any have already been # matched, the user can see that. Those are not # part of the unparsable section. # NOTE: The unparsable section is _included_ in the span # of the parent match. # TODO: Make tests to assert that child matches sit within # the parent!!! matched_slice=slice(start_idx, max_idx), insert_segments=insert_segments, child_matches=child_matches + ( MatchResult( # The unparsable section is just the remaining # segments we were unable to match from the # sequence. matched_slice=slice(_start_idx, max_idx), matched_class=UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} after {segments[matched_idx - 1]}. " f"Found {segments[_idx]}" ) }, ), ), ) # Flush any metas... insert_segments += _flush_metas(matched_idx, _idx, meta_buffer, segments) meta_buffer = [] # Otherwise we _do_ have a match. Update the position. matched_idx = elem_match.matched_slice.stop parse_context.update_progress(matched_idx) if first_match and self.parse_mode == ParseMode.GREEDY_ONCE_STARTED: # In the GREEDY_ONCE_STARTED mode, we first look ahead to find a # terminator after the first match (and only the first match). max_idx = trim_to_terminator( segments, matched_idx, terminators=[*self.terminators, *parse_context.terminators], parse_context=parse_context, ) first_match = False # How we deal with child segments depends on whether it had a matched # class or not. # If it did, then just add it as a child match and we're done. Move on. if elem_match.matched_class: child_matches += (elem_match,) continue # Otherwise, we un-nest the returned structure, adding any inserts and # children into the inserts and children of this sequence. child_matches += elem_match.child_matches insert_segments += elem_match.insert_segments # If we get to here, we've matched all of the elements (or skipped them). insert_segments += tuple((matched_idx, meta) for meta in meta_buffer) # Finally if we're in one of the greedy modes, and there's anything # left as unclaimed, mark it as unparsable. if self.parse_mode in (ParseMode.GREEDY, ParseMode.GREEDY_ONCE_STARTED): if max_idx > matched_idx: _idx = skip_start_index_forward_to_code(segments, matched_idx, max_idx) _stop_idx = skip_stop_index_backward_to_code(segments, max_idx, _idx) if _stop_idx > _idx: child_matches += ( MatchResult( # The unparsable section is just the remaining # segments we were unable to match from the # sequence. matched_slice=slice(_idx, _stop_idx), matched_class=UnparsableSegment, # TODO: We should come up with a better "expected" string # than this segment_kwargs={"expected": "Nothing here."}, ), ) # Match up to the end. matched_idx = _stop_idx return MatchResult( matched_slice=slice(start_idx, matched_idx), insert_segments=insert_segments, child_matches=child_matches, ) class Bracketed(Sequence): """Match if a bracketed sequence, with content that matches one of the elements. Note that the contents of the Bracketed Expression are treated as an expected sequence. 
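    For example (an illustrative sketch; the referenced segments are
    assumptions rather than definitions from this module):

    .. code-block:: python

        # Matches e.g. "(col_a, col_b)": bracket, delimited content, bracket.
        Bracketed(Delimited(Ref("ColumnReferenceSegment")))
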
    Changelog:

    - Post 0.3.2: Bracketed inherits from Sequence and anything within
      the `Bracketed()` expression is treated as a sequence. For the
      content of the Brackets, we call the `match()` method of the
      sequence grammar.
    - Post 0.1.0: Bracketed was separate from sequence, and the content
      of the expression was treated as options (like OneOf).
    - Pre 0.1.0: Bracketed inherited from Sequence and simply added
      brackets to that sequence.
    """

    def __init__(
        self,
        *args: Union[Matchable, str],
        bracket_type: str = "round",
        bracket_pairs_set: str = "bracket_pairs",
        start_bracket: Optional[Matchable] = None,
        end_bracket: Optional[Matchable] = None,
        allow_gaps: bool = True,
        optional: bool = False,
        parse_mode: ParseMode = ParseMode.STRICT,
    ) -> None:
        """Initialize the object.

        Args:
            *args (Union[Matchable, str]): Variable length arguments which
                can be of type 'Matchable' or 'str'.
            bracket_type (str, optional): The type of bracket used.
                Defaults to 'round'.
            bracket_pairs_set (str, optional): The set of bracket pairs.
                Defaults to 'bracket_pairs'.
            start_bracket (Optional[Matchable], optional): The start
                bracket. Defaults to None.
            end_bracket (Optional[Matchable], optional): The end bracket.
                Defaults to None.
            allow_gaps (bool, optional): Whether to allow gaps.
                Defaults to True.
            optional (bool, optional): Whether optional. Defaults to False.
            parse_mode (ParseMode, optional): The parse mode.
                Defaults to ParseMode.STRICT.
        """
        # Store the bracket type. NB: This is only
        # hydrated into segments at runtime.
        self.bracket_type = bracket_type
        self.bracket_pairs_set = bracket_pairs_set
        # Allow optional override for special bracket-like things
        self.start_bracket = start_bracket
        self.end_bracket = end_bracket
        super().__init__(
            *args,
            allow_gaps=allow_gaps,
            optional=optional,
            parse_mode=parse_mode,
        )

    @cached_method_for_parse_context
    def simple(
        self, parse_context: ParseContext, crumbs: Optional[tuple[str]] = None
    ) -> SimpleHintType:
        """Check if the matcher supports an uppercase hash matching route.

        Bracketed does this easily, we just look for the bracket.
        """
        start_bracket, _, _ = self.get_bracket_from_dialect(parse_context)
        return start_bracket.simple(parse_context=parse_context, crumbs=crumbs)

    def get_bracket_from_dialect(
        self, parse_context: ParseContext
    ) -> tuple[Matchable, Matchable, bool]:
        """Rehydrate the bracket segments in question."""
        bracket_pairs = parse_context.dialect.bracket_sets(self.bracket_pairs_set)
        for bracket_type, start_ref, end_ref, persists in bracket_pairs:
            if bracket_type == self.bracket_type:
                start_bracket = parse_context.dialect.ref(start_ref)
                end_bracket = parse_context.dialect.ref(end_ref)
                break
        else:  # pragma: no cover
            raise ValueError(
                "bracket_type {!r} not found in bracket_pairs of {!r} dialect.".format(
                    self.bracket_type, parse_context.dialect.name
                )
            )
        return start_bracket, end_bracket, persists

    def match(
        self,
        segments: SequenceType["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match a bracketed sequence of elements.

        This implementation matches a bracketed expression without
        recursion or seeking ahead. It simply matches: start bracket,
        content, end bracket in sequence.

        How the grammar behaves on different content depends on the
        `parse_mode`:

        - If the parse mode is `GREEDY`, this always returns a match if
          the opening and closing brackets are found. Anything unexpected
          within the brackets is marked as `unparsable`.
- If the parse mode is `STRICT`, then this only returns a match if the content of the brackets matches (and matches *completely*) one of the elements of the grammar. Otherwise no match. """ # Rehydrate the bracket segments in question. start_bracket, end_bracket, bracket_persists = self.get_bracket_from_dialect( parse_context ) start_bracket = self.start_bracket or start_bracket end_bracket = self.end_bracket or end_bracket # Try to match the first bracket with parse_context.deeper_match(name="Bracketed-StartBracket") as ctx: start_match = start_bracket.match(segments, idx, ctx) if not start_match: return MatchResult.empty_at(idx) # Starting position for content matching after the start bracket content_start_idx = start_match.matched_slice.stop # Skip whitespace if allowed if self.allow_gaps: content_start_idx = skip_start_index_forward_to_code( segments, content_start_idx ) # Match the content with the end bracket as a terminator with parse_context.deeper_match( name="Bracketed-Content", clear_terminators=True, push_terminators=[end_bracket], ) as ctx: content_match = super().match(segments, content_start_idx, ctx) # Get position after content for end bracket check gap_start = end_bracket_idx = content_match.matched_slice.stop # Skip whitespace if allowed if self.allow_gaps: end_bracket_idx = skip_start_index_forward_to_code( segments, end_bracket_idx ) # If there's a gap, add it as a child match child_matches: tuple[MatchResult, ...] = (start_match,) if not is_zero_slice(content_match.matched_slice): if content_match.matched_class: child_matches += (content_match,) else: child_matches += content_match.child_matches if end_bracket_idx > gap_start: gap_match = MatchResult( matched_slice=slice(gap_start, end_bracket_idx), matched_class=None, segment_kwargs={}, insert_segments=(), child_matches=(), ) child_matches += (gap_match,) # Check if we've run out of segments if end_bracket_idx >= len(segments): # No end bracket found if self.parse_mode == ParseMode.STRICT: return MatchResult.empty_at(idx) raise SQLParseError( "Couldn't find closing bracket for opening bracket.", segment=segments[start_match.matched_slice.start], ) # Try to match the end bracket with parse_context.deeper_match(name="Bracketed-EndBracket") as ctx: end_match = end_bracket.match(segments, end_bracket_idx, ctx) if not end_match: # No end bracket found if self.parse_mode == ParseMode.STRICT: return MatchResult.empty_at(idx) raise SQLParseError( "Couldn't find closing bracket for opening bracket.", segment=segments[start_match.matched_slice.start], ) # Add end bracket match child_matches += (end_match,) # Create the final match result result = MatchResult( matched_slice=slice(idx, end_match.matched_slice.stop), matched_class=None, segment_kwargs={}, insert_segments=( (start_match.matched_slice.stop, Indent), (end_match.matched_slice.start, Dedent), ), child_matches=child_matches, ) # Wrap in a BracketedSegment if needed if bracket_persists: result = result.wrap( BracketedSegment, segment_kwargs={ "start_bracket": (segments[idx],), "end_bracket": (segments[end_bracket_idx],), }, ) return result sqlfluff-3.4.2/src/sqlfluff/core/parser/helpers.py000066400000000000000000000034711503426445100222240ustar00rootroot00000000000000"""Helpers for the parser module.""" from typing import TYPE_CHECKING from sqlfluff.core.errors import SQLParseError if TYPE_CHECKING: from sqlfluff.core.parser.segments import BaseSegment # pragma: no cover def join_segments_raw(segments: tuple["BaseSegment", ...]) -> str: """Make a string from the joined 
`raw` attributes of an iterable of segments."""
    return "".join(s.raw for s in segments)


def check_still_complete(
    segments_in: tuple["BaseSegment", ...],
    matched_segments: tuple["BaseSegment", ...],
    unmatched_segments: tuple["BaseSegment", ...],
) -> bool:
    """Check that the segments in are the same as the segments out."""
    initial_str = join_segments_raw(segments_in)
    current_str = join_segments_raw(matched_segments + unmatched_segments)

    if initial_str != current_str:  # pragma: no cover
        segment = unmatched_segments[0] if unmatched_segments else None
        raise SQLParseError(
            f"Parse completeness check fail: {current_str!r} != {initial_str!r}",
            segment=segment,
        )
    return True


def trim_non_code_segments(
    segments: tuple["BaseSegment", ...],
) -> tuple[
    tuple["BaseSegment", ...], tuple["BaseSegment", ...], tuple["BaseSegment", ...]
]:
    """Take segments and split off surrounding non-code segments as appropriate.

    We use slices to avoid creating too many unnecessary tuples.
    """
    pre_idx = 0
    seg_len = len(segments)
    post_idx = seg_len

    if segments:
        # Trim the start
        while pre_idx < seg_len and not segments[pre_idx].is_code:
            pre_idx += 1

        # Trim the end
        while post_idx > pre_idx and not segments[post_idx - 1].is_code:
            post_idx -= 1

    return segments[:pre_idx], segments[pre_idx:post_idx], segments[post_idx:]
sqlfluff-3.4.2/src/sqlfluff/core/parser/lexer.py000066400000000000000000001066501503426445100217030ustar00rootroot00000000000000"""The code for the Lexer."""

import logging
from collections.abc import Iterator
from typing import Any, NamedTuple, Optional, Union
from uuid import UUID, uuid4

import regex

from sqlfluff.core.config import FluffConfig
from sqlfluff.core.errors import SQLLexError
from sqlfluff.core.helpers.slice import is_zero_slice, offset_slice, to_tuple
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.segments import (
    BaseSegment,
    Dedent,
    EndOfFile,
    Indent,
    MetaSegment,
    RawSegment,
    TemplateLoop,
    TemplateSegment,
    UnlexableSegment,
)
from sqlfluff.core.templaters import TemplatedFile
from sqlfluff.core.templaters.base import TemplatedFileSlice

# Instantiate the lexer logger
lexer_logger = logging.getLogger("sqlfluff.lexer")


class BlockTracker:
    """This is an object for keeping track of templating blocks.

    Using the .enter() and .exit() methods on opening and closing blocks,
    we can match up tags of the same level so that later it's easier to
    treat them the same way in the linting engine.

    In case looping means that we encounter the same block more than once,
    we cache uuids against their source location so that if we try to
    re-enter the block again, it will get the same uuid on the second pass.
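
    A minimal sketch of the expected calling pattern (the slices here are
    illustrative):

    .. code-block:: python

        tracker = BlockTracker()
        tracker.enter(slice(10, 25))  # on an opening block tag
        uuid = tracker.top()          # uuid of the enclosing block
        tracker.exit()                # on the matching closing tag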
""" _stack: list[UUID] = [] _map: dict[tuple[int, int], UUID] = {} def enter(self, src_slice: slice) -> None: """Add a block to the stack.""" key = to_tuple(src_slice) uuid = self._map.get(key, None) if not uuid: uuid = uuid4() self._map[key] = uuid lexer_logger.debug( " Entering block stack @ %s: %s (fresh)", src_slice, uuid, ) else: lexer_logger.debug( " Entering block stack @ %s: %s (cached)", src_slice, uuid, ) self._stack.append(uuid) def exit(self) -> None: """Pop a block from the stack.""" uuid = self._stack.pop() lexer_logger.debug( " Exiting block stack: %s", uuid, ) def top(self) -> UUID: """Get the uuid on top of the stack.""" return self._stack[-1] class LexedElement(NamedTuple): """An element matched during lexing.""" raw: str matcher: "StringLexer" class TemplateElement(NamedTuple): """A LexedElement, bundled with it's position in the templated file.""" raw: str template_slice: slice matcher: "StringLexer" @classmethod def from_element( cls, element: LexedElement, template_slice: slice ) -> "TemplateElement": """Make a TemplateElement from a LexedElement.""" return cls( raw=element.raw, template_slice=template_slice, matcher=element.matcher ) def to_segment( self, pos_marker: PositionMarker, subslice: Optional[slice] = None ) -> RawSegment: """Create a segment from this lexed element.""" return self.matcher.construct_segment( self.raw[subslice] if subslice else self.raw, pos_marker=pos_marker ) class LexMatch(NamedTuple): """A class to hold matches from the Lexer.""" forward_string: str elements: list[LexedElement] def __bool__(self) -> bool: """A LexMatch is truthy if it contains a non-zero number of matched elements.""" return len(self.elements) > 0 LexerType = Union["RegexLexer", "StringLexer"] class StringLexer: """This singleton matcher matches strings exactly. This is the simplest usable matcher, but it also defines some of the mechanisms for more complicated matchers, which may simply override the `_match` function rather than the public `match` function. This acts as the base class for matchers. """ def __init__( self, name: str, template: str, segment_class: type[RawSegment], subdivider: Optional[LexerType] = None, trim_post_subdivide: Optional[LexerType] = None, segment_kwargs: Optional[dict[str, Any]] = None, ) -> None: self.name = name self.template = template self.segment_class = segment_class self.subdivider = subdivider self.trim_post_subdivide = trim_post_subdivide self.segment_kwargs = segment_kwargs or {} self.__post_init__() def __repr__(self) -> str: return f"<{self.__class__.__name__}: {self.name}>" def __post_init__(self) -> None: """Optional post-init method called after __init__(). Designed for subclasses to use. """ pass def _match(self, forward_string: str) -> Optional[LexedElement]: """The private match function. Just look for a literal string.""" if forward_string.startswith(self.template): return LexedElement(self.template, self) else: return None def search(self, forward_string: str) -> Optional[tuple[int, int]]: """Use string methods to find a substring.""" loc = forward_string.find(self.template) if loc >= 0: return loc, loc + len(self.template) else: return None def _trim_match(self, matched_str: str) -> list[LexedElement]: """Given a string, trim if we are allowed to. Returns: :obj:`tuple` of LexedElement """ elem_buff: list[LexedElement] = [] content_buff = "" str_buff = matched_str if self.trim_post_subdivide: while str_buff: # Iterate through subdividing as appropriate trim_pos = self.trim_post_subdivide.search(str_buff) # No match? 
                # Break
                if not trim_pos:
                    break
                # Start match?
                elif trim_pos[0] == 0:
                    elem_buff.append(
                        LexedElement(
                            str_buff[: trim_pos[1]],
                            self.trim_post_subdivide,
                        )
                    )
                    str_buff = str_buff[trim_pos[1] :]
                # End Match?
                elif trim_pos[1] == len(str_buff):
                    elem_buff += [
                        LexedElement(
                            content_buff + str_buff[: trim_pos[0]],
                            self,
                        ),
                        LexedElement(
                            str_buff[trim_pos[0] : trim_pos[1]],
                            self.trim_post_subdivide,
                        ),
                    ]
                    content_buff, str_buff = "", ""
                # Mid Match? (carry on)
                else:
                    content_buff += str_buff[: trim_pos[1]]
                    str_buff = str_buff[trim_pos[1] :]

        # Do we have anything left? (or did nothing happen)
        if content_buff + str_buff:
            elem_buff.append(
                LexedElement(content_buff + str_buff, self),
            )
        return elem_buff

    def _subdivide(self, matched: LexedElement) -> list[LexedElement]:
        """Given a string, subdivide if we are allowed to.

        Returns:
            :obj:`list` of LexedElement
        """
        # Do we have to subdivide?
        if self.subdivider:
            # Yes subdivision
            elem_buff: list[LexedElement] = []
            str_buff = matched.raw
            while str_buff:
                # Iterate through subdividing as appropriate
                div_pos = self.subdivider.search(str_buff)
                if div_pos:
                    # Found a division
                    trimmed_elems = self._trim_match(str_buff[: div_pos[0]])
                    div_elem = LexedElement(
                        str_buff[div_pos[0] : div_pos[1]], self.subdivider
                    )
                    elem_buff += trimmed_elems + [div_elem]
                    str_buff = str_buff[div_pos[1] :]
                else:
                    # No more division matches. Trim?
                    trimmed_elems = self._trim_match(str_buff)
                    elem_buff += trimmed_elems
                    break
            return elem_buff
        else:
            return [matched]

    def match(self, forward_string: str) -> LexMatch:
        """Given a string, match what we can and return the rest.

        Returns:
            :obj:`LexMatch`
        """
        if len(forward_string) == 0:  # pragma: no cover
            raise ValueError("Unexpected empty string!")
        matched = self._match(forward_string)

        if matched:
            # Handle potential subdivision elsewhere.
            new_elements = self._subdivide(matched)

            return LexMatch(
                forward_string[len(matched.raw) :],
                new_elements,
            )
        else:
            return LexMatch(forward_string, [])

    def construct_segment(self, raw: str, pos_marker: PositionMarker) -> RawSegment:
        """Construct a segment using the given class and properties.

        Unless an override `type` is provided in the `segment_kwargs`,
        it is assumed that the `name` of the lexer is designated as the
        intended `type` of the segment.
        """
        # NOTE: Using a private attribute here feels a bit wrong.
        _segment_class_types = self.segment_class._class_types
        _kwargs = self.segment_kwargs
        assert not (
            "type" in _kwargs and "instance_types" in _kwargs
        ), f"Cannot set both `type` and `instance_types` in segment kwargs: {_kwargs}"
        if "type" in _kwargs:
            # TODO: At some point we should probably deprecate this API and only
            # allow setting `instance_types`.
            assert _kwargs["type"]
            _kwargs["instance_types"] = (_kwargs.pop("type"),)
        elif "instance_types" not in _kwargs and self.name not in _segment_class_types:
            _kwargs["instance_types"] = (self.name,)
        return self.segment_class(raw=raw, pos_marker=pos_marker, **_kwargs)


class RegexLexer(StringLexer):
    """This RegexLexer matches based on regular expressions."""

    def __post_init__(self) -> None:
        """Handle setup for RegexLexer."""
        # We might want to configure this at some point, but for now, newlines
        # do get matched by `.`
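        # (That behaviour comes from the DOTALL flag below; without it `.`
        # would stop at newlines, breaking multi-line tokens such as block
        # comments.)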
flags = regex.DOTALL self._compiled_regex = regex.compile(self.template, flags) def _match(self, forward_string: str) -> Optional[LexedElement]: """Use regexes to match chunks.""" match = self._compiled_regex.match(forward_string) if match: # We can only match strings with length match_str = match.group(0) if match_str: return LexedElement(match_str, self) else: # pragma: no cover lexer_logger.warning( f"Zero length Lex item returned from {self.name!r}. Report this as " "a bug." ) return None def search(self, forward_string: str) -> Optional[tuple[int, int]]: """Use regex to find a substring.""" match = self._compiled_regex.search(forward_string) if match: # We can only match strings with length if match.group(0): return match.span() else: # pragma: no cover lexer_logger.warning( f"Zero length Lex item returned from {self.name!r}. Report this as " "a bug." ) return None def _handle_zero_length_slice( tfs: TemplatedFileSlice, next_tfs: Optional[TemplatedFileSlice], block_stack: BlockTracker, templated_file: TemplatedFile, add_indents: bool, ) -> Iterator[MetaSegment]: """Generate placeholders and loop segments from a zero length slice. This method checks for: 1. Backward jumps (inserting :obj:`TemplateLoop`). 2. Forward jumps (inserting :obj:`TemplateSegment`). 3. Blocks (inserting :obj:`TemplateSegment`). 4. Unrendered template elements(inserting :obj:`TemplateSegment`). For blocks and loops, :obj:`Indent` and :obj:`Dedent` segments are yielded around them as appropriate. NOTE: block_stack is _mutated_ by this method. """ assert is_zero_slice(tfs.templated_slice) # First check for jumps. Backward initially, because in the backward # case we don't render the element we find first. # That requires being able to look past to the next element. if tfs.slice_type.startswith("block") and next_tfs: # Look for potential backward jump if next_tfs.source_slice.start < tfs.source_slice.start: lexer_logger.debug(" Backward jump detected. Inserting Loop Marker") # If we're here remember we're on the tfs which is the block end # i.e. not the thing we want to render. pos_marker = PositionMarker.from_point( tfs.source_slice.start, tfs.templated_slice.start, templated_file, ) if add_indents: yield Dedent( is_template=True, pos_marker=pos_marker, ) yield TemplateLoop(pos_marker=pos_marker, block_uuid=block_stack.top()) if add_indents: yield Indent( is_template=True, pos_marker=pos_marker, ) # Move on to the next templated slice. Don't render this directly. return # Then handle blocks (which aren't jumps backward) if tfs.slice_type.startswith("block"): # It's a block. Yield a placeholder with potential indents. # Update block stack or add indents if tfs.slice_type == "block_start": block_stack.enter(tfs.source_slice) elif add_indents and tfs.slice_type in ("block_end", "block_mid"): yield Dedent( is_template=True, pos_marker=PositionMarker.from_point( tfs.source_slice.start, tfs.templated_slice.start, templated_file, ), # NOTE: We mark the dedent with the block uuid too. block_uuid=block_stack.top(), ) yield TemplateSegment.from_slice( tfs.source_slice, tfs.templated_slice, block_type=tfs.slice_type, templated_file=templated_file, block_uuid=block_stack.top(), ) # Update block stack or add indents if tfs.slice_type == "block_end": block_stack.exit() elif add_indents and tfs.slice_type in ("block_start", "block_mid"): yield Indent( is_template=True, pos_marker=PositionMarker.from_point( tfs.source_slice.stop, tfs.templated_slice.stop, templated_file, ), # NOTE: We mark the indent with the block uuid too. 
block_uuid=block_stack.top(), ) # Before we move on, we might have a _forward_ jump to the next # element. That element can handle itself, but we'll add a # placeholder for it here before we move on. if next_tfs and next_tfs.source_slice.start > tfs.source_slice.stop: # We do so extract the string. placeholder_str = templated_file.source_str[ tfs.source_slice.stop : next_tfs.source_slice.start ] # Trim it if it's too long to show. if len(placeholder_str) >= 20: placeholder_str = ( f"... [{len(placeholder_str)} unused template characters] ..." ) lexer_logger.debug(" Forward jump detected. Inserting placeholder") yield TemplateSegment( pos_marker=PositionMarker( slice(tfs.source_slice.stop, next_tfs.source_slice.start), # Zero slice in the template. tfs.templated_slice, templated_file, ), source_str=placeholder_str, block_type="skipped_source", ) # Move on return # Always return the slice, even if the source slice was also zero length. Some # templaters might want to pass through totally zero length slices as a way of # marking locations in the middle of templated output. yield TemplateSegment.from_slice( tfs.source_slice, tfs.templated_slice, tfs.slice_type, templated_file, ) def _iter_segments( lexed_elements: list[TemplateElement], templated_file: TemplatedFile, add_indents: bool = True, ) -> Iterator[RawSegment]: # An index to track where we've got to in the templated file. tfs_idx = 0 # We keep a map of previous block locations in case they re-occur. block_stack = BlockTracker() templated_file_slices = templated_file.sliced_file # Now work out source slices, and add in template placeholders. for idx, element in enumerate(lexed_elements): # We're working through elements in the rendered file. # When they enter this code they don't have a position in the source. # We already have a map of how templated elements map to the source file # so we work through them to work out what's going on. In theory we can # step through the two lists in lock step. # i.e. we worked through the lexed elements, but check off the templated # file slices as we go. # Output the slice as we lex. lexer_logger.debug(" %s: %s. [tfs_idx = %s]", idx, element, tfs_idx) # All lexed elements, by definition, have a position in the templated # file. That means we've potentially got zero-length elements we also # need to consider. We certainly need to consider templated slices # at tfs_idx. But we should consider some others after that which we # might also need to consider. # A lexed element is either a literal in the raw file or the result # (or part of the result) of a template placeholder. We don't make # placeholders for any variables which return a non-zero length of # code. We do add placeholders for others. # The amount of the current element which has already been consumed. consumed_element_length = 0 # The position in the source which we still need to yield from. stashed_source_idx = None for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx): lexer_logger.debug(" %s: %s", tfs_idx, tfs) # Is it a zero slice? if is_zero_slice(tfs.templated_slice): next_tfs = ( templated_file_slices[tfs_idx + 1] if tfs_idx + 1 < len(templated_file_slices) else None ) yield from _handle_zero_length_slice( tfs, next_tfs, block_stack, templated_file, add_indents ) continue if tfs.slice_type == "literal": # There's a literal to deal with here. Yield as much as we can. # Can we cover this whole lexed element with the current templated # slice without moving on? 
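                # (For a literal slice, the templated text is a verbatim copy
                # of the source text, so this fixed offset maps templated
                # positions back to source positions within the slice.)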
tfs_offset = tfs.source_slice.start - tfs.templated_slice.start # NOTE: Greater than OR EQUAL, to include the case of it matching # length exactly. if element.template_slice.stop <= tfs.templated_slice.stop: lexer_logger.debug( " Consuming whole from literal. Existing Consumed: %s", consumed_element_length, ) # If we have a stashed start use that. Otherwise infer start. if stashed_source_idx is not None: slice_start = stashed_source_idx else: slice_start = ( element.template_slice.start + consumed_element_length + tfs_offset ) yield element.to_segment( pos_marker=PositionMarker( slice( slice_start, element.template_slice.stop + tfs_offset, ), element.template_slice, templated_file, ), subslice=slice(consumed_element_length, None), ) # If it was an exact match, consume the templated element too. if element.template_slice.stop == tfs.templated_slice.stop: tfs_idx += 1 # In any case, we're done with this element. Move on break elif element.template_slice.start == tfs.templated_slice.stop: # Did we forget to move on from the last tfs and there's # overlap? # NOTE: If the rest of the logic works, this should never # happen. lexer_logger.debug(" NOTE: Missed Skip") # pragma: no cover continue # pragma: no cover else: # This means that the current lexed element spans across # multiple templated file slices. lexer_logger.debug(" Consuming whole spanning literal") # This almost certainly means there's a templated element # in the middle of a whole lexed element. # What we do here depends on whether we're allowed to split # lexed elements. This is basically only true if it's whitespace. # NOTE: We should probably make this configurable on the # matcher object, but for now we're going to look for the # name of the lexer. if element.matcher.name == "whitespace": # We *can* split it! # Consume what we can from this slice and move on. lexer_logger.debug( " Consuming split whitespace from literal. " "Existing Consumed: %s", consumed_element_length, ) if stashed_source_idx is not None: raise NotImplementedError( # pragma: no cover "Found literal whitespace with stashed idx!" ) incremental_length = ( tfs.templated_slice.stop - element.template_slice.start ) yield element.to_segment( pos_marker=PositionMarker( slice( element.template_slice.start + consumed_element_length + tfs_offset, tfs.templated_slice.stop + tfs_offset, ), element.template_slice, templated_file, ), # Subdivide the existing segment. subslice=offset_slice( consumed_element_length, incremental_length, ), ) consumed_element_length += incremental_length continue else: # We can't split it. We're going to end up yielding a segment # which spans multiple slices. Stash the type, and if we haven't # set the start yet, stash it too. lexer_logger.debug(" Spilling over literal slice.") if stashed_source_idx is None: stashed_source_idx = ( element.template_slice.start + tfs_offset ) lexer_logger.debug( " Stashing a source start. %s", stashed_source_idx ) continue elif tfs.slice_type in ("templated", "block_start", "escaped"): # Found a templated slice. Does it have length in the templated file? # If it doesn't, then we'll pick it up next. if not is_zero_slice(tfs.templated_slice): # If it's a block_start. Append to the block stack. # NOTE: This is rare, but call blocks do occasionally # have length (and so don't get picked up by # _handle_zero_length_slice) if tfs.slice_type == "block_start": block_stack.enter(tfs.source_slice) # Is our current element totally contained in this slice? 
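                    # For example (illustrative numbers): an element covering
                    # templated[5:8] is wholly contained by a templated slice
                    # covering templated[4:10], so we can yield it here in full
                    # and stay on the same slice.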
if element.template_slice.stop <= tfs.templated_slice.stop: lexer_logger.debug(" Contained templated slice.") # Yes it is. Add lexed element with source slices as the whole # span of the source slice for the file slice. # If we've got an existing stashed source start, use that # as the start of the source slice. if stashed_source_idx is not None: slice_start = stashed_source_idx else: slice_start = ( tfs.source_slice.start + consumed_element_length ) yield element.to_segment( pos_marker=PositionMarker( slice( slice_start, # The end in the source is the end of the templated # slice. We can't subdivide any better. tfs.source_slice.stop, ), element.template_slice, templated_file, ), subslice=slice(consumed_element_length, None), ) # If it was an exact match, consume the templated element too. if element.template_slice.stop == tfs.templated_slice.stop: tfs_idx += 1 # Carry on to the next lexed element break # We've got an element which extends beyond this templated slice. # This means that a _single_ lexed element claims both some # templated elements and some non-templated elements. That could # include all kinds of things (and from here we don't know what # else is yet to come, comments, blocks, literals etc...). # In the `literal` version of this code we would consider # splitting the literal element here, but in the templated # side we don't. That's because the way that templated tokens # are lexed, means that they should arrive "pre-split". else: # Stash the source idx for later when we do make a segment. lexer_logger.debug(" Spilling over templated slice.") if stashed_source_idx is None: stashed_source_idx = tfs.source_slice.start lexer_logger.debug( " Stashing a source start as lexed element spans " "over the end of a template slice. %s", stashed_source_idx, ) # Move on to the next template slice continue raise NotImplementedError( f"Unable to process slice: {tfs}" ) # pragma: no cover # If templated elements are left, yield them. # We can assume they're all zero length if we're here. for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx): next_tfs = ( templated_file_slices[tfs_idx + 1] if tfs_idx + 1 < len(templated_file_slices) else None ) yield from _handle_zero_length_slice( tfs, next_tfs, block_stack, templated_file, add_indents ) class Lexer: """The Lexer class actually does the lexing step.""" def __init__( self, config: Optional[FluffConfig] = None, last_resort_lexer: Optional[StringLexer] = None, dialect: Optional[str] = None, ): if config and dialect: raise ValueError( # pragma: no cover "Lexer does not support setting both `config` and `dialect`." ) # Use the provided config or create one from the dialect. self.config = config or FluffConfig.from_kwargs(dialect=dialect) # Store the matchers self.lexer_matchers = self.config.get("dialect_obj").get_lexer_matchers() self.last_resort_lexer = last_resort_lexer or RegexLexer( "", r"[^\t\n\ ]*", UnlexableSegment, ) def lex( self, raw: Union[str, TemplatedFile] ) -> tuple[tuple[BaseSegment, ...], list[SQLLexError]]: """Take a string or TemplatedFile and return segments. If we fail to match the *whole* string, then we must have found something that we cannot lex. If that happens we should package it up as unlexable and keep track of the exceptions. """ # Make sure we've got a string buffer and a template # regardless of what was passed in. 
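        # A sketch of typical usage (illustrative only; assumes the core
        # "ansi" dialect is available):
        #
        #     >>> lexer = Lexer(dialect="ansi")
        #     >>> segments, violations = lexer.lex("SELECT 1")
        #
        # `segments` is a tuple of RawSegment (terminated by an EndOfFile
        # marker) and `violations` is a list of any SQLLexError found.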
if isinstance(raw, str): template = TemplatedFile.from_string(raw) str_buff = raw else: template = raw str_buff = str(template) # Lex the string to get a tuple of LexedElement element_buffer: list[LexedElement] = [] while True: res = self.lex_match(str_buff, self.lexer_matchers) element_buffer += res.elements if res.forward_string: resort_res = self.last_resort_lexer.match(res.forward_string) if not resort_res: # pragma: no cover # If we STILL can't match, then just panic out. raise SQLLexError( "Fatal. Unable to lex characters: {0!r}".format( res.forward_string[:10] + "..." if len(res.forward_string) > 9 else res.forward_string ) ) str_buff = resort_res.forward_string element_buffer += resort_res.elements else: # pragma: no cover TODO? break # Map tuple LexedElement to list of TemplateElement. # This adds the template_slice to the object. templated_buffer = self.map_template_slices(element_buffer, template) # Turn lexed elements into segments. segments: tuple[RawSegment, ...] = self.elements_to_segments( templated_buffer, template ) # Generate any violations violations: list[SQLLexError] = self.violations_from_segments(segments) return segments, violations def elements_to_segments( self, elements: list[TemplateElement], templated_file: TemplatedFile ) -> tuple[RawSegment, ...]: """Convert a tuple of lexed elements into a tuple of segments.""" lexer_logger.info("Elements to Segments.") add_indents = self.config.get("template_blocks_indent", "indentation") # Delegate to _iter_segments segment_buffer: list[RawSegment] = list( _iter_segments(elements, templated_file, add_indents) ) # Add an end of file marker segment_buffer.append( EndOfFile( pos_marker=( segment_buffer[-1].pos_marker.end_point_marker() if segment_buffer else PositionMarker.from_point(0, 0, templated_file) ) ) ) # Convert to tuple before return return tuple(segment_buffer) @staticmethod def violations_from_segments(segments: tuple[RawSegment, ...]) -> list[SQLLexError]: """Generate any lexing errors for any unlexables.""" violations = [] for segment in segments: if segment.is_type("unlexable"): violations.append( SQLLexError( "Unable to lex characters: {!r}".format( segment.raw[:10] + "..." if len(segment.raw) > 9 else segment.raw ), pos=segment.pos_marker, ) ) return violations @staticmethod def lex_match(forward_string: str, lexer_matchers: list[StringLexer]) -> LexMatch: """Iteratively match strings using the selection of submatchers.""" elem_buff: list[LexedElement] = [] while True: if len(forward_string) == 0: return LexMatch(forward_string, elem_buff) for matcher in lexer_matchers: res = matcher.match(forward_string) if res.elements: # If we have new segments then whoop! elem_buff += res.elements forward_string = res.forward_string # Cycle back around again and start with the top # matcher again. break else: # We've got so far, but now can't match. Return return LexMatch(forward_string, elem_buff) @staticmethod def map_template_slices( elements: list[LexedElement], template: TemplatedFile ) -> list[TemplateElement]: """Create a tuple of TemplateElement from a tuple of LexedElement. This adds slices in the templated file to the original lexed elements. We'll need this to work out the position in the source file. 
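
        For example (illustrative): lexing "SELECT 1" produces the elements
        ("SELECT", " ", "1"), which would be assigned the template slices
        0:6, 6:7 and 7:8 respectively, since ``idx`` accumulates the length
        of each raw element in turn.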
""" idx = 0 templated_buff: list[TemplateElement] = [] for element in elements: template_slice = offset_slice(idx, len(element.raw)) idx += len(element.raw) templated_buff.append(TemplateElement.from_element(element, template_slice)) if ( template.templated_str[template_slice] != element.raw ): # pragma: no cover raise ValueError( "Template and lexed elements do not match. This should never " f"happen {element.raw!r} != " f"{template.templated_str[template_slice]!r}" ) return templated_buff sqlfluff-3.4.2/src/sqlfluff/core/parser/markers.py000066400000000000000000000227651503426445100222350ustar00rootroot00000000000000"""Implements the PositionMarker class. This class is a construct to keep track of positions within a file. """ from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Optional from sqlfluff.core.helpers.slice import zero_slice if TYPE_CHECKING: from sqlfluff.core.templaters import TemplatedFile # pragma: no cover @dataclass(frozen=True) class PositionMarker: """A reference to a position in a file. Things to note: - This combines the previous functionality of FilePositionMarker and EnrichedFilePositionMarker. Additionally it contains a reference to the original templated file. - It no longer explicitly stores a line number or line position in the source or template. This is extrapolated from the templated file as required. - Positions in the source and template are with slices and therefore identify ranges. - Positions within the fixed file are identified with a line number and line position, which identify a point. - Arithmetic comparisons are on the location in the fixed file. """ source_slice: slice templated_slice: slice templated_file: "TemplatedFile" # If not set, these will be initialised in the post init. working_line_no: int = -1 working_line_pos: int = -1 def __post_init__(self) -> None: # If the working position has not been explicitly set # then infer it from the position in the templated file. # This is accurate up until the point that any fixes have # been applied. 
if self.working_line_no == -1 or self.working_line_pos == -1: line_no, line_pos = self.templated_position() # Use the base method because we're working with a frozen class object.__setattr__(self, "working_line_no", line_no) object.__setattr__(self, "working_line_pos", line_pos) def __str__(self) -> str: return self.to_source_string() def __gt__(self, other: "PositionMarker") -> bool: return self.working_loc > other.working_loc def __lt__(self, other: "PositionMarker") -> bool: return self.working_loc < other.working_loc def __ge__(self, other: "PositionMarker") -> bool: return self.working_loc >= other.working_loc def __le__(self, other: "PositionMarker") -> bool: return self.working_loc <= other.working_loc def __eq__(self, other: Any) -> bool: if not isinstance(other, PositionMarker): return False # pragma: no cover return self.working_loc == other.working_loc @property def working_loc(self) -> tuple[int, int]: """Location tuple for the working position.""" return self.working_line_no, self.working_line_pos def working_loc_after(self, raw: str) -> tuple[int, int]: """Location tuple for the working position.""" return self.infer_next_position( raw, self.working_line_no, self.working_line_pos, ) @classmethod def from_point( cls, source_point: int, templated_point: int, templated_file: "TemplatedFile", **kwargs: int, # kwargs can only contain working_line positions ) -> "PositionMarker": """Convenience method for creating point markers.""" return cls( zero_slice(source_point), zero_slice(templated_point), templated_file, **kwargs, ) @classmethod def from_points( cls, start_point_marker: "PositionMarker", end_point_marker: "PositionMarker", ) -> "PositionMarker": """Construct a position marker from the section between two points.""" return cls( slice( start_point_marker.source_slice.start, end_point_marker.source_slice.stop, ), slice( start_point_marker.templated_slice.start, end_point_marker.templated_slice.stop, ), # The templated file references from the point markers # should be the same, so we're just going to pick one. # TODO: If we assert that in this function, it's actually not # true - but preliminary debugging on this did not reveal why. start_point_marker.templated_file, # Line position should be of the _start_ of the section. 
            start_point_marker.working_line_no,
            start_point_marker.working_line_pos,
        )

    @classmethod
    def from_child_markers(
        cls, *markers: Optional["PositionMarker"]
    ) -> "PositionMarker":
        """Create a parent marker from its children."""
        source_slice = slice(
            min(m.source_slice.start for m in markers if m),
            max(m.source_slice.stop for m in markers if m),
        )
        templated_slice = slice(
            min(m.templated_slice.start for m in markers if m),
            max(m.templated_slice.stop for m in markers if m),
        )
        templated_files = {m.templated_file for m in markers if m}
        if len(templated_files) != 1:  # pragma: no cover
            raise ValueError("Attempted to make a parent marker from multiple files.")
        templated_file = templated_files.pop()
        return cls(source_slice, templated_slice, templated_file)

    def source_position(self) -> tuple[int, int]:
        """Return the line and position of this marker in the source."""
        return self.templated_file.get_line_pos_of_char_pos(
            self.source_slice.start, source=True
        )

    def templated_position(self) -> tuple[int, int]:
        """Return the line and position of this marker in the templated file."""
        return self.templated_file.get_line_pos_of_char_pos(
            self.templated_slice.start, source=False
        )

    @property
    def line_no(self) -> int:
        """Return the line number in the source."""
        return self.source_position()[0]

    @property
    def line_pos(self) -> int:
        """Return the line position in the source."""
        return self.source_position()[1]

    def to_source_string(self) -> str:
        """Make a formatted string of this position."""
        line, pos = self.source_position()
        return f"[L:{line:3d}, P:{pos:3d}]"

    def start_point_marker(self) -> "PositionMarker":
        """Get a point marker from the start."""
        return self.__class__.from_point(
            self.source_slice.start,
            self.templated_slice.start,
            templated_file=self.templated_file,
            # Start points also pass on the working position.
            working_line_no=self.working_line_no,
            working_line_pos=self.working_line_pos,
        )

    def end_point_marker(self) -> "PositionMarker":
        """Get a point marker from the end."""
        return self.__class__.from_point(
            self.source_slice.stop,
            self.templated_slice.stop,
            templated_file=self.templated_file,
        )

    @staticmethod
    def slice_is_point(test_slice: slice) -> bool:
        """Is this slice a point?"""
        is_point: bool = test_slice.start == test_slice.stop
        return is_point

    def is_point(self) -> bool:
        """A marker is a point if it has zero length in templated and source file."""
        return self.slice_is_point(self.source_slice) and self.slice_is_point(
            self.templated_slice
        )

    @staticmethod
    def infer_next_position(raw: str, line_no: int, line_pos: int) -> tuple[int, int]:
        """Use the raw string provided to infer the position of the next marker.

        NB: Line position is 1-indexed.
        """
        # No content?
        if not raw:
            return line_no, line_pos
        split = raw.split("\n")
        return (
            line_no + len(split) - 1,
            line_pos + len(raw) if len(split) == 1 else len(split[-1]) + 1,
        )

    def with_working_position(self, line_no: int, line_pos: int) -> "PositionMarker":
        """Copy this position and replace the working position."""
        return self.__class__(
            source_slice=self.source_slice,
            templated_slice=self.templated_slice,
            templated_file=self.templated_file,
            working_line_no=line_no,
            working_line_pos=line_pos,
        )

    def is_literal(self) -> bool:
        """Infer literalness from context.

        is_literal should return True if a fix can be applied across this
        area in the templated file while being confident that the fix is
        still appropriate in the source file. This obviously applies to
        any slices which are the same in the source and the templated
        files.
Slices which are zero-length in the source are also "literal" because they can't be "broken" by any fixes, because they don't exist in the source. This includes meta segments and any segments added during the fixing process. This value is used for: - Ignoring linting errors in templated sections. - Whether `_iter_templated_patches` can return without recursing. - Whether certain rules (such as JJ01) are triggered. """ return self.templated_file.is_source_slice_literal(self.source_slice) def source_str(self) -> str: """Returns the string in the source at this position.""" return self.templated_file.source_str[self.source_slice] def to_source_dict(self) -> dict[str, int]: """Serialise the source position.""" return self.templated_file.source_position_dict_from_slice(self.source_slice) sqlfluff-3.4.2/src/sqlfluff/core/parser/match_algorithms.py000066400000000000000000000672571503426445100241230ustar00rootroot00000000000000"""Matching algorithms. These are mostly extracted from the body of either BaseSegment or BaseGrammar to un-bloat those classes. """ from collections import defaultdict from collections.abc import Sequence from typing import DefaultDict, Optional, cast from sqlfluff.core.errors import SQLParseError from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, BracketedSegment, Dedent, Indent def skip_start_index_forward_to_code( segments: Sequence[BaseSegment], start_idx: int, max_idx: Optional[int] = None ) -> int: """Move an index forward through segments until segments[index] is code.""" if max_idx is None: max_idx = len(segments) for _idx in range(start_idx, max_idx): if segments[_idx].is_code: break else: _idx = max_idx return _idx def skip_stop_index_backward_to_code( segments: Sequence[BaseSegment], stop_idx: int, min_idx: int = 0 ) -> int: """Move an index backward through segments until segments[index - 1] is code.""" for _idx in range(stop_idx, min_idx, -1): if segments[_idx - 1].is_code: break else: _idx = min_idx return _idx def first_trimmed_raw(seg: BaseSegment) -> str: """Trim whitespace off a whole element raw. Used as a helper function in BaseGrammar._look_ahead_match. For existing compound segments, we should assume that within that segment, things are internally consistent, that means rather than enumerating all the individual segments of a longer one we just dump out the whole segment, but splitting off the first element separated by whitespace. This is a) faster and also b) prevents some really horrible bugs with bracket matching. See https://github.com/sqlfluff/sqlfluff/issues/433 This fetches the _whole_ raw of a potentially compound segment to match against, trimming off any whitespace. This is the most efficient way to get at the first element of a potentially longer segment. 
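
    For example (illustrative): for a segment whose ``raw_upper`` is
    ``"SELECT * FROM TBL"`` this returns ``"SELECT"``, while for a
    whitespace-only raw it returns an empty string.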
""" s = seg.raw_upper.split(maxsplit=1) return s[0] if s else "" def first_non_whitespace( segments: Sequence[BaseSegment], start_idx: int = 0, ) -> Optional[tuple[str, frozenset[str]]]: """Return the upper first non-whitespace segment in the iterable.""" for i in range(start_idx, len(segments)): _segment = segments[i] if _segment.first_non_whitespace_segment_raw_upper: return ( _segment.first_non_whitespace_segment_raw_upper, _segment.class_types, ) return None def prune_options( options: Sequence[Matchable], segments: Sequence[BaseSegment], parse_context: ParseContext, start_idx: int = 0, ) -> list[Matchable]: """Use the simple matchers to prune which options to match on. Works in the context of a grammar making choices between options such as AnyOf or the content of Delimited. """ available_options = [] prune_buff = [] # Find the first code element to match against. first = first_non_whitespace(segments, start_idx=start_idx) # If we don't have an appropriate option to match against, # then we should just return immediately. Nothing will match. if not first: return list(options) first_raw, first_types = first for opt in options: simple = opt.simple(parse_context=parse_context) if simple is None: # This element is not simple, we have to do a # full match with it... available_options.append(opt) continue # Otherwise we have a simple option, so let's use # it for pruning. simple_raws, simple_types = simple matched = False # We want to know if the first meaningful element of the str_buff # matches the option, based on either simple _raw_ matching or # simple _type_ matching. # Match Raws if simple_raws and first_raw in simple_raws: # If we get here, it's matched the FIRST element of the string buffer. available_options.append(opt) matched = True # Match Types if simple_types and not matched and first_types.intersection(simple_types): # If we get here, it's matched the FIRST element of the string buffer. available_options.append(opt) matched = True if not matched: # Ditch this option, the simple match has failed prune_buff.append(opt) continue return available_options def longest_match( segments: Sequence[BaseSegment], matchers: Sequence[Matchable], idx: int, parse_context: ParseContext, ) -> tuple[MatchResult, Optional[Matchable]]: """Return longest match from a selection of matchers. Priority is: 1. The first total match, which means we've matched all available segments or that we've hit a valid terminator. 2. The longest clean match. 3. The longest unclean match. 4. An empty match. If for #2 and #3, there's a tie for the longest match, priority is given to the first in the iterable. Returns: `tuple` of (match_object, matcher). NOTE: This matching method is the workhorse of the parser. It drives the functionality of the AnyOf & AnyNumberOf grammars, and therefore by extension the degree of branching within the parser. It's performance can be monitored using the `parse_stats` object on the context. The things which determine the performance of this method are: 1. Pruning. This method uses `prune_options()` to filter down which matchable options proceed to the full matching step. Ideally only very few do and this can handle the majority of the filtering. 2. Caching. This method uses the parse cache (`check_parse_cache` and `put_parse_cache`) on the ParseContext to speed up repetitive matching operations. As we make progress through a file there will often not be a cached value already available, and so this cache has the greatest impact within poorly optimised (or highly nested) expressions. 
    3. Terminators. By default, _all_ the options are evaluated,
       and then the longest (the `best`) is returned. The exception
       to this is when the match is `complete` (i.e. it matches _all_
       the remaining segments), or when a match is followed by a
       valid terminator (i.e. a segment which indicates that the
       match is _effectively_ complete). In these latter scenarios,
       the _first_ complete or terminated match is returned. In the
       ideal case, the only matcher which is evaluated should be
       the "correct" one, and then no others should be attempted.
    """
    max_idx = len(segments)  # What is the limit

    # No matchers or no segments? No match.
    if not matchers or idx == max_idx:
        return MatchResult.empty_at(idx), None

    # Prune available options, based on their simple representation
    # for efficiency.
    # TODO: Given we don't allow trimming here we should be able to remove
    # some complexity from this function so that we just take the first
    # segment. Maybe that's just small potatoes though.
    available_options = prune_options(
        matchers, segments, parse_context=parse_context, start_idx=idx
    )

    # If no available options, return no match.
    if not available_options:
        return MatchResult.empty_at(idx), None

    terminators = parse_context.terminators or ()
    terminated = False
    # At parse time we should be able to count on there being a position marker.
    _cache_position = segments[idx].pos_marker
    assert _cache_position

    # Characterise this location.
    # Initial segment raw, loc, type and length of segment series.
    loc_key = (
        segments[idx].raw,
        _cache_position.working_loc,
        segments[idx].get_type(),
        # The reason that the max_idx is part of the cache key is to
        # account for scenarios where the end of the segment sequence
        # has been trimmed and we don't want to assume we can match
        # things which have now been trimmed off.
        max_idx,
    )

    best_match = MatchResult.empty_at(idx)
    best_matcher: Optional[Matchable] = None
    # Iterate at this position across all the matchers.
    for matcher_idx, matcher in enumerate(available_options):
        # Check parse cache.
        matcher_key = matcher.cache_key()
        res_match: Optional[MatchResult] = parse_context.check_parse_cache(
            loc_key, matcher_key
        )
        # If cache miss, match fresh and repopulate.
        # NOTE: By comparing with None, "failed" matches can still be used
        # from cache. They are falsy, but not None.
        if res_match is None:
            # Match fresh if no cache hit.
            res_match = matcher.match(segments, idx, parse_context)
            # Cache it for later for performance.
            parse_context.put_parse_cache(loc_key, matcher_key, res_match)

        # Have we matched all available segments?
        if res_match and res_match.matched_slice.stop == max_idx:
            return res_match, matcher

        # Is this the best match so far?
        if res_match.is_better_than(best_match):
            best_match = res_match
            best_matcher = matcher

        # If we've got a terminator next, it's an opportunity to
        # end earlier, and claim an effectively "complete" match.
        # NOTE: This means that by specifying terminators, we can
        # significantly increase performance.
        if matcher_idx == len(available_options) - 1:
            # If it's the last option - no need to check terminators.
            # We're going to end anyway, so we can skip that step.
            terminated = True
            break
        elif terminators:
            _next_code_idx = skip_start_index_forward_to_code(
                segments, best_match.matched_slice.stop
            )
            if _next_code_idx == len(segments):
                # We've run out of segments, we're effectively terminated.
terminated = True break for terminator in terminators: terminator_match: MatchResult = terminator.match( segments, _next_code_idx, parse_context ) if terminator_match: terminated = True break if terminated: break # Return the best we found. return best_match, best_matcher def next_match( segments: Sequence[BaseSegment], idx: int, matchers: Sequence[Matchable], parse_context: ParseContext, ) -> tuple[MatchResult, Optional[Matchable]]: """Look ahead for matches beyond the first element of the segments list. NOTE: Returns *only clean* matches. This function also contains the performance improved hash-matching approach to searching for matches, which should significantly improve performance. Prioritise the first match, and if multiple match at the same point the longest. If two matches of the same length match at the same time, then it's the first in the iterable of matchers. Returns: `tuple` of (match_object, matcher). """ max_idx = len(segments) # Have we got any segments to match on? if idx >= max_idx: # No? Return empty. return MatchResult.empty_at(idx), None # This next section populates a lookup of the simple matchers. # TODO: This should really be populated on instantiation of the # host grammar. # NOTE: We keep the index of the matcher so we can prioritise # later. Mathchers themselves are obtained through direct lookup. raw_simple_map: DefaultDict[str, list[int]] = defaultdict(list) type_simple_map: DefaultDict[str, list[int]] = defaultdict(list) for _idx, matcher in enumerate(matchers): simple = matcher.simple(parse_context=parse_context) if not simple: # pragma: no cover # NOTE: For all bundled dialects, this clause is true, but until # the RegexMatcher is completely deprecated (and therefore that # `.simple()` must provide a result), it is still _possible_ # to end up here. raise NotImplementedError( "All matchers passed to `._next_match()` are " "assumed to have a functioning `.simple()` option. " "In a future release it will be compulsory for _all_ " "matchables to implement `.simple()`. Please report " "this as a bug on GitHub along with your current query " f"and dialect.\nProblematic matcher: {matcher}" ) for simple_raw in simple[0]: raw_simple_map[simple_raw].append(_idx) for simple_type in simple[1]: type_simple_map[simple_type].append(_idx) # TODO: There's an optimisation we could do here where we don't iterate # through them one by one, but we use a lookup which we pre-calculate # at the start of the whole matching process. for _idx in range(idx, max_idx): seg = segments[_idx] _matcher_idxs = [] # Raw matches first. _matcher_idxs.extend(raw_simple_map[first_trimmed_raw(seg)]) # Type matches second. _type_overlap = seg.class_types.intersection(type_simple_map.keys()) for _type in _type_overlap: _matcher_idxs.extend(type_simple_map[_type]) # If no matchers to work with, continue if not _matcher_idxs: continue # If we do have them, sort them and then do the full match. _matcher_idxs.sort() for _matcher_idx in _matcher_idxs: _matcher = matchers[_matcher_idx] _match = _matcher.match(segments, _idx, parse_context) # NOTE: We're only going to consider clean matches from this method. if _match: # This will do. Return. return _match, _matcher # If we finish the loop, we didn't find a match. Return empty. 
return MatchResult.empty_at(idx), None def resolve_bracket( segments: Sequence[BaseSegment], opening_match: MatchResult, opening_matcher: Matchable, start_brackets: list[Matchable], end_brackets: list[Matchable], bracket_persists: list[bool], parse_context: ParseContext, nested_match: bool = False, ) -> MatchResult: """Recursive match to resolve an opened bracket. If `nested_match` is True, then inner bracket matches are also returned as child matches. Otherwise only the outer match is returned. Returns when the opening bracket is resolved. """ assert opening_match assert opening_matcher in start_brackets type_idx = start_brackets.index(opening_matcher) matched_idx = opening_match.matched_slice.stop child_matches: tuple[MatchResult, ...] = (opening_match,) while True: # Look for the next relevant bracket. match, matcher = next_match( segments, matched_idx, matchers=start_brackets + end_brackets, parse_context=parse_context, ) # Was it a failed match? if not match: # If it was failed, then this is a problem, we started an # opening bracket but never found the end. raise SQLParseError( "Couldn't find closing bracket for opening bracket.", segment=segments[opening_match.matched_slice.start], ) # Did we find a closing bracket? if matcher in end_brackets: closing_idx = end_brackets.index(matcher) if closing_idx == type_idx: _persists = bracket_persists[type_idx] # We're closing the opening type. # Add the closing bracket match to the result as a child. child_matches += (match,) _match = MatchResult( # Slice should span from the first to the second. slice(opening_match.matched_slice.start, match.matched_slice.stop), child_matches=child_matches, insert_segments=( (opening_match.matched_slice.stop, Indent), (match.matched_slice.start, Dedent), ), ) # NOTE: This is how we exit the loop. if not _persists: return _match return _match.wrap( BracketedSegment, segment_kwargs={ # TODO: This feels a bit weird. # Could we infer it on construction? "start_bracket": (segments[opening_match.matched_slice.start],), "end_bracket": (segments[match.matched_slice.start],), }, ) # Otherwise we're closing an unexpected type. This is less good. raise SQLParseError( f"Found unexpected end bracket!, " f"was expecting {end_brackets[type_idx]}, " f"but got {matcher}", segment=segments[match.matched_slice.stop - 1], ) # Otherwise we found a new opening bracket. assert matcher in start_brackets # Recurse into a new bracket matcher. inner_match = resolve_bracket( segments, opening_match=match, opening_matcher=matcher, start_brackets=start_brackets, end_brackets=end_brackets, bracket_persists=bracket_persists, parse_context=parse_context, ) # This will either error, or only return once we're back out of the # bracket which started it. The return value will be a match result for # the inner BracketedSegment. We ignore the inner and don't return it # as we only want to mutate the outer brackets. matched_idx = inner_match.matched_slice.stop if nested_match: child_matches += (inner_match,) # Head back around the loop again to see if we can find the end... def next_ex_bracket_match( segments: Sequence[BaseSegment], idx: int, matchers: Sequence[Matchable], parse_context: ParseContext, bracket_pairs_set: str = "bracket_pairs", ) -> tuple[MatchResult, Optional[Matchable], tuple[MatchResult, ...]]: """Same as `next_match` but with bracket counting. NB: Given we depend on `next_match` we can also utilise the same performance optimisations which are implemented there. 
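
    For example (illustrative): when searching for a comma delimiter in
    ``GREATEST(a, b), c``, the comma inside the brackets must not match;
    the bracket counting here ensures that only the comma after the
    closing bracket is returned.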
bracket_pairs_set: Allows specific segments to override the available bracket pairs. See the definition of "angle_bracket_pairs" in the BigQuery dialect for additional context on why this exists. Returns: `tuple` of (match_object, matcher, `tuple` of inner bracketed matches). """ max_idx = len(segments) # Have we got any segments to match on? if idx >= max_idx: # No? Return empty. return MatchResult.empty_at(idx), None, () # Get hold of the bracket matchers from the dialect, and append them # to the list of matchers. We get them from the relevant set on the # dialect. _, start_bracket_refs, end_bracket_refs, bracket_persists = zip( *parse_context.dialect.bracket_sets(bracket_pairs_set) ) # These are matchables, probably StringParsers. start_brackets = [ parse_context.dialect.ref(seg_ref) for seg_ref in start_bracket_refs ] end_brackets = [parse_context.dialect.ref(seg_ref) for seg_ref in end_bracket_refs] bracket_matchers = start_brackets + end_brackets _matchers = list(matchers) + bracket_matchers # Make some buffers matched_idx = idx child_matches: tuple[MatchResult, ...] = () while True: match, matcher = next_match( segments, matched_idx, _matchers, parse_context=parse_context, ) # Did we match? If so, is it a target or a bracket? if not match or matcher in matchers: # If there's either no match, or we hit a target, just pass the result. # NOTE: This method returns the same as `next_match` in a "no match" # scenario, which is why we can simplify like this. return match, matcher, child_matches # If it's a _closing_ bracket, then we also return no match. if matcher in end_brackets: # Unexpected end bracket! Return no match. return MatchResult.empty_at(idx), None, () # Otherwise we found a opening bracket before finding a target. # We now call the recursive function because there might be more # brackets inside. assert matcher, "If there's a match, there should be a matcher." # NOTE: This only returns on resolution of the opening bracket. bracket_match = resolve_bracket( segments, opening_match=match, opening_matcher=matcher, start_brackets=start_brackets, end_brackets=end_brackets, bracket_persists=cast(list[bool], bracket_persists), parse_context=parse_context, # Do keep the nested brackets in case the calling method # wants to use them. nested_match=True, ) matched_idx = bracket_match.matched_slice.stop child_matches += (bracket_match,) # Head back around the loop and keep looking. def greedy_match( segments: Sequence[BaseSegment], idx: int, parse_context: ParseContext, matchers: Sequence[Matchable], include_terminator: bool = False, nested_match: bool = False, ) -> MatchResult: """Match anything up to some defined terminator.""" working_idx = idx # NOTE: _stop_idx is always reset below after matching before reference # but mypy is unhappy unless we set a default value here. _stop_idx = idx # NOTE: child_matches is always tracked, but it will only ever have # _content_ if `nested_match` is True. It otherwise remains an empty tuple. child_matches: tuple[MatchResult, ...] = () while True: with parse_context.deeper_match(name="GreedyUntil") as ctx: match, matcher, inner_matches = next_ex_bracket_match( segments, idx=working_idx, matchers=matchers, parse_context=ctx, ) if nested_match: child_matches += inner_matches # No match? That means we've not found any terminators. if not match: # Claim everything left. 
return MatchResult(slice(idx, len(segments)), child_matches=child_matches) _start_idx = match.matched_slice.start _stop_idx = match.matched_slice.stop # NOTE: For some terminators we only count them if they're preceded # by whitespace, and others we don't. In principle, we aim that for # _keywords_ we require whitespace, and for symbols we don't. # We do this by looking at the `simple` method of the returned # matcher, and if it's entirely alphabetical (as defined by # str.isalpha()) then we infer that it's a keyword, and therefore # _does_ require whitespace before it. assert matcher, f"Match without matcher: {match}" _simple = matcher.simple(parse_context) assert _simple, f"Terminators require a simple method: {matcher}" _strings, _types = _simple # NOTE: Typed matchers aren't common here, but we assume that they # _don't_ require preceding whitespace. # Do we need to enforce whitespace preceding? if all(_s.isalpha() for _s in _strings) and not _types: allowable_match = False # NOTE: Edge case - if we're matching the _first_ element (i.e. that # there are no `pre` segments) then we _do_ allow it. # TODO: Review whether this is as designed, but it is consistent # with past behaviour. if _start_idx == working_idx: allowable_match = True # Work backward through previous segments looking for whitespace. for _idx in range(_start_idx, working_idx, -1): if segments[_idx - 1].is_meta: continue elif segments[_idx - 1].is_type("whitespace", "newline"): allowable_match = True break else: # Found something other than metas and whitespace. break # If this match isn't preceded by whitespace and that is # a requirement, then we can't use it. Carry on... if not allowable_match: working_idx = _stop_idx # Loop around, don't return yet continue # Otherwise, it's allowable! break # Return without any child matches or inserts. Greedy Matching # shouldn't be used for mutation. if include_terminator: return MatchResult(slice(idx, _stop_idx), child_matches=child_matches) # If we're _not_ including the terminator, we need to work back a little. # If it's preceded by any non-code, we can't claim that. # Work backwards so we don't include it. _stop_idx = skip_stop_index_backward_to_code( segments, match.matched_slice.start, idx ) # If we went all the way back to `idx`, then ignore the _stop_idx. # There isn't any code in the gap _anyway_ - so there's no point trimming. if idx == _stop_idx: # TODO: I don't really like this rule, it feels like a hack. # Review whether it should be here. return MatchResult( slice(idx, match.matched_slice.start), child_matches=child_matches ) # Otherwise return the trimmed version. return MatchResult(slice(idx, _stop_idx), child_matches=child_matches) def trim_to_terminator( segments: Sequence[BaseSegment], idx: int, terminators: Sequence[Matchable], parse_context: ParseContext, ) -> int: """Trim forward segments based on terminators. Given a forward set of segments, trim elements from `segments` to `tail` by using a `greedy_match()` to identify terminators. If no terminators are found, no change is made. NOTE: This method is designed replace a `max_idx`: .. code-block:: python max_idx = _trim_to_terminator(segments[:max_idx], idx, ...) """ # Is there anything left to match on. if idx >= len(segments): # Nope. No need to trim. return len(segments) # NOTE: If there is a terminator _immediately_, then greedy # match will appear to not match (because there's "nothing" before # the terminator). 
To resolve that case, we first match immediately # on the terminators and handle that case explicitly if it occurs. with parse_context.deeper_match(name="Trim-GreedyA-@0") as ctx: pruned_terms = prune_options( terminators, segments, start_idx=idx, parse_context=ctx ) for term in pruned_terms: if term.match(segments, idx, ctx): # One matched immediately. Claim everything to the tail. return idx # If the above case didn't match then we proceed as expected. with parse_context.deeper_match( name="Trim-GreedyB-@0", track_progress=False ) as ctx: term_match = greedy_match( segments, idx, parse_context=ctx, matchers=terminators, ) # Greedy match always returns. # Skip backward from wherever it got to (either a terminator, or # the end of the sequence). return skip_stop_index_backward_to_code( segments, term_match.matched_slice.stop, idx ) sqlfluff-3.4.2/src/sqlfluff/core/parser/match_result.py000066400000000000000000000266761503426445100232700ustar00rootroot00000000000000"""Source for the MatchResult class. This should be the default response from any `match` method. """ from collections import defaultdict from collections.abc import Sequence from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, DefaultDict, Optional, Union from sqlfluff.core.helpers.slice import slice_length from sqlfluff.core.parser.markers import PositionMarker if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments import BaseSegment, MetaSegment def _get_point_pos_at_idx( segments: Sequence["BaseSegment"], idx: int ) -> PositionMarker: if idx < len(segments): _next_pos = segments[idx].pos_marker assert _next_pos, "Segments passed to .apply() should all have position." return _next_pos.start_point_marker() else: _prev_pos = segments[idx - 1].pos_marker assert _prev_pos, "Segments passed to .apply() should all have position." return _prev_pos.end_point_marker() @dataclass(frozen=True) class MatchResult: """This should be the default response from any `match` method. All references and indices are in reference to a single root tuple of segments. This result contains enough information to actually create the nested tree structure, but shouldn't actually contain any new segments itself. That means keeping information about: 1. Ranges of segments which should be included segments to be created. 2. References to the segment classes which we would create. 3. Information about any _new_ segments to add in the process, such as MetaSegment classes. Given the segments aren't yet "nested", the structure of this result *will* need to be nested, ideally self nested. In the case of finding unparsable locations, we should return the "best" result, referencing the furthest that we got. That allows us to identify those parsing issues and create UnparsableSegment classes later. """ # Slice in the reference tuple matched_slice: slice # Reference to the kind of segment to create. # NOTE: If this is null, it means we've matched a sequence of segments # but not yet created a container to put them in. matched_class: Optional[type["BaseSegment"]] = None # kwargs to pass to the segment on creation. segment_kwargs: dict[str, Any] = field(default_factory=dict) # Types and indices to add in new segments (they'll be meta segments) insert_segments: tuple[tuple[int, type["MetaSegment"]], ...] = field( default_factory=tuple ) # Child segment matches (this is the recursive bit) child_matches: tuple["MatchResult", ...] 
= field(default_factory=tuple) def __post_init__(self) -> None: """Do some lightweight validation post instantiation.""" if not slice_length(self.matched_slice): # Zero length matches with inserts are allowed, but not with # matched_class or child_matches. assert not self.matched_class, ( "Tried to create zero length MatchResult with " "`matched_class`. This MatchResult is invalid. " f"{self.matched_class} @{self.matched_slice}" ) assert not self.child_matches, ( "Tried to create zero length MatchResult with " "`child_matches`. Is this allowed?! " f"Result: {self}" ) def __len__(self) -> int: return slice_length(self.matched_slice) def __bool__(self) -> bool: """A MatchResult is truthy if it has length or inserts.""" return len(self) > 0 or bool(self.insert_segments) def stringify(self, indent: str = "") -> str: """Pretty print a match for debugging.""" prefix = f"Match ({self.matched_class}): {self.matched_slice}" buffer = prefix for key, value in self.segment_kwargs.items(): buffer += f"\n {indent}-{key}: {value!r}" if self.insert_segments: for idx, insert in self.insert_segments: buffer += f"\n {indent}+{idx}: {insert}" if self.child_matches: for child in self.child_matches: buffer += f"\n {indent}+{child.stringify(indent + ' ')}" return buffer @classmethod def empty_at(cls, idx: int) -> "MatchResult": """Create an empty match at a particular index.""" return cls(slice(idx, idx)) def is_better_than(self, other: "MatchResult") -> bool: """A match is better compared on length.""" return len(self) > len(other) def append( self, other: "MatchResult", insert_segments: tuple[tuple[int, type["MetaSegment"]], ...] = (), ) -> "MatchResult": """Combine another subsequent match onto this one. NOTE: Because MatchResult is frozen, this returns a new match. """ # If the current match is empty, just return the other. if not len(self) and not self.insert_segments: return other # If the same is true of the other, just return self. if not len(other) and not other.insert_segments: return self # pragma: no cover # Otherwise the two must follow each other. # NOTE: A gap is allowed, but is assumed to be included in the # match. assert self.matched_slice.stop <= other.matched_slice.start new_slice = slice(self.matched_slice.start, other.matched_slice.stop) child_matches: tuple[MatchResult, ...] = () for match in (self, other): # If it's got a matched class, add it as a child. if match.matched_class: child_matches += (match,) # Otherwise incorporate else: # Note: We're appending to the optional insert segments # provided in the kwargs. insert_segments += match.insert_segments child_matches += match.child_matches return MatchResult( new_slice, insert_segments=insert_segments, child_matches=child_matches, ) def wrap( self, outer_class: type["BaseSegment"], insert_segments: tuple[tuple[int, type["MetaSegment"]], ...] = (), segment_kwargs: dict[str, Any] = {}, ) -> "MatchResult": """Wrap this result with an outer class. NOTE: Because MatchResult is frozen, this returns a new match. """ # If it's a failed (empty) match, then just pass straight # through. It's not valid to add a matched class to an empty # result. if not slice_length(self.matched_slice) and not self.insert_segments: assert not insert_segments, "Cannot wrap inserts onto an empty match." return self child_matches: tuple[MatchResult, ...] if self.matched_class: # If the match already has a class, then make # the current one and child match and clear the # other buffers. 
child_matches = (self,) else: # Otherwise flatten the existing match into # the new one. insert_segments = self.insert_segments + insert_segments child_matches = self.child_matches # Otherwise flatten the content return MatchResult( self.matched_slice, matched_class=outer_class, segment_kwargs=segment_kwargs, insert_segments=insert_segments, child_matches=child_matches, ) def apply(self, segments: tuple["BaseSegment", ...]) -> tuple["BaseSegment", ...]: """Actually this match to segments to instantiate. This turns a theoretical match into a nested structure of segments. We handle child segments _first_ so that we can then include them when creating the parent. That means sequentially working through the children and any inserts. If there are overlaps, then we have a problem, and we should abort. """ result_segments: tuple["BaseSegment", ...] = () if not slice_length(self.matched_slice): assert not self.matched_class, ( "Tried to apply zero length MatchResult with " "`matched_class`. This MatchResult is invalid. " f"{self.matched_class} @{self.matched_slice}" ) assert not self.child_matches, ( "Tried to apply zero length MatchResult with " "`child_matches`. This MatchResult is invalid. " f"Result: {self}" ) if self.insert_segments: assert segments, "Cannot insert segments without reference position." for idx, seg in self.insert_segments: assert idx == self.matched_slice.start, ( f"Tried to insert @{idx} outside of matched " f"slice {self.matched_slice}" ) _pos = _get_point_pos_at_idx(segments, idx) result_segments += (seg(pos_marker=_pos),) return result_segments assert len(segments) >= self.matched_slice.stop, ( f"Matched slice ({self.matched_slice}) sits outside segment " f"bounds: {len(segments)}" ) # Which are the locations we need to care about? trigger_locs: DefaultDict[ int, list[Union[MatchResult, type["MetaSegment"]]] ] = defaultdict(list) # Add the inserts first... for insert in self.insert_segments: trigger_locs[insert[0]].append(insert[1]) # ...and then the matches for match in self.child_matches: trigger_locs[match.matched_slice.start].append(match) # Then work through creating any subsegments. max_idx = self.matched_slice.start for idx in sorted(trigger_locs.keys()): # Have we passed any untouched segments? if idx > max_idx: # If so, add them in unchanged. result_segments += segments[max_idx:idx] max_idx = idx elif idx < max_idx: # pragma: no cover raise ValueError( "Segment skip ahead error. An outer match contains " "overlapping child matches. This MatchResult was " "wrongly constructed." ) # Then work through each of the triggers. for trigger in trigger_locs[idx]: # If it's a match, apply it. if isinstance(trigger, MatchResult): result_segments += trigger.apply(segments=segments) # Update the end slice. max_idx = trigger.matched_slice.stop continue # Otherwise it's a segment. # Get the location from the next segment unless there isn't one. _pos = _get_point_pos_at_idx(segments, idx) result_segments += (trigger(pos_marker=_pos),) # If we finish working through the triggers and there's # still something left, then add that too. 
if max_idx < self.matched_slice.stop: result_segments += segments[max_idx : self.matched_slice.stop] if not self.matched_class: return result_segments # Otherwise construct the subsegment new_seg: "BaseSegment" = self.matched_class.from_result_segments( result_segments, self.segment_kwargs ) return (new_seg,) sqlfluff-3.4.2/src/sqlfluff/core/parser/matchable.py000066400000000000000000000050531503426445100225000ustar00rootroot00000000000000"""The definition of a matchable interface.""" import copy from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Optional, TypeVar if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment T = TypeVar("T", bound="Matchable") class Matchable(ABC): """A base object defining the matching interface.""" # Matchables are also not meta unless otherwise defined is_meta = False @abstractmethod def is_optional(self) -> bool: """Return whether this element is optional.""" @abstractmethod def simple( self, parse_context: "ParseContext", crumbs: Optional[tuple[str, ...]] = None ) -> Optional[tuple[frozenset[str], frozenset[str]]]: """Try to obtain a simple response from the matcher. Returns: None - if not simple. Tuple of two sets of strings if simple. The first is a set of uppercase raw strings which would match. The second is a set of segment types that would match. NOTE: the crumbs kwarg is designed to be used by Ref to detect recursion. """ @abstractmethod def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> "MatchResult": """Match against this matcher.""" def copy(self: T, **kwargs: Any) -> T: # pragma: no cover """Copy this Matchable. Matchable objects are usually copied during dialect inheritance. One dialect might make a copy (usually with some modifications) to a dialect element of a parent dialect which it can then use itself. This provides a little more modularity in dialect definition. NOTE: This method on the base class is not usually used, as the base matchable doesn't have any options for customisation. It is more frequently used by grammar objects such as Sequence, which provide more options for customisation. Those grammar objects should redefine this method accordingly. """ return copy.copy(self) @abstractmethod def cache_key(self) -> str: """A string to use for cache keying. This string should be unique at the parsing stage such that if there has already been a match against this key for a set of segments, that we can reuse that match. """ sqlfluff-3.4.2/src/sqlfluff/core/parser/parser.py000066400000000000000000000063531503426445100220600ustar00rootroot00000000000000"""Defines the Parser class.""" from collections.abc import Sequence from typing import TYPE_CHECKING, Optional from sqlfluff.core.config import FluffConfig from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.helpers import check_still_complete if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments import BaseFileSegment, BaseSegment class Parser: """Instantiates parsed queries from a sequence of lexed raw segments.""" def __init__( self, config: Optional[FluffConfig] = None, dialect: Optional[str] = None ): if config and dialect: raise ValueError( # pragma: no cover "Parser does not support setting both `config` and `dialect`." ) # Use the provided config or create one from the dialect. 
self.config = config or FluffConfig.from_kwargs(dialect=dialect) self.RootSegment: type[BaseFileSegment] = self.config.get( "dialect_obj" ).get_root_segment() def parse( self, segments: Sequence["BaseSegment"], fname: Optional[str] = None, parse_statistics: bool = False, ) -> Optional["BaseSegment"]: """Parse a series of lexed tokens using the current dialect.""" if not segments: # pragma: no cover # This should normally never happen because there will usually # be an end_of_file segment. It would probably only happen in # api use cases. return None # NOTE: This is the only time we use the parse context not in the # context of a context manager. That's because it's the initial # instantiation. ctx = ParseContext.from_config(config=self.config) # Kick off parsing with the root segment. The BaseFileSegment has # a unique entry point to facilitate exactly this. All other segments # will use the standard .match() route. root = self.RootSegment.root_parse( tuple(segments), fname=fname, parse_context=ctx ) # Basic Validation, that we haven't dropped anything. check_still_complete(tuple(segments), (root,), ()) if parse_statistics: # pragma: no cover # NOTE: We use ctx.logger.warning here to output the statistics. # It's not particularly beautiful, but for the users who do utilise # this functionality, I don't think they mind. ¯\_(ツ)_/¯ # In the future, this clause might become unnecessary. ctx.logger.warning("==== Parse Statistics ====") for key in ctx.parse_stats: if key == "next_counts": continue ctx.logger.warning(f"{key}: {ctx.parse_stats[key]}") ctx.logger.warning("## Tokens following un-terminated matches") ctx.logger.warning( "Adding terminator clauses to catch these may improve performance." ) for key, val in sorted( ctx.parse_stats["next_counts"].items(), reverse=True, key=lambda item: item[1], ): ctx.logger.warning(f"{val}: {key!r}") ctx.logger.warning("==== End Parse Statistics ====") return root sqlfluff-3.4.2/src/sqlfluff/core/parser/parsers.py000066400000000000000000000265371503426445100222510ustar00rootroot00000000000000"""Individual segment parsers. Matchable objects which return individual segments. """ from abc import abstractmethod from collections.abc import Collection, Sequence from typing import Any, Callable, Optional from uuid import uuid4 import regex from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, RawSegment from sqlfluff.core.parser.types import SimpleHintType class BaseParser(Matchable): """An abstract class from which other Parsers should inherit.""" # Meta segments are handled separately. All Parser elements # are assumed to be not meta. is_meta: bool = False @abstractmethod def __init__( self, raw_class: type[RawSegment], type: Optional[str] = None, optional: bool = False, # The following kwargs are passed on to the segment: trim_chars: Optional[tuple[str, ...]] = None, casefold: Optional[Callable[[str], str]] = None, ) -> None: self.raw_class = raw_class # Store instance_types rather than just type to allow # for multiple possible types to be supported in derivative # classes. self._instance_types: tuple[str, ...] = (type or raw_class.type,) self.optional = optional self._trim_chars = trim_chars self.casefold = casefold # Generate a cache key self._cache_key = uuid4().hex def cache_key(self) -> str: """Get the cache key for this parser. For parsers, they're unique per-instance. 
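
        That means two otherwise identical parsers will still have
        distinct keys, because each instance generates a fresh
        ``uuid4().hex`` on construction.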
""" return self._cache_key def is_optional(self) -> bool: """Return whether this element is optional.""" return self.optional def segment_kwargs(self) -> dict[str, Any]: """Generates the segment_kwargs package for generating a matched segment.""" segment_kwargs: dict[str, Any] = {} if self._instance_types: segment_kwargs["instance_types"] = self._instance_types if self._trim_chars: segment_kwargs["trim_chars"] = self._trim_chars if self.casefold: segment_kwargs["casefold"] = self.casefold return segment_kwargs def _match_at(self, idx: int) -> MatchResult: """Construct a MatchResult at a given index. This is a helper function for reuse by other parsers. """ return MatchResult( matched_slice=slice(idx, idx + 1), matched_class=self.raw_class, segment_kwargs=self.segment_kwargs(), ) class TypedParser(BaseParser): """An object which matches and returns raw segments based on types.""" def __init__( self, template: str, raw_class: type[RawSegment], type: Optional[str] = None, optional: bool = False, trim_chars: Optional[tuple[str, ...]] = None, casefold: Optional[Callable[[str], str]] = None, ) -> None: """Initialize a new instance of the class. Args: template (str): The template type. raw_class (type[RawSegment]): The raw segment class. type (Optional[str]): The type of the instance. optional (bool): Whether the instance is optional. trim_chars (Optional[tuple[str, ...]]): The characters to trim. casefold: (Optional[Callable[[str],str]]): The default casing used. Returns: None """ # NB: the template in this case is the _target_ type. # The type kwarg is the eventual type. self.template = template # Pre-calculate the appropriate frozenset for matching later. self._target_types = frozenset((template,)) super().__init__( raw_class=raw_class, optional=optional, trim_chars=trim_chars, casefold=casefold, ) # NOTE: We override the instance types after initialising the base # class. We want to ensure that re-matching is possible by ensuring that # the `type` pre-matching is still present post-match even if it's not # part of the natural type hierarchy for the new `raw_class`. # The new `type` becomes the "primary" type, but the template will still # be part of the resulting `class_types`. # We do this here rather than in the base class to keep the dialect-facing # API the same. self._instance_types: tuple[str, ...] = () # Primary type if set. if type is not None: self._instance_types += (type,) # New root types if type != raw_class.type: self._instance_types += (raw_class.type,) # Template type (if it's not in the subclasses of the raw_class). if not raw_class.class_is_type(template): self._instance_types += (template,) def __repr__(self) -> str: """Return a string representation of the TypedParser object.""" return f"" def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> SimpleHintType: """Check if the matcher supports uppercase hash matching route. The TypedParser segment does not support matching against raw strings, but it does support matching against types. Matching is done against both the template and the resulting type, to support re-matching. Args: parse_context (ParseContext): The parse context. crumbs (Optional[tuple[str, ...]], optional): The crumbs. Defaults to None. Returns: SimpleHintType: A set of target types. 
""" return frozenset(), self._target_types def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against this matcher.""" if segments[idx].is_type(self.template): return self._match_at(idx) return MatchResult.empty_at(idx) class StringParser(BaseParser): """An object which matches and returns raw segments based on strings.""" def __init__( self, template: str, raw_class: type[RawSegment], type: Optional[str] = None, optional: bool = False, trim_chars: Optional[tuple[str, ...]] = None, casefold: Optional[Callable[[str], str]] = None, ): self.template = template.upper() # Create list version upfront to avoid recreating it multiple times. self._simple = frozenset((self.template,)) super().__init__( raw_class=raw_class, type=type, optional=optional, trim_chars=trim_chars, casefold=casefold, ) def __repr__(self) -> str: return f"" def simple( self, parse_context: "ParseContext", crumbs: Optional[tuple[str, ...]] = None ) -> SimpleHintType: """Return simple options for this matcher. Because string matchers are not case sensitive we can just return the template here. """ return self._simple, frozenset() def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against this matcher. NOTE: We check that the segment is also code to avoid matching unexpected comments. """ if segments[idx].raw_upper == self.template and segments[idx].is_code: return self._match_at(idx) return MatchResult.empty_at(idx) class MultiStringParser(BaseParser): """An object which matches and returns raw segments on a collection of strings.""" def __init__( self, templates: Collection[str], raw_class: type[RawSegment], type: Optional[str] = None, optional: bool = False, trim_chars: Optional[tuple[str, ...]] = None, casefold: Optional[Callable[[str], str]] = None, ): self.templates = {template.upper() for template in templates} # Create list version upfront to avoid recreating it multiple times. self._simple = frozenset(self.templates) super().__init__( raw_class=raw_class, type=type, optional=optional, trim_chars=trim_chars, casefold=casefold, ) def __repr__(self) -> str: return f"" def simple( self, parse_context: "ParseContext", crumbs: Optional[tuple[str, ...]] = None ) -> SimpleHintType: """Return simple options for this matcher. Because string matchers are not case sensitive we can just return the templates here. """ return self._simple, frozenset() def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against this matcher. NOTE: We check that the segment is also code to avoid matching unexpected comments. 
""" if segments[idx].is_code and segments[idx].raw_upper in self.templates: return self._match_at(idx) return MatchResult.empty_at(idx) class RegexParser(BaseParser): """An object which matches and returns raw segments based on a regex.""" def __init__( self, template: str, raw_class: type[RawSegment], type: Optional[str] = None, optional: bool = False, anti_template: Optional[str] = None, trim_chars: Optional[tuple[str, ...]] = None, casefold: Optional[Callable[[str], str]] = None, ): # Store the optional anti-template self.template = template self.anti_template = anti_template # Compile regexes upfront to avoid repeated overhead self._anti_template = regex.compile(anti_template or r"", regex.IGNORECASE) self._template = regex.compile(template, regex.IGNORECASE) super().__init__( raw_class=raw_class, type=type, optional=optional, trim_chars=trim_chars, casefold=casefold, ) def __repr__(self) -> str: return f"" def simple( self, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> None: """Does this matcher support a uppercase hash matching route? Regex segment does NOT for now. We might need to later for efficiency. """ return None def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against this matcher. NOTE: This method uses .raw_upper and so case sensitivity is not supported. """ _raw = segments[idx].raw_upper result = self._template.match(_raw) if result: result_string = result.group(0) # Check that we've fully matched if result_string == _raw: # Check that the anti_template (if set) hasn't also matched if not self.anti_template or not self._anti_template.match(_raw): return self._match_at(idx) return MatchResult.empty_at(idx) sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/000077500000000000000000000000001503426445100220305ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/__init__.py000066400000000000000000000032631503426445100241450ustar00rootroot00000000000000"""Definitions of the segment classes.""" from sqlfluff.core.parser.segments.base import ( BaseSegment, SourceFix, UnparsableSegment, ) from sqlfluff.core.parser.segments.bracketed import BracketedSegment from sqlfluff.core.parser.segments.common import ( BinaryOperatorSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, IdentifierSegment, LiteralSegment, NewlineSegment, SymbolSegment, UnlexableSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.segments.file import BaseFileSegment from sqlfluff.core.parser.segments.generator import SegmentGenerator from sqlfluff.core.parser.segments.keyword import KeywordSegment, LiteralKeywordSegment from sqlfluff.core.parser.segments.meta import ( Dedent, EndOfFile, ImplicitIndent, Indent, MetaSegment, TemplateLoop, TemplateSegment, ) from sqlfluff.core.parser.segments.raw import RawSegment __all__ = ( "BaseSegment", "BaseFileSegment", "UnparsableSegment", "BracketedSegment", "SegmentGenerator", "RawSegment", "CodeSegment", "UnlexableSegment", "CommentSegment", "WhitespaceSegment", "NewlineSegment", "KeywordSegment", "LiteralKeywordSegment", "SymbolSegment", "MetaSegment", "Indent", "Dedent", "ImplicitIndent", "TemplateSegment", "EndOfFile", "TemplateLoop", "SourceFix", "IdentifierSegment", "LiteralSegment", "BinaryOperatorSegment", "CompositeBinaryOperatorSegment", "ComparisonOperatorSegment", "CompositeComparisonOperatorSegment", "WordSegment", ) 
sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/base.py000066400000000000000000001403701503426445100233210ustar00rootroot00000000000000"""Base segment definitions. Here we define: - BaseSegment. This is the root class for all segments, and is designed to hold other subsegments. - UnparsableSegment. A special wrapper to indicate that the parse function failed on this block of segments and to prevent further analysis. """ # Import annotations for py 3.7 to allow `weakref.Referencetype["BaseSegment"]` from __future__ import annotations import logging import weakref from collections.abc import Iterator, Sequence from dataclasses import dataclass from functools import cached_property from io import StringIO from itertools import chain from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union, cast, ) from uuid import uuid4 from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.helpers import trim_non_code_segments from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.types import SimpleHintType if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects import Dialect from sqlfluff.core.parser.segments.raw import RawSegment # Instantiate the linter logger (only for use in methods involved with fixing.) linter_logger = logging.getLogger("sqlfluff.linter") TupleSerialisedSegment = tuple[str, Union[str, tuple["TupleSerialisedSegment", ...]]] RecordSerialisedSegment = dict[ str, Union[None, str, "RecordSerialisedSegment", list["RecordSerialisedSegment"]] ] @dataclass(frozen=True) class SourceFix: """A stored reference to a fix in the non-templated file.""" edit: str source_slice: slice # TODO: It might be possible to refactor this to not require # a templated_slice (because in theory it's unnecessary). # However much of the fix handling code assumes we need # a position in the templated file to interpret it. # More work required to achieve that if desired. templated_slice: slice def __hash__(self) -> int: # Only hash based on the source slice, not the # templated slice (which might change) return hash((self.edit, self.source_slice.start, self.source_slice.stop)) @dataclass(frozen=True) class PathStep: """An element of the response to BaseSegment.path_to(). Attributes: segment (:obj:`BaseSegment`): The segment in the chain. idx (int): The index of the target within its `segment`. len (int): The number of children `segment` has. code_idxs (:obj:`tuple` of int): The indices which contain code. """ segment: BaseSegment idx: int len: int code_idxs: tuple[int, ...] def _iter_base_types( new_type: Optional[str], bases: tuple[type[BaseSegment]] ) -> Iterator[str]: """Iterate types for a new segment class. This is a helper method used within in the construction of SegmentMetaclass so that we can construct a frozenset directly off the results. """ if new_type is not None: yield new_type for base in bases: yield from base._class_types class SegmentMetaclass(type, Matchable): """The metaclass for segments. This metaclass provides pre-computed class attributes based on the defined attributes of specific classes. Segments as a *type* should also implement the Matchable interface too. Once instantiated they no longer need to but we should be able to treat the BaseSegment class as a Matchable interface. 
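
    A minimal sketch of the effect (class name illustrative):

    .. code-block:: python

        class ExampleSegment(BaseSegment):
            type = "example"

        # The metaclass has already precomputed the type lineage:
        assert ExampleSegment._class_types == frozenset({"example", "base"})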
""" def __new__( mcs: type[type], name: str, bases: tuple[type[BaseSegment]], class_dict: dict[str, Any], ) -> SegmentMetaclass: """Generate a new class. We use the `type` class attribute for the class and it's parent base classes to build up a `set` of types on construction to use in type checking later in the process. Doing it on construction here saves calculating it at runtime for each instance of the class. """ # Create a cache uuid on definition. # We do it here so every _definition_ of a segment # gets a unique UUID regardless of dialect. class_dict["_cache_key"] = uuid4().hex # Populate the `_class_types` property on creation. added_type = class_dict.get("type", None) class_dict["_class_types"] = frozenset(_iter_base_types(added_type, bases)) return cast(type["BaseSegment"], type.__new__(mcs, name, bases, class_dict)) class BaseSegment(metaclass=SegmentMetaclass): """The base segment element. This defines the base element which drives both Lexing, Parsing and Linting. A large chunk of the logic which defines those three operations are centered here. Much of what is defined in the BaseSegment is also used by its many subclasses rather than directly here. For clarity, the `BaseSegment` is mostly centered around a segment which contains other subsegments. For segments which don't have *children*, refer to the `RawSegment` class (which still inherits from this one). Segments are used both as instances to hold chunks of text, but also as classes themselves where they function a lot like grammars, and return instances of themselves when they match. The many classmethods in this class are usually to serve their purpose as a matcher. """ # `type` should be the *category* of this kind of segment type: ClassVar[str] = "base" _class_types: ClassVar[frozenset[str]] # NOTE: Set by SegmentMetaclass # We define the type here but no value. Subclasses must provide a value. match_grammar: Matchable comment_separate = False is_meta = False # Are we able to have non-code at the start or end? can_start_end_non_code = False # Can we allow it to be empty? Usually used in combination # with the can_start_end_non_code. allow_empty = False # What other kwargs need to be copied when applying fixes. additional_kwargs: list[str] = [] pos_marker: Optional[PositionMarker] # NOTE: Cache key is generated by the SegmentMetaclass _cache_key: str # _preface_modifier used in ._preface() _preface_modifier: str = "" # Optional reference to the parent. Stored as a weakref. _parent: Optional[weakref.ReferenceType[BaseSegment]] = None _parent_idx: Optional[int] = None def __init__( self, segments: tuple[BaseSegment, ...], pos_marker: Optional[PositionMarker] = None, uuid: Optional[int] = None, ) -> None: if len(segments) == 0: # pragma: no cover raise RuntimeError( "Setting {} with a zero length segment set. This shouldn't " "happen.".format(self.__class__) ) if not pos_marker: # If no pos given, work it out from the children. if all(seg.pos_marker for seg in segments): pos_marker = PositionMarker.from_child_markers( *(seg.pos_marker for seg in segments) ) assert not hasattr(self, "parse_grammar"), "parse_grammar is deprecated." self.pos_marker = pos_marker self.segments: tuple[BaseSegment, ...] = segments # Tracker for matching when things start moving. # NOTE: We're storing the .int attribute so that it's swifter # for comparisons. 
self.uuid = uuid or uuid4().int self.set_as_parent(recurse=False) self.validate_non_code_ends() self._recalculate_caches() def __setattr__(self, key: str, value: Any) -> None: try: if key == "segments": self._recalculate_caches() except (AttributeError, KeyError): # pragma: no cover pass super().__setattr__(key, value) def __eq__(self, other: Any) -> bool: # NB: this should also work for RawSegment if not isinstance(other, BaseSegment): return False # pragma: no cover # If the uuids match, then we can easily return early. if self.uuid == other.uuid: return True return ( # Same class NAME. (could be constructed elsewhere) self.__class__.__name__ == other.__class__.__name__ and (self.raw == other.raw) # Both must have a non-null position marker to compare. and self.pos_marker is not None and other.pos_marker is not None # We only match that the *start* is the same. This means we can # still effectively construct searches look for segments. # This is important for .apply_fixes(). # NOTE: `.working_loc` is much more performant than creating # a new start point marker for comparison. and (self.pos_marker.working_loc == other.pos_marker.working_loc) ) @cached_property def _hash(self) -> int: """Cache the hash property to avoid recalculating it often.""" return hash( ( self.__class__.__name__, self.raw, # NOTE: We use the start of the source slice because it's # the lowest cost way of getting a reliable location in the source # file for deduplication. self.pos_marker.source_slice.start if self.pos_marker else None, ) ) def __hash__(self) -> int: return self._hash def __repr__(self) -> str: return f"<{self.__class__.__name__}: ({self.pos_marker})>" def __getstate__(self) -> dict[str, Any]: """Get the current state to allow pickling.""" s = self.__dict__.copy() # Kill the parent ref. It won't pickle well. s["_parent"] = None return s def __setstate__(self, state: dict[str, Any]) -> None: """Set state during process of unpickling.""" self.__dict__ = state.copy() # Once state is ingested - repopulate, NOT recursing. # Child segments will do it for themselves on unpickling. self.set_as_parent(recurse=False) # ################ PRIVATE PROPERTIES @property def _comments(self) -> list[BaseSegment]: """Returns only the comment elements of this segment.""" return [seg for seg in self.segments if seg.is_type("comment")] @property def _non_comments(self) -> list[BaseSegment]: # pragma: no cover TODO? """Returns only the non-comment elements of this segment.""" return [seg for seg in self.segments if not seg.is_type("comment")] # ################ PUBLIC PROPERTIES @cached_property def is_code(self) -> bool: """Return True if this segment contains any code.""" return any(seg.is_code for seg in self.segments) @cached_property def _code_indices(self) -> tuple[int, ...]: """The indices of code elements. This is used in the path_to algorithm for tree traversal. """ return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code) @cached_property def is_comment(self) -> bool: # pragma: no cover TODO? 
"""Return True if this is entirely made of comments.""" return all(seg.is_comment for seg in self.segments) @cached_property def is_whitespace(self) -> bool: """Return True if this segment is entirely whitespace.""" return all(seg.is_whitespace for seg in self.segments) @cached_property def raw(self) -> str: """Make a string from the segments of this segment.""" return "".join(seg.raw for seg in self.segments) @property def class_types(self) -> frozenset[str]: """The set of types for this segment.""" # NOTE: This version is simple, but some dependent classes # (notably RawSegment) override this with something more # custom. return self._class_types @cached_property def descendant_type_set(self) -> frozenset[str]: """The set of all contained types. This is used for rule crawling. NOTE: Does not include the types of the parent segment itself. """ return frozenset( chain.from_iterable( seg.descendant_type_set | seg.class_types for seg in self.segments ) ) @cached_property def direct_descendant_type_set(self) -> set[str]: """The set of all directly child types. This is used for rule crawling. NOTE: Does not include the types of the parent segment itself. """ return set(chain.from_iterable(seg.class_types for seg in self.segments)) @cached_property def raw_upper(self) -> str: """Make an uppercase string from the segments of this segment.""" return self.raw.upper() @cached_property def raw_segments(self) -> list[RawSegment]: """Returns a list of raw segments in this segment.""" return self.get_raw_segments() @cached_property def raw_segments_with_ancestors( self, ) -> list[tuple[RawSegment, list[PathStep]]]: """Returns a list of raw segments in this segment with the ancestors.""" buffer = [] for idx, seg in enumerate(self.segments): # If it's a raw, yield it with this segment as the parent new_step = [PathStep(self, idx, len(self.segments), self._code_indices)] if seg.is_type("raw"): buffer.append((cast("RawSegment", seg), new_step)) # If it's not, recurse - prepending self to the ancestor stack else: buffer.extend( [ (raw_seg, new_step + stack) for raw_seg, stack in seg.raw_segments_with_ancestors ] ) return buffer @cached_property def source_fixes(self) -> list[SourceFix]: """Return any source fixes as list.""" return list(chain.from_iterable(s.source_fixes for s in self.segments)) @cached_property def first_non_whitespace_segment_raw_upper(self) -> Optional[str]: """Returns the first non-whitespace subsegment of this segment.""" for seg in self.raw_segments: if seg.raw_upper.strip(): return seg.raw_upper return None # return [seg.raw_upper for seg in self.raw_segments] @cached_property def is_templated(self) -> bool: """Returns True if the segment includes any templated code. This is a simple, very efficient check that doesn't require looking up the RawFileSlices for the segment. NOTE: A segment returning a True result may still have some literal code as well (i.e. a mixture of literal and templated). """ # We check two things: # * Source slice not empty: If it's empty, this means it doesn't appear # in the source, e.g. because it is new code generated by a lint fix. # Return False for these. # * It's not a literal slice. If it's a literal and has size then it's # not templated. assert self.pos_marker return ( self.pos_marker.source_slice.start != self.pos_marker.source_slice.stop and not self.pos_marker.is_literal() ) # ################ STATIC METHODS def _suffix(self) -> str: """Return any extra output required at the end when logging. 
NB Override this for specific subclasses if we want extra output. """ return "" @classmethod def _position_segments( cls, segments: tuple[BaseSegment, ...], parent_pos: PositionMarker, ) -> tuple[BaseSegment, ...]: """Refresh positions of segments within a span. This does two things: - Assign positions to any segments without them. - Updates the working line_no and line_pos for all segments during fixing. New segments are assumed to be metas or insertions and so therefore have a zero-length position in the source and templated file. """ assert segments, "_position_segments called on empty sequence." line_no = parent_pos.working_line_no line_pos = parent_pos.working_line_pos # Use the index so that we can look forward # and backward. segment_buffer: tuple[BaseSegment, ...] = () for idx, segment in enumerate(segments): # Get hold of the current position. old_position = segment.pos_marker new_position = segment.pos_marker # Fill any that don't have a position. if not old_position: # Can we get a position from the previous? start_point = None if idx > 0: prev_seg = segment_buffer[idx - 1] # Given we're going back in the buffer we should # have set the position marker for everything already # in there. This is mostly a hint to mypy. assert prev_seg.pos_marker start_point = prev_seg.pos_marker.end_point_marker() # Can we get it from the parent? elif parent_pos: start_point = parent_pos.start_point_marker() # Search forward for the end point. end_point = None for fwd_seg in segments[idx + 1 :]: if fwd_seg.pos_marker: # NOTE: Use raw segments because it's more reliable. end_point = fwd_seg.raw_segments[ 0 ].pos_marker.start_point_marker() break if start_point and end_point and start_point != end_point: # We should construct a wider position marker. new_position = PositionMarker.from_points( start_point, end_point, ) # If we have start point (or if they were equal above), # just apply start point. elif start_point: new_position = start_point # Do we have an end? elif end_point: # pragma: no cover new_position = end_point else: # pragma: no cover raise ValueError("Unable to position new segment") assert new_position # Regardless of whether we change the position, we still need to # update the working location and keep track of it. new_position = new_position.with_working_position(line_no, line_pos) line_no, line_pos = new_position.infer_next_position( segment.raw, line_no, line_pos ) # NOTE: If the position is already correct, we still # need to copy, but we don't need to reposition any further. if segment.segments and old_position != new_position: # Recurse to work out the child segments FIRST, before # copying the parent so we don't double the work. assert new_position child_segments = cls._position_segments( segment.segments, parent_pos=new_position ) new_seg = segment.copy(segments=child_segments) new_seg.pos_marker = new_position else: new_seg = segment.copy() new_seg.pos_marker = new_position new_seg.pos_marker = new_position segment_buffer += (new_seg,) continue return segment_buffer # ################ CLASS METHODS @classmethod def simple( cls, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> Optional[SimpleHintType]: """Does this matcher support an uppercase hash matching route? This should be true if the MATCH grammar is simple. Most more complicated segments will be assumed to overwrite this method if they wish to be considered simple. 
""" if cls.match_grammar: return cls.match_grammar.simple(parse_context=parse_context, crumbs=crumbs) else: # pragma: no cover TODO? # Other segments will either override this method, or aren't # simple. return None @classmethod def cache_key(cls) -> str: """Return the cache key for this segment definition. NOTE: The key itself is generated on _definition_ by the metaclass. """ return cls._cache_key @classmethod def is_optional(cls) -> bool: # pragma: no cover """Returns False because Segments are never optional. This is used _only_ in the `Sequence` & `Bracketed` grammars to indicate optional elements in a sequence which may not be present while still returning a valid match. Typically in dialect definition, Segments are rarely referred to directly, but normally are referenced via a `Ref()` grammar. The `Ref()` grammar supports optional referencing and so we recommend wrapping a segment in an optional `Ref()` to take advantage of optional sequence elements as this is not supported directly on the Segment itself. """ return False @classmethod def class_is_type(cls, *seg_type: str) -> bool: """Is this segment class (or its parent) of the given type.""" # Use set intersection if cls._class_types.intersection(seg_type): return True return False @classmethod def structural_simplify( cls, elem: TupleSerialisedSegment ) -> RecordSerialisedSegment: """Simplify the structure recursively so it serializes nicely in json/yaml. This is used in the .as_record() method. """ assert len(elem) == 2 key, value = elem assert isinstance(key, str) if isinstance(value, str): return {key: value} assert isinstance(value, tuple) # If it's an empty tuple return a dict with None. if not value: return {key: None} # Otherwise value is a tuple with length. # Simplify all the child elements contents = [cls.structural_simplify(e) for e in value] # Any duplicate elements? subkeys: list[str] = [] for _d in contents: subkeys.extend(_d.keys()) if len(set(subkeys)) != len(subkeys): # Yes: use a list of single dicts. # Recurse directly. return {key: contents} # Otherwise there aren't duplicates, un-nest the list into a dict: content_dict = {} for record in contents: for k, v in record.items(): content_dict[k] = v return {key: content_dict} @classmethod def match( cls, segments: Sequence[BaseSegment], idx: int, parse_context: ParseContext ) -> MatchResult: """Match a list of segments against this segment. Note: Match for segments is done in the ABSTRACT. When dealing with concrete then we're always in parse. Parse is what happens during expand. Matching can be done from either the raw or the segments. This raw function can be overridden, or a grammar defined on the underlying class. """ if idx >= len(segments): # pragma: no cover return MatchResult.empty_at(idx) # Is this already the right kind of segment? if isinstance(segments[idx], cls): # Very simple "consume one" result. return MatchResult(slice(idx, idx + 1)) assert cls.match_grammar, f"{cls.__name__} has no match grammar." with parse_context.deeper_match(name=cls.__name__) as ctx: match = cls.match_grammar.match(segments, idx, ctx) # Wrap are return regardless of success. 
return match.wrap(cls) # ################ PRIVATE INSTANCE METHODS def _recalculate_caches(self) -> None: for key in [ "is_code", "is_comment", "is_whitespace", "raw", "raw_upper", "matched_length", "raw_segments", "raw_segments_with_ancestors", "first_non_whitespace_segment_raw_upper", "source_fixes", "full_type_set", "descendant_type_set", "direct_descendant_type_set", "_code_indices", "_hash", ]: self.__dict__.pop(key, None) def _preface(self, ident: int, tabsize: int) -> str: """Returns the preamble to any logging.""" padded_type = "{padding}{modifier}{type}".format( padding=" " * (ident * tabsize), modifier=self._preface_modifier, type=self.get_type() + ":", ) preface = "{pos:20}|{padded_type:60} {suffix}".format( pos=str(self.pos_marker) if self.pos_marker else "-", padded_type=padded_type, suffix=self._suffix() or "", ) # Trim unnecessary whitespace before returning return preface.rstrip() # ################ PUBLIC INSTANCE METHODS def set_as_parent(self, recurse: bool = True) -> None: """Set this segment as parent for child all segments.""" for idx, seg in enumerate(self.segments): seg.set_parent(self, idx) # Recurse if not disabled if recurse: seg.set_as_parent(recurse=recurse) def set_parent(self, parent: BaseSegment, idx: int) -> None: """Set the weak reference to the parent. We keep a reference to the index within the parent too as that is often used at the same point in the operation. NOTE: Don't validate on set, because we might not have fully initialised the parent yet (because we call this method during the instantiation of the parent). """ self._parent = weakref.ref(parent) self._parent_idx = idx def get_parent(self) -> Optional[tuple[BaseSegment, int]]: """Get the parent segment, with some validation. This is provided as a performance optimisation when searching through the syntax tree. Any methods which depend on this should have an alternative way of assessing position, and ideally also set the parent of any segments found without them. As a performance optimisation, we also store the index of the segment within the parent to avoid needing to recalculate that. NOTE: We only store a weak reference to the parent so it might not be present. We also validate here that it's _still_ the parent and potentially also return None if those checks fail. """ if not self._parent: return None _parent = self._parent() if not _parent or self not in _parent.segments: return None assert self._parent_idx is not None return _parent, self._parent_idx def get_type(self) -> str: """Returns the type of this segment as a string.""" return self.type def count_segments(self, raw_only: bool = False) -> int: """Returns the number of segments in this segment.""" if self.segments: self_count = 0 if raw_only else 1 return self_count + sum( seg.count_segments(raw_only=raw_only) for seg in self.segments ) else: return 1 def is_type(self, *seg_type: str) -> bool: """Is this segment (or its parent) of the given type.""" return self.class_is_type(*seg_type) def invalidate_caches(self) -> None: """Invalidate the cached properties. This should be called whenever the segments within this segment is mutated. 
""" for seg in self.segments: seg.invalidate_caches() self._recalculate_caches() def get_start_point_marker(self) -> PositionMarker: # pragma: no cover """Get a point marker at the start of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.start_point_marker() def get_end_point_marker(self) -> PositionMarker: """Get a point marker at the end of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.end_point_marker() def get_start_loc(self) -> tuple[int, int]: """Get a location tuple at the start of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.working_loc def get_end_loc(self) -> tuple[int, int]: """Get a location tuple at the end of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.working_loc_after( self.raw, ) def stringify( self, ident: int = 0, tabsize: int = 4, code_only: bool = False ) -> str: """Use indentation to render this segment and its children as a string.""" buff = StringIO() preface = self._preface(ident=ident, tabsize=tabsize) buff.write(preface + "\n") if not code_only and self.comment_separate and len(self._comments) > 0: if self._comments: # pragma: no cover TODO? buff.write((" " * ((ident + 1) * tabsize)) + "Comments:" + "\n") for seg in self._comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) if self._non_comments: # pragma: no cover TODO? buff.write((" " * ((ident + 1) * tabsize)) + "Code:" + "\n") for seg in self._non_comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) else: for seg in self.segments: # If we're in code_only, only show the code segments, otherwise always # true if not code_only or seg.is_code: buff.write( seg.stringify( ident=ident + 1, tabsize=tabsize, code_only=code_only, ) ) return buff.getvalue() def to_tuple( self, code_only: bool = False, show_raw: bool = False, include_meta: bool = False, ) -> TupleSerialisedSegment: """Return a tuple structure from this segment.""" # works for both base and raw if show_raw and not self.segments: return (self.get_type(), self.raw) elif code_only: return ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if seg.is_code and not seg.is_meta ), ) else: return ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if include_meta or not seg.is_meta ), ) def copy( self, segments: Optional[tuple[BaseSegment, ...]] = None, parent: Optional[BaseSegment] = None, parent_idx: Optional[int] = None, ) -> BaseSegment: """Copy the segment recursively, with appropriate copying of references. Optionally provide child segments which have already been dealt with to avoid another copy operation. NOTE: In the copy operation it's really important that we get a clean segregation so that we can't go backward and mutate the source object, but at the same time we should be mindful of what _needs_ to be copied to avoid a deep copy where one isn't required. """ cls = self.__class__ new_segment = cls.__new__(cls) # Position markers are immutable, and it's important that we keep # a reference to the same TemplatedFile, so keep the same position # marker. By updating from the source dict, we achieve that. # By using the __dict__ object we also transfer the _cache_ too # which is stored there by @cached_property. 
        new_segment.__dict__.update(self.__dict__)

        # Reset the parent if provided.
        if parent:
            assert parent_idx is not None, "parent_idx must be provided if parent is."
            new_segment.set_parent(parent, parent_idx)

        # If the segment doesn't have a segments property, we're done.
        # NOTE: This is a proxy way of understanding whether it's a RawSegment
        # or not. Typically it will _have_ a `segments` attribute, but it's an
        # empty tuple.
        if not self.__dict__.get("segments", None):
            assert (
                not segments
            ), f"Cannot provide `segments` argument to {cls.__name__} `.copy()`\n"
        # If segments were provided, use them.
        elif segments:
            new_segment.segments = segments
        # Otherwise we should handle recursive segment copying.
        # We use the native .copy() method (this method!) appropriately
        # so that the same logic is applied in recursion.
        # We set the parent for children directly on the copy method
        # to ensure those line up properly.
        else:
            new_segment.segments = tuple(
                seg.copy(parent=new_segment, parent_idx=idx)
                for idx, seg in enumerate(self.segments)
            )

        return new_segment

    def as_record(self, **kwargs: bool) -> Optional[RecordSerialisedSegment]:
        """Return the segment as a structurally simplified record.

        This is useful for serialization to yaml or json.

        Kwargs are passed through to `to_tuple`.
        """
        return self.structural_simplify(self.to_tuple(**kwargs))

    def get_raw_segments(self) -> list[RawSegment]:
        """Iterate raw segments, mostly for searching."""
        return [item for s in self.segments for item in s.raw_segments]

    def raw_normalized(self, casefold: bool = True) -> str:
        """Iterate raw segments, return normalized value."""
        return "".join(seg.raw_normalized(casefold) for seg in self.get_raw_segments())

    def iter_segments(
        self, expanding: Optional[Sequence[str]] = None, pass_through: bool = False
    ) -> Iterator[BaseSegment]:
        """Iterate segments, optionally expanding some children."""
        for s in self.segments:
            if expanding and s.is_type(*expanding):
                yield from s.iter_segments(
                    expanding=expanding if pass_through else None
                )
            else:
                yield s

    def iter_unparsables(self) -> Iterator[UnparsableSegment]:
        """Iterate through any unparsables this segment may contain."""
        for s in self.segments:
            yield from s.iter_unparsables()

    def type_set(self) -> set[str]:
        """Return a set of the types contained, mostly for testing."""
        typs = {self.type}
        for s in self.segments:
            typs |= s.type_set()
        return typs

    def is_raw(self) -> bool:
        """Return True if this segment has no children."""
        return len(self.segments) == 0

    def get_child(self, *seg_type: str) -> Optional[BaseSegment]:
        """Retrieve the first of the children of this segment with matching type."""
        for seg in self.segments:
            if seg.is_type(*seg_type):
                return seg
        return None

    def get_children(self, *seg_type: str) -> list[BaseSegment]:
        """Retrieve all of the children of this segment with matching type."""
        buff = []
        for seg in self.segments:
            if seg.is_type(*seg_type):
                buff.append(seg)
        return buff

    def select_children(
        self,
        start_seg: Optional[BaseSegment] = None,
        stop_seg: Optional[BaseSegment] = None,
        select_if: Optional[Callable[[BaseSegment], Any]] = None,
        loop_while: Optional[Callable[[BaseSegment], Any]] = None,
    ) -> list[BaseSegment]:
        """Retrieve subset of children based on range and filters.

        Often useful for linter rules when generating fixes, e.g. to find
        whitespace segments between two already known segments.
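
        A sketch of typical usage (segment names illustrative):

        .. code-block:: python

            whitespace = parent.select_children(
                start_seg=first_seg,
                stop_seg=second_seg,
                select_if=lambda seg: seg.is_type("whitespace"),
            )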
""" start_index = self.segments.index(start_seg) if start_seg else -1 stop_index = self.segments.index(stop_seg) if stop_seg else len(self.segments) buff = [] for seg in self.segments[start_index + 1 : stop_index]: if loop_while and not loop_while(seg): break if not select_if or select_if(seg): buff.append(seg) return buff def recursive_crawl_all(self, reverse: bool = False) -> Iterator[BaseSegment]: """Recursively crawl all descendant segments.""" if reverse: for seg in reversed(self.segments): yield from seg.recursive_crawl_all(reverse=reverse) yield self if not reverse: for seg in self.segments: yield from seg.recursive_crawl_all(reverse=reverse) def recursive_crawl( self, *seg_type: str, recurse_into: bool = True, no_recursive_seg_type: Optional[Union[str, list[str]]] = None, allow_self: bool = True, ) -> Iterator[BaseSegment]: """Recursively crawl for segments of a given type. Args: seg_type: :obj:`str`: one or more type of segment to look for. recurse_into: :obj:`bool`: When an element of type "seg_type" is found, whether to recurse into it. no_recursive_seg_type: :obj:`Union[str, list[str]]`: a type of segment not to recurse further into. It is highly recommended to set this argument where possible, as it can significantly narrow the search pattern. allow_self: :obj:`bool`: Whether to allow the initial segment this is called on to be one of the results. """ if isinstance(no_recursive_seg_type, str): no_recursive_seg_type = [no_recursive_seg_type] # Assuming there is a segment to be found, first check self (if allowed): if allow_self and self.is_type(*seg_type): match = True yield self else: match = False # Check whether the types we're looking for are in this segment # at all. If not, exit early. if not self.descendant_type_set.intersection(seg_type): # Terminate iteration. return None # Then handle any recursion. if recurse_into or not match: for seg in self.segments: # Don't recurse if the segment is of a type we shouldn't # recurse into. # NOTE: Setting no_recursive_seg_type can significantly # improve performance in many cases. if not no_recursive_seg_type or not seg.is_type(*no_recursive_seg_type): yield from seg.recursive_crawl( *seg_type, recurse_into=recurse_into, no_recursive_seg_type=no_recursive_seg_type, ) def path_to(self, other: BaseSegment) -> list[PathStep]: """Given a segment which is assumed within self, get the intermediate segments. Returns: :obj:`list` of :obj:`PathStep`, not including the segment we're looking for. If `other` is not found, then empty list. This includes if called on self. The result of this should be interpreted as *the path from `self` to `other`*. If the return value is `[]` (an empty list), that implies there is no path from `self` to `other`. This would include the case where the two are the same segment, as there is no path from a segment to itself. Technically this could be seen as a "half open interval" of the path between two segments: in that it includes the root segment, but not the leaf. We first use any existing parent references to work upward, and then if that doesn't take us far enough we fill in from the top (setting any missing references as we go). This tries to be as efficient in that process as possible. """ # Return empty if they are the same segment. if self is other: return [] # pragma: no cover # Do we have any child segments at all? if not self.segments: return [] # Identifying the highest parent we can using any preset parent values. 
        midpoint = other
        lower_path = []
        while True:
            _higher = midpoint.get_parent()
            # If we've run out of parents, stop for now.
            if not _higher:
                break
            _seg, _idx = _higher
            # If the higher doesn't have a position we'll run into problems.
            # Check that in advance.
            assert _seg.pos_marker, (
                f"`path_to()` found segment {_seg} without position. "
                "This shouldn't happen post-parse."
            )
            lower_path.append(
                PathStep(
                    _seg,
                    _idx,
                    len(_seg.segments),
                    _seg._code_indices,
                )
            )
            midpoint = _seg
            # If we've found the target segment we can also stop.
            if midpoint == self:
                break

        # Reverse the path so far
        lower_path.reverse()

        # Have we already found the parent?
        if midpoint == self:
            return lower_path
        # Have we gone all the way up to the file segment?
        elif midpoint.class_is_type("file"):
            return []  # pragma: no cover
        # Are we in the right ballpark?
        # NOTE: Comparisons have a higher precedence than `not`.
        elif not self.get_start_loc() <= midpoint.get_start_loc() <= self.get_end_loc():
            return []

        # From here, we've worked "up" as far as we can, we now work "down".
        # When working down, we only need to go as far as the `midpoint`.

        # Check through each of the child segments
        for idx, seg in enumerate(self.segments):
            # Set the parent if it's not already set.
            seg.set_parent(self, idx)
            # Build the step.
            step = PathStep(self, idx, len(self.segments), self._code_indices)
            # Have we found the target?
            # NOTE: Check for _equality_ not _identity_ here as that's most reliable.
            if seg == midpoint:
                return [step] + lower_path
            # Is there a path to the target?
            res = seg.path_to(midpoint)
            if res:
                return [step] + res + lower_path

        # Not found.
        return []  # pragma: no cover

    @staticmethod
    def _is_code_or_meta(segment: BaseSegment) -> bool:
        return segment.is_code or segment.is_meta

    def validate_non_code_ends(self) -> None:
        """Validates the start and end of the sequence based on its config.

        Most normal segments may *not* start or end with whitespace. Any
        surrounding whitespace should be within the outer segment containing
        this one. The exception is for segments which configure
        `can_start_end_non_code`, for which no check is conducted.

        TODO: Check whether `can_start_end_non_code` is only set for
        FileSegment, in which case take away the config and just override
        this method for that segment.
        """
        if self.can_start_end_non_code:
            return None
        if not self.segments:  # pragma: no cover
            return None
        assert self._is_code_or_meta(self.segments[0]), (
            f"Segment {self} starts with whitespace segment: "
            f"{self.segments[0].raw!r}.\n{self.segments!r}"
        )
        assert self._is_code_or_meta(self.segments[-1]), (
            f"Segment {self} ends with whitespace segment: "
            f"{self.segments[-1].raw!r}.\n{self.segments!r}"
        )

    def validate_segment_with_reparse(
        self,
        dialect: Dialect,
    ) -> bool:
        """Checks correctness of new segment by re-parsing it."""
        ctx = ParseContext(dialect=dialect)
        # We're going to check the rematch without any metas because the
        # matching routines will assume they haven't already been added.
        # We also strip any non-code from the ends which might have moved.
        raw_content = tuple(s for s in self.raw_segments if not s.is_meta)
        _, trimmed_content, _ = trim_non_code_segments(raw_content)
        if not trimmed_content and self.can_start_end_non_code:
            # Edge case for empty segments which are allowed to be empty.
            return True
        rematch = self.match(trimmed_content, 0, ctx)
        if not rematch.matched_slice == slice(0, len(trimmed_content)):
            linter_logger.debug(
                f"Validation Check Fail for {self}. Incomplete Match. "
                f"\nMatched: {rematch.apply(trimmed_content)}.
" f"\nUnmatched: {trimmed_content[rematch.matched_slice.stop :]}." ) return False opening_unparsables = set(self.recursive_crawl("unparsable")) closing_unparsables: set[BaseSegment] = set() new_segments = rematch.apply(trimmed_content) for seg in new_segments: closing_unparsables.update(seg.recursive_crawl("unparsable")) # Check we don't introduce any _additional_ unparsables. # Pre-existing unparsables are ok, and for some rules that's as # designed. The idea is that we shouldn't make the situation _worse_. if opening_unparsables >= closing_unparsables: return True linter_logger.debug( f"Validation Check Fail for {self}.\nFound additional Unparsables: " f"{closing_unparsables - opening_unparsables}" ) for unparsable in closing_unparsables - opening_unparsables: linter_logger.debug(f"Unparsable:\n{unparsable.stringify()}\n") return False @staticmethod def _log_apply_fixes_check_issue( message: str, *args: Any ) -> None: # pragma: no cover linter_logger.critical(message, exc_info=True, *args) def edit( self, raw: Optional[str] = None, source_fixes: Optional[list[SourceFix]] = None ) -> BaseSegment: """Stub.""" raise NotImplementedError() @classmethod def from_result_segments( cls, result_segments: tuple[BaseSegment, ...], segment_kwargs: dict[str, Any], ) -> BaseSegment: """Create an instance of this class from a tuple of matched segments.""" return cls(segments=result_segments, **segment_kwargs) class UnparsableSegment(BaseSegment): """This is a segment which can't be parsed. It indicates a error during parsing.""" type = "unparsable" # From here down, comments are printed separately. comment_separate = True # Unparsable segments could contain anything. can_start_end_non_code = True _expected = "" def __init__( self, segments: tuple[BaseSegment, ...], pos_marker: Optional[PositionMarker] = None, expected: str = "", ) -> None: self._expected = expected super().__init__(segments=segments, pos_marker=pos_marker) def _suffix(self) -> str: """Return any extra output required at the end when logging. NB Override this for specific subclasses if we want extra output. """ return f"!! Expected: {self._expected!r}" def iter_unparsables(self) -> Iterator[UnparsableSegment]: """Iterate through any unparsables. As this is an unparsable, it should yield itself. """ yield self sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/bracketed.py000066400000000000000000000060271503426445100243330ustar00rootroot00000000000000"""The BracketedSegment.""" from collections.abc import Sequence from typing import TYPE_CHECKING, Optional from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments.base import BaseSegment if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.types import SimpleHintType class BracketedSegment(BaseSegment): """A segment containing a bracketed expression.""" type = "bracketed" additional_kwargs = ["start_bracket", "end_bracket"] def __init__( self, segments: tuple["BaseSegment", ...], # These are tuples of segments but we're expecting them to # be tuples of length 1. This is because we'll almost always # be doing tuple arithmetic with the results and constructing # 1-tuples on the fly is very easy to misread. 
start_bracket: tuple[BaseSegment], end_bracket: tuple[BaseSegment], pos_marker: Optional[PositionMarker] = None, uuid: Optional[int] = None, ): """Stash the bracket segments for later.""" if not start_bracket or not end_bracket: # pragma: no cover raise ValueError( "Attempted to construct Bracketed segment without specifying brackets." ) self.start_bracket = start_bracket self.end_bracket = end_bracket super().__init__(segments=segments, pos_marker=pos_marker, uuid=uuid) @classmethod def simple( cls, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> Optional["SimpleHintType"]: """Simple methods for bracketed and the persistent brackets.""" start_brackets = [ start_bracket for _, start_bracket, _, persistent in parse_context.dialect.bracket_sets( "bracket_pairs" ) if persistent ] simple_raws: set[str] = set() for ref in start_brackets: bracket_simple = parse_context.dialect.ref(ref).simple( parse_context, crumbs=crumbs ) assert bracket_simple, "All bracket segments must support simple." assert bracket_simple[0], "All bracket segments must support raw simple." # NOTE: By making this assumption we don't have to handle the "typed" # simple here. simple_raws.update(bracket_simple[0]) return frozenset(simple_raws), frozenset() @classmethod def match( cls, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Only useful as a terminator. NOTE: Coverage of this method is poor, because in typical use as a terminator - the `.simple()` method covers everything we need. """ if isinstance(segments[idx], cls): # pragma: no cover return MatchResult(slice(idx, idx + 1)) return MatchResult.empty_at(idx) sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/common.py000066400000000000000000000056621503426445100237030ustar00rootroot00000000000000"""Common segment types used as building blocks of dialects. The expectation for these segments is that they have no additional logic (or very minimal logic). """ from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.raw import RawSegment class CodeSegment(RawSegment): """An alias for RawSegment. This has a more explicit name for segment creation. """ pass class UnlexableSegment(CodeSegment): """A placeholder to unlexable sections. This otherwise behaves exactly like a code section. """ type = "unlexable" class CommentSegment(RawSegment): """Segment containing a comment.""" type = "comment" _is_code = False _is_comment = True class WhitespaceSegment(RawSegment): """Segment containing whitespace.""" type = "whitespace" _is_whitespace = True _is_code = False _is_comment = False _default_raw = " " class NewlineSegment(RawSegment): """Segment containing a newline. NOTE: NewlineSegment does not inherit from WhitespaceSegment. Therefore NewlineSegment.is_type('whitespace') returns False. This is intentional and convenient for rules. If users want to match on both, call .is_type('whitespace', 'newline') """ type = "newline" _is_whitespace = True _is_code = False _is_comment = False _default_raw = "\n" class SymbolSegment(CodeSegment): """A segment used for matching single entities which aren't keywords. We rename the segment class here so that descendants of _ProtoKeywordSegment can use the same functionality but don't end up being labelled as a `keyword` later. """ type = "symbol" class IdentifierSegment(CodeSegment): """An identifier segment. Defined here for type inheritance. """ type = "identifier" class LiteralSegment(CodeSegment): """A literal segment. 
Defined here for type inheritance. """ type = "literal" class BinaryOperatorSegment(CodeSegment): """A binary operator segment. Defined here for type inheritance. Inherits from RawSegment. """ type = "binary_operator" class CompositeBinaryOperatorSegment(BaseSegment): """A composite binary operator segment. Defined here for type inheritance. Inherits from BaseSegment. """ type = "binary_operator" class ComparisonOperatorSegment(CodeSegment): """A comparison operator segment. Defined here for type inheritance. Inherits from RawSegment. """ type = "comparison_operator" class CompositeComparisonOperatorSegment(BaseSegment): """A comparison operator segment. Defined here for type inheritance. Inherits from BaseSegment. """ type = "comparison_operator" class WordSegment(CodeSegment): """A generic (likely letters only) segment. Defined here for type inheritance. This is the base segment for things like keywords and naked identifiers. """ type = "word" sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/file.py000066400000000000000000000075341503426445100233320ustar00rootroot00000000000000"""Definition of the BaseFileSegment.""" from abc import abstractmethod from typing import Optional from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments.base import BaseSegment, UnparsableSegment class BaseFileSegment(BaseSegment): """A segment representing a whole file or script. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. """ type = "file" # The file segment is the only one which can start or end with non-code can_start_end_non_code = True # A file can be empty! allow_empty = True def __init__( self, segments: tuple[BaseSegment, ...], pos_marker: Optional[PositionMarker] = None, fname: Optional[str] = None, ): self._file_path = fname super().__init__(segments, pos_marker=pos_marker) @property def file_path(self) -> Optional[str]: """File path of a parsed SQL file.""" return self._file_path @abstractmethod def get_table_references(self) -> set[str]: """Use parsed tree to extract table references.""" @classmethod def root_parse( cls, segments: tuple[BaseSegment, ...], parse_context: ParseContext, fname: Optional[str] = None, ) -> "BaseFileSegment": """This is the entry method into parsing a file lexed segments. For single pass matching, this trims any non code off the start, matches the middle and then trims the end. Anything unexpected at the end is regarded as unparsable. """ # Trim the start _start_idx = 0 for _start_idx in range(len(segments)): if segments[_start_idx].is_code: break # Trim the end _end_idx = len(segments) for _end_idx in range(len(segments), _start_idx - 1, -1): if segments[_end_idx - 1].is_code: break if _start_idx == _end_idx: # Return just a file of non-code segments. return cls(segments, fname=fname) # Match the middle assert not hasattr( cls, "parse_grammar" ), "`parse_grammar` is deprecated on FileSegment." assert cls.match_grammar # Set up the progress bar for parsing. _final_seg = segments[-1] assert _final_seg.pos_marker _closing_position = _final_seg.pos_marker.templated_slice.stop with parse_context.progress_bar(_closing_position): # NOTE: Don't call .match() on the segment class itself, but go # straight to the match grammar inside. 
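            # (Calling `cls.match()` would wrap the result in a nested file
            # segment via `match.wrap(cls)`; instead we build the file
            # segment ourselves from the matched content below.)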
match = cls.match_grammar.match( segments[:_end_idx], _start_idx, parse_context ) parse_context.logger.info("Root Match:\n%s", match.stringify()) _matched = match.apply(segments) _unmatched = segments[match.matched_slice.stop : _end_idx] content: tuple[BaseSegment, ...] if not match: content = ( UnparsableSegment( segments[_start_idx:_end_idx], expected=str(cls.match_grammar) ), ) elif _unmatched: _idx = 0 for _idx in range(len(_unmatched)): if _unmatched[_idx].is_code: break content = ( _matched + _unmatched[:_idx] + ( UnparsableSegment( _unmatched[_idx:], expected="Nothing else in FileSegment." ), ) ) else: content = _matched + _unmatched return cls( segments[:_start_idx] + content + segments[_end_idx:], fname=fname, ) sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/generator.py000066400000000000000000000020431503426445100243670ustar00rootroot00000000000000"""A Segment Generator. Used to create Segments upon calling the expand function first. Helpful when using the sets attribute of the dialect. """ from typing import TYPE_CHECKING, Callable if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.parser.matchable import Matchable class SegmentGenerator: """Defines a late-bound dialect object. It returns a single dialect object on expansion. These are defined using a callable, which is only called once everything else is defined. Very useful for template inheritance. """ def __init__(self, func: Callable[["Dialect"], "Matchable"]) -> None: self.func = func # For all functions, use the function call def expand(self, dialect: "Dialect") -> "Matchable": """Expand this object into its true dialect object. The inner function is passed an instance of the current dialect and so has access to the current sets of that dialect. """ return self.func(dialect) sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/keyword.py000066400000000000000000000043601503426445100240710ustar00rootroot00000000000000"""The KeywordSegment class.""" from typing import Callable, Optional, Union from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments.base import SourceFix from sqlfluff.core.parser.segments.common import WordSegment class KeywordSegment(WordSegment): """A segment used for matching single words. We rename the segment class here so that descendants of _ProtoKeywordSegment can use the same functionality but don't end up being labelled as a `keyword` later. """ type = "keyword" def __init__( self, raw: Optional[str] = None, pos_marker: Optional[PositionMarker] = None, instance_types: tuple[str, ...] = (), source_fixes: Optional[list[SourceFix]] = None, trim_chars: Optional[tuple[str, ...]] = None, quoted_value: Optional[tuple[str, Union[int, str]]] = None, escape_replacements: Optional[list[tuple[str, str]]] = None, casefold: Optional[Callable[[str], str]] = None, ): """If no other name is provided we extrapolate it from the raw.""" super().__init__( raw=raw, pos_marker=pos_marker, instance_types=instance_types, source_fixes=source_fixes, trim_chars=trim_chars, quoted_value=quoted_value, escape_replacements=escape_replacements, casefold=casefold, ) def edit( self, raw: Optional[str] = None, source_fixes: Optional[list[SourceFix]] = None ) -> "KeywordSegment": """Create a new segment, with exactly the same position but different content. Returns: A copy of this object with new contents. Used mostly by fixes. NOTE: This *doesn't* copy the uuid. The edited segment is a new segment. 
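
        A minimal sketch (segment name illustrative):

        .. code-block:: python

            upper_seg = keyword_seg.edit(raw=keyword_seg.raw.upper())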
""" return self.__class__( raw=raw or self.raw, pos_marker=self.pos_marker, instance_types=self.instance_types, source_fixes=source_fixes or self.source_fixes, ) class LiteralKeywordSegment(KeywordSegment): """A keyword style literal segment. This should be used for things like NULL, NAN, TRUE & FALSE. Defined here for type inheritance. """ type = "literal" sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/meta.py000066400000000000000000000223561503426445100233400ustar00rootroot00000000000000"""Indent and Dedent classes.""" from collections.abc import Sequence from typing import Optional from uuid import UUID from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.raw import RawSegment, SourceFix from sqlfluff.core.templaters.base import TemplatedFile class MetaSegment(RawSegment): """A segment which is empty but indicates where something should be.""" type = "meta" _is_code = False _template = "" indent_val = 0 # Implicit indents are to be considered _taken_ unless # closed on the same line. is_implicit = False is_meta = True _preface_modifier = "[META] " def __init__( self, pos_marker: Optional[PositionMarker] = None, is_template: bool = False, block_uuid: Optional[UUID] = None, source_fixes: Optional[list[SourceFix]] = None, ): """Constructor for MetaSegment. Args: pos_marker (:obj:`PositionMarker`, optional): The position of the segment. is_template (:obj:`bool`, optional): A flag to indicate whether this meta segment is related to a templated section. This allows proper handling. block_uuid (:obj:`UUID`, optional): A reference to link together markers which refer to the same structure in a template (e.g. the beginning and end of an if statement). source_fixes: (:obj:`list` of :obj:`SourceFix`, optional): A list of any source fixes to apply to this segment. """ super().__init__(pos_marker=pos_marker, source_fixes=source_fixes) self.is_template = is_template self.block_uuid = block_uuid def _suffix(self) -> str: """Return any extra output required at the end when logging. Meta classes have not much to say here so just stay blank. """ return "" @classmethod def match( cls, segments: Sequence["BaseSegment"], idx: int, parse_context: ParseContext ) -> MatchResult: # pragma: no cover """This will never be called. If it is then we're using it wrong.""" raise NotImplementedError( "{} has no match method, it should only be used in a Sequence!".format( cls.__name__ ) ) @classmethod def simple( cls, parse_context: ParseContext, crumbs: Optional[tuple[str, ...]] = None ) -> None: """Does this matcher support an uppercase hash matching route? This should be true if the MATCH grammar is simple. Most more complicated segments will be assumed to overwrite this method if they wish to be considered simple. """ return None class EndOfFile(MetaSegment): """A meta segment to indicate the end of the file.""" type = "end_of_file" class TemplateLoop(MetaSegment): """A meta segment to indicate the presence of a backward template jump. More specifically these indicate the presence of where there is a placeholder in the source, but in the templated file we don't have one _yet_ because we're going back for another pass around a loop. These are particularly useful for any rules concernced with layout, because and indented TemplateLoop is allowable, but without the marker we would just see trailing whitespace. 
""" type = "template_loop" class Indent(MetaSegment): """A segment which is empty but indicates where an indent should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of a theoretical indent which will be used in linting and reconstruction. Even if there is an *actual indent* that occurs in the same place this intentionally *won't* capture it, they will just be compared later. """ type = "indent" indent_val = 1 def _suffix(self) -> str: """If present, output the block uuid.""" return f"[Block: {self.block_uuid.hex[:6]!r}]" if self.block_uuid else "" class ImplicitIndent(Indent): """A variant on the indent, that is considered *taken* unless closed in line. This is primarily for facilitating constructions which behave a little like hanging indents, without the complicated indentation spacing. .. code-block:: sql SELECT * FROM foo WHERE a -- The theoretical indent between WHERE and "a" is implicit. AND b """ _preface_modifier = "[META] (implicit) " is_implicit = True class Dedent(Indent): """A segment which is empty but indicates where an dedent should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of a theoretical dedent which will be used in linting and reconstruction. Even if there is an *actual dedent* that occurs in the same place this intentionally *won't* capture it, they will just be compared later. """ type = "dedent" indent_val = -1 class TemplateSegment(MetaSegment): """A segment which is empty but indicates where something should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of an element on a line which has been removed. This is used to record the position of template blocks, so that their indents are not removed during linting. This is used to hold a reference point for code from the source file which is removed in the templated version such as loop blocks or comments. On initialisation we optionally accept the source string as a kwarg in case rules want to lint this down the line. """ type = "placeholder" def __init__( self, pos_marker: Optional[PositionMarker] = None, source_str: str = "", block_type: str = "", source_fixes: Optional[list[SourceFix]] = None, block_uuid: Optional[UUID] = None, ): """Initialise a placeholder with the source code embedded.""" # NOTE: Empty string is ok, None is not. if source_str is None: # pragma: no cover raise ValueError("Cannot instantiate TemplateSegment without a source_str.") self.source_str = source_str self.block_type = block_type # Call the super of the pos_marker. super().__init__( pos_marker=pos_marker, source_fixes=source_fixes, block_uuid=block_uuid ) def _suffix(self) -> str: """Also output what it's a placeholder for.""" return ( f"[Type: {self.block_type!r}, Raw: {self.source_str!r}" + (f", Block: {self.block_uuid.hex[:6]!r}" if self.block_uuid else "") + "]" ) @classmethod def from_slice( cls, source_slice: slice, templated_slice: slice, block_type: str, templated_file: TemplatedFile, block_uuid: Optional[UUID] = None, ) -> "TemplateSegment": """Construct template segment from slice of a source file.""" pos_marker = PositionMarker( source_slice, templated_slice, templated_file, ) return cls( pos_marker=pos_marker, source_str=templated_file.source_str[source_slice], block_type=block_type, block_uuid=block_uuid, ) def to_tuple( self, code_only: bool = False, show_raw: bool = False, include_meta: bool = False, ) -> tuple[str, str]: """Return a tuple structure from this segment. 
        Unlike most segments, we return the _source_ content for placeholders
        if viewing metas is allowed. This allows verification of the content
        of those placeholders for inspection or debugging.

        NOTE: This method does not use the `include_meta` argument. This method
        relies on any parent segment to do filtering associated with whether to
        include or not include meta segments.
        """
        return (self.get_type(), self.source_str)

    def edit(
        self,
        raw: Optional[str] = None,
        source_fixes: Optional[list[SourceFix]] = None,
        source_str: Optional[str] = None,
    ) -> MetaSegment:
        """Create a new segment, with exactly the same position but different content.

        Returns:
            A copy of this object with new contents.

        Used mostly by fixes.

        NOTE: This *doesn't* copy the uuid. The edited segment is a new segment.
        """
        if raw:
            raise ValueError(
                "Cannot set raw of a template placeholder!"
            )  # pragma: no cover

        if source_fixes or self.source_fixes:
            sf = (source_fixes or []) + (self.source_fixes + [])
        else:  # pragma: no cover
            # There's _usually_ a source fix if we're editing a templated
            # segment - but not necessarily guaranteed.
            sf = None

        return self.__class__(
            pos_marker=self.pos_marker,
            source_str=source_str if source_str is not None else self.source_str,
            block_type=self.block_type,
            source_fixes=sf,
            block_uuid=self.block_uuid,
        )
sqlfluff-3.4.2/src/sqlfluff/core/parser/segments/raw.py000066400000000000000000000243771503426445100232060ustar00rootroot00000000000000"""Raw segment definitions.

This is designed to be the root segment, without any children,
and the output of the lexer.
"""

from typing import Any, Callable, Optional, Union, cast
from uuid import uuid4

import regex as re

from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix


class RawSegment(BaseSegment):
    """This is a segment without any subsegments."""

    type = "raw"
    _is_code = True
    _is_comment = False
    _is_whitespace = False
    # Classes inheriting from RawSegment may provide a _default_raw
    # to enable simple initialisation.
    _default_raw = ""

    def __init__(
        self,
        raw: Optional[str] = None,
        pos_marker: Optional[PositionMarker] = None,
        # For legacy and syntactic sugar we allow the simple
        # `type` argument here, but for more precise inheritance
        # we suggest using the `instance_types` option.
        type: Optional[str] = None,
        instance_types: tuple[str, ...] = (),
        trim_start: Optional[tuple[str, ...]] = None,
        trim_chars: Optional[tuple[str, ...]] = None,
        source_fixes: Optional[list[SourceFix]] = None,
        uuid: Optional[int] = None,
        quoted_value: Optional[tuple[str, Union[int, str]]] = None,
        escape_replacements: Optional[list[tuple[str, str]]] = None,
        casefold: Optional[Callable[[str], str]] = None,
    ):
        """Initialise raw segment.

        If raw is not provided, we default to _default_raw if present.
        If pos_marker is not provided, it is assumed that this will be
        inserted later as part of a reposition phase.
        """
        if raw is not None:
            # NB, raw *can* be an empty string and be valid
            self._raw = raw
        else:
            self._raw = self._default_raw
        self._raw_upper = self._raw.upper()
        # pos marker is required here. We ignore the typing initially
        # because it might *initially* be unset, but it will be reset
        # later.
        self.pos_marker: PositionMarker = pos_marker  # type: ignore
        # Set the segments attribute to be an empty tuple.
        self.segments = ()
        self.instance_types: tuple[str, ...]
        if type:
            assert not instance_types, "Cannot set `type` and `instance_types`."
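            # Legacy syntactic sugar: a bare `type` argument becomes a
            # single-element `instance_types` tuple below.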
self.instance_types = (type,) else: self.instance_types = instance_types # What should we trim off the ends to get to content self.trim_start = trim_start self.trim_chars = trim_chars # Keep track of any source fixes self._source_fixes = source_fixes # UUID for matching (the int attribute of it) self.uuid = uuid or uuid4().int self.representation = "<{}: ({}) {!r}>".format( self.__class__.__name__, self.pos_marker, self.raw ) self.quoted_value = quoted_value self.escape_replacements = escape_replacements self.casefold = casefold self._raw_value: str = self.normalize() def __repr__(self) -> str: # This is calculated at __init__, because all elements are immutable # and this was previously recalculating the pos marker, # and became very expensive return self.representation def __setattr__(self, key: str, value: Any) -> None: """Overwrite BaseSegment's __setattr__ with BaseSegment's superclass.""" super(BaseSegment, self).__setattr__(key, value) # ################ PUBLIC PROPERTIES @property def is_code(self) -> bool: """Return True if this segment is code.""" return self._is_code @property def is_comment(self) -> bool: """Return True if this segment is a comment.""" return self._is_comment @property def is_whitespace(self) -> bool: """Return True if this segment is whitespace.""" return self._is_whitespace @property def raw(self) -> str: """Returns the raw segment.""" return self._raw @property def raw_upper(self) -> str: """Returns the raw segment in uppercase.""" return self._raw_upper @property def raw_segments(self) -> list["RawSegment"]: """Returns self to be compatible with calls to its superclass.""" return [self] @property def class_types(self) -> frozenset[str]: """The set of full types for this segment, including inherited. Add the surrogate type for raw segments. """ return frozenset(self.instance_types) | super().class_types @property def source_fixes(self) -> list[SourceFix]: """Return any source fixes as list.""" return self._source_fixes or [] # ################ INSTANCE METHODS def invalidate_caches(self) -> None: """Overwrite superclass functionality.""" pass def get_type(self) -> str: """Returns the type of this segment as a string.""" if self.instance_types: return self.instance_types[0] return super().get_type() def is_type(self, *seg_type: str) -> bool: """Extend the parent class method with the surrogate types.""" if set(self.instance_types).intersection(seg_type): return True return self.class_is_type(*seg_type) def get_raw_segments(self) -> list["RawSegment"]: """Iterate raw segments, mostly for searching.""" return [self] def raw_trimmed(self) -> str: """Return a trimmed version of the raw content. Returns: str: The trimmed version of the raw content. """ raw_buff = self.raw if self.trim_start: for seq in self.trim_start: if raw_buff.startswith(seq): raw_buff = raw_buff[len(seq) :] if self.trim_chars: raw_buff = self.raw # for each thing to trim for seq in self.trim_chars: # trim start while raw_buff.startswith(seq): raw_buff = raw_buff[len(seq) :] # trim end while raw_buff.endswith(seq): raw_buff = raw_buff[: -len(seq)] return raw_buff return raw_buff def normalize(self, value: Optional[str] = None) -> str: """Returns the normalized version of a string using the segment's rules. By default this uses the raw value of the segment. E.g. 
This removes leading and trailing quote characters, removes escapes Return: str: The normalized value """ raw_buff = value or self.raw if self.quoted_value: _match = re.match(self.quoted_value[0], raw_buff) if _match: _group_match = _match.group(self.quoted_value[1]) if isinstance(_group_match, str): raw_buff = _group_match if self.escape_replacements: for old, new in self.escape_replacements: raw_buff = re.sub(old, new, raw_buff) return raw_buff def raw_normalized(self, casefold: bool = True) -> str: """Returns a normalized string of the raw content. E.g. This removes leading and trailing quote characters, removes escapes, optionally casefolds to the dialect's casing Return: str: The normalized version of the raw content """ raw_buff = self._raw_value if self.casefold and casefold: raw_buff = self.casefold(raw_buff) return raw_buff def stringify( self, ident: int = 0, tabsize: int = 4, code_only: bool = False ) -> str: """Use indentation to render this segment and its children as a string. Args: ident (int, optional): The indentation level. Defaults to 0. tabsize (int, optional): The size of each tab. Defaults to 4. code_only (bool, optional): Whether to render only the code. Defaults to False. Returns: str: The rendered string. """ preface = self._preface(ident=ident, tabsize=tabsize) return preface + "\n" def _suffix(self) -> str: """Return any extra output required at the end when logging. NB Override this for specific subclasses if we want extra output. Returns: str: The extra output. """ return f"{self.raw!r}" def edit( self, raw: Optional[str] = None, source_fixes: Optional[list[SourceFix]] = None ) -> "RawSegment": """Create a new segment, with exactly the same position but different content. Args: raw (Optional[str]): The new content for the segment. source_fixes (Optional[list[SourceFix]]): A list of fixes to be applied to the segment. Returns: RawSegment: A copy of this object with new contents. Used mostly by fixes. NOTE: This *doesn't* copy the uuid. The edited segment is a new segment. """ return self.__class__( raw=raw or self.raw, pos_marker=self.pos_marker, instance_types=self.instance_types, trim_start=self.trim_start, trim_chars=self.trim_chars, quoted_value=self.quoted_value, escape_replacements=self.escape_replacements, casefold=self.casefold, source_fixes=source_fixes or self.source_fixes, ) def _get_raw_segment_kwargs(self) -> dict[str, Any]: return { "quoted_value": self.quoted_value, "escape_replacements": self.escape_replacements, "casefold": self.casefold, } # ################ CLASS METHODS @classmethod def from_result_segments( cls, result_segments: tuple[BaseSegment, ...], segment_kwargs: dict[str, Any], ) -> "RawSegment": """Create a RawSegment from result segments.""" assert len(result_segments) == 1 raw_seg = cast("RawSegment", result_segments[0]) new_segment_kwargs = raw_seg._get_raw_segment_kwargs() new_segment_kwargs.update(segment_kwargs) return cls( raw=raw_seg.raw, pos_marker=raw_seg.pos_marker, **new_segment_kwargs, ) __all__ = [ "PositionMarker", "RawSegment", "SourceFix", ] sqlfluff-3.4.2/src/sqlfluff/core/parser/types.py000066400000000000000000000044021503426445100217210ustar00rootroot00000000000000"""Complex Type helpers.""" from enum import Enum from typing import TYPE_CHECKING, Optional, Union if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments.generator import SegmentGenerator # When defining elements of a dialect they can be matchables or generators. 
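# For illustration (a hypothetical, simplified dialect entry): something like
#     dialect.add(FooKeywordSegment=StringParser("FOO", KeywordSegment))
# registers a ready-made Matchable, whereas a SegmentGenerator wraps a
# callable which is only expanded once the rest of the dialect is defined.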
DialectElementType = Union["Matchable", "SegmentGenerator"]

# Simple hints have a set of strings first and a set of types second.
SimpleHintType = Optional[tuple[frozenset[str], frozenset[str]]]

# The content type of the set of bracket pairs.
# bracket_type, start_ref, end_ref, persists
BracketPairTuple = tuple[str, str, str, bool]

# Define the potential parse modes. These are used in grammars
# to define how greedy they are in claiming unmatched segments.
# While the default is to only claim what they can match this
# can make pinpointing unparsable sections very difficult. By
# occasionally allowing more eager matching (for example in the
# content of bracketed expressions), we can provide more helpful
# feedback to the user.
ParseMode = Enum(
    "ParseMode",
    [
        # Strict only returns a match if the full content matches.
        # i.e. if it's not a successful match, then don't return _any_
        # match and never raise unparsable sections.
        # NOTE: This is the default for all grammars.
        "STRICT",
        # Greedy will always return a match, providing there is at least
        # one code element before a terminator. Terminators are not included
        # in the match, but are searched for before matching any content. Segments
        # which are part of any terminator (or beyond) are not available for
        # matching by any content.
        # NOTE: This replicates the `GreedyUntil` semantics.
        "GREEDY",
        # Optionally, a variant on "GREEDY", which will behave like "STRICT"
        # if nothing matches, but behaves like "GREEDY" once something has
        # matched.
        # NOTE: This replicates the `StartsWith` semantics.
        "GREEDY_ONCE_STARTED",
        # TODO: All of the existing modes here match terminators _before_
        # matching the majority of content. While that is safer, there should
        # be room for more efficient parsing modes in some cases.
    ],
)
sqlfluff-3.4.2/src/sqlfluff/core/plugin/000077500000000000000000000000001503426445100202055ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/plugin/__init__.py000066400000000000000000000006111503426445100223140ustar00rootroot00000000000000"""Marker to be imported and used in plugins (and for own implementations)."""

from typing import Any, Callable, TypeVar, cast

import pluggy

# Improvement suggested by @oremanj on python/typing gitter
F = TypeVar("F", bound=Callable[..., Any])

project_name = "sqlfluff"
plugin_base_name = f"{project_name}-plugin"
hookimpl = cast(Callable[[F], F], pluggy.HookimplMarker(plugin_base_name))
sqlfluff-3.4.2/src/sqlfluff/core/plugin/hookspecs.py000066400000000000000000000022371503426445100225610ustar00rootroot00000000000000"""Defines the specification to implement a plugin."""

from abc import abstractmethod
from typing import TYPE_CHECKING, Any

import pluggy

from sqlfluff.core.plugin import plugin_base_name

if TYPE_CHECKING:  # pragma: no cover
    # NOTE: This import is against the normal import rules, but is here for strict
    # type checking. We have an exception for this in the import linter.
    from sqlfluff.core.rules.base import BaseRule

hookspec = pluggy.HookspecMarker(plugin_base_name)


class PluginSpec:
    """Defines the method signatures for plugin implementations."""

    @hookspec
    @abstractmethod
    def get_rules(self) -> list[type["BaseRule"]]:
        """Get plugin rules."""

    @hookspec
    @abstractmethod
    def load_default_config(self) -> dict[str, Any]:
        """Loads the default configuration for the plugin."""

    @hookspec
    @abstractmethod
    # TODO: This type annotation could probably be more specific but that would
    # require making the config info object something more like a namedTuple rather
    # than a dict.
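    # An implementation typically returns entries shaped like (illustrative):
    #     {"my_option": {"definition": "...", "validation": ["a", "b"]}}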
def get_configs_info(self) -> dict[str, dict[str, Any]]: """Get rule config validations and descriptions.""" sqlfluff-3.4.2/src/sqlfluff/core/plugin/host.py000066400000000000000000000105641503426445100215420ustar00rootroot00000000000000"""Defines the plugin manager getter. NOTE: The plugin manager will load all of the plugins on the first pass. Each plugin will also load the plugin manager on load to register themselves. To ensure this is as performant as possible, we cache the plugin manager within the context of each thread. """ import importlib.metadata import logging from collections.abc import Iterator from contextvars import ContextVar from typing import Optional import pluggy from sqlfluff.core.plugin import plugin_base_name, project_name from sqlfluff.core.plugin.hookspecs import PluginSpec plugin_logger = logging.getLogger("sqlfluff.plugin") _plugin_manager: ContextVar[Optional[pluggy.PluginManager]] = ContextVar( "_plugin_manager", default=None ) plugins_loaded: ContextVar[bool] = ContextVar("plugins_loaded", default=False) # NOTE: The is_main_process context var is defined here, but # we rely on each parallel runner (found in `runner.py`) to # maintain the value of this variable. is_main_process: ContextVar[bool] = ContextVar("is_main_process", default=True) def _get_sqlfluff_version() -> str: """Get the SQLFluff package version from importlib. NOTE: At the stage of loading plugins, SQLFluff isn't fully initialised and so we can't use the normal methods. """ return importlib.metadata.version("sqlfluff") def _discover_plugins() -> Iterator[tuple[importlib.metadata.EntryPoint, str, str]]: """Uses the same mechanism as pluggy to introspect available plugins. This method is then intended to allow loading of plugins individually, for better error handling. """ for dist in list(importlib.metadata.distributions()): for ep in dist.entry_points: # Check it's a SQLFluff one if ep.group != project_name: continue yield ep, ep.name, dist.version def _load_plugin( plugin_manager: pluggy.PluginManager, entry_point: importlib.metadata.EntryPoint, plugin_name: str, plugin_version: str, ) -> None: """Loads a single plugin with a bit of error handling.""" # NOTE: If the plugin is already loaded, then .register() will fail, # so it's important that we check whether it's loaded at this point. if plugin_manager.get_plugin(plugin_name): # pragma: no cover plugin_logger.info("...already loaded") return None try: plugin = entry_point.load() except Exception as err: plugin_logger.error( "ERROR: Failed to load SQLFluff plugin " f"{plugin_name} version {plugin_version}. " "Check your packages are compatible with the current SQLFluff version " f"({_get_sqlfluff_version()})." f"\n\n {err!r}\n\n" ) return None plugin_manager.register(plugin, name=plugin_name) return None def get_plugin_manager() -> pluggy.PluginManager: """Initializes the PluginManager. NOTE: We cache the plugin manager as a global to avoid reloading all the plugins each time. """ plugin_manager = _plugin_manager.get() if plugin_manager: return plugin_manager plugin_manager = pluggy.PluginManager(plugin_base_name) plugin_manager.add_hookspecs(PluginSpec) # NOTE: We set the plugin manager before loading the # entrypoints. This is because when we load the entry # points, this function gets called again - and we only # want to load the entry points once! _plugin_manager.set(plugin_manager) # Discover available plugins and load them individually. # If any fail, log the issue and carry on. 
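    # A plugin typically advertises itself via an entry point in the
    # "sqlfluff" group, e.g. (an illustrative pyproject.toml snippet):
    #     [project.entry-points.sqlfluff]
    #     my_plugin = "my_plugin.plugin"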
    for entry_point, plugin_name, plugin_version in _discover_plugins():
        plugin_logger.info(f"Loading plugin {plugin_name} version {plugin_version}.")
        _load_plugin(plugin_manager, entry_point, plugin_name, plugin_version)

    # Once plugins are loaded we set a second context var
    # to indicate that loading is complete. Other parts of
    # the codebase can use this to detect whether it's safe.
    plugins_loaded.set(True)

    return plugin_manager


def purge_plugin_manager() -> None:
    """Purge the currently loaded plugin manager.

    NOTE: This method should not be used in normal SQLFluff
    operation, but exists so that in the test suite we can
    reliably clear the cached plugin manager and force plugins
    to be reloaded.
    """
    # Reset back to defaults.
    _plugin_manager.set(None)
    plugins_loaded.set(False)
sqlfluff-3.4.2/src/sqlfluff/core/plugin/lib.py000066400000000000000000000023651503426445100213310ustar00rootroot00000000000000"""Base implementation for the plugin."""

from typing import Any

from sqlfluff.core.config import load_config_resource
from sqlfluff.core.plugin import hookimpl
from sqlfluff.core.rules import BaseRule, ConfigInfo
from sqlfluff.core.rules.config_info import STANDARD_CONFIG_INFO_DICT
from sqlfluff.core.rules.loader import get_rules_from_path
from sqlfluff.core.templaters import RawTemplater, core_templaters


@hookimpl
def get_rules() -> list[type[BaseRule]]:
    """Get plugin rules.

    NOTE: All standard rules will eventually be loaded as
    plugins and so before 2.0.0, once all legacy plugin definitions
    are migrated, this function will be amended to return no rules.
    """
    return get_rules_from_path()


@hookimpl
def get_templaters() -> list[type[RawTemplater]]:
    """Get templaters."""
    templaters = list(t for t in core_templaters())
    return templaters


@hookimpl
def load_default_config() -> dict[str, Any]:
    """Loads the default configuration for the plugin."""
    return load_config_resource(
        package="sqlfluff.core",
        file_name="default_config.cfg",
    )


@hookimpl
def get_configs_info() -> dict[str, ConfigInfo]:
    """Get rule config validations and descriptions."""
    return STANDARD_CONFIG_INFO_DICT
sqlfluff-3.4.2/src/sqlfluff/core/rules/000077500000000000000000000000001503426445100200415ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/rules/__init__.py000066400000000000000000000025051503426445100221540ustar00rootroot00000000000000"""Configuration and examples for individual rules."""

from sqlfluff.core.plugin.host import get_plugin_manager
from sqlfluff.core.rules.base import (
    BaseRule,
    EvalResultType,
    LintResult,
    RuleGhost,
    RulePack,
    RuleSet,
)
from sqlfluff.core.rules.config_info import ConfigInfo, get_config_info
from sqlfluff.core.rules.context import RuleContext
from sqlfluff.core.rules.fix import LintFix


def _load_standard_rules() -> RuleSet:
    """Initialise the standard ruleset.

    We do this on each call so that dynamic rules changes are
    possible.
    """
    std_rule_set = RuleSet(name="standard", config_info=get_config_info())

    # Iterate through the rules list and register each rule with the standard set.
    for plugin_rules in get_plugin_manager().hook.get_rules():
        for rule in plugin_rules:
            std_rule_set.register(rule)

    return std_rule_set


def get_ruleset(name: str = "standard") -> RuleSet:
    """Get a ruleset by name."""
    std_rules = _load_standard_rules()
    lookup = {std_rules.name: std_rules}
    # Return a copy in case someone modifies the register.
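    # Illustrative usage (with a hypothetical `config` object):
    #     pack = get_ruleset().get_rulepack(config)
    #     codes = list(pack.codes())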
return lookup[name].copy() __all__ = ( "get_ruleset", "RuleSet", "RulePack", "BaseRule", "LintResult", "LintFix", "RuleContext", "RuleGhost", "EvalResultType", "ConfigInfo", ) sqlfluff-3.4.2/src/sqlfluff/core/rules/base.py000066400000000000000000001410401503426445100213250ustar00rootroot00000000000000"""Implements the base rule class. Rules crawl through the trees returned by the parser and evaluate particular rules. The intent is that it should be possible for the rules to be expressed as simply as possible, with as much of the complexity abstracted away. The evaluation function should take enough arguments that it can evaluate the position of the given segment in relation to its neighbors, and that the segment which finally "triggers" the error, should be the one that would be corrected OR if the rule relates to something that is missing, then it should flag on the segment FOLLOWING, the place that the desired element is missing. """ import bdb import copy import fnmatch import logging import pathlib import re from collections import defaultdict, namedtuple from collections.abc import Iterator, Sequence from dataclasses import dataclass from typing import ( TYPE_CHECKING, Any, ClassVar, DefaultDict, Optional, Union, ) import regex from sqlfluff.core.errors import SQLFluffUserError, SQLLintError from sqlfluff.core.helpers.string import split_comma_separated_string from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.plugin.host import is_main_process, plugins_loaded from sqlfluff.core.rules.config_info import ConfigInfo, get_config_info from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import BaseCrawler from sqlfluff.core.rules.fix import LintFix from sqlfluff.core.templaters.base import TemplatedFile # Best solution for generic types on older python versions # https://github.com/python/typeshed/issues/7855 if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.config import FluffConfig from sqlfluff.core.dialects import Dialect from sqlfluff.core.plugin.hookspecs import PluginSpec from sqlfluff.core.rules.noqa import IgnoreMask _LoggerAdapter = logging.LoggerAdapter[logging.Logger] else: _LoggerAdapter = logging.LoggerAdapter # The ghost of a rule (mostly used for testing) RuleGhost = namedtuple("RuleGhost", ["code", "name", "description"]) # Instantiate the rules logger rules_logger = logging.getLogger("sqlfluff.rules") linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class RuleLoggingAdapter(_LoggerAdapter): """A LoggingAdapter for rules which adds the code of the rule to it.""" def process(self, msg: str, kwargs: Any) -> tuple[str, Any]: """Add the code element to the logging message before emit.""" return "[{}] {}".format(self.extra["code"] if self.extra else "", msg), kwargs class LintResult: """A class to hold the results of a rule evaluation. Args: anchor (:obj:`BaseSegment`, optional): A segment which represents the *position* of the problem. NB: Each fix will also hold its own reference to position, so this position is mostly for alerting the user to where the *problem* is. fixes (:obj:`list` of :obj:`LintFix`, optional): An array of any fixes which would correct this issue. If not present then it's assumed that this issue will have to manually fixed. memory (:obj:`dict`, optional): An object which stores any working memory for the rule. The `memory` returned in any `LintResult` will be passed as an input to the next segment to be crawled. 
        description (:obj:`str`, optional): A description of the problem
            identified as part of this result. This will override the
            description of the rule as what gets reported to the user
            with the problem if provided.
        source (:obj:`str`, optional): A string identifier for what
            generated the result. Within larger libraries like reflow this
            can be useful for tracking where a result came from.

    """

    def __init__(
        self,
        anchor: Optional[BaseSegment] = None,
        fixes: Optional[list["LintFix"]] = None,
        memory: Optional[Any] = None,
        description: Optional[str] = None,
        source: Optional[str] = None,
    ):
        # An anchor of none, means no issue
        self.anchor = anchor
        # Fixes might be blank
        self.fixes = fixes or []
        # Memory is passed back in the linting result
        self.memory = memory
        # store a description_override for later
        self.description = description
        # Optional code for where the result came from
        self.source: str = source or ""

    def __repr__(self) -> str:
        if not self.anchor:
            return "LintResult()"
        # The "F" at the end is short for "fixes", to indicate how many there are.
        fix_coda = f"+{len(self.fixes)}F" if self.fixes else ""
        if self.description:
            if self.source:
                return (
                    f"LintResult({self.description} [{self.source}]"
                    f": {self.anchor}{fix_coda})"
                )
            return f"LintResult({self.description}: {self.anchor}{fix_coda})"
        return f"LintResult({self.anchor}{fix_coda})"

    def to_linting_error(self, rule: "BaseRule") -> Optional[SQLLintError]:
        """Convert a linting result to a :exc:`SQLLintError` if appropriate."""
        if self.anchor:
            # Allow description override from the LintResult
            description = self.description or rule.description
            return SQLLintError(
                rule=rule,
                segment=self.anchor,
                fixes=self.fixes,
                description=description,
            )

        return None


EvalResultType = Union[LintResult, list[LintResult], None]


class RuleMetaclass(type):
    """The metaclass for rules.

    This metaclass provides auto-enrichment of the
    rule docstring so that examples, groups, aliases and
    names are added.

    The reason we enrich the docstring is so that it can be
    picked up by autodoc and all be displayed in the sqlfluff
    docs.
    """

    # Precompile the regular expressions
    _doc_search_regex: ClassVar = re.compile(
        "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|"
        "\\s\\s{4}\\*\\*Configuration\\*\\*)",
        flags=re.MULTILINE,
    )
    _valid_classname_regex: ClassVar = regex.compile(
        r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z0-9]{4})"
    )
    _valid_rule_name_regex: ClassVar = regex.compile(r"[a-z][a-z\.\_]+")

    @staticmethod
    def _populate_code_and_description(
        name: str, class_dict: dict[str, Any]
    ) -> dict[str, Any]:
        """Extract and validate the rule code & description.

        We expect that rules are defined as classes with the name `Rule_XXXX`
        where `XXXX` is of the form `LLNN`, where L is a letter and N is a
        two digit number. For backward compatibility we also still support
        the legacy format of LNNN i.e. a single letter and three digit number.

        The two letters should be indicative of the grouping and focus of
        the rule. e.g. capitalisation rules have the code CP for CaPitalisation.

        If this receives classes by any other name, then it will raise a
        :exc:`ValueError`.
        """
        rule_name_match = RuleMetaclass._valid_classname_regex.match(name)
        # Validate the name
        if not rule_name_match:  # pragma: no cover
            raise SQLFluffUserError(
                f"Tried to define rule class with "
                f"unexpected format: {name}. Format should be: "
                "'Rule_PluginName_LL23' (for plugins) or "
                "`Rule_LL23` (for core rules)."
            )

        plugin_name, code = rule_name_match.groups()
        # If the docstring is multiline, then we extract just the summary.
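        # e.g. (illustrative) "Rule_LT01" parses to (None, "LT01"), while a
        # plugin rule class "Rule_MyPlugin_LT01" parses to ("MyPlugin", "LT01")
        # and ends up with the combined code "MyPlugin_LT01" below.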
description = class_dict["__doc__"].replace("``", "'").split("\n")[0] if plugin_name: code = f"{plugin_name}_{code}" class_dict["code"] = code class_dict["description"] = description return class_dict @staticmethod def _populate_docstring(name: str, class_dict: dict[str, Any]) -> dict[str, Any]: """Enrich the docstring in the class_dict. This takes the various defined values in the BaseRule class and uses them to populate documentation in the final class docstring so that it can be displayed in the sphinx docs. """ # Ensure that there _is_ a docstring. assert ( "__doc__" in class_dict ), f"Tried to define rule {name!r} without docstring." # Build up a buffer of entries to add to the docstring. fix_docs = ( " This rule is ``sqlfluff fix`` compatible.\n\n" if class_dict.get("is_fix_compatible", False) else "" ) name_docs = ( f" **Name**: ``{class_dict['name']}``\n\n" if class_dict.get("name", "") else "" ) alias_docs = ( (" **Aliases**: ``" + "``, ``".join(class_dict["aliases"]) + "``\n\n") if class_dict.get("aliases", []) else "" ) groups_docs = ( (" **Groups**: ``" + "``, ``".join(class_dict["groups"]) + "``\n\n") if class_dict.get("groups", []) else "" ) config_docs = "" # NOTE: We should only validate and add config keywords # into the docstring if the plugin loading methods have # fully completed (i.e. plugins_loaded.get() is True). if name == "BaseRule" or not is_main_process.get(): # Except if it's the base rule, or we're not in the main process/thread # in which case we shouldn't try and alter the docstrings anyway. # NOTE: The order of imports within child threads/processes is less # controllable, and so we should just avoid checking whether plugins # are already loaded. pass elif not plugins_loaded.get(): # Show a warning if a plugin has their imports set up in a suboptimal # way. The example plugin imports the rules in both ways, to test the # triggering of this warning. rules_logger.warning( f"Rule {name!r} has been imported before all plugins " "have been fully loaded. For best performance, plugins " "should import any rule definitions within their `get_rules()` " "method. Please update your plugin to remove this warning. See: " "https://docs.sqlfluff.com/en/stable/perma/plugin_dev.html" ) elif class_dict.get("config_keywords", []): config_docs = "\n **Configuration**\n" config_info = get_config_info() for keyword in sorted(class_dict["config_keywords"]): try: info_dict = config_info[keyword] except KeyError: # pragma: no cover # NOTE: For rule developers, please define config info values # within the specific rule bundle rather than in the central # `config_info` package unless the value is necessary for # multiple rules. raise KeyError( "Config value {!r} for rule {} is not configured in " "`config_info`.".format(keyword, name) ) config_docs += "\n * ``{}``: {}".format( keyword, info_dict["definition"] ) if ( config_docs[-1] != "." and config_docs[-1] != "?" and config_docs[-1] != "\n" ): config_docs += "." if "validation" in info_dict: config_docs += " Must be one of ``{}``.".format( info_dict["validation"] ) config_docs += "\n" all_docs = fix_docs + name_docs + alias_docs + groups_docs + config_docs # Modify the docstring using the search regex. class_dict["__doc__"] = RuleMetaclass._doc_search_regex.sub( f"\n\n{all_docs}\n\n\\1", class_dict["__doc__"], count=1 ) # If the inserted string is not now in the docstring - append it on # the end. This just means the regex didn't find a better place to # put it. 
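        # e.g. (illustrative) a fixable rule named "layout.spacing" in groups
        # ("all", "layout") gains docstring lines like:
        #     **Name**: ``layout.spacing``
        #     **Groups**: ``all``, ``layout``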
if all_docs not in class_dict["__doc__"]: class_dict["__doc__"] += f"\n\n{all_docs}" # Return the modified class_dict return class_dict def __new__( mcs, name: str, bases: list["BaseRule"], class_dict: dict[str, Any], ) -> "RuleMetaclass": """Generate a new class.""" # Optionally, groups may be inherited. At this stage of initialisation # they won't have been. Check parent classes if they exist. # names, aliases and description are less appropriate to inherit. # NOTE: This applies in particular to CP02, which inherits all groups # from CP01. If we don't do this, those groups don't show in the docs. for base in reversed(bases): if "groups" in class_dict: break elif base.groups: class_dict["groups"] = base.groups break # If the rule doesn't itself define `config_keywords`, check the parent # classes for them. If we don't do this then they'll still be available to # the rule, but they won't appear in the docs. for base in reversed(bases): if "config_keywords" in class_dict: break elif base.config_keywords: class_dict["config_keywords"] = base.config_keywords break class_dict = RuleMetaclass._populate_docstring(name, class_dict) # Don't try and infer code and description for the base classes if name not in ("BaseRule",): class_dict = RuleMetaclass._populate_code_and_description(name, class_dict) # Validate rule names rule_name = class_dict.get("name", "") if rule_name: if not RuleMetaclass._valid_rule_name_regex.match(rule_name): raise SQLFluffUserError( f"Tried to define rule with unexpected " f"name format: {rule_name}. Rule names should be lowercase " "and snake_case with optional `.` characters to indicate " "a namespace or grouping. e.g. `layout.spacing`." ) # Use the stock __new__ method now we've adjusted the docstring. # There are no overload variants of type.__new__ that are compatible, so # we ignore type checking in this case. return super().__new__(mcs, name, bases, class_dict) # type: ignore class BaseRule(metaclass=RuleMetaclass): """The base class for a rule. Args: code (:obj:`str`): The identifier for this rule, used in inclusion or exclusion. description (:obj:`str`): A human readable description of what this rule does. It will be displayed when any violations are found. """ _check_docstring = True _works_on_unparsable = True _adjust_anchors = False targets_templated = False # Some fix routines do their own checking for whether their fixes # are safe around templated elements. For those - the default # safety checks might be inappropriate. In those cases, set # template_safe_fixes to True. template_safe_fixes = False # Config settings supported for this rule. # See config_info.py for supported values. config_keywords: list[str] = [] # Lint loop / crawl behavior. When appropriate, rules can (and should) # override these values to make linting faster. crawl_behaviour: BaseCrawler # Rules can override this to specify "post". "Post" rules are those that are # not expected to trigger any downstream rules, e.g. capitalization fixes. # They run on two occasions: # - On the first pass of the main phase # - In a second linter pass after the main phase lint_phase = "main" # Groups attribute to be overwritten. groups: tuple[str, ...] = () # Name attribute to be overwritten. # NOTE: for backward compatibility we should handle the case # where no name is set gracefully. name: str = "" # Optional set of aliases for the rule. Most often used for old codes which # referred to this rule. aliases: tuple[str, ...] 
= () # NOTE: code and description are provided here as hints, but should not # be set directly. They are set automatically by the metaclass based on # the class _name_ when defined. code: str description: str # Should we document this rule as fixable? Used by the metaclass to add # a line to the docstring. is_fix_compatible = False # Add comma separated string to Base Rule to ensure that it uses the same # Configuration that is defined in the Config.py file split_comma_separated_string = staticmethod(split_comma_separated_string) def __init__(self, code: str, description: str, **kwargs: Any) -> None: self.description = description self.code = code # kwargs represents the config passed to the rule. Add all kwargs as class # attributes so they can be accessed in rules which inherit from this class for key, value in kwargs.items(): self.__dict__[key] = value # We also define a custom logger here, which also includes the code # of the rule in the logging. self.logger = RuleLoggingAdapter(rules_logger, {"code": code}) # Validate that declared configuration options exist for keyword in self.config_keywords: if keyword not in kwargs.keys(): raise ValueError( ( "Unrecognized config '{}' for Rule {}. If this " "is a new option, please add it to " "`default_config.cfg` or plugin specific config." ).format(keyword, code) ) @classmethod def get_config_ref(cls) -> str: """Return the config lookup ref for this rule. If a `name` is defined, it's the name - otherwise the code. The name is a much more understandable reference and so makes config files more readable. For backward compatibility however we also support the rule code for those without names. """ return cls.name if cls.name else cls.code def _eval(self, context: RuleContext) -> EvalResultType: """Evaluate this rule against the current context. This should indicate whether a linting violation has occurred and/or whether there is something to remember from this evaluation. Note that an evaluate function should always accept `**kwargs`, but if it relies on any available kwargs, it should explicitly call them out at definition. Returns: :obj:`LintResult`, list of :obj:`LintResult` or :obj:`None`. The reason that this method is called :meth:`_eval` and not `eval` is a bit of a hack with sphinx autodoc, to make it so that the rule documentation auto-generates nicely. """ raise NotImplementedError( ( "{} has not had its `eval` function defined. This is a problem " "with the rule setup." ).format(self.__class__.__name__) ) # pragma: no cover def crawl( self, tree: BaseSegment, dialect: "Dialect", fix: bool, templated_file: Optional["TemplatedFile"], ignore_mask: Optional["IgnoreMask"], fname: Optional[str], config: "FluffConfig", ) -> tuple[ list[SQLLintError], tuple[RawSegment, ...], list[LintFix], Optional[dict[str, Any]], ]: """Run the rule on a given tree. Returns: A tuple of (vs, raw_stack, fixes, memory) """ root_context = RuleContext( dialect=dialect, fix=fix, templated_file=templated_file, path=pathlib.Path(fname) if fname else None, segment=tree, config=config, ) vs: list[SQLLintError] = [] fixes: list[LintFix] = [] # Propagates memory from one rule _eval() to the next. 
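        # e.g. a rule can stash working state via `LintResult(memory=...)`
        # when evaluating one segment and read it back from `context.memory`
        # when evaluating the next.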
        memory = root_context.memory
        context = root_context
        for context in self.crawl_behaviour.crawl(root_context):
            try:
                context.memory = memory
                res = self._eval(context=context)
            except (bdb.BdbQuit, KeyboardInterrupt):  # pragma: no cover
                raise
            # Any exception at this point would halt the linter and
            # cause the user to get no results
            except Exception as e:
                # If a filename is present, include it in the critical exception.
                self.logger.critical(
                    (
                        f"Applying rule {self.code} to {fname!r} "
                        f"threw an Exception: {e}"
                        if fname
                        else f"Applying rule {self.code} threw an Exception: {e}"
                    ),
                    exc_info=True,
                )
                assert context.segment.pos_marker
                exception_line, _ = context.segment.pos_marker.source_position()
                self._log_critical_errors(e)
                vs.append(
                    SQLLintError(
                        rule=self,
                        segment=context.segment,
                        fixes=[],
                        description=(
                            f"Unexpected exception: {str(e)};\n"
                            "Could you open an issue at "
                            "https://github.com/sqlfluff/sqlfluff/issues ?\n"
                            "You can ignore this exception for now, by adding "
                            f"'-- noqa: {self.code}' at the end\n"
                            f"of line {exception_line}\n"
                        ),
                    )
                )
                return vs, context.raw_stack, fixes, context.memory

            new_lerrs: list[SQLLintError] = []
            new_fixes: list[LintFix] = []

            if res is None or res == []:
                # Assume this means no problems (also means no memory)
                pass
            elif isinstance(res, LintResult):
                # Extract any memory
                memory = res.memory
                self._adjust_anchors_for_fixes(context, res)
                self._process_lint_result(
                    res, templated_file, ignore_mask, new_lerrs, new_fixes, tree
                )
            elif isinstance(res, list) and all(
                isinstance(elem, LintResult) for elem in res
            ):
                # Extract any memory from the *last* one, assuming
                # it was the last to be added
                memory = res[-1].memory
                for elem in res:
                    self._adjust_anchors_for_fixes(context, elem)
                    self._process_lint_result(
                        elem, templated_file, ignore_mask, new_lerrs, new_fixes, tree
                    )
            else:  # pragma: no cover
                raise TypeError(
                    "Got unexpected result [{!r}] back from linting rule: {!r}".format(
                        res, self.code
                    )
                )

            for lerr in new_lerrs:
                self.logger.info("!! Violation Found: %r", lerr.description)
            if new_fixes:
                if not self.is_fix_compatible:  # pragma: no cover
                    rules_logger.error(
                        f"Rule {self.code} returned a fix but is not documented as "
                        "`is_fix_compatible`, you may encounter unusual fixing "
                        "behaviour. Report this as a bug to the developer of this rule."
                    )
                for lfix in new_fixes:
                    self.logger.info("!! Fix Proposed: %r", lfix)

            # Consume the new results
            vs += new_lerrs
            fixes += new_fixes
        return vs, context.raw_stack if context else tuple(), fixes, context.memory

    # HELPER METHODS --------

    @staticmethod
    def _log_critical_errors(error: Exception) -> None:  # pragma: no cover
        """This method is monkey patched into a "raise" for certain tests."""
        pass

    def _process_lint_result(
        self,
        res: LintResult,
        templated_file: Optional[TemplatedFile],
        ignore_mask: Optional["IgnoreMask"],
        new_lerrs: list[SQLLintError],
        new_fixes: list[LintFix],
        root: BaseSegment,
    ) -> None:
        # Unless the rule declares that it's already template safe, do the
        # safety checks.
        if not self.template_safe_fixes:
            self.discard_unsafe_fixes(res, templated_file)
        lerr = res.to_linting_error(rule=self)
        if not lerr:
            return None
        if ignore_mask:
            if not ignore_mask.ignore_masked_violations([lerr]):
                return None
        # Check whether this should be filtered out for being unparsable.
        # To do that we check the parents of the anchors (of the violation
        # and fixes) against the filter in the crawler.
        # NOTE: We use `.passes_filter` here to do the test for unparsable
        # to avoid duplicating code because that test is already implemented
        # there.
anchors = [lerr.segment] + [fix.anchor for fix in lerr.fixes] for anchor in anchors: if not self.crawl_behaviour.passes_filter(anchor): # pragma: no cover # NOTE: This clause is untested, because it's a hard to produce # edge case. The latter clause is much more likely. linter_logger.info( "Fix skipped due to anchor not passing filter: %s", anchor ) return None parent_stack = root.path_to(anchor) if not all( self.crawl_behaviour.passes_filter(ps.segment) for ps in parent_stack ): linter_logger.info( "Fix skipped due to parent of anchor not passing filter: %s", [ps.segment for ps in parent_stack], ) return None new_lerrs.append(lerr) new_fixes.extend(res.fixes) @staticmethod def filter_meta( segments: Sequence[BaseSegment], keep_meta: bool = False ) -> tuple[BaseSegment, ...]: """Filter the segments to non-meta. Or optionally the opposite if keep_meta is True. """ buff = [] for elem in segments: if elem.is_meta is keep_meta: buff.append(elem) return tuple(buff) @classmethod def get_parent_of( cls, segment: BaseSegment, root_segment: BaseSegment ) -> Optional[BaseSegment]: # pragma: no cover TODO? """Return the segment immediately containing segment. NB: This is recursive. Args: segment: The segment to look for. root_segment: Some known parent of the segment we're looking for (although likely not the direct parent in question). """ if segment in root_segment.segments: return root_segment elif root_segment.segments: # try each of the subsegments for sub in root_segment.segments: p = cls.get_parent_of(segment, sub) if p: return p # Not directly in the segment and # no subsegments to check. Return None. return None @staticmethod def discard_unsafe_fixes( lint_result: LintResult, templated_file: Optional[TemplatedFile] ) -> None: """Remove (discard) LintResult fixes if they are "unsafe". By removing its fixes, a LintResult will still be reported, but it will be treated as _unfixable_. """ if not lint_result.fixes or not templated_file: return # Check for fixes that touch templated code. for fix in lint_result.fixes: if fix.has_template_conflicts(templated_file): linter_logger.info( " * Discarding fixes that touch templated code: %s", lint_result.fixes, ) lint_result.fixes = [] return # Issue 3079: Fixes that span multiple template blocks are bad. Don't # permit them. block_indices: set[int] = set() for fix in lint_result.fixes: fix_slices = fix.get_fix_slices(templated_file, within_only=True) for fix_slice in fix_slices: # Ignore fix slices that exist only in the source. For purposes # of this check, it's not meaningful to say that a fix "touched" # one of these. if not fix_slice.is_source_only_slice(): block_indices.add(fix_slice.block_idx) if len(block_indices) > 1: linter_logger.info( " * Discarding fixes that span multiple template blocks: %s", lint_result.fixes, ) lint_result.fixes = [] return @classmethod def _adjust_anchors_for_fixes( cls, context: RuleContext, lint_result: LintResult ) -> None: """Makes simple fixes to the anchor position for fixes. Some rules return fixes where the anchor is too low in the tree. These are most often rules like LT02 and LT05 that make whitespace changes without a "deep" understanding of the parse structure. This function attempts to correct those issues automatically. It may not be perfect, but it should be an improvement over the old behaviour, where rules like LT02 often corrupted the parse tree, placing spaces in weird places that caused issues with other rules. For more context, see issue #1304. 
""" if not cls._adjust_anchors: return for fix in lint_result.fixes: if fix.anchor: fix.anchor = cls._choose_anchor_segment( # If no parent stack, that means the segment itself is the root ( context.parent_stack[0] if context.parent_stack else context.segment ), fix.edit_type, fix.anchor, ) @staticmethod def _choose_anchor_segment( root_segment: BaseSegment, edit_type: str, segment: BaseSegment, filter_meta: bool = False, ) -> BaseSegment: """Choose the anchor point for a lint fix, i.e. where to apply the fix. From a grammar perspective, segments near the leaf of the tree are generally less likely to allow general edits such as whitespace insertion. This function avoids such issues by taking a proposed anchor point (assumed to be near the leaf of the tree) and walking "up" the parse tree as long as the ancestor segments have the same start or end point (depending on the edit type) as "segment". This newly chosen anchor is more likely to be a valid anchor point for the fix. """ if edit_type not in ("create_before", "create_after"): return segment anchor: BaseSegment = segment child: BaseSegment = segment path: Optional[list[BaseSegment]] = ( [ps.segment for ps in root_segment.path_to(segment)] if root_segment else None ) assert path, f"No path found from {root_segment} to {segment}!" for seg in path[::-1]: # If the segment allows non code ends, then no problem. # We're done. This is usually the outer file segment. if seg.can_start_end_non_code: linter_logger.debug( "Stopping hoist at %s, as allows non code ends.", seg ) break # Which lists of children to check against. children_lists: list[list[BaseSegment]] = [] if filter_meta: # Optionally check against filtered (non-meta only) children. children_lists.append( [child for child in seg.segments if not child.is_meta] ) # Always check against the full set of children. children_lists.append(list(seg.segments)) children: list[BaseSegment] for children in children_lists: if edit_type == "create_before" and children[0] is child: linter_logger.debug( "Hoisting anchor from before %s to %s", anchor, seg ) anchor = seg assert anchor.raw.startswith(segment.raw) child = seg break elif edit_type == "create_after" and children[-1] is child: linter_logger.debug( "Hoisting anchor from after %s to %s", anchor, seg ) anchor = seg assert anchor.raw.endswith(segment.raw) child = seg break return anchor @dataclass(frozen=True) class RuleManifest: """Element in the rule register.""" code: str name: str description: str groups: tuple[str, ...] aliases: tuple[str, ...] rule_class: type[BaseRule] @dataclass class RulePack: """A bundle of rules to be applied. This contains a set of rules, post filtering but also contains the mapping required to interpret any noqa messages found in files. The reason for this object is that rules are filtered and instantiated into this pack in the main process when running in multi-processing mode so that user defined rules can be used without reference issues. Attributes: rules (:obj:`list` of :obj:`BaseRule`): A filtered list of instantiated rules to be applied to a given file. reference_map (:obj:`dict`): A mapping of rule references to the codes they refer to, e.g. `{"my_ref": {"LT01", "LT02"}}`. The references (i.e. the keys) may be codes, groups, aliases or names. The values of the mapping are sets of rule codes *only*. This object acts as a lookup to be able to translate selectors (which may contain diverse references) into a consolidated list of rule codes. 
    This mapping contains the full set of rules, rather than just the
    filtered set present in the `rules` attribute.
    """

    rules: list[BaseRule]
    reference_map: dict[str, set[str]]

    def codes(self) -> Iterator[str]:
        """Returns an iterator through the codes contained in the pack."""
        return (r.code for r in self.rules)


class RuleSet:
    """Class to define a ruleset.

    A rule set is instantiated on module load, but the references
    to each of its classes are instantiated at runtime. This means
    that configuration values can be passed to those rules live
    and be responsive to any changes in configuration from the
    path that the file is in.

    Rules should be fetched using the :meth:`get_rulelist` command which
    also handles any filtering (i.e. allowlisting and denylisting).

    New rules should be added to the instance of this class using the
    :meth:`register` decorator. That decorator registers the class, but also
    performs basic type and name-convention checks.

    The code for the rule will be parsed from the name, the description
    from the docstring. It is assumed that the eval function will be
    overridden by the subclass, and the parent class raises an error on
    this function if not overridden.
    """

    def __init__(self, name: str, config_info: dict[str, ConfigInfo]) -> None:
        self.name = name
        self.config_info = config_info
        self._register: dict[str, RuleManifest] = {}

    def _validate_config_options(
        self, config: "FluffConfig", rule_ref: Optional[str] = None
    ) -> None:
        """Ensure that all config options are valid.

        Config options can also be checked for a specific rule e.g. CP01.
        """
        rule_config = config.get_section("rules")
        for config_name, info_dict in self.config_info.items():
            config_option = (
                rule_config.get(config_name)
                if not rule_ref
                else rule_config.get(rule_ref).get(config_name)
            )
            valid_options = info_dict.get("validation")
            if (
                valid_options
                and config_option not in valid_options
                and config_option is not None
            ):
                raise ValueError(
                    (
                        "Invalid option '{}' for {} configuration. Must be one of {}"
                    ).format(
                        config_option,
                        config_name,
                        valid_options,
                    )
                )

    def register(
        self, cls: type[BaseRule], plugin: Optional["PluginSpec"] = None
    ) -> type[BaseRule]:
        """Decorate a class with this to add it to the ruleset.

        .. code-block:: python

           @myruleset.register
           class Rule_LT01(BaseRule):
               "Description of rule."

               def eval(self, **kwargs):
                   return LintResult()

        We expect that rules are defined as classes with the name `Rule_XXXX`
        where `XXXX` is of the form `LNNN`, where L is a letter (literally L for
        *linting* by default) and N is a three digit number.

        If this receives classes by any other name, then it will raise a
        :exc:`ValueError`.
        """
        code = cls.code
        # Check for code collisions.
        if code in self._register:  # pragma: no cover
            raise ValueError(
                "Rule {!r} has already been registered on RuleSet {!r}!".format(
                    code, self.name
                )
            )

        assert "all" in cls.groups, "Rule {!r} must belong to the 'all' group".format(
            code
        )

        self._register[code] = RuleManifest(
            code=code,
            name=cls.name,
            description=cls.description,
            groups=cls.groups,
            aliases=cls.aliases,
            rule_class=cls,
        )

        # Make sure we actually return the original class
        return cls

    def _expand_rule_refs(
        self, glob_list: list[str], reference_map: dict[str, set[str]]
    ) -> set[str]:
        """Expand a list of rule references into a list of rule codes.

        Returns:
            :obj:`set` of :obj:`str` rule codes.
        """
        expanded_rule_set: set[str] = set()
        for r in glob_list:
            # Is it a direct reference?
            if r in reference_map:
                expanded_rule_set.update(reference_map[r])
            # Otherwise treat as a glob expression on all references.
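            # e.g. (illustrative) a glob like "LT*" expands to every layout
            # code (LT01, LT02, ...), and "capitalisation.*" to any rule
            # names within that namespace.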
# NOTE: We expand _all_ references (i.e. groups, aliases, names # AND codes) so that we preserve the most backward compatibility # with existing references to legacy codes in config files. else: matched_refs = fnmatch.filter(reference_map.keys(), r) for matched in matched_refs: expanded_rule_set.update(reference_map[matched]) return expanded_rule_set def rule_reference_map(self) -> dict[str, set[str]]: """Generate a rule reference map for looking up rules. Generate the master reference map. The priority order is: codes > names > groups > aliases (i.e. if there's a collision between a name and an alias - we assume the alias is wrong) """ valid_codes: set[str] = set(self._register.keys()) reference_map: dict[str, set[str]] = {code: {code} for code in valid_codes} # Generate name map. name_map: dict[str, set[str]] = { manifest.name: {manifest.code} for manifest in self._register.values() if manifest.name } # Check collisions. name_collisions = set(name_map.keys()) & valid_codes if name_collisions: # pragma: no cover # NOTE: This clause is untested, because it's quite hard to actually # have a valid name which replicates a valid code. The name validation # will probably catch it first. rules_logger.warning( "The following defined rule names were found which collide " "with codes. Those names will not be available for selection: %s", name_collisions, ) # Incorporate (with existing references taking precedence). reference_map = {**name_map, **reference_map} # Generate the group map. group_map: DefaultDict[str, set[str]] = defaultdict(set) for manifest in self._register.values(): for group in manifest.groups: if group in reference_map: rules_logger.warning( "Rule %s defines group %r which is already defined as a " "name or code of %s. This group will not be available " "for use as a result of this collision.", manifest.code, group, reference_map[group], ) else: group_map[group].add(manifest.code) # Incorporate after all checks are done. reference_map = {**group_map, **reference_map} # Generate the alias map. alias_map: DefaultDict[str, set[str]] = defaultdict(set) for manifest in self._register.values(): for alias in manifest.aliases: if alias in reference_map: rules_logger.warning( "Rule %s defines alias %r which is already defined as a " "name, code or group of %s. This alias will " "not be available for use as a result of this collision.", manifest.code, alias, reference_map[alias], ) else: alias_map[alias].add(manifest.code) # Incorporate after all checks are done. return {**alias_map, **reference_map} def get_rulepack(self, config: "FluffConfig") -> RulePack: """Use the config to return the appropriate rules. We use the config both for allowlisting and denylisting, but also for configuring the rules given the given config. """ # Validate all generic rule configs self._validate_config_options(config) # Fetch config section: rules_config = config.get_section("rules") # Generate the master reference map. The priority order is: # codes > names > groups > aliases # (i.e. if there's a collision between a name and an # alias - we assume the alias is wrong.) valid_codes: set[str] = set(self._register.keys()) reference_map = self.rule_reference_map() valid_config_lookups = { manifest.rule_class.get_config_ref() for manifest in self._register.values() } # Validate config doesn't try to specify values for unknown rules. # NOTE: We _warn_ here rather than error. for unexpected_ref in [ # Filtering to dicts gives us the sections. 
            k
            for k, v in rules_config.items()
            if isinstance(v, dict)
            # Only keeping ones we don't expect
            if k not in valid_config_lookups
        ]:
            rules_logger.warning(
                "Rule configuration contains a section for unexpected "
                f"rule {unexpected_ref!r}. These values will be ignored."
            )
            # For convenience (and migration), if we do find a potential match
            # for the reference - add that as a warning.
            # NOTE: We don't actually accept config in these cases, even though
            # we could potentially match - because how to resolve _multiple_
            # matching config sections is ambiguous.
            if unexpected_ref in reference_map:
                referenced_codes = reference_map[unexpected_ref]
                if len(referenced_codes) == 1:
                    referenced_code = list(referenced_codes)[0]
                    referenced_name = self._register[referenced_code].name
                    config_ref = self._register[
                        referenced_code
                    ].rule_class.get_config_ref()
                    rules_logger.warning(
                        "The reference was however found as a match for rule "
                        f"{referenced_code} with name {referenced_name!r}. "
                        "SQLFluff assumes configuration for this rule will "
                        f"be specified in 'sqlfluff:rules:{config_ref}'."
                    )
                elif referenced_codes:
                    rules_logger.warning(
                        "The reference was found as a match for multiple rules: "
                        f"{referenced_codes}. Config should be specified by the "
                        "name of the relevant rule e.g. "
                        "'sqlfluff:rules:capitalisation.keywords'."
                    )

        # The lists here are lists of references, which might be codes,
        # names, aliases or groups.
        # We default the allowlist to all the rules if not set (i.e. not specifying
        # any rules, just means "all the rules").
        allowlist = config.get("rule_allowlist") or list(valid_codes)
        denylist = config.get("rule_denylist") or []

        allowlisted_unknown_rule_codes = [
            r
            for r in allowlist
            # Add valid groups to the register when searching for invalid rules _only_
            if not fnmatch.filter(reference_map.keys(), r)
        ]
        if any(allowlisted_unknown_rule_codes):
            rules_logger.warning(
                "Tried to allowlist unknown rule references: {!r}".format(
                    allowlisted_unknown_rule_codes
                )
            )

        denylisted_unknown_rule_codes = [
            r for r in denylist if not fnmatch.filter(reference_map.keys(), r)
        ]
        if any(denylisted_unknown_rule_codes):  # pragma: no cover
            rules_logger.warning(
                "Tried to denylist unknown rule references: {!r}".format(
                    denylisted_unknown_rule_codes
                )
            )

        keylist = sorted(self._register.keys())

        # First we expand the allowlist and denylist globs
        expanded_allowlist = self._expand_rule_refs(allowlist, reference_map)
        expanded_denylist = self._expand_rule_refs(denylist, reference_map)

        # Then we filter the rules
        keylist = [
            r for r in keylist if r in expanded_allowlist and r not in expanded_denylist
        ]

        # Construct the kwargs for each rule and instantiate in turn.
        instantiated_rules = []
        # Keep only config which isn't a section (for specific rule) (i.e. isn't a dict)
        # We'll handle those directly in the specific rule config section below.
        generic_rule_config = {
            k: v for k, v in rules_config.items() if not isinstance(v, dict)
        }
        for code in keylist:
            kwargs = {}
            rule_class = self._register[code].rule_class
            # Fetch the lookup code for the rule.
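            # (Hypothetical illustration: a rule with code "CP01" and name
            # "capitalisation.keywords" would return the config ref
            # "capitalisation.keywords" here, i.e. its settings would live
            # under the 'sqlfluff:rules:capitalisation.keywords' section.)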
rule_config_ref = rule_class.get_config_ref() specific_rule_config = config.get_section(("rules", rule_config_ref)) if generic_rule_config: kwargs.update(generic_rule_config) if specific_rule_config: # Validate specific rule config before adding self._validate_config_options(config, rule_config_ref) kwargs.update(specific_rule_config) kwargs["code"] = code # Allow variable substitution in making the description kwargs["description"] = self._register[code].description.format(**kwargs) # Instantiate when ready instantiated_rules.append(rule_class(**kwargs)) return RulePack(instantiated_rules, reference_map) def copy(self) -> "RuleSet": """Return a copy of self with a separate register.""" new_ruleset = copy.copy(self) new_ruleset._register = self._register.copy() return new_ruleset sqlfluff-3.4.2/src/sqlfluff/core/rules/config_info.py000066400000000000000000000071171503426445100227010ustar00rootroot00000000000000"""Documenting and validating rule configuration. Provide a mapping with default configuration options, which are common to multiple rules with information on valid inputs and definitions. This mapping is used to validate rule config inputs, as well as document rule configuration. It is assumed that most rule bundles will define their own additional sets of these which should be defined within that bundle rather than here. Unless your config value is used across multiple bundles, or is of more general wider use - please define it in the specific plugin rather than here. """ from typing import Optional, TypedDict, Union from sqlfluff.core.plugin.host import get_plugin_manager class ConfigInfo(TypedDict, total=False): """Type definition for a single config info value. This TypedDict defines the structure for configuration information used across SQLFluff rules. Each config value must have a definition, and may optionally include validation criteria. Args: definition: Required string containing a detailed description of the config option and its purpose. This should be clear enough for users to understand when and how to use the config. validation: Optional list or range of valid values for the config option. Can contain boolean, string, or integer values. If not provided, the config option accepts any value of its expected type. """ definition: str # NOTE: This type hint is a bit ugly, but necessary for now. # TODO: Tidy this up when we drop support for 3.9. validation: Optional[Union[list[Union[bool, str, int]], range]] STANDARD_CONFIG_INFO_DICT: dict[str, ConfigInfo] = { "force_enable": { "validation": [True, False], "definition": ( "Run this rule even for dialects where this rule is disabled by default." ), }, "ignore_words": { "definition": ("Comma separated list of words to ignore from rule"), }, "ignore_words_regex": { "definition": ( "Words to ignore from rule if they are a partial match for the regular " "expression. To ignore only full matches you can use ``^`` (beginning " "of text) and ``$`` (end of text). Due to regular expression operator " "precedence, it is good practice to use parentheses around everything " "between ``^`` and ``$``." ), }, "blocked_words": { "definition": ( "Optional, comma-separated list of blocked words which should not be used " "in statements." ), }, "blocked_regex": { "definition": ( "Optional, regex of blocked pattern which should not be used in statements." 
        ),
    },
    "match_source": {
        "definition": (
            "Optional, also match regex of blocked pattern before applying templating"
        ),
    },
    "case_sensitive": {
        "validation": [True, False],
        "definition": (
            "If ``False``, comparison is done case-insensitively. "
            "Defaults to ``True``."
        ),
    },
}


def get_config_info() -> dict[str, ConfigInfo]:
    """Get the config from core sqlfluff and sqlfluff plugins and merge them.

    NOTE: This should be the entry point into getting config info rather than
    importing the default set above, as many values are defined only in rule
    packages.
    """
    plugin_manager = get_plugin_manager()
    configs_info = plugin_manager.hook.get_configs_info()
    return {
        k: v for config_info_dict in configs_info for k, v in config_info_dict.items()
    }
sqlfluff-3.4.2/src/sqlfluff/core/rules/context.py000066400000000000000000000033141503426445100221000ustar00rootroot00000000000000"""Define RuleContext class."""

import pathlib
from dataclasses import dataclass, field
from typing import Any, Optional

from sqlfluff.core.config import FluffConfig
from sqlfluff.core.dialects import Dialect
from sqlfluff.core.parser import BaseSegment, RawSegment
from sqlfluff.core.templaters.base import TemplatedFile


@dataclass
class RuleContext:
    """Class for holding the context passed to rule eval functions."""

    # These don't change within a file.
    dialect: Dialect
    fix: bool
    templated_file: Optional[TemplatedFile]
    path: Optional[pathlib.Path]
    config: FluffConfig

    # These change within a file.
    # segment: The segment in question
    segment: BaseSegment
    # parent_stack: A tuple of the path from the root to this segment.
    parent_stack: tuple[BaseSegment, ...] = field(default=tuple())
    # raw_stack: All of the raw segments so far in the file
    raw_stack: tuple[RawSegment, ...] = field(default=tuple())
    # memory: Arbitrary storage for the rule
    memory: Any = field(default_factory=dict)
    # segment_idx: The index of this segment in the parent
    segment_idx: int = field(default=0)

    @property
    def siblings_pre(self) -> tuple[BaseSegment, ...]:  # pragma: no cover
        """Return sibling segments prior to self.segment."""
        if self.parent_stack:
            return self.parent_stack[-1].segments[: self.segment_idx]
        else:
            return tuple()

    @property
    def siblings_post(self) -> tuple[BaseSegment, ...]:
        """Return sibling segments after self.segment."""
        if self.parent_stack:
            return self.parent_stack[-1].segments[self.segment_idx + 1 :]
        else:
            return tuple()  # pragma: no cover
sqlfluff-3.4.2/src/sqlfluff/core/rules/crawlers.py000066400000000000000000000134071503426445100222420ustar00rootroot00000000000000"""Definitions of crawlers."""

from abc import ABC, abstractmethod
from collections.abc import Iterator
from typing import Any, cast

from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.core.parser.segments.raw import RawSegment
from sqlfluff.core.rules.context import RuleContext


class BaseCrawler(ABC):
    """The base interface for crawler classes."""

    def __init__(self, works_on_unparsable: bool = False, **kwargs: Any) -> None:
        self.works_on_unparsable = works_on_unparsable

    def passes_filter(self, segment: BaseSegment) -> bool:
        """Returns True if this segment is considered at all.

        This method is called during crawling but also in evaluating the
        anchors for linting violations and their fixes to make sure we
        don't get issues with linting sections of queries that we can't
        parse.
        See `BaseRule._process_lint_result()`.
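
        As a rough sketch of the intended behaviour (the segment values
        here are illustrative, not real fixtures):

        .. code-block:: python

            crawler = RootOnlyCrawler()  # works_on_unparsable=False
            crawler.passes_filter(unparsable_segment)  # -> False
            crawler.passes_filter(statement_segment)   # -> True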
""" return self.works_on_unparsable or not segment.is_type("unparsable") @abstractmethod def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process.""" class RootOnlyCrawler(BaseCrawler): """A crawler that doesn't crawl. This just yields one context on the root-level (topmost) segment of the file. """ def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process.""" if self.passes_filter(context.segment): yield context class SegmentSeekerCrawler(BaseCrawler): """A crawler that efficiently searches for specific segment types. The segment type(s) are specified on creation. """ def __init__( self, types: set[str], provide_raw_stack: bool = False, allow_recurse: bool = True, **kwargs: Any, ) -> None: self.types = types # Tracking a raw stack involves a lot of tuple manipulation, so we # only do it when required - otherwise we skip it. Rules can explicitly # request it when defining their crawler. self.provide_raw_stack = provide_raw_stack # If allow_recurse is false, then once a segment matches, none of it's # children will be returned. This is useful in cases where we might have # many start points, but one root segment will check any matching sub- # segments in the same evaluation. self.allow_recurse = allow_recurse super().__init__(**kwargs) def is_self_match(self, segment: BaseSegment) -> bool: """Does this segment match the relevant criteria.""" return segment.is_type(*self.types) def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process. We assume that segments are yielded by their parent. """ # Check whether we should consider this segment _or it's children_ # at all. self_match = False if not self.passes_filter(context.segment): if self.provide_raw_stack: # pragma: no cover context.raw_stack += tuple(context.segment.raw_segments) return # Then check the segment itself, yield if it's a match. if self.is_self_match(context.segment): self_match = True yield context # Check whether any children? # Abort if not - we've already yielded self. # NOTE: This same clause also works if we did match but aren't # allowed to recurse. if not context.segment.segments or (self_match and not self.allow_recurse): # Add self to raw stack first if so. if self.provide_raw_stack: context.raw_stack += (cast(RawSegment, context.segment),) return # Check whether one of the targets is present (set intersection) if not self.types & context.segment.descendant_type_set: # None present. Don't look further. # This aggressive pruning helps performance. # Track raw stack if required. if self.provide_raw_stack: context.raw_stack += tuple(context.segment.raw_segments) return # NOTE: Full context is not implemented yet. More dev work required # before everything will be available here. # Given we know that one is present in here somewhere, search for it. new_parent_stack = context.parent_stack + (context.segment,) for idx, child in enumerate(context.segment.segments): # For performance reasons, don't create a new RuleContext for # each segment; just modify the existing one in place. This # requires some careful bookkeeping, but it avoids creating a # HUGE number of short-lived RuleContext objects # (#linter loops x #rules x #segments). # Importantly, we're resetting values here, because they # may have been modified deeper in the recursion. 
            context.segment = child
            context.parent_stack = new_parent_stack
            context.segment_idx = idx
            yield from self.crawl(context)


class ParentOfSegmentCrawler(SegmentSeekerCrawler):
    """A crawler that efficiently searches for parents of specific segment types.

    The segment type(s) are specified on creation.
    """

    def is_self_match(self, segment: BaseSegment) -> bool:
        """Does this segment match the relevant criteria?

        We use the _direct_ child set here so that if any of the direct
        child segments match any of the types we're looking for, then
        we know that this segment is a parent of that kind of segment.
        """
        return bool(self.types & segment.direct_descendant_type_set)
sqlfluff-3.4.2/src/sqlfluff/core/rules/doc_decorators.py000066400000000000000000000030021503426445100234010ustar00rootroot00000000000000"""A collection of decorators to modify rule docstrings for Sphinx.

NOTE: All of these decorators are deprecated from SQLFluff 2.0.0 onwards.
They are still included to allow a transition period, but the functionality
is now packaged in the BaseRule class via the RuleMetaclass.
"""

from typing import TYPE_CHECKING, Any

from sqlfluff.core.rules.base import rules_logger  # noqa

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.rules.base import BaseRule


def document_fix_compatible(cls: type["BaseRule"]) -> type["BaseRule"]:
    """Mark the rule as fixable in the documentation."""
    rules_logger.warning(
        f"{cls.__name__} uses the @document_fix_compatible decorator "
        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
        "to resolve this warning."
    )
    return cls


def document_groups(cls: type["BaseRule"]) -> type["BaseRule"]:
    """Mark the rule's group membership in the documentation."""
    rules_logger.warning(
        f"{cls.__name__} uses the @document_groups decorator "
        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
        "to resolve this warning."
    )
    return cls


def document_configuration(cls: type["BaseRule"], **kwargs: Any) -> type["BaseRule"]:
    """Add a 'Configuration' section to a Rule docstring."""
    rules_logger.warning(
        f"{cls.__name__} uses the @document_configuration decorator "
        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
        "to resolve this warning."
    )
    return cls
sqlfluff-3.4.2/src/sqlfluff/core/rules/fix.py000066400000000000000000000433721503426445100212060ustar00rootroot00000000000000"""Defines the LintFix class, returned by rules when recommending a fix."""

import logging
from collections.abc import Iterable, Sized
from itertools import chain
from typing import Any, Optional, cast

from sqlfluff.core.parser import BaseSegment, PositionMarker, RawSegment, SourceFix
from sqlfluff.core.templaters import RawFileSlice, TemplatedFile

rules_logger = logging.getLogger("sqlfluff.rules")


class LintFix:
    """A class to hold a potential fix to a linting violation.

    Args:
        edit_type (:obj:`str`): One of `create_before`, `create_after`,
            `replace`, `delete` to indicate the kind of fix this represents.
        anchor (:obj:`BaseSegment`): A segment which represents
            the *position* that this fix should be applied at. For deletions
            it represents the segment to delete, for creations it implies the
            position to create at (with the existing element at this position
            to be moved *after* the edit), for a `replace` it implies the
            segment to be replaced.
        edit (iterable of :obj:`BaseSegment`, optional): For `replace` and
            `create` fixes, this holds the iterable of segments to create
            or replace at the given `anchor` point.
source (iterable of :obj:`BaseSegment`, optional): For `replace` and `create` fixes, this holds iterable of segments that provided code. IMPORTANT: The linter uses this to prevent copying material from templated areas. """ def __init__( self, edit_type: str, anchor: BaseSegment, edit: Optional[Iterable[BaseSegment]] = None, source: Optional[Iterable[BaseSegment]] = None, ) -> None: if edit_type not in ( "create_before", "create_after", "replace", "delete", ): # pragma: no cover raise ValueError(f"Unexpected edit_type: {edit_type}") self.edit_type = edit_type if not anchor: # pragma: no cover raise ValueError("Fixes must provide an anchor.") self.anchor = anchor self.edit: Optional[list[BaseSegment]] = None if edit is not None: # Copy all the elements of edit to stop contamination. # We're about to start stripping the position markers # off some of the elements and we don't want to end up # stripping the positions of the original elements of # the parsed structure. self.edit = [s.copy() for s in edit] # Check that any edits don't have a position marker set. # We should rely on realignment to make position markers. # Strip position markers of anything enriched, otherwise things can get # blurry for seg in self.edit: if seg.pos_marker: # Developer warning. rules_logger.debug( "Developer Note: Edit segment found with preset position " "marker. These should be unset and calculated later." ) seg.pos_marker = None # Once stripped, we shouldn't replace any markers because # later code may rely on them being accurate, which we # can't guarantee with edits. self.source = [seg for seg in source if seg.pos_marker] if source else [] # On creation of the fix we'll also validate the edits are non-trivial. if self.edit_type in ("create_before", "create_after"): assert self.edit, "A create fix must have an edit." # They should all have a non-zero raw. assert all( seg.raw for seg in self.edit ), f"Invalid edit found: {self.edit}." elif self.edit_type == "replace": assert ( self.edit != self.anchor ), "Fix created which replaces segment with itself." def is_just_source_edit(self, single_source_fix: bool = False) -> bool: """Return whether this a valid source only edit. Args: single_source_fix (:obj:`bool`): Check for a single source_fixes. """ if ( self.edit_type == "replace" and self.edit is not None and len(self.edit) == 1 and self.edit[0].raw == self.anchor.raw ): if single_source_fix: return len(self.edit[0].source_fixes) == 1 return True return False def __repr__(self) -> str: if self.edit_type == "delete": detail = f"delete:{self.anchor.raw!r}" elif self.edit_type in ("replace", "create_before", "create_after"): seg_list = cast(list[BaseSegment], self.edit) new_detail = "".join(s.raw for s in seg_list) if self.edit_type == "replace": if self.is_just_source_edit(): seg_list = cast(list[BaseSegment], self.edit) detail = f"src-edt:{seg_list[0].source_fixes!r}" else: detail = f"edt:{self.anchor.raw!r}->{new_detail!r}" else: detail = f"create:{new_detail!r}" else: detail = "" # pragma: no cover TODO? 
        return (
            f"<LintFix: {self.edit_type} "
            f"@{self.anchor.pos_marker} {detail}>"
        )

    def to_dict(self) -> dict[str, Any]:
        """Serialise this LintFix as a dict."""
        assert self.anchor
        _position = self.anchor.pos_marker
        assert _position
        _src_loc = _position.to_source_dict()
        if self.edit_type == "delete":
            return {
                "type": self.edit_type,
                "edit": "",
                **_src_loc,
            }
        elif self.edit_type == "replace" and self.is_just_source_edit(
            single_source_fix=True
        ):
            assert self.edit is not None
            assert len(self.edit) == 1
            assert len(self.edit[0].source_fixes) == 1
            _source_fix = self.edit[0].source_fixes[0]
            return {
                "type": self.edit_type,
                "edit": _source_fix.edit,
                **_position.templated_file.source_position_dict_from_slice(
                    _source_fix.source_slice
                ),
            }

        # Otherwise it's a standard creation or a replace.
        seg_list = cast(list[BaseSegment], self.edit)
        _edit = "".join(s.raw for s in seg_list)

        if self.edit_type == "create_before":
            # If we're creating _before_, the end point isn't relevant.
            # Make it the same as the start.
            _src_loc["end_line_no"] = _src_loc["start_line_no"]
            _src_loc["end_line_pos"] = _src_loc["start_line_pos"]
            _src_loc["end_file_pos"] = _src_loc["start_file_pos"]
        elif self.edit_type == "create_after":
            # If we're creating _after_, the start point isn't relevant.
            # Make it the same as the end.
            _src_loc["start_line_no"] = _src_loc["end_line_no"]
            _src_loc["start_line_pos"] = _src_loc["end_line_pos"]
            _src_loc["start_file_pos"] = _src_loc["end_file_pos"]

        return {
            "type": self.edit_type,
            "edit": _edit,
            **_src_loc,
        }

    def __eq__(self, other: object) -> bool:
        """Compare equality with another fix.

        A fix is equal to another if it is in the same place (position),
        with the same type and (if appropriate) the same edit values.
        """
        # We have to assert this here rather than in the type annotation so we
        # don't violate the Liskov substitution principle.
        # More context here: https://stackoverflow.com/a/37557540/11381493
        if not isinstance(other, LintFix):  # pragma: no cover
            return NotImplemented

        if not self.edit_type == other.edit_type:
            return False
        # For checking anchor equality, first check types.
        if not self.anchor.class_types == other.anchor.class_types:
            return False
        # If types match, check uuids to see if they're the same original segment.
        if self.anchor.uuid != other.anchor.uuid:
            return False
        # Then compare edits, here we only need to check the raw and source
        # fixes (positions are meaningless).
        # Only do this if we have edits.
        if self.edit:
            # We have to get weird here to appease mypy --strict
            # mypy seems to have a bug where even though we check above to make sure
            # self.edit is not None it still thinks it could be None when doing the
            # type check below. But if we use cast(list[BaseSegment], self.edit) then
            # it throws a redundant-cast error, because magically now it _does_ know
            # that self.edit is not None. So we have to cast to Sized for the len()
            # check and to Iterable[BaseSegment] for the looped check to make mypy
            # happy.
            # 1. Check lengths
            edit_list = cast(Sized, self.edit)
            other_list = cast(Sized, other.edit)
            if len(edit_list) != len(other_list):
                return False  # pragma: no cover
            # 2.
Zip and compare edit_list2 = cast(Iterable[BaseSegment], self.edit) other_list2 = cast(Iterable[BaseSegment], other.edit) for a, b in zip(edit_list2, other_list2): # Check raws if a.raw != b.raw: return False # Check source fixes if a.source_fixes != b.source_fixes: return False return True @classmethod def delete(cls, anchor_segment: BaseSegment) -> "LintFix": """Delete supplied anchor segment.""" return cls("delete", anchor_segment) @classmethod def replace( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Replace supplied anchor segment with the edit segments.""" return cls("replace", anchor_segment, edit_segments, source) @classmethod def create_before( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Create edit segments before the supplied anchor segment.""" return cls( "create_before", anchor_segment, edit_segments, source, ) @classmethod def create_after( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Create edit segments after the supplied anchor segment.""" return cls( "create_after", anchor_segment, edit_segments, source, ) def get_fix_slices( self, templated_file: TemplatedFile, within_only: bool ) -> set[RawFileSlice]: """Returns slices touched by the fix.""" # Goal: Find the raw slices touched by the fix. Two cases, based on # edit type: # 1. "delete", "replace": Raw slices touching the anchor segment. # 2. "create_before", "create_after": Raw slices encompassing the two # character positions surrounding the insertion point (**NOT** the # whole anchor segment, because we're not *touching* the anchor # segment, we're inserting **RELATIVE** to it. assert self.anchor.pos_marker, f"Anchor missing position marker: {self.anchor}" anchor_slice = self.anchor.pos_marker.templated_slice templated_slices = [anchor_slice] # If "within_only" is set for a "create_*" fix, the slice should only # include the area of code "within" the area of insertion, not the other # side. adjust_boundary = 1 if not within_only else 0 if self.edit_type == "create_before": # Consider the first position of the anchor segment and the # position just before it. templated_slices = [ slice(anchor_slice.start - 1, anchor_slice.start + adjust_boundary), ] elif self.edit_type == "create_after": # Consider the last position of the anchor segment and the # character just after it. templated_slices = [ slice(anchor_slice.stop - adjust_boundary, anchor_slice.stop + 1), ] elif ( self.edit_type == "replace" and self.anchor.pos_marker.source_slice.stop == self.anchor.pos_marker.source_slice.start ): # We're editing something with zero size in the source. This means # it likely _didn't exist_ in the source and so can be edited safely. # We return an empty set because this edit doesn't touch anything # in the source. return set() elif ( self.edit_type == "replace" and all(edit.is_type("raw") for edit in cast(list[RawSegment], self.edit)) and all(edit._source_fixes for edit in cast(list[RawSegment], self.edit)) ): # As an exception to the general rule about "replace" fixes (where # they're only safe if they don't touch a templated section at all), # source-only fixes are different. This clause handles that exception. 
            # So long as the fix is *purely* source-only we can assume that the
            # rule has done the relevant due diligence on what it's editing in
            # the source and just yield the source slices directly.
            # More complicated fixes that are a blend of source and templated
            # fixes are not currently supported (mostly because they've not
            # arisen yet!), so further work would be required to support them
            # elegantly.
            rules_logger.debug("Source only fix.")
            source_edit_slices = [
                fix.source_slice
                # We can assume they're all raw and all have source fixes, because we
                # check that above.
                for fix in chain.from_iterable(
                    cast(list[SourceFix], edit._source_fixes)
                    for edit in cast(list[RawSegment], self.edit)
                )
            ]
            if len(source_edit_slices) > 1:  # pragma: no cover
                raise NotImplementedError(
                    "Unable to handle multiple source only slices."
                )
            return set(
                templated_file.raw_slices_spanning_source_slice(source_edit_slices[0])
            )

        # TRICKY: For creations at the end of the file, there won't be an
        # existing slice. In this case, the function adds file_end_slice to the
        # result, as a sort of placeholder or sentinel value. We pass a literal
        # slice for "file_end_slice" so that later in this function, the LintFix
        # is interpreted as literal code. Otherwise, it could be interpreted as
        # a fix to *templated* code and incorrectly discarded.
        return self._raw_slices_from_templated_slices(
            templated_file,
            templated_slices,
            file_end_slice=RawFileSlice("", "literal", -1),
        )

    def has_template_conflicts(self, templated_file: TemplatedFile) -> bool:
        """Based on the fix slices, should we discard the fix?"""
        # Check for explicit source fixes.
        # TODO: This doesn't account for potentially more complicated source fixes.
        # If we're replacing a single segment with many *and* doing source fixes
        # then they will be discarded here as unsafe.
        if self.edit_type == "replace" and self.edit and len(self.edit) == 1:
            edit: BaseSegment = self.edit[0]
            if edit.raw == self.anchor.raw and edit.source_fixes:
                return False
        # Given fix slices, check for conflicts.
        check_fn = all if self.edit_type in ("create_before", "create_after") else any
        fix_slices = self.get_fix_slices(templated_file, within_only=False)
        result = check_fn(fs.slice_type == "templated" for fs in fix_slices)
        if result or not self.source:
            return result

        # Fix slices were okay. Now check template safety of the "source" field.
        templated_slices = [
            cast(PositionMarker, source.pos_marker).templated_slice
            for source in self.source
        ]
        raw_slices = self._raw_slices_from_templated_slices(
            templated_file, templated_slices
        )
        return any(fs.slice_type == "templated" for fs in raw_slices)

    @staticmethod
    def _raw_slices_from_templated_slices(
        templated_file: TemplatedFile,
        templated_slices: list[slice],
        file_end_slice: Optional[RawFileSlice] = None,
    ) -> set[RawFileSlice]:
        raw_slices: set[RawFileSlice] = set()
        for templated_slice in templated_slices:
            try:
                raw_slices.update(
                    templated_file.raw_slices_spanning_source_slice(
                        templated_file.templated_slice_to_source_slice(templated_slice)
                    )
                )
            except (IndexError, ValueError):
                # These errors will happen with "create_before" at the beginning
                # of the file or "create_after" at the end of the file. By
                # default, we ignore this situation. If the caller passed
                # "file_end_slice", add that to the result. In effect,
                # file_end_slice serves as a placeholder or sentinel value.
                if file_end_slice is not None:
                    raw_slices.add(file_end_slice)
        return raw_slices
sqlfluff-3.4.2/src/sqlfluff/core/rules/loader.py000066400000000000000000000030501503426445100216570ustar00rootroot00000000000000"""Methods to load rules."""

import os
from glob import glob
from importlib import import_module
from typing import TYPE_CHECKING

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.rules.base import BaseRule


def get_rules_from_path(
    # All rule files are expected in the format of L*.py
    rules_path: str = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../../rules", "L*.py")
    ),
    base_module: str = "sqlfluff.rules",
) -> list[type["BaseRule"]]:
    """Reads all of the Rule classes from a path into a list."""
    # Create a rules dictionary for importing in
    # sqlfluff/src/sqlfluff/core/rules/__init__.py
    rules = []
    for module in sorted(glob(rules_path)):
        # Manipulate the module path to extract the filename without the .py
        rule_id = os.path.splitext(os.path.basename(module))[0]
        # All rule classes are expected in the format of Rule_L*
        rule_class_name = f"Rule_{rule_id}"
        # NOTE: We import the module outside of the try clause to
        # properly catch any import errors.
        rule_module = import_module(f"{base_module}.{rule_id}")
        try:
            rule_class = getattr(rule_module, rule_class_name)
        except AttributeError as e:
            raise AttributeError(
                "Rule classes must be named in the format of Rule_*. "
                f"[{rule_class_name}]"
            ) from e
        # Add the rules to the rules dictionary for
        # sqlfluff/src/sqlfluff/core/rules/__init__.py
        rules.append(rule_class)
    return rules
sqlfluff-3.4.2/src/sqlfluff/core/rules/noqa.py000066400000000000000000000325211503426445100213540ustar00rootroot00000000000000"""Defines container classes for handling noqa comments."""

import fnmatch
import logging
from dataclasses import dataclass
from typing import Optional, Union, cast

from sqlfluff.core.errors import SQLBaseError, SQLParseError, SQLUnusedNoQaWarning
from sqlfluff.core.parser import BaseSegment, RawSegment, RegexLexer

# Instantiate the linter logger
linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")


@dataclass
class NoQaDirective:
    """Parsed version of a 'noqa' comment."""

    line_no: int  # Source line number
    line_pos: int  # Source line position
    rules: Optional[tuple[str, ...]]  # Affected rule names
    action: Optional[str]  # "enable", "disable", or "None"
    raw_str: str = ""  # The raw representation of the directive for warnings.
    used: bool = False  # Has it been used.

    def _filter_violations_single_line(
        self, violations: list[SQLBaseError]
    ) -> list[SQLBaseError]:
        """Filter a list of violations based on this single line noqa.

        Also record whether this class was _used_ in any of that filtering.

        The "ignore" list is assumed to ONLY contain NoQaDirectives with
        action=None.
        """
        assert not self.action
        matched_violations = [
            v
            for v in violations
            if (
                v.line_no == self.line_no
                and (self.rules is None or v.rule_code() in self.rules)
            )
        ]
        if matched_violations:
            # Successful match, mark ignore as used.
            self.used = True
            return [v for v in violations if v not in matched_violations]
        else:
            return violations


class IgnoreMask:
    """Structure to hold a set of 'noqa' directives."""

    def __init__(self, ignores: list[NoQaDirective]):
        self._ignore_list = ignores

    def __repr__(self) -> str:  # pragma: no cover
        return "<IgnoreMask>"

    # ### Construction class methods.
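
    # As a rough sketch, the comment forms parsed below include
    # (the rule codes are illustrative):
    #
    #   -- noqa
    #   -- noqa: LT01,LT02
    #   -- noqa: disable=LT05 ... -- noqa: enable=LT05
    #   -- noqa: disable=all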
    @staticmethod
    def _parse_noqa(
        comment: str,
        line_no: int,
        line_pos: int,
        reference_map: dict[str, set[str]],
    ) -> Union[NoQaDirective, SQLParseError, None]:
        """Extract ignore mask entries from a comment string."""
        # Also trim any whitespace afterward
        # Comment lines can also have noqa e.g.
        # --dafhsdkfwdiruweksdkjdaffldfsdlfjksd -- noqa: LT05
        # Therefore extract last possible inline ignore.
        comment = [c.strip() for c in comment.split("--")][-1]

        if comment.startswith("noqa"):
            # This is an ignore identifier
            comment_remainder = comment[4:]
            if comment_remainder:
                if not comment_remainder.startswith(":"):
                    return SQLParseError(
                        "Malformed 'noqa' section. Expected 'noqa: <rule>[,...]",
                        line_no=line_no,
                    )
                comment_remainder = comment_remainder[1:].strip()
            if comment_remainder:
                action: Optional[str]
                if "=" in comment_remainder:
                    action, rule_part = comment_remainder.split("=", 1)
                    if action not in {"disable", "enable"}:  # pragma: no cover
                        return SQLParseError(
                            "Malformed 'noqa' section. "
                            "Expected 'noqa: enable=<rule>[,...] | all' "
                            "or 'noqa: disable=<rule>[,...] | all",
                            line_no=line_no,
                        )
                else:
                    action = None
                    rule_part = comment_remainder
                    if rule_part in {"disable", "enable"}:
                        return SQLParseError(
                            "Malformed 'noqa' section. "
                            "Expected 'noqa: enable=<rule>[,...] | all' "
                            "or 'noqa: disable=<rule>[,...] | all",
                            line_no=line_no,
                        )
                rules: Optional[tuple[str, ...]]
                if rule_part != "all":
                    # Rules can be globs therefore we compare to the rule_set to
                    # expand the globs.
                    unexpanded_rules = tuple(
                        r.strip() for r in rule_part.split(",")
                    )
                    # We use a set to do natural deduplication.
                    expanded_rules: set[str] = set()
                    for r in unexpanded_rules:
                        matched = False
                        for expanded in (
                            reference_map[x]
                            for x in fnmatch.filter(reference_map.keys(), r)
                        ):
                            expanded_rules |= expanded
                            matched = True

                        if not matched:
                            # We were unable to expand the glob.
                            # Therefore assume the user is referencing
                            # a special error type (e.g. PRS, LXR, or TMP)
                            # and add this to the list of rules to ignore.
                            expanded_rules.add(r)

                    # Sort for consistency
                    rules = tuple(sorted(expanded_rules))
                else:
                    rules = None
                return NoQaDirective(line_no, line_pos, rules, action, comment)
            return NoQaDirective(line_no, line_pos, None, None, comment)
        return None

    @classmethod
    def _extract_ignore_from_comment(
        cls,
        comment: RawSegment,
        reference_map: dict[str, set[str]],
    ) -> Union[NoQaDirective, SQLParseError, None]:
        """Extract ignore mask entries from a comment segment."""
        # Also trim any whitespace
        comment_content = comment.raw_trimmed().strip()
        # If we have leading or trailing block comment markers, also strip them.
        # NOTE: We need to strip block comment markers from the start
        # to ensure that noqa directives in the following form are followed:
        # /* noqa: disable=all */
        if comment_content.endswith("*/"):
            comment_content = comment_content[:-2].rstrip()
        if comment_content.startswith("/*"):
            comment_content = comment_content[2:].lstrip()
        comment_line, comment_pos = comment.pos_marker.source_position()
        result = cls._parse_noqa(
            comment_content, comment_line, comment_pos, reference_map
        )
        if isinstance(result, SQLParseError):
            result.segment = comment
        return result

    @classmethod
    def from_tree(
        cls,
        tree: BaseSegment,
        reference_map: dict[str, set[str]],
    ) -> tuple["IgnoreMask", list[SQLBaseError]]:
        """Look for inline ignore comments and return NoQaDirectives."""
        ignore_buff: list[NoQaDirective] = []
        violations: list[SQLBaseError] = []
        for comment in tree.recursive_crawl("comment"):
            if comment.is_type("inline_comment", "block_comment"):
                ignore_entry = cls._extract_ignore_from_comment(
                    cast(RawSegment, comment), reference_map
                )
                if isinstance(ignore_entry, SQLParseError):
                    violations.append(ignore_entry)
                elif ignore_entry:
                    ignore_buff.append(ignore_entry)
        if ignore_buff:
            linter_logger.info("Parsed noqa directives from file: %r", ignore_buff)
        return cls(ignore_buff), violations

    @classmethod
    def from_source(
        cls,
        source: str,
        inline_comment_regex: RegexLexer,
        reference_map: dict[str, set[str]],
    ) -> tuple["IgnoreMask", list[SQLBaseError]]:
        """Look for inline ignore comments and return NoQaDirectives.

        Very similar to .from_tree(), but can be run on raw source
        (i.e. does not require the code to have parsed successfully).
        """
        ignore_buff: list[NoQaDirective] = []
        violations: list[SQLBaseError] = []
        for idx, line in enumerate(source.split("\n")):
            match = inline_comment_regex.search(line) if line else None
            if match:
                ignore_entry = cls._parse_noqa(
                    line[match[0] : match[1]], idx + 1, match[0], reference_map
                )
                if isinstance(ignore_entry, SQLParseError):
                    violations.append(ignore_entry)  # pragma: no cover
                elif ignore_entry:
                    ignore_buff.append(ignore_entry)
        if ignore_buff:
            linter_logger.info("Parsed noqa directives from file: %r", ignore_buff)
        return cls(ignore_buff), violations

    # ### Application methods.

    @staticmethod
    def _ignore_masked_violations_single_line(
        violations: list[SQLBaseError], ignore_mask: list[NoQaDirective]
    ) -> list[SQLBaseError]:
        """Filter a list of violations based on this single line noqa.

        The "ignore" list is assumed to ONLY contain NoQaDirectives with
        action=None.
        """
        for ignore in ignore_mask:
            violations = ignore._filter_violations_single_line(violations)
        return violations

    @staticmethod
    def _should_ignore_violation_line_range(
        line_no: int, ignore_rules: list[NoQaDirective]
    ) -> tuple[bool, Optional[NoQaDirective]]:
        """Returns whether to ignore a violation at line_no.

        Loop through the NoQaDirectives to find the state of things at
        line_no. Assumptions about "ignore_rules":
        - Contains directives for only ONE RULE, i.e. the rule that was
          violated at line_no
        - Sorted in ascending order by line number
        """
        ignore = False
        last_ignore: Optional[NoQaDirective] = None
        for idx, ignore_rule in enumerate(ignore_rules):
            if ignore_rule.line_no > line_no:
                # Peek at the next rule to see if it's a matching enable
                # and if it is, then mark it as used.
                if ignore_rule.action == "enable":
                    # Mark as used
                    ignore_rule.used = True
                break

            if ignore_rule.action == "enable":
                # First, if this enable did counteract a
                # corresponding _disable_, then it has been _used_.
                if last_ignore:
                    ignore_rule.used = True
                last_ignore = None
                ignore = False
            elif ignore_rule.action == "disable":
                last_ignore = ignore_rule
                ignore = True

        return ignore, last_ignore

    @classmethod
    def _ignore_masked_violations_line_range(
        cls, violations: list[SQLBaseError], ignore_mask: list[NoQaDirective]
    ) -> list[SQLBaseError]:
        """Returns whether to ignore error for line-range directives.

        The "ignore" list is assumed to ONLY contain NoQaDirectives where
        action is "enable" or "disable".
        """
        result = []
        for v in violations:
            # Find the directives that affect the violated rule "v", either
            # because they specifically reference it or because they don't
            # specify a list of rules, thus affecting ALL rules.
            ignore_rule = sorted(
                (
                    ignore
                    for ignore in ignore_mask
                    if not ignore.rules or (v.rule_code() in ignore.rules)
                ),
                key=lambda ignore: ignore.line_no,
            )
            # Determine whether to ignore the violation, based on the relevant
            # enable/disable directives.
            ignore, last_ignore = cls._should_ignore_violation_line_range(
                v.line_no, ignore_rule
            )
            if not ignore:
                result.append(v)
            # If there was a previous ignore which meant that we filtered out
            # a violation, then mark it as used.
            elif last_ignore:
                last_ignore.used = True

        return result

    def ignore_masked_violations(
        self, violations: list[SQLBaseError]
    ) -> list[SQLBaseError]:
        """Remove any violations specified by ignore_mask.

        This involves two steps:
        1. Filter out violations affected by single-line "noqa" directives.
        2. Filter out violations affected by disable/enable "noqa" directives.
        """
        ignore_specific = [ignore for ignore in self._ignore_list if not ignore.action]
        ignore_range = [ignore for ignore in self._ignore_list if ignore.action]
        violations = self._ignore_masked_violations_single_line(
            violations, ignore_specific
        )
        violations = self._ignore_masked_violations_line_range(violations, ignore_range)
        return violations

    def generate_warnings_for_unused(self) -> list[SQLBaseError]:
        """Generates warnings for any unused NoQaDirectives."""
        return [
            SQLUnusedNoQaWarning(
                line_no=ignore.line_no,
                line_pos=ignore.line_pos,
                description=f"Unused noqa: {ignore.raw_str!r}",
            )
            for ignore in self._ignore_list
            if not ignore.used
        ]
sqlfluff-3.4.2/src/sqlfluff/core/rules/reference.py000066400000000000000000000021441503426445100223520ustar00rootroot00000000000000"""Components for working with object and table references."""

from collections.abc import Sequence


def object_ref_matches_table(
    possible_references: Sequence[tuple[str, ...]], targets: Sequence[tuple[str, ...]]
) -> bool:
    """Return True if any of the possible references matches a target."""
    # Simple case: If there are no references, assume okay
    # (i.e. no mismatch = good).
    if not possible_references:
        return True
    # Simple case: Reference exactly matches a target.
    if any(pr in targets for pr in possible_references):
        return True
    # Tricky case: If one is shorter than the other, check for a suffix match.
    # (Note this is an "optimistic" check, i.e. it assumes the ignored parts of
    # the target don't matter. In a SQL context, this is basically assuming
    # there was an earlier "USE <schema>" or similar directive.)
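    # (Hypothetical illustration: the reference ("my_table",) matches the
    # target ("my_schema", "my_table"), because the shorter tuple is a
    # suffix of the longer one.)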
for pr in possible_references: for t in targets: if (len(pr) < len(t) and pr == t[-len(pr) :]) or ( len(t) < len(pr) and t == pr[-len(t) :] ): return True return False sqlfluff-3.4.2/src/sqlfluff/core/templaters/000077500000000000000000000000001503426445100210675ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/templaters/__init__.py000066400000000000000000000015371503426445100232060ustar00rootroot00000000000000"""Templater Code.""" from collections.abc import Iterator # Although these shouldn't usually be instantiated from here # we import them to make sure they get registered. from sqlfluff.core.templaters.base import RawFileSlice, RawTemplater, TemplatedFile from sqlfluff.core.templaters.jinja import JinjaTemplater from sqlfluff.core.templaters.placeholder import PlaceholderTemplater from sqlfluff.core.templaters.python import PythonTemplater def core_templaters() -> Iterator[type[RawTemplater]]: """Returns the templater tuples for the core templaters.""" yield from [ RawTemplater, JinjaTemplater, PythonTemplater, PlaceholderTemplater, ] __all__ = ( "RawFileSlice", "TemplatedFile", "RawTemplater", "JinjaTemplater", "PythonTemplater", "PlaceholderTemplater", "core_templaters", ) sqlfluff-3.4.2/src/sqlfluff/core/templaters/base.py000066400000000000000000000605411503426445100223610ustar00rootroot00000000000000"""Defines the templaters.""" import logging from bisect import bisect_left from collections.abc import Iterable, Iterator from typing import ( Any, Callable, NamedTuple, Optional, TypeVar, ) from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLFluffSkipFile, SQLTemplaterError from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.helpers.slice import zero_slice # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") def iter_indices_of_newlines(raw_str: str) -> Iterator[int]: """Find the indices of all newlines in a string.""" init_idx = -1 while True: nl_pos = raw_str.find("\n", init_idx + 1) if nl_pos >= 0: yield nl_pos init_idx = nl_pos else: break # pragma: no cover TODO? T = TypeVar("T") def large_file_check(func: Callable[..., T]) -> Callable[..., T]: """Raise an exception if the file is over a defined size. Designed to be implemented as a decorator on `.process()` methods. If no config is provided or the relevant config value is set to zero then the check is skipped. """ def _wrapped( self: Any, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> T: if config: limit = config.get("large_file_skip_char_limit") if limit: templater_logger.warning( "The config value large_file_skip_char_limit was found set. " "This feature will be removed in a future release, please " "use the more efficient 'large_file_skip_byte_limit' instead." ) if limit and len(in_str) > limit: raise SQLFluffSkipFile( f"Length of file {fname!r} is over {limit} characters. " "Skipping to avoid parser lock. Users can increase this limit " "in their config by setting the 'large_file_skip_char_limit' " "value, or disable by setting it to zero." ) return func( self, in_str=in_str, fname=fname, config=config, formatter=formatter ) return _wrapped class RawFileSlice(NamedTuple): """A slice referring to a raw file.""" raw: str # Source string slice_type: str source_idx: int # Offset from beginning of source string # Block index, incremented on start or end block tags, e.g. "if", "for". 
    # This is used in `BaseRule.discard_unsafe_fixes()` to reject any fixes
    # which span multiple templated blocks.
    block_idx: int = 0
    # The command of a templated tag, e.g. "if", "for"
    # This is used in template tracing as a kind of cache to identify the kind
    # of template element this is without having to re-extract it each time.
    tag: Optional[str] = None

    def end_source_idx(self) -> int:
        """Return the closing index of this slice."""
        return self.source_idx + len(self.raw)

    def source_slice(self) -> slice:
        """Return a slice object for this slice."""
        return slice(self.source_idx, self.end_source_idx())

    def is_source_only_slice(self) -> bool:
        """Based on its slice_type, does it only appear in the *source*?

        There are some slice types which are automatically source only.
        There are *also* some which are source only because they render
        to an empty string.
        """
        # TODO: should any new logic go here?
        return self.slice_type in ("comment", "block_end", "block_start", "block_mid")


class TemplatedFileSlice(NamedTuple):
    """A slice referring to a templated file."""

    slice_type: str
    source_slice: slice
    templated_slice: slice


class RawSliceBlockInfo(NamedTuple):
    """Template-related info about the raw slices in a TemplatedFile."""

    # Given a raw file slice, return its block ID. Useful for identifying
    # regions of a file with respect to template control structures (for, if).
    block_ids: dict[RawFileSlice, int]

    # List of block IDs that have the following characteristics:
    # - Loop body
    # - Containing only literals (no templating)
    literal_only_loops: list[int]


class TemplatedFile:
    """A templated SQL file.

    This is the response of a templater's .process() method
    and contains both references to the original file and also
    the capability to split up that file when lexing.
    """

    def __init__(
        self,
        source_str: str,
        fname: str,
        templated_str: Optional[str] = None,
        sliced_file: Optional[list[TemplatedFileSlice]] = None,
        raw_sliced: Optional[list[RawFileSlice]] = None,
    ):
        """Initialise the TemplatedFile.

        If no templated_str is provided then we assume that the file is NOT
        templated and that the templated view is the same as the source view.

        Args:
            source_str (str): The source string.
            fname (str): The file name.
            templated_str (Optional[str], optional): The templated string.
                Defaults to None.
            sliced_file (Optional[list[TemplatedFileSlice]], optional): The
                sliced file. Defaults to None.
            raw_sliced (Optional[list[RawFileSlice]], optional): The raw
                sliced file. Defaults to None.
        """
        self.source_str = source_str
        # An empty string is still allowed as the templated string.
        self.templated_str = source_str if templated_str is None else templated_str
        # If no fname, we assume this is from a string or stdin.
        self.fname = fname
        # Assume that no sliced_file means the file is not templated.
        self.sliced_file: list[TemplatedFileSlice]
        if sliced_file is None:
            if self.templated_str != self.source_str:  # pragma: no cover
                raise ValueError("Cannot instantiate a templated file unsliced!")
            # If we get here and we don't have sliced files,
            # then it's raw, so create them.
            self.sliced_file = [
                TemplatedFileSlice(
                    "literal", slice(0, len(source_str)), slice(0, len(source_str))
                )
            ]
            assert (
                raw_sliced is None
            ), "Templated file was not sliced, but raw slices were provided."
            self.raw_sliced: list[RawFileSlice] = [
                RawFileSlice(source_str, "literal", 0)
            ]
        else:
            self.sliced_file = sliced_file
            assert raw_sliced is not None, "Templated file was sliced, but not raw."
            self.raw_sliced = raw_sliced

        # Precalculate newlines, character positions.
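        # (For example, for a source string of 'a\nbc\n' the newline indices
        # would be [1, 4]; these feed the bisect-based lookups in
        # get_line_pos_of_char_pos below.)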
        self._source_newlines = list(iter_indices_of_newlines(self.source_str))
        self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))

        # Consistency check raw string and slices.
        pos = 0
        rfs: RawFileSlice
        for rfs in self.raw_sliced:
            assert rfs.source_idx == pos, (
                "TemplatedFile. Consistency fail on running source length"
                f": {pos} != {rfs.source_idx}"
            )
            pos += len(rfs.raw)
        assert pos == len(self.source_str), (
            "TemplatedFile. Consistency fail on total source length"
            f": {pos} != {len(self.source_str)}"
        )

        # Consistency check templated string and slices.
        previous_slice: Optional[TemplatedFileSlice] = None
        tfs: Optional[TemplatedFileSlice] = None
        for tfs in self.sliced_file:
            if previous_slice:
                if tfs.templated_slice.start != previous_slice.templated_slice.stop:
                    raise SQLFluffSkipFile(  # pragma: no cover
                        "Templated slices found to be non-contiguous. "
                        f"{tfs.templated_slice} (starting"
                        f" {self.templated_str[tfs.templated_slice]!r})"
                        f" does not follow {previous_slice.templated_slice} "
                        "(starting "
                        f"{self.templated_str[previous_slice.templated_slice]!r}"
                        ")"
                    )
            else:
                if tfs.templated_slice.start != 0:
                    raise SQLFluffSkipFile(  # pragma: no cover
                        "First Templated slice not started at index 0 "
                        f"(found slice {tfs.templated_slice})"
                    )
            previous_slice = tfs
        if self.sliced_file and templated_str is not None and tfs:
            if tfs.templated_slice.stop != len(templated_str):
                raise SQLFluffSkipFile(  # pragma: no cover
                    "Length of templated file mismatch with final slice: "
                    f"{len(templated_str)} != {tfs.templated_slice.stop}."
                )

    @classmethod
    def from_string(cls, raw: str) -> "TemplatedFile":
        """Create TemplatedFile from a string."""
        return cls(source_str=raw, fname="<string>")

    def __repr__(self) -> str:  # pragma: no cover TODO?
        """Return a string representation of the 'TemplatedFile' object."""
        return "<TemplatedFile>"

    def __str__(self) -> str:
        """Return the templated file if coerced to string."""
        return self.templated_str

    def get_line_pos_of_char_pos(
        self, char_pos: int, source: bool = True
    ) -> tuple[int, int]:
        """Get the line number and position of a point in the source file.

        Args:
            char_pos: The character position in the relevant file.
            source: Are we checking the source file (as opposed to the
                templated file)

        Returns:
            line_number, line_position
        """
        if source:
            ref_str = self._source_newlines
        else:
            ref_str = self._templated_newlines
        nl_idx = bisect_left(ref_str, char_pos)
        if nl_idx > 0:
            return nl_idx + 1, char_pos - ref_str[nl_idx - 1]
        else:
            # NB: line_pos is char_pos+1 because character position is 0-indexed,
            # but the line position is 1-indexed.
            return 1, char_pos + 1

    def _find_slice_indices_of_templated_pos(
        self,
        templated_pos: int,
        start_idx: Optional[int] = None,
        inclusive: bool = True,
    ) -> tuple[int, int]:
        """Find a subset of the sliced file which touch this point.

        NB: the last_idx is exclusive, as the intent is to use this as a slice.
        """
        start_idx = start_idx or 0
        first_idx: Optional[int] = None
        last_idx = start_idx
        # Work through the sliced file, starting at the start_idx if given
        # as an optimisation hint. The sliced_file is a list of TemplatedFileSlice
        # which reference parts of the templated file and where they exist in the
        # source.
for idx, elem in enumerate(self.sliced_file[start_idx:]): last_idx = idx + start_idx if elem[2].stop >= templated_pos: if first_idx is None: first_idx = idx + start_idx if elem[2].start > templated_pos: break elif not inclusive and elem[2].start >= templated_pos: break # If we got to the end add another index else: last_idx += 1 if first_idx is None: # pragma: no cover raise ValueError("Position Not Found") return first_idx, last_idx def raw_slices_spanning_source_slice( self, source_slice: slice ) -> list[RawFileSlice]: """Return a list of the raw slices spanning a set of indices.""" # Special case: The source_slice is at the end of the file. last_raw_slice = self.raw_sliced[-1] if source_slice.start >= last_raw_slice.source_idx + len(last_raw_slice.raw): return [] # First find the start index raw_slice_idx = 0 # Move the raw pointer forward to the start of this patch while ( raw_slice_idx + 1 < len(self.raw_sliced) and self.raw_sliced[raw_slice_idx + 1].source_idx <= source_slice.start ): raw_slice_idx += 1 # Find slice index of the end of this patch. slice_span = 1 while ( raw_slice_idx + slice_span < len(self.raw_sliced) and self.raw_sliced[raw_slice_idx + slice_span].source_idx < source_slice.stop ): slice_span += 1 # Return the raw slices: return self.raw_sliced[raw_slice_idx : raw_slice_idx + slice_span] def templated_slice_to_source_slice( self, template_slice: slice, ) -> slice: """Convert a template slice to a source slice.""" if not self.sliced_file: return template_slice # pragma: no cover TODO? ts_start_sf_start, ts_start_sf_stop = self._find_slice_indices_of_templated_pos( template_slice.start ) ts_start_subsliced_file = self.sliced_file[ts_start_sf_start:ts_start_sf_stop] # Work out the insertion point insertion_point = -1 for elem in ts_start_subsliced_file: # Do slice starts and ends: for slice_elem in ("start", "stop"): if getattr(elem[2], slice_elem) == template_slice.start: # Store the lowest. point = getattr(elem[1], slice_elem) if insertion_point < 0 or point < insertion_point: insertion_point = point # We don't break here, because we might find ANOTHER # later which is actually earlier. # Zero length slice. if template_slice.start == template_slice.stop: # Is it on a join? if insertion_point >= 0: return zero_slice(insertion_point) # It's within a segment. else: if ( ts_start_subsliced_file and ts_start_subsliced_file[0][0] == "literal" ): offset = template_slice.start - ts_start_subsliced_file[0][2].start return zero_slice( ts_start_subsliced_file[0][1].start + offset, ) else: raise ValueError( # pragma: no cover "Attempting a single length slice within a templated section! " f"{template_slice} within {ts_start_subsliced_file}." ) # Otherwise it's a slice with length. # Use a non inclusive match to get the end point. 
ts_stop_sf_start, ts_stop_sf_stop = self._find_slice_indices_of_templated_pos( template_slice.stop, inclusive=False ) # Update starting position based on insertion point: if insertion_point >= 0: for elem in self.sliced_file[ts_start_sf_start:]: if elem[1].start != insertion_point: ts_start_sf_start += 1 else: break subslices = self.sliced_file[ # Very inclusive slice min(ts_start_sf_start, ts_stop_sf_start) : max( ts_start_sf_stop, ts_stop_sf_stop ) ] if ts_start_sf_start == ts_start_sf_stop: if ts_start_sf_start > len(self.sliced_file): # pragma: no cover # We should never get here raise ValueError("Starting position higher than sliced file position") if ts_start_sf_start < len(self.sliced_file): # pragma: no cover return self.sliced_file[1].source_slice else: return self.sliced_file[-1].source_slice # pragma: no cover else: start_slices = self.sliced_file[ts_start_sf_start:ts_start_sf_stop] if ts_stop_sf_start == ts_stop_sf_stop: # pragma: no cover TODO? stop_slices = [self.sliced_file[ts_stop_sf_start]] else: stop_slices = self.sliced_file[ts_stop_sf_start:ts_stop_sf_stop] # if it's a literal segment then we can get the exact position # otherwise we're greedy. # Start. if insertion_point >= 0: source_start = insertion_point elif start_slices[0][0] == "literal": offset = template_slice.start - start_slices[0][2].start source_start = start_slices[0][1].start + offset else: source_start = start_slices[0][1].start # Stop. if stop_slices[-1][0] == "literal": offset = stop_slices[-1][2].stop - template_slice.stop source_stop = stop_slices[-1][1].stop - offset else: source_stop = stop_slices[-1][1].stop # Does this slice go backward? if source_start > source_stop: # If this happens, it's because one was templated and # the other isn't, or because a loop means that the segments # are in a different order. # Take the widest possible span in this case. source_start = min(elem[1].start for elem in subslices) source_stop = max(elem[1].stop for elem in subslices) source_slice = slice(source_start, source_stop) return source_slice def is_source_slice_literal(self, source_slice: slice) -> bool: """Work out whether a slice of the source file is a literal or not.""" # No sliced file? Everything is literal if not self.raw_sliced: # pragma: no cover TODO? return True # Zero length slice. It's a literal, because it's definitely not templated. if source_slice.start == source_slice.stop: return True is_literal = True for raw_slice in self.raw_sliced: # Reset if we find a literal and we're up to the start # otherwise set false. if raw_slice.source_idx <= source_slice.start: is_literal = raw_slice.slice_type == "literal" elif raw_slice.source_idx >= source_slice.stop: # We've gone past the end. Break and Return. break else: # We're in the middle. Check type if raw_slice.slice_type != "literal": is_literal = False return is_literal def source_only_slices(self) -> list[RawFileSlice]: """Return a list a slices which reference the parts only in the source. All of these slices should be expected to have zero-length in the templated file. The results are NECESSARILY sorted. 
""" ret_buff = [] for elem in self.raw_sliced: if elem.is_source_only_slice(): ret_buff.append(elem) return ret_buff def source_position_dict_from_slice(self, source_slice: slice) -> dict[str, int]: """Create a source position dict from a slice.""" start = self.get_line_pos_of_char_pos(source_slice.start, source=True) stop = self.get_line_pos_of_char_pos(source_slice.stop, source=True) return { "start_line_no": start[0], "start_line_pos": start[1], "start_file_pos": source_slice.start, "end_line_no": stop[0], "end_line_pos": stop[1], "end_file_pos": source_slice.stop, } class RawTemplater: """A templater which does nothing. This also acts as the base templating class. """ name = "raw" templater_selector = "templater" config_subsection: tuple[str, ...] = () def __init__( self, override_context: Optional[dict[str, Any]] = None, ) -> None: """Placeholder init function. We allow override context here even though the raw templater doesn't apply any templating variables. That's to enable classes which inherit from this class to reuse that logic. """ self.default_context = dict(test_value="__test__") self.override_context = override_context or {} def sequence_files( self, fnames: list[str], config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> Iterable[str]: """Given files to be processed, return a valid processing sequence.""" # Default is to process in the original order. return fnames @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> tuple[TemplatedFile, list[SQLTemplaterError]]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. Returns: :obj:`tuple` of :obj:`TemplatedFile` and a list of SQLTemplaterError if templating was successful enough that we may move to attempt parsing. Raises: SQLTemplaterError: If templating fails fatally, then this method should raise a :obj:`SQLTemplaterError` instead which will be caught and displayed appropriately. """ return TemplatedFile(in_str, fname=fname), [] @large_file_check def process_with_variants( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> Iterator[tuple[TemplatedFile, list[SQLTemplaterError]]]: """Extended version of `process` which returns multiple variants. Unless explicitly defined, this simply yields the result of .process(). """ yield self.process( in_str=in_str, fname=fname, config=config, formatter=formatter ) def __eq__(self, other: Any) -> bool: """Return true if `other` is of the same class as this one. NB: This is useful in comparing configs. """ return isinstance(other, self.__class__) def config_pairs(self) -> list[tuple[str, str]]: """Returns info about the given templater for output by the cli. Returns: list[tuple[str, str]]: A list of tuples containing information about the given templater. 
Each tuple contains two strings: the string 'templater' and the name of the templater. """ return [("templater", self.name)] def get_context( self, fname: Optional[str], config: Optional[FluffConfig], ) -> dict[str, Any]: """Get the templating context from the config. This function retrieves the templating context from the config by loading the config and updating the live_context dictionary with the loaded_context and other predefined context dictionaries. It then goes through the loaded_context dictionary returns the live_context dictionary. Args: fname (str, optional): The file name. config (`FluffConfig`, optional): The config object. Returns: dict: The templating context. """ # TODO: The config loading should be done outside the templater code. Here # is a silly place. if config: # This is now a nested section loaded_context = ( config.get_section( (self.templater_selector, self.name) + self.config_subsection ) or {} ) else: loaded_context = {} live_context = {} live_context.update(self.default_context) live_context.update(loaded_context) live_context.update(self.override_context) return live_context sqlfluff-3.4.2/src/sqlfluff/core/templaters/builtins/000077500000000000000000000000001503426445100227205ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/templaters/builtins/__init__.py000066400000000000000000000000541503426445100250300ustar00rootroot00000000000000"""Module for defining context builtins.""" sqlfluff-3.4.2/src/sqlfluff/core/templaters/builtins/common.py000066400000000000000000000020711503426445100245620ustar00rootroot00000000000000"""Common classes and functions for defining templating builtins.""" from typing import Any, Callable from sqlfluff.core.errors import SQLTemplaterError class FunctionWrapper: """Class to wrap a callable, for better error handling. When called, it just delegates to the provided callable, but if it is rendered as a string directly, it generates a templating error. """ def __init__(self, name: str, callable: Callable[..., Any]): self._name = name self._callable = callable def __call__(self, *args: Any, **kwargs: Any) -> Any: """When the wrapper is called, call the internal function.""" return self._callable(*args, **kwargs) def __str__(self) -> str: """If we try and render the wrapper directly, throw an error.""" raise SQLTemplaterError( f"Unable to render builtin callable {self._name!r} as a " "variable because it is defined as a function. To remove " "this function from the context, set `apply_dbt_builtins` " "to False." ) sqlfluff-3.4.2/src/sqlfluff/core/templaters/builtins/dbt.py000066400000000000000000000047301503426445100240470ustar00rootroot00000000000000"""Defines the jinja builtins for dbt.""" from typing import Any, Union from sqlfluff.core.templaters.builtins.common import FunctionWrapper class RelationEmulator: """A class which emulates the `this` class from dbt.""" # Tell Jinja this object is safe to call and does not alter data. # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable unsafe_callable = False alters_data = False identifier = "this_model" schema = "this_schema" database = "this_database" def __init__(self, identifier: str = "this_model") -> None: self.identifier = identifier def __call__(self, *args: Any, **kwargs: Any) -> "RelationEmulator": """When relation(*) is called return self as another relation.""" return self def __getattr__(self, name: str) -> Union["RelationEmulator", bool]: """When relation.attribute is called return self as another relation. 
NOTE: If the attribute begins with `is_`, then return a boolean True. """ if name[0:3] == "is_": return True return self def __str__(self) -> str: return self.identifier # NOTE: we use `FunctionWrapper` on all of the callable builtins here # so that there's a sensible error message if someone tries to render # them directly. DBT_BUILTINS = { "ref": FunctionWrapper("ref", lambda *args, **kwargs: RelationEmulator(args[-1])), # In case of a cross project ref in dbt, model_ref is the second # argument. Otherwise it is the only argument. "source": FunctionWrapper( "source", lambda source_name, table: RelationEmulator(f"{source_name}_{table}"), ), "config": FunctionWrapper("config", lambda **kwargs: ""), "var": FunctionWrapper("var", lambda variable, default="": "item"), # `is_incremental()` renders as True, always in this case. # TODO: This means we'll never parse other parts of the query, # that are only reachable when `is_incremental()` returns False. # We should try to find a solution to that. Perhaps forcing the file # to be parsed TWICE if it uses this variable. "is_incremental": FunctionWrapper("is_incremental", lambda: True), "this": RelationEmulator(), "zip_strict": FunctionWrapper("zip_strict", zip), "zip": FunctionWrapper( "zip", lambda *args, default=None: ( zip(*args) if all(hasattr(arg, "__iter__") for arg in args) else default ), ), } sqlfluff-3.4.2/src/sqlfluff/core/templaters/jinja.py000066400000000000000000001407051503426445100225430ustar00rootroot00000000000000"""Defines the templaters.""" import copy import importlib import importlib.util import logging import os.path import pkgutil import sys from collections.abc import Iterable, Iterator from functools import reduce from typing import ( TYPE_CHECKING, Any, Callable, Optional, Union, cast, ) import jinja2.nodes import jinja2.parser from jinja2 import ( Environment, FileSystemLoader, TemplateError, TemplateSyntaxError, meta, ) from jinja2.exceptions import TemplateNotFound, UndefinedError from jinja2.ext import Extension from jinja2.sandbox import SandboxedEnvironment from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLFluffUserError, SQLTemplaterError from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.helpers.slice import is_zero_slice, slice_length from sqlfluff.core.templaters.base import ( RawFileSlice, TemplatedFile, TemplatedFileSlice, large_file_check, ) from sqlfluff.core.templaters.builtins.dbt import DBT_BUILTINS from sqlfluff.core.templaters.python import PythonTemplater from sqlfluff.core.templaters.slicers.tracer import JinjaAnalyzer, JinjaTrace if TYPE_CHECKING: # pragma: no cover from jinja2.runtime import Macro # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") class UndefinedRecorder: """Similar to jinja2.StrictUndefined, but remembers, not fails.""" # Tell Jinja this object is safe to call and does not alter data. # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable unsafe_callable = False alters_data = False def __init__(self, name: str, undefined_set: set[str]) -> None: self.name = name # Reference to undefined set to modify, it is assumed that the # calling code keeps a reference to this variable to they can # continue to access it after modification by this class. 
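# --- Illustrative sketch (not part of the module above) ---------------------
# The dbt builtins above render as plausible stand-ins rather than running
# real dbt logic, e.g. `ref('my_model')` yields a `RelationEmulator` that
# renders as its identifier, while rendering a bare wrapped callable raises.
from sqlfluff.core.errors import SQLTemplaterError
from sqlfluff.core.templaters.builtins.dbt import DBT_BUILTINS

assert str(DBT_BUILTINS["ref"]("my_model")) == "my_model"
assert str(DBT_BUILTINS["source"]("raw", "users")) == "raw_users"
assert DBT_BUILTINS["is_incremental"]() is True
assert str(DBT_BUILTINS["this"]) == "this_model"
try:
    str(DBT_BUILTINS["ref"])  # rendering the callable itself is an error
except SQLTemplaterError:
    pass
# -----------------------------------------------------------------------------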
self.undefined_set = undefined_set def __str__(self) -> str: """Treat undefined vars as empty, but remember for later.""" self.undefined_set.add(self.name) return "" def __getattr__(self, item: str) -> "UndefinedRecorder": """Don't fail when called, remember instead.""" self.undefined_set.add(self.name) return UndefinedRecorder(f"{self.name}.{item}", self.undefined_set) def __getitem__(self, item: str) -> "UndefinedRecorder": """Don't fail when called, remember instead.""" self.undefined_set.add(self.name) return UndefinedRecorder(f"{self.name}.{item}", self.undefined_set) def __call__(self, *args: Any, **kwargs: Any) -> "UndefinedRecorder": """Don't fail when called unlike parent class.""" return UndefinedRecorder(f"{self.name}()", self.undefined_set) def __iter__(self) -> Iterator["UndefinedRecorder"]: """Don't fail when iterated, remember instead.""" self.undefined_set.add(self.name) yield UndefinedRecorder(f"iter({self.name})", self.undefined_set) class JinjaTemplater(PythonTemplater): """A templater using the jinja2 library. See: https://jinja.palletsprojects.com/ """ name = "jinja" class Libraries: """Mock namespace for user-defined Jinja library.""" pass @staticmethod def _extract_macros_from_template( template: str, env: Environment, ctx: dict[str, Any] ) -> dict[str, "Macro"]: """Take a template string and extract any macros from it. Lovingly inspired by http://codyaray.com/2015/05/auto-load-jinja2-macros Raises: TemplateSyntaxError: If the macro we try to load has invalid syntax. We assume that outer functions will catch this exception and handle it appropriately. """ from jinja2.runtime import Macro # noqa # Iterate through keys exported from the loaded template string context: dict[str, Macro] = {} # NOTE: `env.from_string()` will raise TemplateSyntaxError if `template` # is invalid. macro_template = env.from_string(template, globals=ctx) # This is kind of low level and hacky but it works try: for k in macro_template.module.__dict__: attr = getattr(macro_template.module, k) # Is it a macro? If so install it at the name of the macro if isinstance(attr, Macro): context[k] = attr except UndefinedError: # This occurs if any file in the macro path references an # undefined Jinja variable. It's safe to ignore this. Any # meaningful issues will surface later at linting time. pass # Return the context return context @classmethod def _extract_macros_from_path( cls, path: list[str], env: Environment, ctx: dict[str, Any], exclude_paths: Optional[list[str]] = None, ) -> dict[str, "Macro"]: """Take a path and extract macros from it. Args: path (list[str]): A list of paths. env (Environment): The environment object. ctx (Dict): The context dictionary. exclude_paths (Optional[[List][str]]): A list of paths to exclude Returns: dict: A dictionary containing the extracted macros. Raises: ValueError: If a path does not exist. SQLTemplaterError: If there is an error in the Jinja macro file. """ macro_ctx: dict[str, "Macro"] = {} for path_entry in path: # Does it exist? It should as this check was done on config load. if not os.path.exists(path_entry): raise ValueError(f"Path does not exist: {path_entry}") if os.path.isfile(path_entry): if exclude_paths: if cls._exclude_macros( macro_path=path_entry, exclude_macros_path=exclude_paths ): continue # It's a file. Extract macros from it. with open(path_entry) as opened_file: template = opened_file.read() # Update the context with macros from the file. 
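# --- Illustrative sketch (not part of the module above) ---------------------
# `_extract_macros_from_template` relies on the fact that a compiled template
# module exposes its macros as attributes. The same trick in plain jinja2
# (the macro name here is purely illustrative):
from jinja2.runtime import Macro
from jinja2.sandbox import SandboxedEnvironment

_env = SandboxedEnvironment()
_tmpl = _env.from_string("{% macro greet(name) %}hello {{ name }}{% endmacro %}")
_macros = {k: v for k, v in _tmpl.module.__dict__.items() if isinstance(v, Macro)}
assert str(_macros["greet"]("world")) == "hello world"
# -----------------------------------------------------------------------------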
try: macro_ctx.update( cls._extract_macros_from_template(template, env=env, ctx=ctx) ) except TemplateSyntaxError as err: raise SQLTemplaterError( f"Error in Jinja macro file {os.path.relpath(path_entry)}: " f"{err.message}", line_no=err.lineno, line_pos=1, ) from err else: # It's a directory. Iterate through files in it and extract from them. for dirpath, _, files in os.walk(path_entry): for fname in files: if fname.endswith(".sql"): macro_ctx.update( cls._extract_macros_from_path( [os.path.join(dirpath, fname)], env=env, ctx=ctx, exclude_paths=exclude_paths, ) ) return macro_ctx def _extract_macros_from_config( self, config: FluffConfig, env: Environment, ctx: dict[str, Any] ) -> dict[str, "Macro"]: """Take a config and load any macros from it. Args: config: The config to extract macros from. env: The environment. ctx: The context. Returns: dict: A dictionary containing the extracted macros. """ if config: loaded_context = ( config.get_section((self.templater_selector, self.name, "macros")) or {} ) else: # pragma: no cover TODO? loaded_context = {} # Iterate to load macros macro_ctx: dict[str, "Macro"] = {} for value in loaded_context.values(): try: macro_ctx.update( self._extract_macros_from_template(value, env=env, ctx=ctx) ) except TemplateSyntaxError as err: raise SQLFluffUserError( f"Error loading user provided macro:\n`{value}`\n> {err}." ) return macro_ctx def _extract_libraries_from_config(self, config: FluffConfig) -> dict[str, Any]: """Extracts libraries from the given configuration. This function iterates over the modules in the library path and imports them dynamically. The imported modules are then added to a 'Libraries' object, which is returned as a dictionary excluding magic methods. Args: config: The configuration object. Returns: dict: A dictionary containing the extracted libraries. """ # If a more global library_path is set, let that take precedence. library_path = config.get("library_path") or config.get_section( (self.templater_selector, self.name, "library_path") ) if not library_path: return {} libraries = JinjaTemplater.Libraries() # If library_path has __init__.py we parse it as one module, else we parse it # a set of modules is_library_module = os.path.exists(os.path.join(library_path, "__init__.py")) library_module_name = os.path.basename(library_path) # Need to go one level up to parse as a module correctly walk_path = ( os.path.join(library_path, "..") if is_library_module else library_path ) for module_finder, module_name, _ in pkgutil.walk_packages([walk_path]): # skip other modules that can be near module_dir if is_library_module and not module_name.startswith(library_module_name): continue # import_module is deprecated as of python 3.4. This follows roughly # the guidance of the python docs: # https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module spec = module_finder.find_spec(module_name, None) assert ( spec ), f"Module {module_name} failed to be found despite being listed." module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module assert spec.loader, f"Module {module_name} missing expected loader." spec.loader.exec_module(module) if "." 
in module_name: # nested modules have `.` in module_name *module_path, last_module_name = module_name.split(".") # find parent module recursively parent_module = reduce( lambda res, path_part: getattr(res, path_part), module_path, libraries, ) # set attribute on module object to make jinja working correctly setattr(parent_module, last_module_name, module) else: # set attr on `libraries` obj to make it work in jinja nicely setattr(libraries, module_name, module) if is_library_module: # when library is module we have one more root module in hierarchy and we # remove it libraries = getattr(libraries, library_module_name) # remove magic methods from result return {k: v for k, v in libraries.__dict__.items() if not k.startswith("__")} @classmethod def _crawl_tree( cls, tree: jinja2.nodes.Node, variable_names: set[str], raw: str ) -> Iterator[SQLTemplaterError]: """Crawl the tree looking for occurrences of the undeclared values.""" # First iterate through children for elem in tree.iter_child_nodes(): yield from cls._crawl_tree(elem, variable_names, raw) # Then assess self if ( isinstance(tree, jinja2.nodes.Name) and getattr(tree, "name") in variable_names ): line_no: int = getattr(tree, "lineno") tree_name: str = getattr(tree, "name") line = raw.split("\n")[line_no - 1] pos = line.index(tree_name) + 1 yield SQLTemplaterError( f"Undefined jinja template variable: {tree_name!r}", line_no=line_no, line_pos=pos, ) def _get_jinja_env(self, config: Optional[FluffConfig] = None) -> Environment: """Get a properly configured jinja environment. This method returns a properly configured jinja environment. It first checks if the 'ignore' key is present in the config dictionary and if it contains the value 'templating'. If so, it creates a subclass of FileSystemLoader called SafeFileSystemLoader that overrides the get_source method to handle missing templates when templating is ignored. If 'ignore' is not present or does not contain 'templating', it uses the regular FileSystemLoader. It then sets the extensions to ['jinja2.ext.do'] and adds the DBTTestExtension if the _apply_dbt_builtins method returns True. Finally, it returns a SandboxedEnvironment object with the specified settings. Args: config (dict, optional): A dictionary containing configuration settings. Returns: jinja2.Environment: A properly configured jinja environment. """ loader: Optional[FileSystemLoader] macros_path = self._get_macros_path(config, "load_macros_from_path") loader_search_path = self._get_loader_search_path(config) final_search_path = (loader_search_path or []) + (macros_path or []) ignore_templating = config and "templating" in config.get("ignore") if ignore_templating: class SafeFileSystemLoader(FileSystemLoader): def get_source( self, environment: Environment, name: str ) -> tuple[str, str, Callable[..., Any]]: try: if not isinstance(name, DummyUndefined): return super().get_source(environment, name) raise TemplateNotFound(str(name)) except TemplateNotFound: # When ignore=templating is set, treat missing files # or attempts to load an "Undefined" file as the first # 'base' part of the name / filename rather than failing. 
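# --- Illustrative sketch (not part of the module above) ---------------------
# `_crawl_tree` adds line/position detail to undeclared variables; the
# detection itself comes from `jinja2.meta`, which works standalone:
from jinja2 import Environment, meta

_env = Environment()
_ast = _env.parse("SELECT {{ col }} FROM {{ tbl }}")
assert meta.find_undeclared_variables(_ast) == {"col", "tbl"}
# -----------------------------------------------------------------------------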
templater_logger.debug( "Providing dummy contents for Jinja macro file: %s", name ) value = os.path.splitext(os.path.basename(str(name)))[0] return value, f"{value}.sql", lambda: False loader = SafeFileSystemLoader(final_search_path or []) else: loader = FileSystemLoader(final_search_path) if final_search_path else None extensions: list[Union[str, type[Extension]]] = ["jinja2.ext.do"] if self._apply_dbt_builtins(config): extensions.append(DBTTestExtension) return SandboxedEnvironment( # We explicitly want to preserve newlines. keep_trailing_newline=True, # The do extension allows the "do" directive autoescape=False, extensions=extensions, loader=loader, ) def _get_macros_path( self, config: Optional[FluffConfig], key: str ) -> Optional[list[str]]: """Get the list of macros paths from the provided config object. This method searches for a config section specified by the templater_selector, name, and key specified. If the section is found, it retrieves the value associated with that section and splits it into a list of strings using a comma as the delimiter. The resulting list is stripped of whitespace and empty strings and returned. If the section is not found or the resulting list is empty, it returns None. Args: config (FluffConfig): The config object to search for the macros path section. key (str): Key to load the macros path from the config file. Also used for loading the excluding macros path from config. Returns: Optional[list[str]]: The list of macros paths if found, None otherwise. """ if config: macros_path = config.get_section((self.templater_selector, self.name, key)) if macros_path: result = [s.strip() for s in macros_path.split(",") if s.strip()] if result: return result return None def _get_loader_search_path( self, config: Optional[FluffConfig] ) -> Optional[list[str]]: """Get the list of Jinja loader search paths from the provided config object. This method searches for a config section specified by the templater_selector, name, and 'loader_search_path' keys. If the section is found, it retrieves the value associated with that section and splits it into a list of strings using a comma as the delimiter. The resulting list is stripped of whitespace and empty strings and returned. If the section is not found or the resulting list is empty, it returns None. Args: config (FluffConfig): The config object to search for the loader search path section. Returns: Optional[list[str]]: The list of loader search paths if found, None otherwise. """ if config: loader_search_path = config.get_section( (self.templater_selector, self.name, "loader_search_path") ) if loader_search_path: result = [s.strip() for s in loader_search_path.split(",") if s.strip()] if result: return result return None def _get_jinja_analyzer(self, raw_str: str, env: Environment) -> JinjaAnalyzer: """Creates a new object derived from JinjaAnalyzer. Derived classes can provide their own analyzers (e.g. to support custom Jinja tags). """ return JinjaAnalyzer(raw_str, env) def _apply_dbt_builtins(self, config: Optional[FluffConfig]) -> bool: """Check if dbt builtins should be applied from the provided config object. This method searches for a config section specified by the templater_selector, name, and 'apply_dbt_builtins' keys. If the section is found, it returns the value associated with that section. If the section is not found, it returns False. Args: config (FluffConfig): The config object to search for the apply_dbt_builtins section. Returns: bool: True if dbt builtins should be applied, False otherwise. 
""" if config: apply_dbt_builtins = config.get_section( (self.templater_selector, self.name, "apply_dbt_builtins") ) # If the config is totally absent for this templater, default to False, # but for any other value that isn't boolean, throw an error. if apply_dbt_builtins is None: apply_dbt_builtins = False assert isinstance(apply_dbt_builtins, bool), ( f"`apply_dbt_builtins` for {self.templater_selector}.{self.name} " f"must be True/False, not {apply_dbt_builtins!r}" ) return apply_dbt_builtins return False def _get_env_context( self, fname: Optional[str], config: Optional[FluffConfig], env: Environment, ) -> dict[str, Any]: """Get the templating context from the config. NOTE: This closely mirrors the `get_context` method which we inherit from the python templater, but extends the signature. For that reason we define a new method here, which internally refers to `get_context`. Args: fname (str, optional): The name of the file. config (dict, optional): The configuration. env: The Jinja Environment. Returns: dict: The templating context. """ # Load the context live_context = self.get_context(fname, config) # Apply dbt builtin functions if we're allowed. if config: # first make libraries available in the context # so they can be used by the macros too libraries = self._extract_libraries_from_config(config=config) live_context.update(libraries) jinja_filters = libraries.get("SQLFLUFF_JINJA_FILTERS") if jinja_filters: env.filters.update(jinja_filters) if self._apply_dbt_builtins(config): for name in DBT_BUILTINS: # Only apply if it hasn't already been set at this stage. if name not in live_context: live_context[name] = DBT_BUILTINS[name] # Load macros from path (if applicable) if config: macros_path = self._get_macros_path(config, "load_macros_from_path") exclude_macros_path = self._get_macros_path( config, "exclude_macros_from_path" ) if macros_path: live_context.update( self._extract_macros_from_path( macros_path, env=env, ctx=live_context, exclude_paths=exclude_macros_path, ) ) # Load config macros, these will take precedence over macros from the path live_context.update( self._extract_macros_from_config( config=config, env=env, ctx=live_context ) ) return live_context def construct_render_func( self, fname: Optional[str] = None, config: Optional[FluffConfig] = None ) -> tuple[Environment, dict[str, Any], Callable[[str], str]]: """Builds and returns objects needed to create and run templates. Args: fname (Optional[str]): The name of the file. config (Optional[dict]): The configuration settings. Returns: tuple[Environment, dict, Callable[[str], str]]: A tuple containing the following: - env (Environment): An instance of the 'Environment' class. - live_context (dict): A dictionary containing the live context. - render_func (Callable[[str], str]): A callable function that is used to instantiate templates. """ # Load the context env = self._get_jinja_env(config) live_context = self._get_env_context(fname, config, env) def render_func(in_str: str) -> str: """Used by JinjaTracer to instantiate templates. This function is a closure capturing internal state from process(). Note that creating templates involves quite a bit of state known to _this_ function but not to JinjaTracer. https://www.programiz.com/python-programming/closure """ try: template = env.from_string(in_str, globals=live_context) except TemplateSyntaxError as err: # pragma: no cover # NOTE: If the template fails to parse, then this clause # will be triggered. 
However in normal that should never # happen because the template should already have been # validated by the point this is called. Typically that # happens when searching for undefined variables. raise SQLTemplaterError( f"Late failure to parse jinja template: {err}.", line_no=err.lineno, ) return template.render() return env, live_context, render_func def _generate_violations_for_undefined_variables( self, in_str: str, syntax_tree: jinja2.nodes.Template, undefined_variables: set[str], ) -> list[SQLTemplaterError]: """Generates violations for any undefined variables.""" violations: list[SQLTemplaterError] = [] if undefined_variables: # Lets go through and find out where they are: for template_err_val in self._crawl_tree( syntax_tree, undefined_variables, in_str ): violations.append(template_err_val) return violations @staticmethod def _init_undefined_tracking( live_context: dict[str, Any], potentially_undefined_variables: Iterable[str], ignore_templating: bool = False, ) -> set[str]: """Sets up tracing of undefined template variables. NOTE: This works by mutating the `live_context` which is being used by the environment. """ # NOTE: This set is modified by the `UndefinedRecorder` when run. undefined_variables: set[str] = set() for val in potentially_undefined_variables: if val not in live_context: if ignore_templating: live_context[val] = DummyUndefined.create(val) else: live_context[val] = UndefinedRecorder(val, undefined_variables) return undefined_variables @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> tuple[TemplatedFile, list[SQLTemplaterError]]: """Process a string and return the new string. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (str): The input string. fname (str, optional): The filename of this string. This is mostly for loading config files at runtime. config (FluffConfig): A specific config to use for this templating operation. Only necessary for some templaters. formatter (CallbackFormatter): Optional object for output. Raises: ValueError: If the 'config' argument is not provided. SQLTemplaterError: If templating fails fatally, then this method should raise a :obj:`SQLTemplaterError` instead which will be caught and displayed appropriately. Returns: tuple[TemplatedFile, list[SQLTemplaterError]]: A tuple containing the templated file and a list of violations. """ if not config: # pragma: no cover raise ValueError( "For the jinja templater, the `process()` method requires a config " "object." ) env, live_context, render_func = self.construct_render_func( fname=fname, config=config ) # Attempt to identify any undeclared variables or syntax errors. # The majority of variables will be found during the _crawl_tree # step rather than this first Exception which serves only to catch # catastrophic errors. try: syntax_tree = env.parse(in_str) potentially_undefined_variables = meta.find_undeclared_variables( syntax_tree ) except Exception as err: templater_error = SQLTemplaterError( "Failed to parse Jinja syntax. Correct the syntax or select an " "alternative templater. Error: " + str(err) ) # Capture a line number if we can. 
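# --- Illustrative sketch (not part of the module above) ---------------------
# `_init_undefined_tracking` seeds the render context with recorder objects
# that note their own name when rendered. A minimal standalone version of
# that pattern (class and variable names here are illustrative only):
from jinja2.sandbox import SandboxedEnvironment

class _MiniRecorder:
    def __init__(self, name, seen):
        self.name, self.seen = name, seen

    def __str__(self):
        # Record the access, then render as an empty string.
        self.seen.add(self.name)
        return ""

_seen = set()
_env = SandboxedEnvironment()
_env.from_string("SELECT * FROM {{ tbl }}").render(tbl=_MiniRecorder("tbl", _seen))
assert _seen == {"tbl"}
# -----------------------------------------------------------------------------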
if isinstance(err, TemplateSyntaxError): templater_error.line_no = err.lineno raise templater_error undefined_variables = self._init_undefined_tracking( live_context, potentially_undefined_variables, ignore_templating=("templating" in config.get("ignore")), ) try: # Slice the file once rendered. raw_sliced, sliced_file, out_str = self.slice_file( in_str, render_func=render_func, config=config, ) return ( TemplatedFile( source_str=in_str, templated_str=out_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), self._generate_violations_for_undefined_variables( in_str, syntax_tree, undefined_variables ), ) except (TemplateError, TypeError) as err: templater_logger.info("Unrecoverable Jinja Error: %s", err, exc_info=True) raise SQLTemplaterError( ( "Unrecoverable failure in Jinja templating: {}. Have you " "correctly configured your variables? " "https://docs.sqlfluff.com/en/latest/perma/variables.html" ).format(err), # We don't have actual line number information, but specify # line 1 so users can ignore with "noqa" if they want. (The # default is line 0, which can't be ignored because it's not # a valid line number.) line_no=1, line_pos=1, ) def slice_file( self, raw_str: str, render_func: Callable[[str], str], config: Optional[FluffConfig] = None, append_to_templated: str = "", ) -> tuple[list[RawFileSlice], list[TemplatedFileSlice], str]: """Slice the file to determine regions where we can fix. Args: raw_str (str): The raw string to be sliced. render_func (Callable[[str], str]): The rendering function to be used. config (optional): Optional configuration. append_to_templated: Optional string to append to the template. Returns: tuple[list[RawFileSlice], list[TemplatedFileSlice], str]: A tuple containing a list of raw file slices, a list of templated file slices, and the templated string. """ # The JinjaTracer slicing algorithm is more robust, but it requires # us to create and render a second template (not raw_str). templater_logger.info("Slicing File Template") templater_logger.debug(" Raw String: %r", raw_str[:80]) analyzer = self._get_jinja_analyzer(raw_str, self._get_jinja_env()) tracer = analyzer.analyze(render_func) trace = tracer.trace(append_to_templated=append_to_templated) return trace.raw_sliced, trace.sliced_file, trace.templated_str @staticmethod def _rectify_templated_slices( length_deltas: dict[int, int], sliced_template: list[TemplatedFileSlice] ) -> list[TemplatedFileSlice]: """This method rectifies the source slices of a variant template. :TRICKY: We want to yield variants that _look like_ they were rendered from the original template. However, they were actually rendered from a modified template, which means they have source indices which won't line up with the source files. We correct that here by using the length deltas generated earlier from the modifications. This should ensure that lint issues and fixes for the variants are handled correctly and can be combined with those from the original template. """ # NOTE: We sort the stack because it's important that it's in order # because we're going to be popping from one end of it. There's no # guarantee that the items are in a particular order a) because it's # a dict and b) because they may have been generated out of order. 
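# --- Illustrative sketch (not part of the module above) ---------------------
# End-to-end use of `process` via the public API. A config is required by
# this templater; the dialect chosen here is arbitrary:
from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

_config = FluffConfig(overrides={"dialect": "ansi"})
_tf, _violations = JinjaTemplater().process(
    in_str="SELECT {{ 1 + 1 }} AS two\n", fname="<string>", config=_config
)
assert str(_tf) == "SELECT 2 AS two\n"
assert _violations == []
# -----------------------------------------------------------------------------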
delta_stack = sorted(length_deltas.items(), key=lambda t: t[0]) adjusted_slices: list[TemplatedFileSlice] = [] carried_delta = 0 for tfs in sliced_template: if delta_stack: idx, d = delta_stack[0] if idx == tfs.source_slice.start + carried_delta: adjusted_slices.append( tfs._replace( # "stretch" the slice by adjusting the end more # than the start. source_slice=slice( tfs.source_slice.start + carried_delta, tfs.source_slice.stop + carried_delta - d, ) ) ) carried_delta -= d delta_stack.pop(0) continue # No delta match. Just shift evenly. adjusted_slices.append( tfs._replace( source_slice=slice( tfs.source_slice.start + carried_delta, tfs.source_slice.stop + carried_delta, ) ) ) return adjusted_slices @staticmethod def _calculate_variant_score( raw_sliced: list[RawFileSlice], sliced_file: list[TemplatedFileSlice], uncovered_slices: set[int], original_source_slices: dict[int, slice], ) -> int: """Compute a score for the variant based from size of covered slices. NOTE: We need to map this back to the positions in the original file, and only have the positions in the modified file here. That means we go translate back via the slice index in raw file. """ # First, work out the literal positions in the modified file which # are now covered. covered_source_positions = { tfs.source_slice.start for tfs in sliced_file if tfs.slice_type == "literal" and not is_zero_slice(tfs.templated_slice) } # Second, convert these back into indices so we can use them to # refer to the unmodified source file. covered_raw_slice_idxs = [ idx for idx, raw_slice in enumerate(raw_sliced) if raw_slice.source_idx in covered_source_positions ] return sum( slice_length(original_source_slices[idx]) for idx in covered_raw_slice_idxs if idx in uncovered_slices ) def _handle_unreached_code( self, in_str: str, render_func: Callable[[str], str], uncovered_slices: set[int], append_to_templated: str = "", ) -> Iterator[tuple[list[RawFileSlice], list[TemplatedFileSlice], str]]: """Address uncovered slices by tweaking the template to hit them. Args: in_str (:obj:`str`): The raw source file. render_func (:obj:`callable`): The render func for the templater. uncovered_slices (:obj:`set` of :obj:`int`): Indices of slices in the raw file which are not rendered in the original rendering. These are the slices we'll attempt to hit by modifying the template. NOTE: These are indices in the _sequence of slices_, not _character indices_ in the raw source file. append_to_templated (:obj:`str`, optional): Optional string to append to the templated file. """ analyzer = self._get_jinja_analyzer(in_str, self._get_jinja_env()) tracer_copy = analyzer.analyze(render_func) max_variants_generated = 10 max_variants_returned = 5 variants: dict[str, tuple[int, JinjaTrace, dict[int, int]]] = {} # Create a mapping of the original source slices before modification so # we can adjust the positions post-modification. original_source_slices = { idx: raw_slice.source_slice() for idx, raw_slice in enumerate(tracer_copy.raw_sliced) } for uncovered_slice in sorted(uncovered_slices)[:max_variants_generated]: tracer_probe = copy.deepcopy(tracer_copy) tracer_trace = copy.deepcopy(tracer_copy) override_raw_slices = [] # `length_deltas` is to keep track of the length changes associated # with the changes we're making so we can correct the positions in # the resulting template. length_deltas: dict[int, int] = {} # Find a path that takes us to 'uncovered_slice'. 
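# --- Illustrative sketch (not part of the module above) ---------------------
# The rectification above shifts later slices by the accumulated length
# difference between the modified and original template. The bookkeeping,
# reduced to plain slices (toy data, illustrative only):
def _shift_slices(slices, deltas):
    # `deltas` maps a source index to (new_length - old_length) at that point.
    stack = sorted(deltas.items())
    carried, out = 0, []
    for s in slices:
        if stack and stack[0][0] == s.start + carried:
            _, d = stack.pop(0)
            # Stretch this slice back to the original coordinates...
            out.append(slice(s.start + carried, s.stop + carried - d))
            # ...and carry the correction into everything that follows.
            carried -= d
        else:
            out.append(slice(s.start + carried, s.stop + carried))
    return out

# A tag grew by 4 characters in the variant, so both slices shift back.
assert _shift_slices([slice(0, 9), slice(9, 14)], {0: 4}) == [
    slice(0, 5),
    slice(5, 10),
]
# -----------------------------------------------------------------------------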
choices = tracer_probe.move_to_slice(uncovered_slice, 0) for branch, options in choices.items(): raw_file_slice = tracer_probe.raw_sliced[branch] if raw_file_slice.tag in ("if", "elif"): # Replace the existing "if" of "elif" expression with a new, # hardcoded value that hits the target slice in the template # (here that is options[0]). new_value = "True" if options[0] == branch + 1 else "False" new_source = f"{{% {raw_file_slice.tag} {new_value} %}}" tracer_trace.raw_slice_info[raw_file_slice].alternate_code = ( new_source ) override_raw_slices.append(branch) length_deltas[raw_file_slice.source_idx] = len(new_source) - len( raw_file_slice.raw ) # Render and analyze the template with the overrides. variant_key = tuple( ( cast(str, tracer_trace.raw_slice_info[rs].alternate_code) if idx in override_raw_slices and tracer_trace.raw_slice_info[rs].alternate_code is not None else rs.raw ) for idx, rs in enumerate(tracer_trace.raw_sliced) ) # In some cases (especially with nested if statements), we may # generate a variant that duplicates an existing variant. Skip # those. variant_raw_str = "".join(variant_key) if variant_raw_str not in variants: analyzer = self._get_jinja_analyzer( variant_raw_str, self._get_jinja_env() ) tracer_trace = analyzer.analyze(render_func) try: trace = tracer_trace.trace( append_to_templated=append_to_templated, ) except Exception: # If we get an error tracing the variant, skip it. This may # happen for a variety of reasons. Basically there's no # guarantee that the variant will be valid Jinja. continue else: # Compute a score for the variant based on the size of initially # uncovered literal slices it hits. score = self._calculate_variant_score( raw_sliced=trace.raw_sliced, sliced_file=trace.sliced_file, uncovered_slices=uncovered_slices, original_source_slices=original_source_slices, ) variants[variant_raw_str] = (score, trace, length_deltas) # Return the top-scoring variants. sorted_variants: list[tuple[int, JinjaTrace, dict[int, int]]] = sorted( variants.values(), key=lambda v: v[0], reverse=True ) for _, trace, deltas in sorted_variants[:max_variants_returned]: # Rectify the source slices of the generated template, which should # ensure that lint issues and fixes for the variants are handled # correctly and can be combined with those from the original template. adjusted_slices = self._rectify_templated_slices( deltas, trace.sliced_file, ) yield ( tracer_copy.raw_sliced, adjusted_slices, trace.templated_str, ) @large_file_check def process_with_variants( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> Iterator[tuple[TemplatedFile, list[SQLTemplaterError]]]: """Process a string and return one or more variant renderings. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. 
""" templated_file, violations = self.process( in_str=in_str, fname=fname, config=config, formatter=formatter ) yield templated_file, violations # Find uncovered code (if any), tweak the template to hit that code. # First, identify the literals which _are_ covered. covered_literal_positions = { tfs.source_slice.start for tfs in templated_file.sliced_file # It's covered if it's rendered if not is_zero_slice(tfs.templated_slice) } templater_logger.debug( "Covered literal positions %s", covered_literal_positions ) uncovered_literal_idxs = { idx for idx, raw_slice in enumerate(templated_file.raw_sliced) if raw_slice.slice_type == "literal" and raw_slice.source_idx not in covered_literal_positions } templater_logger.debug( "Uncovered literals correspond to slices %s", uncovered_literal_idxs ) # NOTE: No validation required as all validation done in the `.process()` # call above. _, _, render_func = self.construct_render_func(fname=fname, config=config) for raw_sliced, sliced_file, templated_str in self._handle_unreached_code( in_str, render_func, uncovered_literal_idxs ): yield ( TemplatedFile( source_str=in_str, templated_str=templated_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), violations, ) @staticmethod def _exclude_macros(macro_path: str, exclude_macros_path: list[str]) -> bool: """Determines if a macro is within the exclude macros path. These macros will be ignored and not loaded into context Args: macro_path (str): Str of the path to the macro exclude_macros_path (list[str]): Str of the path to the macros to exclude Returns: bool: True if the macro should be excluded """ for exclude_path in exclude_macros_path: macro_path_normalized = os.path.normpath(os.path.abspath(macro_path)) exclude_path_normalized = os.path.normpath(exclude_path) if exclude_path_normalized in macro_path_normalized: templater_logger.debug("Skipping this macro file: %s", macro_path) return True return False class DummyUndefined(jinja2.Undefined): """Acts as a dummy value to try and avoid template failures. Inherits from jinja2.Undefined so Jinja's default() filter will treat it as a missing value, even though it has a non-empty value in normal contexts. """ # Tell Jinja this object is safe to call and does not alter data. # https://jinja.palletsprojects.com/en/2.9.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable unsafe_callable = False # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable alters_data = False def __init__(self, name: str) -> None: super().__init__() self.name = name def __str__(self) -> str: return self.name.replace(".", "_") @classmethod def create(cls, name: str) -> "DummyUndefined": """Factory method. When ignoring=templating is configured, use 'name' as the value for undefined variables. We deliberately avoid recording and reporting undefined variables as errors. Using 'name' as the value won't always work, but using 'name', combined with implementing the magic methods (such as __eq__, see above), works well in most cases. """ templater_logger.debug( "Providing dummy value for undefined Jinja variable: %s", name ) result = DummyUndefined(name) return result def __getattr__(self, item: str) -> "DummyUndefined": """Intercept any calls to undefined attributes. Args: item (str): The name of the attribute. Returns: object: A dynamically created instance of this class. """ return self.create(f"{self.name}.{item}") # Implement the most common magic methods. 
This helps avoid # templating errors for undefined variables. # https://www.tutorialsteacher.com/python/magic-methods-in-python def _self_impl(self, *args: Any, **kwargs: Any) -> "DummyUndefined": """Return an instance of the class itself. Args: *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: object: An instance of the class itself. """ return self def _bool_impl(self, *args: Any, **kwargs: Any) -> bool: """Return a boolean value. Args: *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: bool: A boolean value. """ return True # We're intentionally changing the behaviour here compared to the base # class of Undefined. That means we're going to silence the `assignment` # mypy warnings. Operations on an undefined result in another undefined. __add__ = __sub__ = __mul__ = _self_impl # type: ignore[assignment] __floordiv__ = __truediv__ = _self_impl # type: ignore[assignment] __mod__ = __pow__ = _self_impl # type: ignore[assignment] __pos__ = __neg__ = _self_impl # type: ignore[assignment] __lshift__ = __rshift__ = _self_impl __getitem__ = _self_impl # type: ignore[assignment] __invert__ = _self_impl __call__ = _self_impl # type: ignore[assignment] # Boolean operations on an undefined are handled separately. __and__ = __or__ = __xor__ = __bool__ = _bool_impl __lt__ = __le__ = __ge__ = __gt__ = _bool_impl # type: ignore[assignment] __eq__ = __ne__ = _bool_impl def __hash__(self) -> int: # pragma: no cover """Return a constant hash value. Returns: int: A constant hash value. """ # This is called by the "in" operator, among other things. return 0 def __iter__(self) -> Iterator["DummyUndefined"]: """Return an iterator that contains only the instance of the class itself. Returns: iterator: An iterator. """ return [self].__iter__() class DBTTestExtension(Extension): """Jinja extension to handle the dbt test tag.""" tags = {"test"} def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.Macro: """Parses out the contents of the test tag.""" node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno) test_name = parser.parse_assign_target(name_only=True).name parser.parse_signature(node) node.name = f"test_{test_name}" node.body = parser.parse_statements(("name:endtest",), drop_needle=True) return node sqlfluff-3.4.2/src/sqlfluff/core/templaters/placeholder.py000066400000000000000000000216721503426445100237310ustar00rootroot00000000000000"""Defines the placeholder template.""" import logging from typing import Any, Optional import regex from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLTemplaterError from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.helpers.slice import offset_slice from sqlfluff.core.templaters.base import ( RawFileSlice, RawTemplater, TemplatedFile, TemplatedFileSlice, large_file_check, ) # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") KNOWN_STYLES = { # e.g. WHERE bla = :name "colon": regex.compile(r"(?<![:\w\x5c]):(?P<param_name>\w+)(?!:)", regex.UNICODE), # e.g. SELECT :"column" FROM :table WHERE bla = :'name' "colon_optional_quotes": regex.compile( r"(?<!:):(?P<quotation>['\"]?)(?P<param_name>[\w_]+)\1", regex.UNICODE ), # e.g. WHERE bla = table:name - use with caution as more prone to false positives "colon_nospaces": regex.compile(r"(?<!:):(?P<param_name>\w+)", regex.UNICODE), # e.g. WHERE bla = :2 "numeric_colon": regex.compile( r"(?<![:\w\x5c]):(?P<param_name>\d+)", regex.UNICODE ), # e.g. WHERE bla = %(name)s "pyformat": regex.compile( r"(?<![:\w\x5c])%\((?P<param_name>[\w_]+)\)s", regex.UNICODE ), # e.g.
WHERE bla = $name or WHERE bla = ${name} "dollar": regex.compile( r"(?<![:\w\x5c])\${?(?P<param_name>[\w_]+)}?", regex.UNICODE ), # e.g. WHERE bla = $name$ (DbUp compatible) "dollar_surround": regex.compile( r"(?<![:\w\x5c])\$(?P<param_name>[-\w]+)\$", regex.UNICODE ), # e.g. USE ${flyway:database}.schema_name; "flyway_var": regex.compile(r"\${(?P<param_name>\w+[:\w_]+)}", regex.UNICODE), # e.g. WHERE bla = ? "question_mark": regex.compile(r"\?", regex.UNICODE), # e.g. WHERE bla = $3 or WHERE bla = ${3} "numeric_dollar": regex.compile( r"\${?(?P<param_name>[\d]+)}?", regex.UNICODE ), # e.g. WHERE bla = %s "percent": regex.compile(r"(?<![:\w\x5c])%s", regex.UNICODE), # e.g. WHERE bla = &s or WHERE bla = &{s} "ampersand": regex.compile(r"(?<!&)&{?(?P<param_name>[\w]+)}?", regex.UNICODE), } class PlaceholderTemplater(RawTemplater): """A templater for generic placeholders. Different libraries and tools use different styles of placeholders in order to escape them when running queries. In order to perform parsing of those templated queries, it's necessary to replace these placeholders with user-provided values, which is the job of this templater. See https://www.python.org/dev/peps/pep-0249/#paramstyle for the specifications for Python, they cover most cases. """ name = "placeholder" def __init__(self, override_context: Optional[dict[str, Any]] = None): self.default_context = dict(test_value="__test__") self.override_context = override_context or {} # copy of the Python templater def get_context( self, fname: Optional[str], config: Optional[FluffConfig], ) -> dict[str, Any]: """Get the templating context from the config.""" live_context = super().get_context(fname, config) if "param_regex" in live_context and "param_style" in live_context: raise ValueError( "Either param_style or param_regex must be provided, not both" ) if "param_regex" in live_context: live_context["__bind_param_regex"] = regex.compile( live_context["param_regex"] ) elif "param_style" in live_context: param_style = live_context["param_style"] if param_style not in KNOWN_STYLES: raise ValueError( 'Unknown param_style "{}", available are: {}'.format( param_style, list(KNOWN_STYLES.keys()) ) ) live_context["__bind_param_regex"] = KNOWN_STYLES[param_style] else: raise ValueError( "No param_regex nor param_style was provided to the placeholder " "templater!" ) return live_context @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> tuple[TemplatedFile, list[SQLTemplaterError]]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output.
""" context = self.get_context(fname, config) template_slices = [] raw_slices = [] last_pos_raw, last_pos_templated = 0, 0 out_str = "" regex = context["__bind_param_regex"] # when the param has no name, use a 1-based index param_counter = 1 for found_param in regex.finditer(in_str): span = found_param.span() if "param_name" not in found_param.groupdict(): param_name = str(param_counter) param_counter += 1 else: param_name = found_param["param_name"] last_literal_length = span[0] - last_pos_raw if param_name in context: replacement = str(context[param_name]) else: replacement = param_name if "quotation" in found_param.groupdict(): quotation = found_param["quotation"] replacement = quotation + replacement + quotation # add the literal to the slices template_slices.append( TemplatedFileSlice( slice_type="literal", source_slice=slice(last_pos_raw, span[0], None), templated_slice=offset_slice( last_pos_templated, last_literal_length, ), ) ) raw_slices.append( RawFileSlice( raw=in_str[last_pos_raw : span[0]], slice_type="literal", source_idx=last_pos_raw, ) ) out_str += in_str[last_pos_raw : span[0]] # add the current replaced element start_template_pos = last_pos_templated + last_literal_length template_slices.append( TemplatedFileSlice( slice_type="templated", source_slice=slice(span[0], span[1]), templated_slice=offset_slice(start_template_pos, len(replacement)), ) ) raw_slices.append( RawFileSlice( raw=in_str[span[0] : span[1]], slice_type="templated", source_idx=span[0], ) ) out_str += replacement # update the indexes last_pos_raw = span[1] last_pos_templated = start_template_pos + len(replacement) # add the last literal, if any if len(in_str) > last_pos_raw: template_slices.append( TemplatedFileSlice( slice_type="literal", source_slice=slice(last_pos_raw, len(in_str)), templated_slice=offset_slice( last_pos_templated, (len(in_str) - last_pos_raw), ), ) ) raw_slices.append( RawFileSlice( raw=in_str[last_pos_raw:], slice_type="literal", source_idx=last_pos_raw, ) ) out_str += in_str[last_pos_raw:] return ( TemplatedFile( # original string source_str=in_str, # string after all replacements templated_str=out_str, # filename fname=fname, # list of TemplatedFileSlice sliced_file=template_slices, # list of RawFileSlice, same size raw_sliced=raw_slices, ), [], # violations, always empty ) sqlfluff-3.4.2/src/sqlfluff/core/templaters/python.py000066400000000000000000001361561503426445100227760ustar00rootroot00000000000000"""Defines the templaters.""" import ast import re from collections.abc import Iterable, Iterator from string import Formatter from typing import Any, Callable, NamedTuple, Optional from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLTemplaterError from sqlfluff.core.formatter import FormatterInterface from sqlfluff.core.helpers.slice import offset_slice, zero_slice from sqlfluff.core.helpers.string import findall from sqlfluff.core.templaters.base import ( RawFileSlice, RawTemplater, TemplatedFile, TemplatedFileSlice, large_file_check, templater_logger, ) class IntermediateFileSlice(NamedTuple): """An intermediate representation of a partially sliced File.""" intermediate_type: str source_slice: slice templated_slice: slice slice_buffer: list[RawFileSlice] def _trim_end( self, templated_str: str, target_end: str = "head" ) -> tuple["IntermediateFileSlice", list[TemplatedFileSlice]]: """Trim the ends of a intermediate segment.""" target_idx = 0 if target_end == "head" else -1 terminator_types = ("block_start") if target_end == "head" else ("block_end") 
        main_source_slice = self.source_slice
        main_templated_slice = self.templated_slice
        slice_buffer = self.slice_buffer

        end_buffer = []

        # Yield any leading literals, comments or blocks.
        while len(slice_buffer) > 0 and slice_buffer[target_idx].slice_type in (
            "literal",
            "block_start",
            "block_end",
            "comment",
        ):
            focus = slice_buffer[target_idx]
            templater_logger.debug("            %s Focus: %s", target_end, focus)
            # Is it a zero length item?
            if focus.slice_type in ("block_start", "block_end", "comment"):
                # Only add the length in the source space.
                templated_len = 0
            else:
                # Assume it's a literal, check the literal actually matches.
                templated_len = len(focus.raw)
                if target_end == "head":
                    check_slice = offset_slice(
                        main_templated_slice.start,
                        templated_len,
                    )
                else:
                    check_slice = slice(
                        main_templated_slice.stop - templated_len,
                        main_templated_slice.stop,
                    )

                if templated_str[check_slice] != focus.raw:
                    # It doesn't match, we can't use it.
                    templater_logger.debug("            Nope")
                    break

            # If it does match, set up the new slices
            if target_end == "head":
                division = (
                    main_source_slice.start + len(focus.raw),
                    main_templated_slice.start + templated_len,
                )
                new_slice = TemplatedFileSlice(
                    focus.slice_type,
                    slice(main_source_slice.start, division[0]),
                    slice(main_templated_slice.start, division[1]),
                )
                end_buffer.append(new_slice)
                main_source_slice = slice(division[0], main_source_slice.stop)
                main_templated_slice = slice(division[1], main_templated_slice.stop)
            else:
                division = (
                    main_source_slice.stop - len(focus.raw),
                    main_templated_slice.stop - templated_len,
                )
                new_slice = TemplatedFileSlice(
                    focus.slice_type,
                    slice(division[0], main_source_slice.stop),
                    slice(division[1], main_templated_slice.stop),
                )
                end_buffer.insert(0, new_slice)
                main_source_slice = slice(main_source_slice.start, division[0])
                main_templated_slice = slice(main_templated_slice.start, division[1])

            slice_buffer.pop(target_idx)
            if focus.slice_type in terminator_types:
                break
        # Return a new Intermediate slice and the buffer.
        # NB: Don't check size of slice buffer here. We can do that later.
        new_intermediate = self.__class__(
            "compound", main_source_slice, main_templated_slice, slice_buffer
        )
        return new_intermediate, end_buffer

    def trim_ends(
        self, templated_str: str
    ) -> tuple[
        list[TemplatedFileSlice], "IntermediateFileSlice", list[TemplatedFileSlice]
    ]:
        """Trim both ends of an intermediate slice."""
        # Trim start:
        new_slice, head_buffer = self._trim_end(
            templated_str=templated_str, target_end="head"
        )
        # Trim end:
        new_slice, tail_buffer = new_slice._trim_end(
            templated_str=templated_str, target_end="tail"
        )
        # Return
        return head_buffer, new_slice, tail_buffer

    def try_simple(self) -> TemplatedFileSlice:
        """Try to turn this intermediate slice into a simple slice."""
        # Yield anything simple
        if len(self.slice_buffer) == 1:
            return TemplatedFileSlice(
                self.slice_buffer[0].slice_type,
                self.source_slice,
                self.templated_slice,
            )
        else:
            raise ValueError("IntermediateFileSlice is not simple!")

    def coalesce(self) -> TemplatedFileSlice:
        """Coalesce this whole slice into a single one. Brutally."""
        return TemplatedFileSlice(
            PythonTemplater._coalesce_types(self.slice_buffer),
            self.source_slice,
            self.templated_slice,
        )


class PythonTemplater(RawTemplater):
    """A templater using python format strings.

    See: https://docs.python.org/3/library/string.html#format-string-syntax

    For the python templater we don't allow functions or macros because there isn't
    a good way of doing it securely. Use the jinja templater for this.
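
    As an illustrative aside (added example, not part of the original
    docstring): with a configuration along these lines::

        [sqlfluff:templater:python:context]
        my_table = some_schema.some_table

    a query written as ``SELECT * FROM {my_table}`` renders to
    ``SELECT * FROM some_schema.some_table`` before linting.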
The python templater also defines a lot of the logic for how to allow fixing and translation in a templated file. """ name = "python" config_subsection: tuple[str, ...] = ("context",) def __init__(self, override_context: Optional[dict[str, Any]] = None) -> None: self.default_context = dict(test_value="__test__") self.override_context = override_context or {} @staticmethod def infer_type(s: Any) -> Any: """Infer a python type from a string and convert. Given a string value, convert it to a more specific built-in Python type (e.g. int, float, list, dictionary) if possible. """ try: return ast.literal_eval(s) except (SyntaxError, ValueError): return s def get_context( self, fname: Optional[str], config: Optional[FluffConfig], ) -> dict[str, Any]: """Get the templating context from the config. This function retrieves the templating context from the config by loading the config and updating the live_context dictionary with the loaded_context and other predefined context dictionaries. It then goes through the loaded_context dictionary and infers the types of the values before returning the live_context dictionary. Args: fname (str, optional): The file name. config (dict, optional): The config dictionary. Returns: dict: The templating context. """ live_context = super().get_context(fname, config) # Infer types for k in live_context: live_context[k] = self.infer_type(live_context[k]) return live_context @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter: Optional[FormatterInterface] = None, ) -> tuple[TemplatedFile, list[SQLTemplaterError]]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. """ live_context = self.get_context(fname, config) def render_func(raw_str: str) -> str: """Render the string using the captured live_context. In order to support mocking of template variables containing "." characters, this function converts any template variable containing "." into a dictionary lookup. Example: {foo.bar} => {sqlfluff[foo.bar]} """ try: # Hack to allow template variables with dot notation (e.g. foo.bar) raw_str_with_dot_notation_hack = re.sub( r"{([^:}]*\.[^:}]*)(:\S*)?}", r"{sqlfluff[\1]\2}", raw_str ) templater_logger.debug( " Raw String with Dot Notation Hack: %r", raw_str_with_dot_notation_hack, ) rendered_str = raw_str_with_dot_notation_hack.format(**live_context) except KeyError as err: missing_key = err.args[0] if missing_key == "sqlfluff": # Give more useful error message related to dot notation hack # when user has not created the required, magic context key raise SQLTemplaterError( "Failure in Python templating: magic key 'sqlfluff' " "missing from context. This key is required " "for template variables containing '.'. " "https://docs.sqlfluff.com/en/stable/" "perma/python_templating.html" ) elif "." 
in missing_key: # Give more useful error message related to dot notation hack # for missing keys raise SQLTemplaterError( "Failure in Python templating: {} key missing from 'sqlfluff' " "dict in context. Template variables containing '.' are " "required to use the 'sqlfluff' magic fixed context key. " "https://docs.sqlfluff.com/en/stable/" "perma/python_templating.html".format(err) ) else: raise SQLTemplaterError( "Failure in Python templating: {}. Have you configured your " "variables? https://docs.sqlfluff.com/en/stable/" "perma/variables.html".format(err) ) return rendered_str raw_sliced, sliced_file, new_str = self.slice_file( in_str, render_func=render_func, config=config, ) return ( TemplatedFile( source_str=in_str, templated_str=new_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), [], ) def slice_file( self, raw_str: str, render_func: Callable[[str], str], config: Optional[FluffConfig] = None, append_to_templated: str = "", ) -> tuple[list[RawFileSlice], list[TemplatedFileSlice], str]: """Slice the file to determine regions where we can fix.""" templater_logger.info("Slicing File Template") templater_logger.debug(" Raw String: %r", raw_str) # Render the templated string. # NOTE: This seems excessive in this simple example, but for other templating # engines we need more control over the rendering so may need to call this # method more than once. templated_str = render_func(raw_str) templater_logger.debug(" Templated String: %r", templated_str) # Slice the raw file raw_sliced = list(self._slice_template(raw_str)) templater_logger.debug(" Raw Sliced:") for idx, raw_slice in enumerate(raw_sliced): templater_logger.debug(" %s: %r", idx, raw_slice) # Find the literals literals = [ raw_slice.raw for raw_slice in raw_sliced if raw_slice.slice_type == "literal" ] templater_logger.debug(" Literals: %s", literals) for loop_idx in range(2): templater_logger.debug(" # Slice Loop %s", loop_idx) # Calculate occurrences raw_occurrences = self._substring_occurrences(raw_str, literals) templated_occurrences = self._substring_occurrences(templated_str, literals) templater_logger.debug( " Occurrences: Raw: %s, Templated: %s", raw_occurrences, templated_occurrences, ) # Split on invariants split_sliced = list( self._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_str, ) ) templater_logger.debug(" Split Sliced:") for idx, split_slice in enumerate(split_sliced): templater_logger.debug(" %s: %r", idx, split_slice) # Deal with uniques and coalesce the rest sliced_file = list( self._split_uniques_coalesce_rest( split_sliced, raw_occurrences, templated_occurrences, templated_str ) ) templater_logger.debug(" Fully Sliced:") for idx, templ_slice in enumerate(sliced_file): templater_logger.debug(" %s: %r", idx, templ_slice) unwrap_wrapped = ( True if config is None else config.get( "unwrap_wrapped_queries", section="templater", default=True ) ) sliced_file, new_templated_str = self._check_for_wrapped( sliced_file, templated_str, unwrap_wrapped=unwrap_wrapped ) if new_templated_str == templated_str: # If we didn't change it then we're done. break else: # If it's not equal, loop around templated_str = new_templated_str return raw_sliced, sliced_file, new_templated_str @classmethod def _check_for_wrapped( cls, slices: list[TemplatedFileSlice], templated_str: str, unwrap_wrapped: bool = True, ) -> tuple[list[TemplatedFileSlice], str]: """Identify a wrapped query (e.g. dbt test) and handle it. 
If unwrap_wrapped is true, we trim the wrapping from the templated file. If unwrap_wrapped is false, we add a slice at start and end. """ if not slices: # If there are no slices, return return slices, templated_str first_slice = slices[0] last_slice = slices[-1] if unwrap_wrapped: # If we're unwrapping, there is no need to edit the slices, but we do need # to trim the templated string. We should expect that the template will need # to be re-sliced but we should assume that the function calling this one # will deal with that eventuality. return ( slices, templated_str[ first_slice.templated_slice.start : last_slice.templated_slice.stop ], ) if ( first_slice.source_slice.start == 0 and first_slice.templated_slice.start != 0 ): # This means that there is text at the start of the templated file which # doesn't exist in the raw file. Handle this by adding a templated slice # (though it's not really templated) between 0 and 0 in the raw, and 0 and # the current first slice start index in the templated. slices.insert( 0, TemplatedFileSlice( "templated", slice(0, 0), slice(0, first_slice.templated_slice.start), ), ) if last_slice.templated_slice.stop != len(templated_str): # This means that there is text at the end of the templated file which # doesn't exist in the raw file. Handle this by adding a templated slice # beginning and ending at the end of the raw, and the current last slice # stop and file end in the templated. slices.append( TemplatedFileSlice( "templated", zero_slice(last_slice.source_slice.stop), slice(last_slice.templated_slice.stop, len(templated_str)), ) ) return slices, templated_str @classmethod def _substring_occurrences( cls, in_str: str, substrings: Iterable[str] ) -> dict[str, list[int]]: """Find every occurrence of the given substrings.""" occurrences = {} for substring in substrings: occurrences[substring] = list(findall(substring, in_str)) return occurrences @staticmethod def _sorted_occurrence_tuples( occurrences: dict[str, list[int]], ) -> list[tuple[str, int]]: """Sort a dict of occurrences into a sorted list of tuples.""" return sorted( ((raw, idx) for raw in occurrences.keys() for idx in occurrences[raw]), # Sort first by position, then by lexical (for stability) key=lambda x: (x[1], x[0]), ) @classmethod def _slice_template(cls, in_str: str) -> Iterator[RawFileSlice]: """Slice a templated python string into token tuples. This uses Formatter() as per: https://docs.python.org/3/library/string.html#string.Formatter """ fmt = Formatter() in_idx = 0 for literal_text, field_name, format_spec, conversion in fmt.parse(in_str): if literal_text: escape_chars = cls._sorted_occurrence_tuples( cls._substring_occurrences(literal_text, ["}", "{"]) ) idx = 0 while escape_chars: first_char = escape_chars.pop() # Is there a literal first? if first_char[1] > idx: yield RawFileSlice( literal_text[idx : first_char[1]], "literal", in_idx ) in_idx += first_char[1] - idx # Add the escaped idx = first_char[1] + len(first_char[0]) # We double them here to make the raw yield RawFileSlice( literal_text[first_char[1] : idx] * 2, "escaped", in_idx ) # Will always be 2 in this case. # This is because ALL escape sequences in the python formatter # are two characters which reduce to one. 
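                    # Added example (illustrative): Formatter().parse() hands
                    # back literal text with "{{" already collapsed to "{", so
                    # the doubling above reconstructs the two raw source
                    # characters, and the raw index advances by 2 below.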
                    in_idx += 2
                # Deal with last one (if present)
                if literal_text[idx:]:
                    yield RawFileSlice(literal_text[idx:], "literal", in_idx)
                    in_idx += len(literal_text) - idx
            # Deal with fields
            if field_name:
                constructed_token = "{{{field_name}{conv}{spec}}}".format(
                    field_name=field_name,
                    conv=f"!{conversion}" if conversion else "",
                    spec=f":{format_spec}" if format_spec else "",
                )
                yield RawFileSlice(constructed_token, "templated", in_idx)
                in_idx += len(constructed_token)

    @classmethod
    def _split_invariants(
        cls,
        raw_sliced: list[RawFileSlice],
        literals: list[str],
        raw_occurrences: dict[str, list[int]],
        templated_occurrences: dict[str, list[int]],
        templated_str: str,
    ) -> Iterator[IntermediateFileSlice]:
        """Split a sliced file on its invariant literals.

        We prioritise the _longest_ invariants first as they are more likely
        to be the anchors.
        """
        # Calculate invariants
        invariants = [
            literal
            for literal in literals
            if len(raw_occurrences[literal]) == 1
            and len(templated_occurrences[literal]) == 1
        ]
        # Work through the invariants and make sure they appear
        # in order.
        for linv in sorted(invariants, key=len, reverse=True):
            # Any invariants which have templated positions, relative
            # to source positions, which aren't in order, should be
            # ignored.

            # Is this one still relevant?
            if linv not in invariants:
                continue  # pragma: no cover
            source_pos, templ_pos = raw_occurrences[linv], templated_occurrences[linv]
            # Copy the list before iterating because we're going to edit it.
            for tinv in invariants.copy():
                if tinv != linv:
                    src_dir = source_pos > raw_occurrences[tinv]
                    tmp_dir = templ_pos > templated_occurrences[tinv]
                    # If it's not in the same direction in the source and template
                    # remove it.
                    if src_dir != tmp_dir:  # pragma: no cover
                        templater_logger.debug(
                            "          Invariant found out of order: %r", tinv
                        )
                        invariants.remove(tinv)

        # Set up some buffers
        buffer: list[RawFileSlice] = []
        idx: Optional[int] = None
        templ_idx = 0
        # Loop through
        for raw_file_slice in raw_sliced:
            if raw_file_slice.raw in invariants:
                if buffer:
                    yield IntermediateFileSlice(
                        "compound",
                        slice(idx, raw_file_slice.source_idx),
                        slice(templ_idx, templated_occurrences[raw_file_slice.raw][0]),
                        buffer,
                    )
                buffer = []
                idx = None
                yield IntermediateFileSlice(
                    "invariant",
                    offset_slice(
                        raw_file_slice.source_idx,
                        len(raw_file_slice.raw),
                    ),
                    offset_slice(
                        templated_occurrences[raw_file_slice.raw][0],
                        len(raw_file_slice.raw),
                    ),
                    [
                        RawFileSlice(
                            raw_file_slice.raw,
                            raw_file_slice.slice_type,
                            templated_occurrences[raw_file_slice.raw][0],
                        )
                    ],
                )
                templ_idx = templated_occurrences[raw_file_slice.raw][0] + len(
                    raw_file_slice.raw
                )
            else:
                buffer.append(
                    RawFileSlice(
                        raw_file_slice.raw,
                        raw_file_slice.slice_type,
                        raw_file_slice.source_idx,
                    )
                )
                if idx is None:
                    idx = raw_file_slice.source_idx
        # If we have a final buffer, yield it
        if buffer:
            yield IntermediateFileSlice(
                "compound",
                slice((idx or 0), (idx or 0) + sum(len(slc.raw) for slc in buffer)),
                slice(templ_idx, len(templated_str)),
                buffer,
            )

    @staticmethod
    def _filter_occurrences(
        file_slice: slice, occurrences: dict[str, list[int]]
    ) -> dict[str, list[int]]:
        """Filter a dict of occurrences to just those within a slice."""
        filtered = {
            key: [
                pos
                for pos in occurrences[key]
                if pos >= file_slice.start and pos < file_slice.stop
            ]
            for key in occurrences.keys()
        }
        return {key: filtered[key] for key in filtered.keys() if filtered[key]}

    @staticmethod
    def _coalesce_types(elems: list[RawFileSlice]) -> str:
        """Coalesce to the priority type."""
        # Make a set of types
        types = {elem.slice_type for elem in elems}
        # Replace block types with
templated for typ in list(types): if typ.startswith("block_"): # pragma: no cover types.remove(typ) types.add("templated") # Take the easy route if they're all the same type if len(types) == 1: return types.pop() # Then deal with priority priority = ["templated", "escaped", "literal"] for p in priority: if p in types: return p raise RuntimeError( f"Exhausted priorities in _coalesce_types! {types!r}" ) # pragma: no cover @classmethod def _split_uniques_coalesce_rest( cls, split_file: list[IntermediateFileSlice], raw_occurrences: dict[str, list[int]], templ_occurrences: dict[str, list[int]], templated_str: str, ) -> Iterator[TemplatedFileSlice]: """Within each of the compound sections split on unique literals. For everything else we coalesce to the dominant type. Returns: Iterable of the type of segment, the slice in the raw file and the slice in the templated file. """ # A buffer to capture tail segments tail_buffer: list[TemplatedFileSlice] = [] templater_logger.debug(" _split_uniques_coalesce_rest: %s", split_file) for int_file_slice in split_file: # Yield anything from the tail buffer if tail_buffer: # pragma: no cover templater_logger.debug( " Yielding Tail Buffer [start]: %s", tail_buffer ) yield from tail_buffer tail_buffer = [] # Check whether we're handling a zero length slice. if ( int_file_slice.templated_slice.stop - int_file_slice.templated_slice.start == 0 ): # pragma: no cover point_combo = int_file_slice.coalesce() templater_logger.debug( " Yielding Point Combination: %s", point_combo ) yield point_combo continue # Yield anything simple try: simple_elem = int_file_slice.try_simple() templater_logger.debug(" Yielding Simple: %s", simple_elem) yield simple_elem continue except ValueError: pass # Trim ends and overwrite the current working copy. head_buffer, int_file_slice, tail_buffer = int_file_slice.trim_ends( templated_str=templated_str ) if head_buffer: yield from head_buffer # pragma: no cover # Have we consumed the whole thing? if not int_file_slice.slice_buffer: continue # pragma: no cover # Try to yield simply again (post trim) try: # pragma: no cover simple_elem = int_file_slice.try_simple() templater_logger.debug(" Yielding Simple: %s", simple_elem) yield simple_elem continue except ValueError: pass templater_logger.debug(" Intermediate Slice: %s", int_file_slice) # Generate the coalesced version in case we need it coalesced = int_file_slice.coalesce() # Look for anchors raw_occs = cls._filter_occurrences( int_file_slice.source_slice, raw_occurrences ) templ_occs = cls._filter_occurrences( int_file_slice.templated_slice, templ_occurrences ) # Do we have any uniques to split on? # NB: We use `get` on the templated occurrences, because it's possible # that because of an if statement, something is in the source, but # not in the templated at all. In that case, we shouldn't use it. one_way_uniques = [ key for key in raw_occs.keys() if len(raw_occs[key]) == 1 and len(templ_occs.get(key, [])) >= 1 ] two_way_uniques = [ key for key in one_way_uniques if len(templ_occs[key]) == 1 ] # if we don't have anything to anchor on, then just return (coalescing # types) if not raw_occs or not templ_occs or not one_way_uniques: templater_logger.debug( " No Anchors or Uniques. Yielding Whole: %s", coalesced ) yield coalesced continue # Deal with the inner segment itself. 
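            # Added note (illustrative, not from the original source): a
            # "two way unique" here is a literal occurring exactly once in
            # both the source and the templated output (a reliable anchor),
            # while a "one way unique" occurs once in the source but possibly
            # several times in the output - e.g. a literal inside a
            # {% for %} loop body which renders on each iteration.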
            templater_logger.debug(
                "    Intermediate Slice [post trim]: %s: %r",
                int_file_slice,
                templated_str[int_file_slice.templated_slice],
            )
            templater_logger.debug("        One Way Uniques: %s", one_way_uniques)
            templater_logger.debug("        Two Way Uniques: %s", two_way_uniques)

            # Hang onto the starting position, which we'll advance as we go.
            starts = (
                int_file_slice.source_slice.start,
                int_file_slice.templated_slice.start,
            )

            # Deal with two way uniques first, because they are easier.
            # If we do find any we use recursion, because we'll want to do
            # all of the above checks again.
            if two_way_uniques:
                # Yield the uniques and coalesce anything between.
                bookmark_idx = 0
                for idx, raw_slice in enumerate(int_file_slice.slice_buffer):
                    pos = 0
                    unq: Optional[str] = None
                    # Does this element contain one of our uniques? If so, where?
                    for unique in two_way_uniques:
                        if unique in raw_slice.raw:
                            pos = raw_slice.raw.index(unique)
                            unq = unique

                    if unq:
                        # Yes it does. Handle it.

                        # Get the position of the unique section.
                        unique_position = (
                            raw_occs[unq][0],
                            templ_occs[unq][0],
                        )
                        templater_logger.debug(
                            "            Handling Unique: %r, %s, %s, %r",
                            unq,
                            pos,
                            unique_position,
                            raw_slice,
                        )

                        # Handle full slices up to this one
                        if idx > bookmark_idx:
                            # Recurse to deal with any loops separately
                            yield from cls._split_uniques_coalesce_rest(
                                [
                                    IntermediateFileSlice(
                                        "compound",
                                        # slice up to this unique
                                        slice(starts[0], unique_position[0] - pos),
                                        slice(starts[1], unique_position[1] - pos),
                                        int_file_slice.slice_buffer[bookmark_idx:idx],
                                    )
                                ],
                                raw_occs,
                                templ_occs,
                                templated_str,
                            )

                        # Handle any potential partial slice if we're part way through
                        # this one.
                        if pos > 0:
                            yield TemplatedFileSlice(
                                raw_slice.slice_type,
                                slice(unique_position[0] - pos, unique_position[0]),
                                slice(unique_position[1] - pos, unique_position[1]),
                            )

                        # Handle the unique itself and update the bookmark
                        starts = (
                            unique_position[0] + len(unq),
                            unique_position[1] + len(unq),
                        )
                        yield TemplatedFileSlice(
                            raw_slice.slice_type,
                            slice(unique_position[0], starts[0]),
                            slice(unique_position[1], starts[1]),
                        )
                        # Move the bookmark after this position
                        bookmark_idx = idx + 1

                        # Handle any remnant after the unique.
                        if raw_slice.raw[pos + len(unq) :]:
                            remnant_length = len(raw_slice.raw) - (len(unq) + pos)
                            _starts = starts
                            starts = (
                                starts[0] + remnant_length,
                                starts[1] + remnant_length,
                            )
                            yield TemplatedFileSlice(
                                raw_slice.slice_type,
                                slice(_starts[0], starts[0]),
                                slice(_starts[1], starts[1]),
                            )

                if bookmark_idx == 0:  # pragma: no cover
                    # This is a SAFETY VALVE. In Theory we should never be here
                    # and if we are it implies an error elsewhere. This clause
                    # should stop any potential infinite recursion in its tracks
                    # by simply classifying the whole of the current block as
                    # templated and just stopping here.
                    # Bugs triggering this eventuality have been observed in 0.4.0.
                    templater_logger.info(
                        "        Safety Valve Info: %s, %r",
                        two_way_uniques,
                        templated_str[int_file_slice.templated_slice],
                    )
                    templater_logger.warning(
                        "        Python templater safety valve unexpectedly triggered. "
                        "Please report your raw and compiled query on github for "
                        "debugging."
                    )
                    # NOTE: If a bug is reported here, this will incorrectly
                    # classify more of the query as "templated" than it should.
                    yield coalesced
                    continue

                # At the end of the loop deal with any remaining slices.
                # The above "Safety Valve"TM should keep us safe from infinite
                # recursion.
                if len(int_file_slice.slice_buffer) > bookmark_idx:
                    # Recurse to deal with any loops separately
                    yield from cls._split_uniques_coalesce_rest(
                        [
                            IntermediateFileSlice(
                                "compound",
                                # Slicing is easy here, we have no choice
                                slice(starts[0], int_file_slice.source_slice.stop),
                                slice(starts[1], int_file_slice.templated_slice.stop),
                                # Calculate the subsection to deal with.
                                int_file_slice.slice_buffer[
                                    bookmark_idx : len(int_file_slice.slice_buffer)
                                ],
                            )
                        ],
                        raw_occs,
                        templ_occs,
                        templated_str,
                    )
                # We continue here because the buffer should be exhausted,
                # and if there's more to do we'll do it in the recursion.
                continue

            # If we get here, then there ARE uniques, but they are only ONE WAY.
            # This means loops. Loops are tricky.
            # We're very unlikely to get here (impossible?) with just python
            # formatting, but this class is also the base for the jinja templater
            # (and others?) so it may be used there.
            # One way uniques give us landmarks to try and estimate what to do with
            # them.
            owu_templ_tuples = cls._sorted_occurrence_tuples(  # pragma: no cover
                {key: templ_occs[key] for key in one_way_uniques}
            )
            templater_logger.debug(  # pragma: no cover
                "        Handling One Way Uniques: %s", owu_templ_tuples
            )

            # Hang onto our *ending* position too from here.
            stops = (  # pragma: no cover
                int_file_slice.source_slice.stop,
                int_file_slice.templated_slice.stop,
            )

            # OWU in this context refers to "One Way Unique"
            this_owu_idx: Optional[int] = None  # pragma: no cover
            last_owu_idx: Optional[int] = None  # pragma: no cover
            # Iterate through occurrence tuples of the one-way uniques.
            for raw, template_idx in owu_templ_tuples:  # pragma: no cover
                raw_idx = raw_occs[raw][0]
                raw_len = len(raw)

                # Find the index of this owu in the slice_buffer, store the previous
                last_owu_idx = this_owu_idx
                try:
                    this_owu_idx = next(
                        idx
                        for idx, slc in enumerate(int_file_slice.slice_buffer)
                        if slc.raw == raw
                    )
                except StopIteration:  # pragma: no cover
                    # This can happen if the unique was detected, but was introduced
                    # by a templater step. This is a false positive. Skip and move on.
                    templater_logger.info(
                        "One Way Unique %r not found in slice buffer. Skipping...", raw
                    )
                    continue

                templater_logger.debug(
                    "        Handling OWU: %r @%s (raw @%s) [this_owu_idx: %s, "
                    "last_owu_idx: %s]",
                    raw,
                    template_idx,
                    raw_idx,
                    this_owu_idx,
                    last_owu_idx,
                )

                if template_idx > starts[1]:
                    # Yield the bit before this literal. We yield it
                    # all as a tuple, because if we could do any better
                    # we would have done it by now.

                    # Can we identify a meaningful portion of the patch
                    # to recurse a split?
                    sub_section: Optional[list[RawFileSlice]] = None
                    # If it's the start, the slicing is easy
                    if (
                        starts[1] == int_file_slice.templated_slice.start
                    ):  # pragma: no cover TODO?
                        sub_section = int_file_slice.slice_buffer[:this_owu_idx]
                    # If we are AFTER the previous in the template, then it's
                    # also easy. [assuming it's not the same owu]
                    elif (
                        raw_idx > starts[0] and last_owu_idx != this_owu_idx
                    ):  # pragma: no cover
                        if last_owu_idx:
                            sub_section = int_file_slice.slice_buffer[
                                last_owu_idx + 1 : this_owu_idx
                            ]
                        else:
                            sub_section = int_file_slice.slice_buffer[:this_owu_idx]

                    # If we succeeded in one of the above, we can also recurse
                    # and be more intelligent with the other sections.
if sub_section: templater_logger.debug( " Attempting Subsplit [pre]: %s, %r", sub_section, templated_str[slice(starts[1], template_idx)], ) yield from cls._split_uniques_coalesce_rest( [ IntermediateFileSlice( "compound", # Slicing is easy here, we have no choice slice(starts[0], raw_idx), slice(starts[1], template_idx), sub_section, ) ], raw_occs, templ_occs, templated_str, ) # Otherwise, it's the tricky case. else: # In this case we've found a literal, coming AFTER another # in the templated version, but BEFORE (or the same) in the # raw version. This only happens during loops, but it means # that identifying exactly what the intervening bit refers # to is a bit arbitrary. In this case we're going to OVER # estimate and refer to the whole loop segment. # TODO: Maybe this should make two chunks instead, one # working backward, and one working forward. But that's # a job for another day. # First find where we are starting this remainder # in the template (as an index in the buffer). # Any segments *after* cur_idx are involved. if last_owu_idx is None or last_owu_idx + 1 >= len( int_file_slice.slice_buffer ): cur_idx = 0 else: cur_idx = last_owu_idx + 1 # We need to know how many block_ends are after this. block_ends = sum( slc.slice_type == "block_end" for slc in int_file_slice.slice_buffer[cur_idx:] ) # We can allow up to this number of preceding block starts block_start_indices = [ idx for idx, slc in enumerate( int_file_slice.slice_buffer[:cur_idx] ) if slc.slice_type == "block_start" ] # Trim anything which we're not allowed to use. if len(block_start_indices) > block_ends: # pragma: no cover offset = block_start_indices[-1 - block_ends] + 1 elem_sub_buffer = int_file_slice.slice_buffer[offset:] cur_idx -= offset else: elem_sub_buffer = int_file_slice.slice_buffer # We also need to know whether any of the *starting* # segments are involved. # Anything up to start_idx (exclusive) is included. include_start = raw_idx > elem_sub_buffer[0].source_idx # The ending point of this slice, is already decided. end_point = elem_sub_buffer[-1].end_source_idx() # If start_idx is None, we're in luck. We don't need to include # the beginning. if include_start: start_point = elem_sub_buffer[0].source_idx # Otherwise we know it's looped round, we need to include the # whole slice. 
                        else:  # pragma: no cover
                            start_point = elem_sub_buffer[cur_idx].source_idx

                        tricky = TemplatedFileSlice(
                            "templated",
                            slice(start_point, end_point),
                            slice(starts[1], template_idx),
                        )

                        templater_logger.debug(
                            "        Yielding Tricky Case : %s",
                            tricky,
                        )

                        yield tricky

                # Yield the literal
                owu_literal_slice = TemplatedFileSlice(
                    "literal",
                    offset_slice(raw_idx, raw_len),
                    offset_slice(template_idx, raw_len),
                )
                templater_logger.debug(
                    "    Yielding Unique: %r, %s",
                    raw,
                    owu_literal_slice,
                )
                yield owu_literal_slice
                # Update our bookmark
                starts = (
                    raw_idx + raw_len,
                    template_idx + raw_len,
                )

            if starts[1] < stops[1] and last_owu_idx is not None:  # pragma: no cover
                # Yield the end bit
                templater_logger.debug("        Attempting Subsplit [post].")
                yield from cls._split_uniques_coalesce_rest(
                    [
                        IntermediateFileSlice(
                            "compound",
                            # Slicing is easy here, we have no choice
                            slice(raw_idx + raw_len, stops[0]),
                            slice(starts[1], stops[1]),
                            int_file_slice.slice_buffer[last_owu_idx + 1 :],
                        )
                    ],
                    raw_occs,
                    templ_occs,
                    templated_str,
                )

        # Yield anything from the tail buffer
        if tail_buffer:  # pragma: no cover
            templater_logger.debug(
                "        Yielding Tail Buffer [end]: %s", tail_buffer
            )
            yield from tail_buffer
sqlfluff-3.4.2/src/sqlfluff/core/templaters/slicers/000077500000000000000000000000001503426445100225335ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/core/templaters/slicers/__init__.py000066400000000000000000000001111503426445100246350ustar00rootroot00000000000000"""Modules for slicing and mapping between the raw and templated SQL."""
sqlfluff-3.4.2/src/sqlfluff/core/templaters/slicers/tracer.py000066400000000000000000001110441503426445100243660ustar00rootroot00000000000000"""'Trace' Jinja template execution to map output back to the raw template.

This is a newer slicing algorithm that handles cases heuristic.py does not.
"""

# Import annotations for py 3.7 to allow `regex.Match[str]`
from __future__ import annotations

import logging
from dataclasses import dataclass, field
from typing import Callable, ClassVar, NamedTuple, Optional, Union, cast

import regex
from jinja2 import Environment
from jinja2.exceptions import TemplateSyntaxError

from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFileSlice

# Instantiate the templater logger
templater_logger = logging.getLogger("sqlfluff.templater")


class JinjaTrace(NamedTuple):
    """Returned by JinjaTracer.trace()."""

    # Template output
    templated_str: str
    # Raw (i.e. before rendering) Jinja template sliced into tokens
    raw_sliced: list[RawFileSlice]
    # Rendered Jinja template (i.e. output) mapped back to raw_str source
    sliced_file: list[TemplatedFileSlice]


@dataclass
class RawSliceInfo:
    """JinjaTracer-specific info about each RawFileSlice."""

    unique_alternate_id: Optional[str]
    alternate_code: Optional[str]
    next_slice_indices: list[int] = field(default_factory=list)
    inside_block: bool = field(default=False)  # {% block %}


class JinjaTracer:
    """Records execution path of a Jinja template."""

    def __init__(
        self,
        raw_str: str,
        raw_sliced: list[RawFileSlice],
        raw_slice_info: dict[RawFileSlice, RawSliceInfo],
        sliced_file: list[TemplatedFileSlice],
        render_func: Callable[[str], str],
    ):
        # Input
        self.raw_str = raw_str
        self.raw_sliced = raw_sliced
        self.raw_slice_info = raw_slice_info
        self.sliced_file = sliced_file
        self.render_func = render_func

        # Internal bookkeeping
        self.program_counter: int = 0
        self.source_idx: int = 0

    def trace(
        self,
        append_to_templated: str = "",
    ) -> JinjaTrace:
        """Executes raw_str.
Returns template output and trace.""" trace_template_str = "".join( ( cast(str, self.raw_slice_info[rs].alternate_code) if self.raw_slice_info[rs].alternate_code is not None else rs.raw ) for rs in self.raw_sliced ) trace_template_output = self.render_func(trace_template_str) # Split output by section. Each section has two possible formats. trace_entries: list[regex.Match[str]] = list( regex.finditer(r"\0", trace_template_output) ) # If the file has no templated entries, we should just iterate # through the raw slices to add all the placeholders. if not trace_entries: for raw_idx, _ in enumerate(self.raw_sliced): self.record_trace(0, raw_idx) for match_idx, match in enumerate(trace_entries): pos1 = match.span()[0] try: pos2 = trace_entries[match_idx + 1].span()[0] except IndexError: pos2 = len(trace_template_output) p = trace_template_output[pos1 + 1 : pos2] m_id = regex.match(r"^([0-9a-f]+)(_(\d+))?", p) if not m_id: raise ValueError( # pragma: no cover "Internal error. Trace template output does not match expected " "format." ) if m_id.group(3): # E.g. "00000000000000000000000000000001_83". The number after # "_" is the length (in characters) of a corresponding literal # in raw_str. alt_id, slice_length = m_id.group(1), int(m_id.group(3)) else: # E.g. "00000000000000000000000000000002 a < 10". The characters # after the slice ID are executable code from raw_str. alt_id, slice_length = m_id.group(0), len(p[len(m_id.group(0)) + 1 :]) target_slice_idx = self.find_slice_index(alt_id) target_inside_block = self.raw_slice_info[ self.raw_sliced[target_slice_idx] ].inside_block if not target_inside_block: # Normal case: Walk through the template. self.move_to_slice(target_slice_idx, slice_length) else: # {% block %} executes code elsewhere in the template but does # not move there. It's a bit like macro invocation. self.record_trace(slice_length, target_slice_idx) # TRICKY: The 'append_to_templated' parameter is only used by the dbt # templater, passing "\n" for this parameter if we need to add one back. # (The Jinja templater does not pass this parameter, so # 'append_to_templated' gets the default value of "", empty string.) # For more detail, see the comments near the call to slice_file() in # plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py. templated_str = self.render_func(self.raw_str) + append_to_templated return JinjaTrace(templated_str, self.raw_sliced, self.sliced_file) def find_slice_index(self, slice_identifier: Union[int, str]) -> int: """Given a slice identifier, return its index. A slice identifier is a string like 00000000000000000000000000000002. """ raw_slices_search_result = [ idx for idx, rs in enumerate(self.raw_sliced) if self.raw_slice_info[rs].unique_alternate_id == slice_identifier ] if len(raw_slices_search_result) != 1: raise ValueError( # pragma: no cover f"Internal error. Unable to locate slice for {slice_identifier}." ) return raw_slices_search_result[0] def move_to_slice( self, target_slice_idx: int, target_slice_length: int, ) -> dict[int, list[int]]: """Given a template location, walk execution to that point. This updates the internal `program_counter` to the appropriate location. Returns: :obj:`dict`: For each step in the template, a :obj:`list` of which steps are accessible. In many cases each step will only have one accessible next step (the following one), however for branches in the program there may be more than one. 
""" step_candidates = {} while self.program_counter < len(self.raw_sliced): self.record_trace( target_slice_length if self.program_counter == target_slice_idx else 0 ) current_raw_slice = self.raw_sliced[self.program_counter] if self.program_counter == target_slice_idx: # Reached the target slice. Go to next location and stop. self.program_counter += 1 break # Choose the next step. # We could simply go to the next slice (sequential execution). candidates = [self.program_counter + 1] # If we have other options, consider those. candidates.extend( filter( # They're a valid possibility if # they don't take us past the target. lambda idx: idx <= target_slice_idx, self.raw_slice_info[current_raw_slice].next_slice_indices, ) ) # Choose the candidate that takes us closest to the target. candidates.sort(key=lambda c: abs(target_slice_idx - c)) # Save all the candidates for each step so we can return them later. step_candidates[self.program_counter] = candidates # Step forward to the best step found. # https://github.com/sqlfluff/sqlfluff/issues/6121 next_indices = self.raw_slice_info[current_raw_slice].next_slice_indices if ( current_raw_slice.tag == "endfor" # noqa # Elements of inside_set_macro_or_call have empty next_slice_indices and next_indices # The next_slice_indices[0] of the 'endfor' is the first element of the # loop.If the target is within the current loop, the program_counter # should move to the first element of this loop. Otherwise, it will # exit this loop and lose the content rendered in the next iteration and next_indices[0] <= target_slice_idx < self.program_counter ): self.program_counter = next_indices[0] else: self.program_counter = candidates[0] # Return the candidates at each step. return step_candidates def record_trace( self, target_slice_length: int, slice_idx: Optional[int] = None, slice_type: Optional[str] = None, ) -> None: """Add the specified (default: current) location to the trace. Args: target_slice_length (int): The length of the target slice. slice_idx (Optional[int], optional): The index of the slice. Defaults to None. slice_type (Optional[str], optional): The type of the slice. Defaults to None. """ if slice_idx is None: slice_idx = self.program_counter if slice_type is None: slice_type = self.raw_sliced[slice_idx].slice_type self.sliced_file.append( TemplatedFileSlice( slice_type, slice( self.raw_sliced[slice_idx].source_idx, ( self.raw_sliced[slice_idx + 1].source_idx if slice_idx + 1 < len(self.raw_sliced) else len(self.raw_str) ), ), slice(self.source_idx, self.source_idx + target_slice_length), ) ) if target_slice_length: self.source_idx += target_slice_length @dataclass(frozen=True) class JinjaTagConfiguration: """Provides information about a Jinja tag and how it affects JinjaAnalyzer behavior. Attributes: block_type (str): The block type that the Jinja tag maps to; eventually stored in TemplatedFileSlice.slice_type and RawFileSlice.slice_type. block_tracking (bool): Whether the Jinja tag should be traced by JinjaTracer. If True, the Jinja tag will be treated as a conditional block similar to a "for/endfor" or "if/else/endif" block, and JinjaTracer will track potential execution path through the block. block_may_loop (bool): Whether the Jinja tag begins a block that might loop, similar to a "for" tag. If True, JinjaTracer will track the execution path through the block and record a potential backward jump to the loop beginning. 
""" block_type: str block_tracking: bool = False block_may_loop: bool = False class JinjaAnalyzer: """Analyzes a Jinja template to prepare for tracing.""" re_open_tag = regex.compile(r"^\s*({[{%])[\+\-]?\s*") re_close_tag = regex.compile(r"\s*[\+\-]?([}%]})\s*$") def __init__(self, raw_str: str, env: Environment) -> None: # Input self.raw_str: str = raw_str self.env = env # Output self.raw_sliced: list[RawFileSlice] = [] self.raw_slice_info: dict[RawFileSlice, RawSliceInfo] = {} self.sliced_file: list[TemplatedFileSlice] = [] # Internal bookkeeping self.slice_id: int = 0 # {% set %} or {% macro %} or {% call %} self.inside_set_macro_or_call: bool = False self.inside_block = False # {% block %} self.stack: list[int] = [] self.idx_raw: int = 0 __known_tag_configurations: ClassVar[dict[str, JinjaTagConfiguration]] = { # Conditional blocks: "if/elif/else/endif" blocks "if": JinjaTagConfiguration( block_type="block_start", block_tracking=True, ), "elif": JinjaTagConfiguration( block_type="block_mid", block_tracking=True, ), # NOTE: "else" is also used in for loops if there are no iterations "else": JinjaTagConfiguration( block_type="block_mid", block_tracking=True, ), "endif": JinjaTagConfiguration( block_type="block_end", block_tracking=True, ), # Conditional blocks: "for" loops "for": JinjaTagConfiguration( block_type="block_start", block_tracking=True, block_may_loop=True, ), "endfor": JinjaTagConfiguration( block_type="block_end", block_tracking=True, ), # Inclusions and imports # :TRICKY: Syntactically, the Jinja {% include %} directive looks like # a block, but its behavior is basically syntactic sugar for # {{ open("somefile).read() }}. Thus, treat it as templated code. # It's a similar situation with {% import %} and {% from ... import %}. "include": JinjaTagConfiguration( block_type="templated", ), "import": JinjaTagConfiguration( block_type="templated", ), "from": JinjaTagConfiguration( block_type="templated", ), "extends": JinjaTagConfiguration( block_type="block_start", ), # Macros and macro-like tags "macro": JinjaTagConfiguration( block_type="block_start", ), "endmacro": JinjaTagConfiguration( block_type="block_end", ), "call": JinjaTagConfiguration( block_type="block_start", ), "endcall": JinjaTagConfiguration( block_type="block_end", ), "set": JinjaTagConfiguration( block_type="block_start", ), "endset": JinjaTagConfiguration( block_type="block_end", ), "block": JinjaTagConfiguration( block_type="block_start", ), "endblock": JinjaTagConfiguration( block_type="block_end", ), "filter": JinjaTagConfiguration( block_type="block_start", ), "endfilter": JinjaTagConfiguration( block_type="block_end", ), # Common extensions # Expression statement (like {{ ... }} but doesn't actually print anything) "do": JinjaTagConfiguration( block_type="templated", ), } @classmethod def _get_tag_configuration(cls, tag: str) -> JinjaTagConfiguration: """Return information about the behaviors of a tag.""" # Ideally, we should have a known configuration for this Jinja tag. Derived # classes can override this method to provide additional information about the # tags they know about. if tag in cls.__known_tag_configurations: return cls.__known_tag_configurations[tag] # If we don't have a firm configuration for this tag that is most likely # provided by a Jinja extension, we'll try to make some guesses about it based # on some heuristics. But there's a decent chance we'll get this wrong, and # the user should instead consider overriding this method in a derived class to # handle their tag types. 
if tag.startswith("end"): return JinjaTagConfiguration( block_type="block_end", ) elif tag.startswith("el"): # else, elif return JinjaTagConfiguration( block_type="block_mid", ) return JinjaTagConfiguration( block_type="block_start", ) def _get_jinja_tracer( self, raw_str: str, raw_sliced: list[RawFileSlice], raw_slice_info: dict[RawFileSlice, RawSliceInfo], sliced_file: list[TemplatedFileSlice], render_func: Callable[[str], str], ) -> JinjaTracer: """Creates a new object derived from JinjaTracer. Derived classes can provide their own tracers with custom functionality. """ return JinjaTracer( raw_str, raw_sliced, raw_slice_info, sliced_file, render_func ) def next_slice_id(self) -> str: """Returns a new, unique slice ID.""" result = "{0:#0{1}x}".format(self.slice_id, 34)[2:] self.slice_id += 1 return result def slice_info_for_literal(self, length: int, prefix: str = "") -> RawSliceInfo: """Returns a RawSliceInfo for a literal. In the alternate template, literals are replaced with a uniquely numbered, easy-to-parse literal. JinjaTracer uses this output as a "breadcrumb trail" to deduce the execution path through the template. This is important even if the original literal (i.e. in the raw SQL file) was empty, as is the case when Jinja whitespace control is used (e.g. "{%- endif -%}"), because fewer breadcrumbs means JinjaTracer has to *guess* the path, in which case it assumes simple, straight-line execution, which can easily be wrong with loops and conditionals. """ unique_alternate_id = self.next_slice_id() alternate_code = f"\0{prefix}{unique_alternate_id}_{length}" return self.make_raw_slice_info( unique_alternate_id, alternate_code, inside_block=self.inside_block ) def update_inside_set_call_macro_or_block( self, block_type: str, trimmed_parts: list[str], m_open: Optional[regex.Match[str]], m_close: Optional[regex.Match[str]], tag_contents: list[str], ) -> tuple[Optional[RawSliceInfo], str]: """Based on block tag, update whether in a set/call/macro/block section.""" if block_type == "block_start" and trimmed_parts[0] in ( "block", "call", "macro", "set", ): # Jinja supports two forms of {% set %}: # - {% set variable = value %} # - {% set variable %}value{% endset %} # https://jinja.palletsprojects.com/en/2.10.x/templates/#block-assignments # When the second format is used, set one of the fields # 'inside_set_or_macro' or 'inside_block' to True. This info is # used elsewhere, as other code inside these regions require # special handling. (Generally speaking, JinjaAnalyzer ignores # the contents of these blocks, treating them like opaque templated # regions.) try: # Entering a set/macro block. Build a source string consisting # of just this one Jinja command and see if it parses. If so, # it's a standalone command. OTOH, if it fails with "Unexpected # end of template", it was the opening command for a block. self.env.from_string( f"{self.env.block_start_string} {' '.join(trimmed_parts)} " f"{self.env.block_end_string}" ) # Here we should mutate the block type to just templated # so we don't treat it as a block. # https://github.com/sqlfluff/sqlfluff/issues/3750 block_type = "templated" except TemplateSyntaxError as e: if ( isinstance(e.message, str) and "Unexpected end of template" in e.message ): # It was opening a block, thus we're inside a set, macro, or # block. 
if trimmed_parts[0] == "block": self.inside_block = True else: result = None if trimmed_parts[0] == "call": assert m_open and m_close result = self.track_call(m_open, m_close, tag_contents) self.inside_set_macro_or_call = True return result, block_type else: raise # pragma: no cover elif block_type == "block_end": if trimmed_parts[0] in ("endcall", "endmacro", "endset"): # Exiting a set or macro or block. self.inside_set_macro_or_call = False elif trimmed_parts[0] == "endblock": # Exiting a {% block %} block. self.inside_block = False return None, block_type def make_raw_slice_info( self, unique_alternate_id: Optional[str], alternate_code: Optional[str], inside_block: bool = False, ) -> RawSliceInfo: """Create RawSliceInfo as given, or "empty" if in set/macro block.""" if not self.inside_set_macro_or_call: return RawSliceInfo(unique_alternate_id, alternate_code, [], inside_block) else: return RawSliceInfo(None, None, [], False) # We decide the "kind" of element we're dealing with using its _closing_ # tag rather than its opening tag. The types here map back to similar types # of sections in the python slicer. block_types = { "variable_end": "templated", "block_end": "block", "comment_end": "comment", # Raw tags should behave like blocks. Note that # raw_end and raw_begin are whole tags rather # than blocks and comments where we get partial # tags. "raw_end": "block", "raw_begin": "block", } def analyze(self, render_func: Callable[[str], str]) -> JinjaTracer: """Slice template in jinja.""" # str_buff and str_parts are two ways we keep track of tokens received # from Jinja. str_buff concatenates them together, while str_parts # accumulates the individual strings. We generally prefer using # str_parts. That's because Jinja doesn't just split on whitespace, so # by keeping tokens as Jinja returns them, the code is more robust. # Consider the following: # {% set col= "col1" %} # Note there's no space after col. Jinja splits this up for us. If we # simply concatenated the parts together and later split on whitespace, # we'd need some ugly, fragile logic to handle various whitespace # possibilities: # {% set col= "col1" %} # {% set col = "col1" %} # {% set col ="col1" %} # By using str_parts and letting Jinja handle this, it just works. str_buff = "" str_parts = [] # https://jinja.palletsprojects.com/en/2.11.x/api/#jinja2.Environment.lex block_idx = 0 for _, elem_type, raw in self.env.lex(self.raw_str): if elem_type == "data": self.track_literal(raw, block_idx) continue str_buff += raw str_parts.append(raw) if elem_type.endswith("_begin"): self.handle_left_whitespace_stripping(raw, block_idx) raw_slice_info: RawSliceInfo = self.make_raw_slice_info(None, None) tag_contents = [] # raw_end and raw_begin behave a little differently in # that the whole tag shows up in one go rather than getting # parts of the tag at a time. 
m_open = None m_close = None if elem_type.endswith("_end") or elem_type == "raw_begin": block_type = self.block_types[elem_type] block_tag = None # Handle starts and ends of blocks if block_type in ("block", "templated"): m_open = self.re_open_tag.search(str_parts[0]) m_close = self.re_close_tag.search(str_parts[-1]) if m_open and m_close: tag_contents = self.extract_tag_contents( str_parts, m_close, m_open, str_buff ) if block_type == "block" and tag_contents: block_type = self._get_tag_configuration( tag_contents[0] ).block_type block_tag = tag_contents[0] if block_type == "templated" and tag_contents: assert m_open and m_close raw_slice_info = self.track_templated( m_open, m_close, tag_contents ) ( raw_slice_info_temp, block_type, ) = self.update_inside_set_call_macro_or_block( block_type, tag_contents, m_open, m_close, tag_contents ) if raw_slice_info_temp: raw_slice_info = raw_slice_info_temp m_strip_right = regex.search( r"\s+$", raw, regex.MULTILINE | regex.DOTALL ) if block_type == "block_start": block_idx += 1 if elem_type.endswith("_end") and raw.startswith("-") and m_strip_right: # Right whitespace was stripped after closing block. Split # off the trailing whitespace into a separate slice. The # desired behavior is to behave similarly as the left # stripping case. Note that the stakes are a bit lower here, # because lex() hasn't *omitted* any characters from the # strings it returns, it has simply grouped them differently # than we want. trailing_chars = len(m_strip_right.group(0)) self.raw_sliced.append( RawFileSlice( str_buff[:-trailing_chars], block_type, self.idx_raw, block_idx, block_tag, ) ) self.raw_slice_info[self.raw_sliced[-1]] = raw_slice_info slice_idx = len(self.raw_sliced) - 1 self.idx_raw += len(str_buff) - trailing_chars if block_type == "block_end": block_idx += 1 self.raw_sliced.append( RawFileSlice( str_buff[-trailing_chars:], "literal", self.idx_raw, block_idx, ) ) self.raw_slice_info[self.raw_sliced[-1]] = ( self.slice_info_for_literal(0) ) self.idx_raw += trailing_chars else: self.raw_sliced.append( RawFileSlice( str_buff, block_type, self.idx_raw, block_idx, block_tag, ) ) self.raw_slice_info[self.raw_sliced[-1]] = raw_slice_info slice_idx = len(self.raw_sliced) - 1 self.idx_raw += len(str_buff) if block_type == "block_end": block_idx += 1 if block_type.startswith("block"): self.track_block_end(block_type, tag_contents[0]) self.update_next_slice_indices( slice_idx, block_type, tag_contents[0] ) str_buff = "" str_parts = [] return self._get_jinja_tracer( self.raw_str, self.raw_sliced, self.raw_slice_info, self.sliced_file, render_func, ) def track_templated( self, m_open: regex.Match[str], m_close: regex.Match[str], tag_contents: list[str], ) -> RawSliceInfo: """Compute tracking info for Jinja templated region, e.g. {{ foo }}. Args: m_open (regex.Match): A regex match object representing the opening tag. m_close (regex.Match): A regex match object representing the closing tag. tag_contents (list[str]): A list of strings representing the contents of the tag. Returns: RawSliceInfo: A RawSliceInfo object containing the computed tracking info. """ unique_alternate_id = self.next_slice_id() open_ = m_open.group(1) close_ = m_close.group(1) # Here, we still need to evaluate the original tag contents, e.g. in # case it has intentional side effects, but also return a slice ID # for tracking. 
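        # Added example (illustrative): for "{{ foo }}" with a slice ID of
        # "...0002", the alternate code is "\x00...0002 {{ foo }}", so the
        # original expression still renders while also emitting a parseable
        # breadcrumb for the tracer.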
alternate_code = ( f"\0{unique_alternate_id} {open_} {''.join(tag_contents)} {close_}" ) return self.make_raw_slice_info(unique_alternate_id, alternate_code) def track_call( self, m_open: regex.Match[str], m_close: regex.Match[str], tag_contents: list[str], ) -> RawSliceInfo: """Set up tracking for "{% call ... %}". Args: m_open (regex.Match): A regex match object representing the opening tag. m_close (regex.Match): A regex match object representing the closing tag. tag_contents (list[str]): A list of strings representing the contents of the tag. Returns: RawSliceInfo: A RawSliceInfo object containing the computed tracking info. """ unique_alternate_id = self.next_slice_id() open_ = m_open.group(1) close_ = m_close.group(1) # Here, we still need to evaluate the original tag contents, e.g. in # case it has intentional side effects, but also return a slice ID # for tracking. alternate_code = ( f"\0{unique_alternate_id} {open_} {''.join(tag_contents)} {close_}" ) return self.make_raw_slice_info(unique_alternate_id, alternate_code) def track_literal(self, raw: str, block_idx: int) -> None: """Set up tracking for a Jinja literal.""" self.raw_sliced.append( RawFileSlice( raw, "literal", self.idx_raw, block_idx, ) ) # Replace literal text with a unique ID. self.raw_slice_info[self.raw_sliced[-1]] = self.slice_info_for_literal( len(raw), "" ) self.idx_raw += len(raw) @staticmethod def extract_tag_contents( str_parts: list[str], m_close: regex.Match[str], m_open: regex.Match[str], str_buff: str, ) -> list[str]: """Given Jinja tag info, return the stuff inside the braces. I.e. Trim off the brackets and the whitespace. Args: str_parts (list[str]): A list of string parts. m_close (regex.Match[str]): The regex match for the closing tag. m_open (regex.Match[str]): The regex match for the opening tag. str_buff (str): The string buffer. Returns: list[str]: The trimmed parts inside the Jinja tag. """ if len(str_parts) >= 3: # Handle a tag received as individual parts. trimmed_parts = str_parts[1:-1] if trimmed_parts[0].isspace(): del trimmed_parts[0] if trimmed_parts[-1].isspace(): del trimmed_parts[-1] else: # Handle a tag received in one go. trimmed_content = str_buff[len(m_open.group(0)) : -len(m_close.group(0))] trimmed_parts = trimmed_content.split() return trimmed_parts def track_block_end(self, block_type: str, tag_name: str) -> None: """On ending a 'for' or 'if' block, set up tracking. Args: block_type (str): The type of block ('block_start', 'block_mid', 'block_end'). tag_name (str): The name of the tag ('for', 'if', or other configured tag). """ if ( block_type == "block_end" and self._get_tag_configuration(tag_name).block_tracking ): # Replace RawSliceInfo for this slice with one that has alternate ID # and code for tracking. This ensures, for instance, that if a file # ends with "{% endif %} (with no newline following), that we still # generate a TemplateSliceInfo for it. 
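            # Added example (illustrative): the traced template for a file
            # ending in "{% endif %}" becomes "{% endif %}\x00<id>_0", i.e.
            # the original tag followed by a zero-length breadcrumb.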
unique_alternate_id = self.next_slice_id() alternate_code = f"{self.raw_sliced[-1].raw}\0{unique_alternate_id}_0" self.raw_slice_info[self.raw_sliced[-1]] = self.make_raw_slice_info( unique_alternate_id, alternate_code ) def update_next_slice_indices( self, slice_idx: int, block_type: str, tag_name: str ) -> None: """Based on block, update conditional jump info.""" if ( block_type == "block_start" and self._get_tag_configuration(tag_name).block_tracking ): self.stack.append(slice_idx) return None elif not self.stack: return None _idx = self.stack[-1] _raw_slice = self.raw_sliced[_idx] _slice_info = self.raw_slice_info[_raw_slice] if ( block_type == "block_mid" and self._get_tag_configuration(tag_name).block_tracking ): # Record potential forward jump over this block. _slice_info.next_slice_indices.append(slice_idx) self.stack.pop() self.stack.append(slice_idx) elif ( block_type == "block_end" and self._get_tag_configuration(tag_name).block_tracking ): if not self.inside_set_macro_or_call: # Record potential forward jump over this block. _slice_info.next_slice_indices.append(slice_idx) self.stack.pop() if _raw_slice.slice_type == "block_start": assert _raw_slice.tag if self._get_tag_configuration(_raw_slice.tag).block_may_loop: # Record potential backward jump to the loop beginning. self.raw_slice_info[ self.raw_sliced[slice_idx] ].next_slice_indices.append(_idx + 1) def handle_left_whitespace_stripping(self, token: str, block_idx: int) -> None: """If block open uses whitespace stripping, record it. When a "begin" tag (whether block, comment, or data) uses whitespace stripping (https://jinja.palletsprojects.com/en/3.0.x/templates/#whitespace-control) the Jinja lex() function handles this by discarding adjacent whitespace from 'raw_str'. For more insight, see the tokeniter() function in this file: https://github.com/pallets/jinja/blob/main/src/jinja2/lexer.py We want to detect and correct for this in order to: - Correctly update "idx" (if this is wrong, that's a potential DISASTER because lint fixes use this info to update the source file, and incorrect values often result in CORRUPTING the user's file so it's no longer valid SQL. :-O - Guarantee that the slices we return fully "cover" the contents of 'in_str'. We detect skipped characters by looking ahead in in_str for the token just returned from lex(). The token text will either be at the current 'idx_raw' position (if whitespace stripping did not occur) OR it'll be farther along in 'raw_str', but we're GUARANTEED that lex() only skips over WHITESPACE; nothing else. """ # Find the token returned. Did lex() skip over any characters? num_chars_skipped = self.raw_str.index(token, self.idx_raw) - self.idx_raw if not num_chars_skipped: return # Yes. It skipped over some characters. Compute a string # containing the skipped characters. skipped_str = self.raw_str[self.idx_raw : self.idx_raw + num_chars_skipped] # Sanity check: Verify that Jinja only skips over # WHITESPACE, never anything else. if not skipped_str.isspace(): # pragma: no cover templater_logger.warning( "Jinja lex() skipped non-whitespace: %s", skipped_str ) # Treat the skipped whitespace as a literal. 
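        # Added example (illustrative): for "SELECT 1\n{%- endif %}", lex()
        # swallows the "\n" before the tag, so we re-emit it here as a
        # literal with zero templated length to keep the slices covering
        # every source character.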
self.raw_sliced.append( RawFileSlice(skipped_str, "literal", self.idx_raw, block_idx) ) self.raw_slice_info[self.raw_sliced[-1]] = self.slice_info_for_literal(0) self.idx_raw += num_chars_skipped sqlfluff-3.4.2/src/sqlfluff/core/timing.py000066400000000000000000000055011503426445100205510ustar00rootroot00000000000000"""Timing summary class.""" from collections import defaultdict from typing import Optional, Union class TimingSummary: """An object for tracking the timing of similar steps across many files.""" def __init__(self, steps: Optional[list[str]] = None): self.steps = steps self._timings: list[dict[str, float]] = [] def add(self, timing_dict: dict[str, float]) -> None: """Add a timing dictionary to the summary.""" self._timings.append(timing_dict) if not self.steps: self.steps = list(timing_dict.keys()) def summary(self) -> dict[str, dict[str, float]]: """Generate a summary for display.""" vals: dict[str, list[float]] = defaultdict(list) if not self.steps: # pragma: no cover return {} for timing_dict in self._timings: for step in self.steps: if step in timing_dict: vals[step].append(timing_dict[step]) summary = {} for step in self.steps: if vals[step]: summary[step] = { "cnt": len(vals[step]), "sum": sum(vals[step]), "min": min(vals[step]), "max": max(vals[step]), "avg": sum(vals[step]) / len(vals[step]), } return summary class RuleTimingSummary: """An object for tracking the timing of rules across many files.""" def __init__(self) -> None: self._timings: list[tuple[str, str, float]] = [] def add(self, rule_timings: list[tuple[str, str, float]]) -> None: """Add a set of rule timings.""" # Add records to the main list. self._timings.extend(rule_timings) def summary( self, threshold: float = 0.5 ) -> dict[str, dict[str, Union[float, str]]]: """Generate a summary for display.""" keys: set[tuple[str, str]] = set() vals: dict[tuple[str, str], list[float]] = defaultdict(list) for code, name, time in self._timings: vals[(code, name)].append(time) keys.add((code, name)) summary: dict[str, dict[str, Union[float, str]]] = {} for code, name in sorted(keys): timings = vals[(code, name)] # For brevity, if the total time taken is less than # `threshold`, then don't display. if sum(timings) < threshold: continue # NOTE: This summary isn't covered in tests, it's tricky # to force it to exist in a test environment without # making things complicated. summary[f"{code}: {name}"] = { # pragma: no cover "sum (n)": f"{sum(timings):.2f} ({len(timings)})", "min": min(timings), "max": max(timings), } return summary sqlfluff-3.4.2/src/sqlfluff/core/types.py000066400000000000000000000022061503426445100204250ustar00rootroot00000000000000"""Enums used by sqlfluff.""" from enum import Enum from typing import Union from colorama import Fore from sqlfluff.core.helpers.dict import NestedDictRecord, NestedStringDict ConfigValueType = Union[int, float, bool, None, str] # NOTE: We allow lists in the config types, but only lists # of strings. Lists of other things are not allowed and should # be rejected on load (or converted to strings). Given most # config loading starts as strings, it's more likely that we # just don't _try_ to convert lists from anything other than # strings. 
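# Illustrative sketch (keys and values hypothetical): a mapping like
#     {"core": {"dialect": "ansi", "large_file_skip_byte_limit": 20000,
#               "exclude_rules": ["LT01", "LT02"]}}
# satisfies ConfigMappingType below, because every leaf is a scalar or a
# list of strings; a list of ints would need converting (or rejecting)
# at load time.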
ConfigValueOrListType = Union[ConfigValueType, list[str]] ConfigMappingType = NestedStringDict[ConfigValueOrListType] ConfigRecordType = NestedDictRecord[ConfigValueOrListType] class FormatType(Enum): """Enums for formatting types.""" human = "human" json = "json" yaml = "yaml" github_annotation = "github-annotation" github_annotation_native = "github-annotation-native" none = "none" # An option to return _no output_. class Color(Enum): """Colors used by `colorize` function.""" red = Fore.RED green = Fore.GREEN blue = Fore.BLUE light = Fore.YELLOW sqlfluff-3.4.2/src/sqlfluff/dialects/000077500000000000000000000000001503426445100175475ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/dialects/__init__.py000066400000000000000000000003211503426445100216540ustar00rootroot00000000000000"""Dialects, segregated to make imports manageable. NOTE: dialects should not be imported directly from this module, but should be accessed instead using the selector methods in `sqlfluff.core.dialects`. """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_ansi.py000066400000000000000000004301121503426445100225410ustar00rootroot00000000000000"""The core ANSI dialect. This is the core SQL grammar. We'll probably extend this or make it pluggable for other dialects. Here we encode the structure of the language. There shouldn't be any underlying "machinery" here, that should all be defined elsewhere. A lot of the inspiration for this sql grammar is taken from the cockroach labs full sql grammar. In particular their way for dividing up the expression grammar. Check out their docs, they're awesome. https://www.cockroachlabs.com/docs/stable/sql-grammar.html#select_stmt """ from collections.abc import Generator from enum import Enum from typing import NamedTuple, Optional, Union, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseFileSegment, BaseSegment, BinaryOperatorSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralKeywordSegment, LiteralSegment, Matchable, MultiStringParser, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, RawSegment, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WhitespaceSegment, WordSegment, ) from sqlfluff.dialects.dialect_ansi_keywords import ( ansi_reserved_keywords, ansi_unreserved_keywords, ) ansi_dialect = Dialect( "ansi", root_segment_name="FileSegment", formatted_name="ANSI", docstring="""**Default Casing**: ``UPPERCASE`` **Quotes**: String Literals: ``''``, Identifiers: ``""`` This is the base dialect which holds most of the definitions of common SQL commands and structures. If the dialect which you're actually using isn't specifically implemented by SQLFluff, using this dialect is a good place to start. This dialect doesn't intend to be brutal in adhering to (and only to) the ANSI SQL spec *(mostly because ANSI charges for access to that spec)*. 
It aims to be a representation of vanilla SQL before any other project adds their spin to it, and so may contain a slightly wider set of functions than actually available in true ANSI SQL.""", ) ansi_dialect.set_lexer_matchers( [ # Match all forms of whitespace except newlines and carriage returns: # https://stackoverflow.com/questions/3469080/match-whitespace-but-not-newlines # This pattern allows us to also match non-breaking spaces (#2189). RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "inline_comment", r"(--|#)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--", "#")}, ), RegexLexer( "block_comment", r"\/\*([^\*]|\*(?!\/))*\*\/", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer( "single_quote", r"'([^'\\]|\\.|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^'\\]|\\.|'')*)'", 1), "escape_replacements": [(r"\\'|''", "'")], }, ), RegexLexer( "double_quote", r'"(""|[^"\\]|\\.)*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"\\]|\\.)*)"', 1), "escape_replacements": [(r'\\"|""', '"')], }, ), RegexLexer( "back_quote", r"`(?:[^`\\]|\\.)*`", CodeSegment, segment_kwargs={ "quoted_value": (r"`((?:[^`\\]|\\.)*)`", 1), "escape_replacements": [(r"\\`", "`")], }, ), # See https://www.geeksforgeeks.org/postgresql-dollar-quoted-string-constants/ RegexLexer( "dollar_quote", r"\$(\w*)\$(.*?)\$\1\$", CodeSegment, segment_kwargs={ "quoted_value": (r"\$(\w*)\$(.*?)\$\1\$", 2), }, ), # Numeric literal matches integers, decimals, and exponential formats, # Pattern breakdown: # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # \d+\.\d+ e.g. 123.456 # |\d+\.(?![\.\w]) e.g. 123. # (N.B. negative lookahead assertion to ensure we # don't match range operators `..` in Exasol, and # that in bigquery we don't match the "." # in "asd-12.foo"). # |\.\d+ e.g. .456 # |\d+ e.g. 123 # ) # (\.?[eE][+-]?\d+)? Optional exponential. # ( # (?<=\.) If matched character ends with . (e.g. 123.) then # don't worry about word boundary check. # |(?=\b) Check that we are at word boundary to avoid matching # valid naked identifiers (e.g. 123column). 
# ) RegexLexer( "numeric_literal", r"(?>\d+\.\d+|\d+\.(?![\.\w])|\.\d+|\d+)(\.?[eE][+-]?\d+)?((?<=\.)|(?=\b))", LiteralSegment, ), RegexLexer( "obevo_annotation", r"////\s*(CHANGE|BODY|METADATA)[^\n]*", CommentSegment, ), StringLexer("glob_operator", r"~~~", ComparisonOperatorSegment), RegexLexer("like_operator", r"!?~~?\*?", ComparisonOperatorSegment), RegexLexer("newline", r"\r\n|\n", NewlineSegment), StringLexer("casting_operator", "::", CodeSegment), StringLexer("equals", "=", CodeSegment), StringLexer("greater_than", ">", CodeSegment), StringLexer("less_than", "<", CodeSegment), StringLexer("not", "!", CodeSegment), StringLexer("dot", ".", CodeSegment), StringLexer("comma", ",", CodeSegment), StringLexer("plus", "+", CodeSegment), StringLexer("minus", "-", CodeSegment), StringLexer("divide", "/", CodeSegment), StringLexer("percent", "%", CodeSegment), StringLexer("question", "?", CodeSegment), StringLexer("ampersand", "&", CodeSegment), StringLexer("vertical_bar", "|", CodeSegment), StringLexer("caret", "^", CodeSegment), StringLexer("star", "*", CodeSegment), StringLexer("start_bracket", "(", CodeSegment), StringLexer("end_bracket", ")", CodeSegment), StringLexer("start_square_bracket", "[", CodeSegment), StringLexer("end_square_bracket", "]", CodeSegment), StringLexer("start_curly_bracket", "{", CodeSegment), StringLexer("end_curly_bracket", "}", CodeSegment), StringLexer("colon", ":", CodeSegment), StringLexer("semicolon", ";", CodeSegment), # This is the "fallback" lexer for anything else which looks like SQL. RegexLexer("word", r"[0-9a-zA-Z_]+", WordSegment), ] ) # Set the bare functions ansi_dialect.sets("bare_functions").update( ["current_timestamp", "current_time", "current_date"] ) # Set the datetime units ansi_dialect.sets("datetime_units").update( [ "DAY", "DAYOFYEAR", "HOUR", "MILLISECOND", "MINUTE", "MONTH", "QUARTER", "SECOND", "WEEK", "WEEKDAY", "YEAR", ] ) ansi_dialect.sets("date_part_function_name").update(["DATEADD"]) # Set Keywords ansi_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", ansi_unreserved_keywords ) ansi_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", ansi_reserved_keywords ) # Bracket pairs (a set of tuples). # (name, startref, endref, persists) # NOTE: The `persists` value controls whether this type # of bracket is persisted during matching to speed up other # parts of the matching process. Round brackets are the most # common and match the largest areas and so are sufficient. ansi_dialect.bracket_sets("bracket_pairs").update( [ ("round", "StartBracketSegment", "EndBracketSegment", True), ("square", "StartSquareBracketSegment", "EndSquareBracketSegment", False), ("curly", "StartCurlyBracketSegment", "EndCurlyBracketSegment", False), ] ) # Set the value table functions. These are functions that, if they appear as # an item in "FROM", are treated as returning a COLUMN, not a TABLE. Apparently, # among dialects supported by SQLFluff, only BigQuery has this concept, but this # set is defined in the ANSI dialect because: # - It impacts core linter rules (see AL04 and several other rules that subclass # from it) and how they interpret the contents of table_expressions # - At least one other database (DB2) has the same value table function, # UNNEST(), as BigQuery. DB2 is not currently supported by SQLFluff. 
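# Illustrative sketch (registration call hypothetical): a dialect which
# does have value table functions would register them like:
#     bigquery_dialect.sets("value_table_functions").update(["unnest"])
# so that "SELECT x FROM UNNEST([1, 2, 3]) AS x" treats "x" as a column
# alias rather than a table alias.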
ansi_dialect.sets("value_table_functions").update([]) ansi_dialect.add( # Real segments DelimiterGrammar=Ref("SemicolonSegment"), SemicolonSegment=StringParser(";", SymbolSegment, type="statement_terminator"), ColonSegment=StringParser(":", SymbolSegment, type="colon"), SliceSegment=StringParser(":", SymbolSegment, type="slice"), # NOTE: The purpose of the colon_delimiter is that it has different layout rules. # It assumes no whitespace on either side. ColonDelimiterSegment=StringParser(":", SymbolSegment, type="colon_delimiter"), StartBracketSegment=StringParser("(", SymbolSegment, type="start_bracket"), EndBracketSegment=StringParser(")", SymbolSegment, type="end_bracket"), StartSquareBracketSegment=StringParser( "[", SymbolSegment, type="start_square_bracket" ), EndSquareBracketSegment=StringParser("]", SymbolSegment, type="end_square_bracket"), StartCurlyBracketSegment=StringParser( "{", SymbolSegment, type="start_curly_bracket" ), EndCurlyBracketSegment=StringParser("}", SymbolSegment, type="end_curly_bracket"), CommaSegment=StringParser(",", SymbolSegment, type="comma"), DotSegment=StringParser(".", SymbolSegment, type="dot"), StarSegment=StringParser("*", SymbolSegment, type="star"), TildeSegment=StringParser("~", SymbolSegment, type="tilde"), ParameterSegment=StringParser("?", SymbolSegment, type="parameter"), CastOperatorSegment=StringParser("::", SymbolSegment, type="casting_operator"), PlusSegment=StringParser("+", SymbolSegment, type="binary_operator"), MinusSegment=StringParser("-", SymbolSegment, type="binary_operator"), PositiveSegment=StringParser("+", SymbolSegment, type="sign_indicator"), NegativeSegment=StringParser("-", SymbolSegment, type="sign_indicator"), DivideSegment=StringParser("/", SymbolSegment, type="binary_operator"), MultiplySegment=StringParser("*", SymbolSegment, type="binary_operator"), ModuloSegment=StringParser("%", SymbolSegment, type="binary_operator"), SlashSegment=StringParser("/", SymbolSegment, type="slash"), AmpersandSegment=StringParser("&", SymbolSegment, type="ampersand"), PipeSegment=StringParser("|", SymbolSegment, type="pipe"), BitwiseXorSegment=StringParser("^", SymbolSegment, type="binary_operator"), GlobOperatorSegment=TypedParser( "glob_operator", ComparisonOperatorSegment, type="glob_operator" ), LikeOperatorSegment=TypedParser( "like_operator", ComparisonOperatorSegment, type="like_operator" ), RawNotSegment=StringParser("!", SymbolSegment, type="raw_comparison_operator"), RawEqualsSegment=StringParser("=", SymbolSegment, type="raw_comparison_operator"), RawGreaterThanSegment=StringParser( ">", SymbolSegment, type="raw_comparison_operator" ), RawLessThanSegment=StringParser("<", SymbolSegment, type="raw_comparison_operator"), # The following functions can be called without parentheses per ANSI specification BareFunctionSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("bare_functions"), CodeSegment, type="bare_function", ) ), # The strange regex here it to make sure we don't accidentally match numeric # literals. We also use a regex to explicitly exclude disallowed keywords. 
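    # Illustrative sketch: under the pattern below, "my_table_1" parses
    # as a naked identifier, "123" does not (it contains no letter), and
    # "SELECT" is rejected by the anti_template generated from the
    # dialect's reserved keywords.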
NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), ParameterNameSegment=RegexParser( r"\"?[A-Z][A-Z0-9_]*\"?", CodeSegment, type="parameter" ), FunctionNameIdentifierSegment=TypedParser( "word", WordSegment, type="function_name_identifier" ), # Maybe data types should be more restrictive? DatatypeIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: OneOf( RegexParser( r"[A-Z_][A-Z0-9_]*", CodeSegment, type="data_type_identifier", anti_template=r"^(NOT)$", # TODO - this is a stopgap until we implement explicit data types ), Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")), ), ), # Ansi Intervals DatetimeUnitSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("datetime_units"), CodeSegment, type="date_part", ) ), DatePartFunctionName=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("date_part_function_name"), CodeSegment, type="function_name_identifier", ) ), QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier" ), QuotedLiteralSegment=TypedParser( "single_quote", LiteralSegment, type="quoted_literal" ), SingleQuotedIdentifierSegment=TypedParser( "single_quote", IdentifierSegment, type="quoted_identifier" ), NumericLiteralSegment=TypedParser( "numeric_literal", LiteralSegment, type="numeric_literal" ), # NullSegment is defined separately to the keyword, so we can give it a different # type NullLiteralSegment=StringParser("null", LiteralKeywordSegment, type="null_literal"), NanLiteralSegment=StringParser("nan", LiteralKeywordSegment, type="null_literal"), UnknownLiteralSegment=Nothing(), NormalizedGrammar=Nothing(), TrueSegment=StringParser("true", LiteralKeywordSegment, type="boolean_literal"), FalseSegment=StringParser("false", LiteralKeywordSegment, type="boolean_literal"), # We use a GRAMMAR here not a Segment. Otherwise, we get an unnecessary layer SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("DotSegment")], ), BooleanLiteralGrammar=OneOf(Ref("TrueSegment"), Ref("FalseSegment")), # We specifically define a group of arithmetic operators to make it easier to # override this if some dialects have different available operators ArithmeticBinaryOperatorGrammar=OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), Ref("BitwiseLShiftSegment"), Ref("BitwiseRShiftSegment"), ), SignedSegmentGrammar=OneOf(Ref("PositiveSegment"), Ref("NegativeSegment")), StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment")), BooleanBinaryOperatorGrammar=OneOf( Ref("AndOperatorGrammar"), Ref("OrOperatorGrammar") ), IsDistinctFromGrammar=Sequence( "IS", Ref.keyword("NOT", optional=True), "DISTINCT", "FROM" ), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Ref("IsDistinctFromGrammar"), ), # hookpoint for other dialects # e.g. 
EXASOL str to date cast with DATE '2021-01-01' # Give it a different type as needs to be single quotes and # should not be changed by rules (e.g. rule CV10) DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), # Hookpoint for other dialects # e.g. INTO is optional in BIGQUERY MergeIntoLiteralGrammar=Sequence("MERGE", "INTO"), LiteralGrammar=OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("QualifiedNumericLiteralSegment"), # NB: Null is included in the literals, because it is a keyword which # can otherwise be easily mistaken for an identifier. Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), Ref("ArrayLiteralSegment"), Ref("TypedArrayLiteralSegment"), Ref("ObjectLiteralSegment"), ), AndOperatorGrammar=StringParser("AND", BinaryOperatorSegment), OrOperatorGrammar=StringParser("OR", BinaryOperatorSegment), NotOperatorGrammar=StringParser("NOT", KeywordSegment, type="keyword"), # This is a placeholder for other dialects. PreTableFunctionKeywordsGrammar=Nothing(), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), ), # This pattern is used in a lot of places. # Defined here to avoid repetition. BracketedColumnReferenceListGrammar=Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), OrReplaceGrammar=Sequence("OR", "REPLACE"), TemporaryTransientGrammar=OneOf("TRANSIENT", Ref("TemporaryGrammar")), TemporaryGrammar=OneOf("TEMP", "TEMPORARY"), IfExistsGrammar=Sequence("IF", "EXISTS"), IfNotExistsGrammar=Sequence("IF", "NOT", "EXISTS"), LikeGrammar=OneOf("LIKE", "RLIKE", "ILIKE"), LikeExpressionGrammar=Sequence( Sequence( Ref.keyword("NOT", optional=True), Ref("LikeGrammar"), ), Ref("Expression_A_Grammar"), Sequence( "ESCAPE", Ref("Tail_Recurse_Expression_A_Grammar"), optional=True, ), ), PatternMatchingGrammar=Nothing(), UnionGrammar=Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), IsClauseGrammar=OneOf( Ref("NullLiteralSegment"), Ref("NanLiteralSegment"), Ref("UnknownLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NormalizedGrammar"), ), InOperatorGrammar=Sequence( Ref.keyword("NOT", optional=True), "IN", OneOf( Bracketed( OneOf( Delimited( Ref("Expression_A_Grammar"), ), Ref("SelectableGrammar"), ), parse_mode=ParseMode.GREEDY, ), Ref("FunctionSegment"), # E.g. 
UNNEST() ), ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), "FETCH", ), # Define these as grammars to allow child dialects to enable them (since they are # non-standard keywords) IsNullGrammar=Nothing(), NotNullGrammar=Nothing(), CollateGrammar=Nothing(), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "FETCH", "OFFSET", ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "FETCH", ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", "WINDOW", "FETCH", ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "QUALIFY", "WINDOW", "FETCH", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", "FETCH", ), PrimaryKeyGrammar=Sequence("PRIMARY", "KEY"), ForeignKeyGrammar=Sequence("FOREIGN", "KEY"), UniqueKeyGrammar=Sequence("UNIQUE"), NotEnforcedGrammar=Nothing(), # Odd syntax, but prevents eager parameters being confused for data types FunctionParameterGrammar=OneOf( Sequence( Ref("ParameterNameSegment", optional=True), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), AutoIncrementGrammar=Sequence("AUTO_INCREMENT"), # Base Expression element is the right thing to reference for everything # which functions as an expression, but could include literals. BaseExpressionElementGrammar=OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("IntervalExpressionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Sequence( Ref("DatatypeSegment"), Ref("LiteralGrammar"), ), # These terminators allow better performance by giving a signal # of a likely complete match if they come after a match. For # example "123," only needs to match against the LiteralGrammar # and because a comma follows, never be matched against # ExpressionSegment or FunctionSegment, which are both much # more complicated. terminators=[ Ref("CommaSegment"), Ref.keyword("AS"), # TODO: We can almost certainly add a few more here. ], ), FilterClauseGrammar=Sequence( "FILTER", Bracketed(Sequence("WHERE", Ref("ExpressionSegment"))) ), IgnoreRespectNullsGrammar=Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"), FrameClauseUnitGrammar=OneOf("ROWS", "RANGE"), # Some dialects do not support `ON` or `USING` with `CROSS JOIN` ConditionalCrossJoinKeywordsGrammar=Ref.keyword("CROSS"), JoinTypeKeywordsGrammar=OneOf( "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), ), # Extensible in individual dialects NonStandardJoinTypeKeywordsGrammar=Nothing(), ConditionalJoinKeywordsGrammar=OneOf( Ref("JoinTypeKeywordsGrammar"), Ref("ConditionalCrossJoinKeywordsGrammar"), Ref("NonStandardJoinTypeKeywordsGrammar"), ), JoinUsingConditionGrammar=Sequence( "USING", Indent, Bracketed( # NB: We don't use BracketedColumnReferenceListGrammar # here because we're just using SingleIdentifierGrammar, # rather than ObjectReferenceSegment or # ColumnReferenceSegment. # This is a) so that we don't lint it as a reference and # b) because the column will probably be returned anyway # during parsing. 
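            # Illustrative sketch: this matches the bracketed list in
            #     SELECT * FROM a JOIN b USING (id, created_at)
            # where "id" and "created_at" stay plain identifiers.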
Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Dedent, ), # It's as a sequence to allow to parametrize that in Postgres dialect with LATERAL JoinKeywordsGrammar=Sequence("JOIN"), # NATURAL joins are not supported in all dialects (e.g. not in Bigquery # or T-SQL). So define here to allow override with Nothing() for those. NaturalJoinKeywordsGrammar=Sequence( "NATURAL", Ref("JoinTypeKeywordsGrammar", optional=True), ), UnconditionalCrossJoinKeywordsGrammar=Nothing(), # Some dialects such as DuckDB and Clickhouse support a row by row # join between two tables (e.g. POSITIONAL and PASTE) HorizontalJoinKeywordsGrammar=Nothing(), UnconditionalJoinKeywordsGrammar=OneOf( Ref("NaturalJoinKeywordsGrammar"), Ref("UnconditionalCrossJoinKeywordsGrammar"), Ref("HorizontalJoinKeywordsGrammar"), ), # This can be overwritten by dialects ExtendedNaturalJoinKeywordsGrammar=Nothing(), NestedJoinGrammar=Nothing(), ReferentialActionGrammar=OneOf( "RESTRICT", "CASCADE", Sequence("SET", "NULL"), Sequence("NO", "ACTION"), Sequence("SET", "DEFAULT"), ), DropBehaviorGrammar=OneOf("RESTRICT", "CASCADE", optional=True), ColumnConstraintDefaultGrammar=OneOf( Ref("ShorthandCastSegment"), Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ReferenceMatchGrammar=Sequence( "MATCH", OneOf( "FULL", "PARTIAL", "SIMPLE", ), ), ReferenceDefinitionGrammar=Sequence( "REFERENCES", Ref("TableReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ReferenceMatchGrammar", optional=True), AnySetOf( # ON DELETE clause, e.g. ON DELETE NO ACTION Sequence( "ON", "DELETE", Ref("ReferentialActionGrammar"), ), # ON UPDATE clause, e.g. ON UPDATE SET NULL Sequence( "ON", "UPDATE", Ref("ReferentialActionGrammar"), ), ), ), TrimParametersGrammar=OneOf("BOTH", "LEADING", "TRAILING"), DefaultValuesGrammar=Sequence("DEFAULT", "VALUES"), ObjectReferenceDelimiterGrammar=OneOf( Ref("DotSegment"), # NOTE: The double dot syntax allows for default values. 
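        # Illustrative sketch: e.g. Snowflake-style "my_db..my_table",
        # where the omitted middle part falls back to the default schema,
        # which is why two consecutive dots are matched here.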
Sequence(Ref("DotSegment"), Ref("DotSegment")), ), ObjectReferenceTerminatorGrammar=OneOf( "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ), AlterTableOptionsGrammar=OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), # Add things Sequence( OneOf("ADD", "MODIFY"), Ref.keyword("COLUMN", optional=True), Ref("ColumnDefinitionSegment"), OneOf( Sequence(OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment")), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), # Drop Ref("AlterTableDropColumnGrammar"), # Rename Sequence( "RENAME", OneOf("AS", "TO", optional=True), Ref("TableReferenceSegment"), ), ), AlterTableDropColumnGrammar=Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), OrderNoOrderGrammar=OneOf("ORDER", "NOORDER"), ColumnsExpressionNameGrammar=Nothing(), # Uses grammar for LT06 support ColumnsExpressionGrammar=Nothing(), ListComprehensionGrammar=Nothing(), TimeWithTZGrammar=Sequence( OneOf("TIME", "TIMESTAMP"), Ref("BracketedArguments", optional=True), Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True), ), SequenceMinValueGrammar=OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), ), SequenceMaxValueGrammar=OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), ), ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. """ match_grammar = Delimited( Ref("StatementSegment"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ) def get_table_references(self) -> set[str]: """Use parsed tree to extract table references.""" references = set() for stmt in self.get_children("statement"): stmt = cast(StatementSegment, stmt) references |= stmt.get_table_references() return references class IntervalExpressionSegment(BaseSegment): """An interval expression segment.""" type = "interval_expression" match_grammar: Matchable = Sequence( "INTERVAL", OneOf( # The Numeric Version Sequence( Ref("NumericLiteralSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment")), ), # The String version Ref("QuotedLiteralSegment"), # Combine version Sequence( Ref("QuotedLiteralSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment")), ), ), ) class TupleSegment(BaseSegment): """Expression to construct a TUPLE. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#tuple_syntax """ type = "tuple" match_grammar = Bracketed(Delimited(Ref("BaseExpressionElementGrammar"))) class ArrayTypeSegment(BaseSegment): """Prefix for array literals specifying the type. Often "ARRAY" or "ARRAY" """ type = "array_type" match_grammar: Matchable = Nothing() class SizedArrayTypeSegment(BaseSegment): """Array type with a size.""" type = "sized_array_type" match_grammar = Sequence( Ref("ArrayTypeSegment"), Ref("ArrayAccessorSegment"), ) class ArrayLiteralSegment(BaseSegment): """An array literal segment. An unqualified array literal: e.g. 
[1, 2, 3]
    """

    type = "array_literal"
    match_grammar: Matchable = Bracketed(
        Delimited(Ref("BaseExpressionElementGrammar"), optional=True),
        bracket_type="square",
    )


class TypedArrayLiteralSegment(BaseSegment):
    """A typed array literal segment."""

    type = "typed_array_literal"
    match_grammar: Matchable = Sequence(
        Ref("ArrayTypeSegment"),
        Ref("ArrayLiteralSegment"),
    )


class StructTypeSegment(BaseSegment):
    """Expression to construct a STRUCT datatype.

    (Used in BigQuery for example)
    """

    type = "struct_type"
    match_grammar: Matchable = Nothing()


class MapTypeSegment(BaseSegment):
    """Expression to construct a MAP datatype.

    (Used in DuckDB for example)
    """

    type = "map_type"
    match_grammar: Matchable = Nothing()


class StructLiteralSegment(BaseSegment):
    """A struct literal segment.

    An unqualified struct literal: e.g. (1, 2 as foo, 3)

    NOTE: This rarely exists without a preceding type and exists
    mostly for structural & layout reasons.
    """

    type = "struct_literal"
    match_grammar: Matchable = Bracketed(
        Delimited(
            Sequence(
                Ref("BaseExpressionElementGrammar"),
                Ref("AliasExpressionSegment", optional=True),
            ),
        ),
    )


class TypedStructLiteralSegment(BaseSegment):
    """A typed struct literal segment."""

    type = "typed_struct_literal"
    match_grammar: Matchable = Sequence(
        Ref("StructTypeSegment"),
        Ref("StructLiteralSegment"),
    )


class EmptyStructLiteralBracketsSegment(BaseSegment):
    """An empty struct literal segment - `()`.

    NOTE: This is only to set the right type so spacing rules
    are applied correctly.
    """

    type = "struct_literal"
    match_grammar: Matchable = Bracketed()


class EmptyStructLiteralSegment(BaseSegment):
    """An empty struct literal segment - `STRUCT()`."""

    type = "typed_struct_literal"
    match_grammar: Matchable = Sequence(
        Ref("StructTypeSegment"),
        Ref("EmptyStructLiteralBracketsSegment"),
    )


class ObjectLiteralSegment(BaseSegment):
    """An object literal segment."""

    type = "object_literal"
    match_grammar: Matchable = Bracketed(
        Delimited(
            Ref("ObjectLiteralElementSegment"),
            optional=True,
        ),
        bracket_type="curly",
    )


class ObjectLiteralElementSegment(BaseSegment):
    """An object literal element segment."""

    type = "object_literal_element"
    match_grammar: Matchable = Sequence(
        Ref("QuotedLiteralSegment"),
        Ref("ColonSegment"),
        Ref("BaseExpressionElementGrammar"),
    )


class TimeZoneGrammar(BaseSegment):
    """Casting to Time Zone."""

    type = "time_zone_grammar"
    match_grammar = AnyNumberOf(
        Sequence("AT", "TIME", "ZONE", Ref("ExpressionSegment")),
    )


class BracketedArguments(BaseSegment):
    """A series of bracketed arguments.

    e.g. the bracketed part of numeric(1, 3)
    """

    type = "bracketed_arguments"
    match_grammar = Bracketed(
        # The brackets might be empty for some cases...
        Delimited(Ref("LiteralGrammar"), optional=True),
    )


class DatatypeSegment(BaseSegment):
    """A data type segment.

    Supports timestamp with(out) time zone. Doesn't currently
    support intervals.
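
    For example (illustrative), this grammar accepts types such as
    "VARCHAR(255)", "CHARACTER VARYING", "DOUBLE PRECISION" and
    "TIMESTAMP WITH TIME ZONE".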
""" type = "data_type" match_grammar: Matchable = OneOf( Ref("TimeWithTZGrammar"), Sequence( "DOUBLE", "PRECISION", ), Sequence( OneOf( Sequence( OneOf("CHARACTER", "BINARY"), OneOf("VARYING", Sequence("LARGE", "OBJECT")), ), Sequence( # Some dialects allow optional qualification of data types with # schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), Ref("DatatypeIdentifierSegment"), allow_gaps=False, ), ), # There may be no brackets for some data types Ref("BracketedArguments", optional=True), AnyNumberOf( "UNSIGNED", # UNSIGNED MySQL Ref("CharCharacterSetGrammar"), optional=True, ), ), Ref("ArrayTypeSegment"), ) # hookpoint ansi_dialect.add(CharCharacterSetGrammar=Nothing()) class ObjectReferenceSegment(BaseSegment): """A reference to an object.""" type = "object_reference" # match grammar (don't allow whitespace) match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=False, ) class ObjectReferencePart(NamedTuple): """Details about a table alias.""" part: str # Name of the part # Segment(s) comprising the part. Usually just one segment, but could # be multiple in dialects (e.g. BigQuery) that support unusual # characters in names (e.g. "-") segments: list[RawSegment] @classmethod def _iter_reference_parts( cls, elem: RawSegment ) -> Generator[ObjectReferencePart, None, None]: """Extract the elements of a reference and yield.""" # trim on quotes and split out any dots. yield cls.ObjectReferencePart(elem.raw_trimmed(), [elem]) def iter_raw_references(self) -> Generator[ObjectReferencePart, None, None]: """Generate a list of reference strings and elements. Each reference is an ObjectReferencePart. If some are split, then a segment may appear twice, but the substring will only appear once. """ # Extract the references from those identifiers (because some may be quoted) for elem in self.recursive_crawl("identifier"): yield from self._iter_reference_parts(cast(IdentifierSegment, elem)) def is_qualified(self) -> bool: """Return if there is more than one element to the reference.""" return len(list(self.iter_raw_references())) > 1 def qualification(self) -> str: """Return the qualification type of this reference.""" return "qualified" if self.is_qualified() else "unqualified" class ObjectReferenceLevel(Enum): """Labels for the "levels" of a reference. Note: Since SQLFluff does not have access to database catalog information, interpreting references will often be ambiguous. Typical example: The first part *may* refer to a schema, but that is almost always optional if referring to an object in some default or currently "active" schema. For this reason, use of this enum is optional and intended mainly to clarify the intent of the code -- no guarantees! Additionally, the terminology may vary by dialect, e.g. in BigQuery, "project" would be a more accurate term than "schema". """ OBJECT = 1 TABLE = 2 SCHEMA = 3 def extract_possible_references( self, level: Union[ObjectReferenceLevel, int] ) -> list[ObjectReferencePart]: """Extract possible references of a given level. "level" may be (but is not required to be) a value from the ObjectReferenceLevel enum defined above. NOTE: The base implementation here returns at most one part, but dialects such as BigQuery that support nesting (e.g. STRUCT) may return multiple reference parts. 
""" level = self._level_to_int(level) refs = list(self.iter_raw_references()) if len(refs) >= level: return [refs[-level]] return [] def extract_possible_multipart_references( self, levels: list[Union[ObjectReferenceLevel, int]] ) -> list[tuple[ObjectReferencePart, ...]]: """Extract possible multipart references, e.g. schema.table.""" levels_tmp = [self._level_to_int(level) for level in levels] min_level = min(levels_tmp) max_level = max(levels_tmp) refs = list(self.iter_raw_references()) if len(refs) >= max_level: return [tuple(refs[-max_level : 1 - min_level])] return [] @staticmethod def _level_to_int(level: Union[ObjectReferenceLevel, int]) -> int: # If it's an ObjectReferenceLevel, get the value. Otherwise, assume it's # an int. level = getattr(level, "value", level) assert isinstance(level, int) return level class TableReferenceSegment(ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias.""" type = "table_reference" class SchemaReferenceSegment(ObjectReferenceSegment): """A reference to a schema.""" type = "schema_reference" class DatabaseReferenceSegment(ObjectReferenceSegment): """A reference to a database.""" type = "database_reference" class IndexReferenceSegment(ObjectReferenceSegment): """A reference to an index.""" type = "index_reference" class CollationReferenceSegment(ObjectReferenceSegment): """A reference to a collation.""" type = "collation_reference" # Some dialects like PostgreSQL want an identifier only, and quoted # literals aren't allowed. Other dialects like Snowflake only accept # a quoted string literal. We'll be a little overly-permissive and # accept either... it shouldn't be too greedy since this segment generally # occurs only in a Sequence after the "COLLATE" keyword. match_grammar: Matchable = OneOf( Ref("QuotedLiteralSegment"), Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=False, ), ) class RoleReferenceSegment(ObjectReferenceSegment): """A reference to a role, user, or account.""" type = "role_reference" match_grammar: Matchable = Ref("SingleIdentifierGrammar") class TablespaceReferenceSegment(ObjectReferenceSegment): """A reference to a tablespace.""" type = "tablespace_reference" class ExtensionReferenceSegment(ObjectReferenceSegment): """A reference to an extension.""" type = "extension_reference" class ColumnReferenceSegment(ObjectReferenceSegment): """A reference to column, field or alias.""" type = "column_reference" class SequenceReferenceSegment(ObjectReferenceSegment): """A reference to a sequence.""" type = "sequence_reference" class TagReferenceSegment(ObjectReferenceSegment): """A reference to a tag.""" type = "tag_reference" class TriggerReferenceSegment(ObjectReferenceSegment): """A reference to a trigger.""" type = "trigger_reference" class SingleIdentifierListSegment(BaseSegment): """A comma delimited list of identifiers.""" type = "identifier_list" match_grammar: Matchable = Delimited(Ref("SingleIdentifierGrammar")) class ArrayAccessorSegment(BaseSegment): """An array accessor e.g. 
[3:4].""" type = "array_accessor" match_grammar: Matchable = Bracketed( Delimited( OneOf(Ref("NumericLiteralSegment"), Ref("ExpressionSegment")), delimiter=Ref("SliceSegment"), ), bracket_type="square", parse_mode=ParseMode.GREEDY, ) ansi_dialect.add( # This is a hook point to allow subclassing for other dialects AliasedTableReferenceGrammar=Sequence( Ref("TableReferenceSegment"), Ref("AliasExpressionSegment") ) ) class AliasExpressionSegment(BaseSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. """ type = "alias_expression" match_grammar: Matchable = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), # Column alias in VALUES clause Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Ref("SingleQuotedIdentifierSegment"), ), Dedent, ) class AsAliasOperatorSegment(BaseSegment): """The as alias expression operator.""" type = "alias_operator" match_grammar: Matchable = Sequence("AS") class ShorthandCastSegment(BaseSegment): """A casting operation using '::'.""" type = "cast_expression" match_grammar: Matchable = Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf( Sequence( Ref("CastOperatorSegment"), Ref("DatatypeSegment"), Ref("TimeZoneGrammar", optional=True), ), min_times=1, ), ) class QualifiedNumericLiteralSegment(BaseSegment): """A numeric literal with one + or - sign preceding. The qualified numeric literal is a compound of a raw literal and a plus/minus sign. We do it this way rather than at the lexing step because the lexer doesn't deal well with ambiguity. """ type = "numeric_literal" match_grammar: Matchable = Sequence( Ref("SignedSegmentGrammar"), Ref("NumericLiteralSegment"), ) class AggregateOrderByClause(BaseSegment): """An order by clause for an aggregate function. Defined as a class to allow a specific type for rule AM06 """ type = "aggregate_order_by" match_grammar: Matchable = Ref("OrderByClauseSegment") ansi_dialect.add( # FunctionContentsExpressionGrammar intended as a hook to override # in other dialects. FunctionContentsExpressionGrammar=Ref("ExpressionSegment"), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "AggregateOrderByClause" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. 
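        # Illustrative sketch: the AggregateOrderByClause above covers
        # calls such as
        #     STRING_AGG(col, ',' ORDER BY col)
        # where the ORDER BY sits inside the function's brackets.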
Sequence(Ref.keyword("SEPARATOR"), Ref("LiteralGrammar")), # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Ref("IgnoreRespectNullsGrammar"), Ref("IndexColumnDefinitionSegment"), Ref("EmptyStructLiteralSegment"), ), PostFunctionGrammar=OneOf( # Optional OVER suffix for window functions. # This is supported in bigquery & postgres (and its derivatives) # and so is included here for now. Ref("OverClauseSegment"), # Filter clause supported by both Postgres and SQLite Ref("FilterClauseGrammar"), ), ) class OverClauseSegment(BaseSegment): """An OVER clause for window functions.""" type = "over_clause" match_grammar: Matchable = Sequence( Indent, Ref("IgnoreRespectNullsGrammar", optional=True), "OVER", OneOf( Ref("SingleIdentifierGrammar"), # Window name Bracketed( Ref("WindowSpecificationSegment", optional=True), parse_mode=ParseMode.GREEDY, ), ), Dedent, ) class WindowSpecificationSegment(BaseSegment): """Window specification within OVER(...).""" type = "window_specification" match_grammar: Matchable = Sequence( Ref( "SingleIdentifierGrammar", optional=True, exclude=OneOf(Ref.keyword("PARTITION"), Ref.keyword("ORDER")), ), # "Base" window name Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("FrameClauseSegment", optional=True), optional=True, ) class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema.""" type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("BracketedSegment")], ), allow_gaps=False, ) class DateTimeFunctionContentsSegment(BaseSegment): """Datetime function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), ), ) class FunctionContentsSegment(BaseSegment): """Function Contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), ) class FunctionSegment(BaseSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now, we treat them the same because they look the same for our purposes. """ type = "function" match_grammar: Matchable = OneOf( Sequence( Ref("DatePartFunctionNameSegment"), Ref("DateTimeFunctionContentsSegment"), ), Ref("ColumnsExpressionGrammar"), Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ColumnsExpressionFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Ref("FunctionContentsSegment"), ), Ref("PostFunctionGrammar", optional=True), ), ) class ColumnsExpressionFunctionNameSegment(BaseSegment): """COLUMNS function name segment. 
Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar: Matchable = Ref("ColumnsExpressionNameGrammar") class ColumnsExpressionFunctionContentsSegment(BaseSegment): """Columns expression in a select statement. From DuckDB: https://duckdb.org/docs/sql/expressions/star#columns-expression """ type = "columns_expression" match_grammar: Matchable = Nothing() class PartitionClauseSegment(BaseSegment): """A `PARTITION BY` for window functions.""" type = "partitionby_clause" match_grammar: Matchable = Sequence( "PARTITION", "BY", Indent, # Brackets are optional in a partition by statement OptionallyBracketed(Delimited(Ref("ExpressionSegment"))), Dedent, ) class FrameClauseSegment(BaseSegment): """A frame clause for window functions. https://docs.oracle.com/cd/E17952_01/mysql-8.0-en/window-functions-frames.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Sequence("INTERVAL", Ref("QuotedLiteralSegment")), "UNBOUNDED", ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) ansi_dialect.add( # This is a hook point to allow subclassing for other dialects PostTableExpressionGrammar=Nothing() ) class FromExpressionElementSegment(BaseSegment): """A table expression.""" type = "from_expression_element" _base_from_expression_element = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref("TemporalQuerySegment", optional=True), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), Ref("JoinLikeClauseGrammar"), Ref("JoinClauseSegment"), ), optional=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True), Ref("SamplingExpressionSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) match_grammar: Matchable = OneOf( _base_from_expression_element, Bracketed( Sequence( _base_from_expression_element, AnyNumberOf(Ref("JoinClauseSegment")), ), ), ) def get_eventual_alias(self) -> Generator[AliasInfo, None, None]: """Return the eventual table name referred to by this table expression. Returns: :obj:`tuple` of (:obj:`str`, :obj:`BaseSegment`, :obj:`bool`) containing a string representation of the alias, a reference to the segment containing it, and whether it's an alias. 
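
        For example (illustrative): "FROM my_table AS mt" yields an
        AliasInfo for "mt" flagged as an alias, while a bare
        "FROM my_table" yields one for "my_table" flagged as not.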
""" # Get any table expressions tbl_expression = self.get_child("table_expression") if not tbl_expression: # pragma: no cover _bracketed = self.get_child("bracketed") if _bracketed: tbl_expression = _bracketed.get_child("table_expression") # For TSQL nested, bracketed tables get the first table as reference if tbl_expression and not tbl_expression.get_child("object_reference"): _bracketed = tbl_expression.get_child("bracketed") if _bracketed: tbl_expression = _bracketed.get_child("table_expression") # Work out the references ref: Optional[ObjectReferenceSegment] = None if tbl_expression: _ref = tbl_expression.get_child("object_reference") if _ref: ref = cast(ObjectReferenceSegment, _ref) # Handle any aliases has_alias = False alias_expressions = self.get_children("alias_expression", "bracketed") for alias_expression in alias_expressions: if alias_expression.is_type("bracketed"): # pragma: no cover _alias_expression = alias_expression.get_child("alias_expression") if _alias_expression is None: continue alias_expression = _alias_expression # If it has an alias, return that has_alias = True segment = alias_expression.get_child("identifier") if segment: segment = cast(IdentifierSegment, segment) yield AliasInfo( segment.raw_normalized(casefold=False), segment, True, self, alias_expression, ref, ) if has_alias: return # If not return the object name (or None if there isn't one) if ref: references: list = list(ref.iter_raw_references()) # Return the last element of the reference. if references: penultimate_ref: ObjectReferenceSegment.ObjectReferencePart = ( references[-1] ) yield AliasInfo( penultimate_ref.part, penultimate_ref.segments[0], False, self, None, ref, ) return # No references or alias yield AliasInfo( "", None, False, self, None, ref, ) class FromExpressionSegment(BaseSegment): """A from expression segment.""" type = "from_expression" match_grammar: Matchable = OptionallyBracketed( Sequence( Indent, OneOf( # check first for MLTableExpression, # because of possible FunctionSegment in # MainTableExpression Ref("MLTableExpressionSegment"), Ref("FromExpressionElementSegment"), Bracketed(Ref("FromExpressionSegment")), terminators=[Sequence("ORDER", "BY"), Sequence("GROUP", "BY")], ), Dedent, Conditional(Indent, indented_joins=True), AnyNumberOf( Sequence( OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")), ), optional=True, terminators=[Sequence("ORDER", "BY"), Sequence("GROUP", "BY")], ), Conditional(Dedent, indented_joins=True), ) ) class TableExpressionSegment(BaseSegment): """The main table expression e.g. within a FROM clause.""" type = "table_expression" match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), ) class WildcardIdentifierSegment(ObjectReferenceSegment): """Any identifier of the form a.b.*. This inherits iter_raw_references from the ObjectReferenceSegment. """ type = "wildcard_identifier" match_grammar: Matchable = Sequence( # *, blah.*, blah.blah.*, etc. AnyNumberOf( OneOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), allow_gaps=True, ), Sequence( Ref("StarSegment"), Ref("DotSegment"), ), ) ), Ref("StarSegment"), allow_gaps=False, ) def iter_raw_references(self): """Generate a list of reference strings and elements. Each element is a tuple of (str, segment). 
If some are split, then a segment may appear twice, but the substring will only appear once. """ # Extract the references from those identifiers (because some may be quoted) for elem in self.recursive_crawl("identifier", "star"): yield from self._iter_reference_parts(cast(RawSegment, elem)) class WildcardExpressionSegment(BaseSegment): """A star (*) expression for a SELECT clause. This is separate from the identifier to allow for some dialects which extend this logic to allow REPLACE, EXCEPT or similar clauses e.g. BigQuery. """ type = "wildcard_expression" match_grammar: Matchable = Sequence( # *, blah.*, blah.blah.*, etc. Ref("WildcardIdentifierSegment") ) class SelectClauseElementSegment(BaseSegment): """An element in the targets of a select statement.""" type = "select_clause_element" # Important to split elements before parsing, otherwise debugging is really hard. match_grammar = OneOf( # *, blah.*, blah.blah.*, etc. Ref("WildcardExpressionSegment"), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) def get_alias(self) -> Optional[ColumnAliasInfo]: """Get info on alias within SELECT clause element.""" alias_expression_segment = next( self.recursive_crawl( "alias_expression", # don't recurse into any subqueries no_recursive_seg_type="select_statement", ), None, ) if alias_expression_segment is None: # Return None if no alias expression is found. return None alias_identifier_segment = next( (s for s in alias_expression_segment.segments if s.is_type("identifier")), None, ) if alias_identifier_segment is None: # Return None if no alias identifier expression is found. # Happened in the past due to bad syntax return None # pragma: no cover # Get segment being aliased. aliased_segment = next( s for s in self.segments if not s.is_whitespace and not s.is_meta and s != alias_expression_segment ) # Find all the columns being aliased. column_reference_segments = [] if aliased_segment.is_type("column_reference"): column_reference_segments.append(aliased_segment) else: column_reference_segments.extend( aliased_segment.recursive_crawl("column_reference") ) return ColumnAliasInfo( alias_identifier_name=alias_identifier_segment.raw, aliased_segment=aliased_segment, column_reference_segments=column_reference_segments, ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar: Matchable = OneOf( "DISTINCT", "ALL", ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement.""" type = "select_clause" match_grammar: Matchable = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), Dedent, terminators=[Ref("SelectClauseTerminatorGrammar")], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class MatchConditionSegment(BaseSegment): """A stub segment to be used in Snowflake ASOF joins.""" type = "match_condition" match_grammar: Matchable = Nothing() class JoinClauseSegment(BaseSegment): """Any number of join clauses, including the `JOIN` keyword.""" type = "join_clause" match_grammar: Matchable = OneOf( # NB These qualifiers are optional Sequence( Ref("ConditionalJoinKeywordsGrammar", optional=True), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), AnyNumberOf(Ref("NestedJoinGrammar")), Dedent, Sequence( # Using nested sequence here so we only get the indents # if we also have content. 
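            # Illustrative sketch: with the layout option
            # indented_using_on enabled, the ON/USING condition gets its
            # own indent level, e.g.
            #     FROM a
            #     JOIN b
            #         ON a.id = b.id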
Conditional(Indent, indented_using_on=True), # NB: this is optional Ref("MatchConditionSegment", optional=True), OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Ref("JoinUsingConditionGrammar"), # Unqualified joins *are* allowed. They just might not # be a good idea. ), Conditional(Dedent, indented_using_on=True), optional=True, ), ), # Note NATURAL joins do not support Join conditions Sequence( Ref("UnconditionalJoinKeywordsGrammar"), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Ref("MatchConditionSegment", optional=True), Dedent, ), # Sometimes, a natural join might already include the keyword Sequence( Ref("ExtendedNaturalJoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), ) def get_eventual_aliases(self) -> list[tuple[BaseSegment, AliasInfo]]: """Return the eventual table name referred to by this join clause.""" buff = [] from_expression = self.get_child("from_expression_element") # As per grammar above, there will always be a FromExpressionElementSegment assert from_expression from_aliases = cast( FromExpressionElementSegment, from_expression ).get_eventual_alias() # Only append if non-null. A None reference, may # indicate a generator expression or similar. for alias in from_aliases: buff.append((from_expression, alias)) # In some dialects, like TSQL, join clauses can have nested join clauses # recurse into them - but not if part of a sub-select statement (see #3144) for join_clause in self.recursive_crawl( "join_clause", no_recursive_seg_type="select_statement" ): if join_clause is self: # If the starting segment itself matches the list of types we're # searching for, recursive_crawl() will return it. Skip that. continue aliases: list[tuple[BaseSegment, AliasInfo]] = cast( JoinClauseSegment, join_clause ).get_eventual_aliases() # Only append if non-null. A None reference, may # indicate a generator expression or similar. if aliases: buff = buff + aliases return buff class JoinOnConditionSegment(BaseSegment): """The `ON` condition within a `JOIN` clause.""" type = "join_on_condition" match_grammar: Matchable = Sequence( "ON", Conditional(ImplicitIndent, indented_on_contents=True), OptionallyBracketed(Ref("ExpressionSegment")), Conditional(Dedent, indented_on_contents=True), ) ansi_dialect.add( # This is a hook point to allow subclassing for other dialects JoinLikeClauseGrammar=Nothing(), ) class FromClauseSegment(BaseSegment): """A `FROM` clause like in `SELECT`. NOTE: this is a delimited set of table expressions, with a variable number of optional join clauses with those table expressions. The delimited aspect is the higher of the two such that the following is valid (albeit unusual): ``` SELECT * FROM a JOIN b, c JOIN d ``` """ type = "from_clause" match_grammar: Matchable = Sequence( "FROM", Delimited( Ref("FromExpressionSegment"), ), ) def get_eventual_aliases(self) -> list[tuple[BaseSegment, AliasInfo]]: """List the eventual aliases of this from clause. Comes as a list of tuples (table expr, tuple (string, segment, bool)). 
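
        For example (illustrative): "FROM a JOIN b AS bb" yields one
        entry for "a" (no alias) and one for "bb" (aliased).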
""" buff: list[tuple[BaseSegment, AliasInfo]] = [] direct_table_children = [] join_clauses = [] for from_expression in self.get_children("from_expression"): direct_table_children += from_expression.get_children( "from_expression_element" ) join_clauses += from_expression.get_children("join_clause") # Iterate through the potential sources of aliases for clause in direct_table_children: direct_table_aliases = cast( FromExpressionElementSegment, clause ).get_eventual_alias() # Only append if non-null. A None reference, may # indicate a generator expression or similar. table_expr = ( clause if clause in direct_table_children else clause.get_child("from_expression_element") ) for alias in direct_table_aliases: assert table_expr buff.append((table_expr, alias)) for clause in join_clauses: aliases: list[tuple[BaseSegment, AliasInfo]] = cast( JoinClauseSegment, clause ).get_eventual_aliases() # Only append if non-null. A None reference, may # indicate a generator expression or similar. if aliases: buff = buff + aliases return buff class WhenClauseSegment(BaseSegment): """A 'WHEN' clause for a 'CASE' statement.""" type = "when_clause" match_grammar: Matchable = Sequence( "WHEN", # NOTE: The nested sequence here is to ensure the correct # placement of the meta segments when templated elements # are present. # https://github.com/sqlfluff/sqlfluff/issues/3988 Sequence( ImplicitIndent, Ref("ExpressionSegment"), Dedent, ), Conditional(Indent, indented_then=True), "THEN", Conditional(ImplicitIndent, indented_then_contents=True), Ref("ExpressionSegment"), Conditional(Dedent, indented_then_contents=True), Conditional(Dedent, indented_then=True), ) class ElseClauseSegment(BaseSegment): """An 'ELSE' clause for a 'CASE' statement.""" type = "else_clause" match_grammar: Matchable = Sequence( "ELSE", ImplicitIndent, Ref("ExpressionSegment"), Dedent ) class CaseExpressionSegment(BaseSegment): """A `CASE WHEN` clause.""" type = "case_expression" match_grammar: Matchable = OneOf( Sequence( "CASE", ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", ), Sequence( "CASE", Ref("ExpressionSegment"), ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", ), terminators=[ Ref("ComparisonOperatorGrammar"), Ref("CommaSegment"), Ref("BinaryOperatorGrammar"), ], ) ansi_dialect.add( # Expression_A_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.html#a_expr # The upstream grammar is defined recursively, which if implemented naively # will cause SQLFluff to overflow the stack from recursive function calls. # To work around this, the a_expr grammar is reworked a bit into sub-grammars # that effectively provide tail recursion. Expression_A_Unary_Operator_Grammar=OneOf( # This grammar corresponds to the unary operator portion of the initial # recursive block on the Cockroach Labs a_expr grammar. It includes the # unary operator matching sub-block, but not the recursive call to a_expr. Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), # used in CONNECT BY clauses (EXASOL, Snowflake, Postgres...) 
"PRIOR", ), Tail_Recurse_Expression_A_Grammar=Sequence( # This should be used instead of a recursive call to Expression_A_Grammar # whenever the repeating element in Expression_A_Grammar makes a recursive # call to itself at the _end_. If it's in the middle then you still need # to recurse into Expression_A_Grammar normally. AnyNumberOf( Ref("Expression_A_Unary_Operator_Grammar"), terminators=[Ref("BinaryOperatorGrammar")], ), Ref("Expression_C_Grammar"), ), Expression_A_Grammar=Sequence( # Grammar always starts with optional unary operator, plus c_expr. This # section must always match the tail recurse grammar. Ref("Tail_Recurse_Expression_A_Grammar"), # As originally pictured in the diagram, the grammar then repeats itself # for any number of times with a loop. AnyNumberOf( OneOf( # This corresponds to the big repeating block in the diagram that # has like dozens and dozens of possibilities. Some of them are # recursive. If the item __ends__ with a recursive call to "a_expr", # use Ref("Tail_Recurse_Expression_A_Grammar") instead so that the # stack depth can be minimized. If the item has a recursive call # in the middle of the expression, you'll need to recurse # Expression_A_Grammar normally. # # We need to add a lot more here... Ref("LikeExpressionGrammar"), Sequence( Ref("BinaryOperatorGrammar"), Ref("Tail_Recurse_Expression_A_Grammar"), ), Ref("InOperatorGrammar"), Sequence( "IS", Ref.keyword("NOT", optional=True), Ref("IsClauseGrammar"), ), Ref("IsNullGrammar"), Ref("NotNullGrammar"), Ref("CollateGrammar"), Sequence( Ref.keyword("NOT", optional=True), "BETWEEN", Ref("Expression_B_Grammar"), "AND", Ref("Tail_Recurse_Expression_A_Grammar"), ), Sequence( Ref("PatternMatchingGrammar"), Ref("Expression_A_Grammar"), ), ) ), ), # Expression_B_Grammar: Does not directly feed into Expression_A_Grammar # but is used for a BETWEEN statement within Expression_A_Grammar. # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#b_expr # # We use a similar trick as seen with Expression_A_Grammar to avoid recursion # by using a tail recursion grammar. See the comments for a_expr to see how # that works. Expression_B_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), ), Tail_Recurse_Expression_B_Grammar=Sequence( # Only safe to use if the recursive call is at the END of the repeating # element in the main b_expr portion AnyNumberOf(Ref("Expression_B_Unary_Operator_Grammar")), Ref("Expression_C_Grammar"), ), Expression_B_Grammar=Sequence( # Always start with tail recursion element! 
Ref("Tail_Recurse_Expression_B_Grammar"), AnyNumberOf( OneOf( Sequence( OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), ), Ref("Tail_Recurse_Expression_B_Grammar"), ), # TODO: Add more things from b_expr here ), ), ), # Expression_C_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#c_expr Expression_C_Grammar=OneOf( Sequence("EXISTS", Bracketed(Ref("SelectableGrammar"))), # should be first priority, otherwise EXISTS() would be matched as a function Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf(Ref("TimeZoneGrammar"), optional=True), ), Ref("ShorthandCastSegment"), terminators=[Ref("CommaSegment")], ), Expression_D_Potential_Select_Statement_Without_Brackets=OneOf( Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("IntervalExpressionSegment"), Ref("TypedStructLiteralSegment"), Ref("ArrayExpressionSegment"), Ref("ColumnReferenceSegment"), Ref("OverlapsClauseSegment"), ), # Expression_D_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#d_expr Expression_D_Grammar=Sequence( OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LocalAliasSegment"), # WHERE (LOCAL.a, LOCAL.b) IN (...) Ref( "ExpressionSegment" ), # SELECT (1*1, 2) IN (STRUCT(1 AS a, 2 AS b)); ), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("Expression_D_Potential_Select_Statement_Without_Brackets"), # For triggers, we allow "NEW.*" but not just "*" nor "a.b.*" # So can't use WildcardIdentifierSegment nor WildcardExpressionSegment Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), Ref("StarSegment"), ), Sequence( OneOf(Ref("StructTypeSegment"), Ref("MapTypeSegment")), Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( Ref("DatatypeSegment"), # Don't use the full LiteralGrammar here # because only some of them are applicable. # Notably we shouldn't use QualifiedNumericLiteralSegment # here because it looks like an arithmetic operation. 
OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), ), ), Ref("LocalAliasSegment"), Ref("ListComprehensionGrammar"), terminators=[Ref("CommaSegment")], ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), AccessorGrammar=AnyNumberOf(Ref("ArrayAccessorSegment")), ) class EqualsSegment(CompositeComparisonOperatorSegment): """Equals operator.""" match_grammar: Matchable = Ref("RawEqualsSegment") class GreaterThanSegment(CompositeComparisonOperatorSegment): """Greater than operator.""" match_grammar: Matchable = Ref("RawGreaterThanSegment") class LessThanSegment(CompositeComparisonOperatorSegment): """Less than operator.""" match_grammar: Matchable = Ref("RawLessThanSegment") class GreaterThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator.""" match_grammar: Matchable = Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) class LessThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Less than or equal to operator.""" match_grammar: Matchable = Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) class NotEqualToSegment(CompositeComparisonOperatorSegment): """Not equal to operator.""" match_grammar: Matchable = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment"), allow_gaps=False), Sequence( Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False ), ) class ConcatSegment(CompositeBinaryOperatorSegment): """Concat operator.""" match_grammar: Matchable = Sequence( Ref("PipeSegment"), Ref("PipeSegment"), allow_gaps=False ) class BitwiseAndSegment(CompositeBinaryOperatorSegment): """Bitwise and operator.""" match_grammar: Matchable = Ref("AmpersandSegment") class BitwiseOrSegment(CompositeBinaryOperatorSegment): """Bitwise or operator.""" match_grammar: Matchable = Ref("PipeSegment") class BitwiseLShiftSegment(CompositeBinaryOperatorSegment): """Bitwise left-shift operator.""" match_grammar: Matchable = Sequence( Ref("RawLessThanSegment"), Ref("RawLessThanSegment"), allow_gaps=False ) class BitwiseRShiftSegment(CompositeBinaryOperatorSegment): """Bitwise right-shift operator.""" match_grammar: Matchable = Sequence( Ref("RawGreaterThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False ) class ExpressionSegment(BaseSegment): """An expression, either arithmetic or boolean. NB: This is potentially VERY recursive and mostly uses the grammars above. This version also doesn't bound itself first, and so is potentially VERY SLOW. I don't really like this solution. We rely on elements of the expression to bound themselves rather than bounding at the expression level. Trying to bound the ExpressionSegment itself has been too unstable and not resilient enough to other bugs. """ type = "expression" match_grammar: Matchable = Ref("Expression_A_Grammar") class WhereClauseSegment(BaseSegment): """A `WHERE` clause like in `SELECT` or `INSERT`.""" type = "where_clause" match_grammar: Matchable = Sequence( "WHERE", # NOTE: The indent here is implicit to allow # constructions like: # # WHERE a # AND b # # to be valid without forcing an indent between # "WHERE" and "a". 
        ImplicitIndent,
        OptionallyBracketed(Ref("ExpressionSegment")),
        Dedent,
    )


class OrderByClauseSegment(BaseSegment):
    """An `ORDER BY` clause like in `SELECT`."""

    type = "orderby_clause"
    match_grammar: Matchable = Sequence(
        "ORDER",
        "BY",
        Indent,
        Delimited(
            Sequence(
                OneOf(
                    Ref("ColumnReferenceSegment"),
                    # Can `ORDER BY 1`
                    Ref("NumericLiteralSegment"),
                    # Can order by an expression
                    Ref("ExpressionSegment"),
                ),
                OneOf("ASC", "DESC", optional=True),
                # NB: This isn't really ANSI, and isn't supported in MySQL, but
                # is supported in enough other dialects for it to make sense here
                # for now.
                Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True),
                Ref("WithFillSegment", optional=True),
            ),
            terminators=["LIMIT", Ref("FrameClauseUnitGrammar")],
        ),
        Dedent,
    )


class RollupFunctionNameSegment(BaseSegment):
    """ROLLUP function name segment.

    Need to be able to specify this as type `function_name_identifier`
    within a `function_name` so that linting rules identify it properly.
    """

    type = "function_name"
    match_grammar: Matchable = StringParser(
        "ROLLUP",
        CodeSegment,
        type="function_name_identifier",
    )


class CubeFunctionNameSegment(BaseSegment):
    """CUBE function name segment.

    Need to be able to specify this as type `function_name_identifier`
    within a `function_name` so that linting rules identify it properly.
    """

    type = "function_name"
    match_grammar: Matchable = StringParser(
        "CUBE",
        CodeSegment,
        type="function_name_identifier",
    )


class GroupingSetsClauseSegment(BaseSegment):
    """`GROUPING SETS` clause within the `GROUP BY` clause."""

    type = "grouping_sets_clause"
    match_grammar: Matchable = Sequence(
        "GROUPING",
        "SETS",
        Bracketed(
            Delimited(
                Ref("CubeRollupClauseSegment"),
                Ref("GroupingExpressionList"),
            ),
        ),
    )


class GroupingExpressionList(BaseSegment):
    """A `GROUP BY` clause expression list like in `ROLLUP`."""

    type = "grouping_expression_list"
    match_grammar: Matchable = Sequence(
        Delimited(
            OneOf(
                Ref("ColumnReferenceSegment"),
                # Can `GROUP BY ROLLUP(1)`
                Ref("NumericLiteralSegment"),
                # Can `GROUP BY ROLLUP(coalesce(col, 1))`
                Ref("ExpressionSegment"),
                Bracketed(),  # Allows empty parentheses
            ),
            terminators=[Ref("GroupByClauseTerminatorGrammar")],
        ),
    )


class CubeRollupClauseSegment(BaseSegment):
    """`CUBE` / `ROLLUP` clause within the `GROUP BY` clause."""

    type = "cube_rollup_clause"
    match_grammar = Sequence(
        OneOf(Ref("CubeFunctionNameSegment"), Ref("RollupFunctionNameSegment")),
        Bracketed(
            Ref("GroupingExpressionList"),
        ),
    )


class GroupByClauseSegment(BaseSegment):
    """A `GROUP BY` clause like in `SELECT`."""

    type = "groupby_clause"
    match_grammar: Matchable = Sequence(
        "GROUP",
        "BY",
        Indent,
        OneOf(
            "ALL",
            Ref("GroupingSetsClauseSegment"),
            Ref("CubeRollupClauseSegment"),
            # We could replace this next bit with a GroupingExpressionList
            # reference (renaming that to a more generic name), to avoid
            # repeating this bit of code, but I would rather keep it flat
            # to avoid changing regular `GROUP BY` clauses.
            Sequence(
                Delimited(
                    OneOf(
                        Ref("ColumnReferenceSegment"),
                        # Can `GROUP BY 1`
                        Ref("NumericLiteralSegment"),
                        # Can `GROUP BY coalesce(col, 1)`
                        Ref("ExpressionSegment"),
                    ),
                    terminators=[Ref("GroupByClauseTerminatorGrammar")],
                ),
            ),
        ),
        Dedent,
    )


class HavingClauseSegment(BaseSegment):
    """A `HAVING` clause like in `SELECT`."""

    type = "having_clause"
    match_grammar: Matchable = Sequence(
        "HAVING",
        ImplicitIndent,
        OptionallyBracketed(Ref("ExpressionSegment")),
        Dedent,
    )


class LimitClauseSegment(BaseSegment):
    """A `LIMIT` clause like in `SELECT`."""

    type = "limit_clause"
    match_grammar: Matchable = Sequence(
        "LIMIT",
        Indent,
        OptionallyBracketed(
            OneOf(
                # Allow a number by itself OR
                Ref("NumericLiteralSegment"),
                # An arbitrary expression
                Ref("ExpressionSegment"),
                "ALL",
            )
        ),
        OneOf(
            Sequence(
                "OFFSET",
                OneOf(
                    # Allow a number by itself OR
                    Ref("NumericLiteralSegment"),
                    # An arbitrary expression
                    Ref("ExpressionSegment"),
                ),
            ),
            Sequence(
                Ref("CommaSegment"),
                Ref("NumericLiteralSegment"),
            ),
            optional=True,
        ),
        Dedent,
    )


class OverlapsClauseSegment(BaseSegment):
    """An `OVERLAPS` clause like in `SELECT`."""

    type = "overlaps_clause"
    match_grammar: Matchable = Sequence(
        "OVERLAPS",
        OneOf(
            Sequence(
                Bracketed(
                    Ref("DateTimeLiteralGrammar"),
                    Ref("CommaSegment"),
                    Ref("DateTimeLiteralGrammar"),
                )
            ),
            Ref("ColumnReferenceSegment"),
        ),
    )


class NamedWindowSegment(BaseSegment):
    """A WINDOW clause."""

    type = "named_window"
    match_grammar: Matchable = Sequence(
        "WINDOW",
        Indent,
        Delimited(
            Ref("NamedWindowExpressionSegment"),
        ),
        Dedent,
    )


class FetchClauseSegment(BaseSegment):
    """A `FETCH` clause like in `SELECT`."""

    type = "fetch_clause"
    match_grammar: Matchable = Sequence(
        "FETCH",
        OneOf(
            "FIRST",
            "NEXT",
        ),
        OneOf(
            Ref("NumericLiteralSegment"),
            Ref("ExpressionSegment", exclude=Ref.keyword("ROW")),
            optional=True,
        ),
        OneOf("ROW", "ROWS"),
        OneOf("ONLY", Sequence("WITH", "TIES")),
    )


class OffsetClauseSegment(BaseSegment):
    """An `OFFSET` clause like in `SELECT`."""

    type = "offset_clause"
    match_grammar: Matchable = Sequence(
        "OFFSET",
        OneOf(
            Ref("NumericLiteralSegment"),
            Ref("ExpressionSegment", exclude=Ref.keyword("ROW")),
        ),
        OneOf("ROW", "ROWS"),
    )


class NamedWindowExpressionSegment(BaseSegment):
    """Named window expression."""

    type = "named_window_expression"
    match_grammar: Matchable = Sequence(
        Ref("SingleIdentifierGrammar"),  # Window name
        "AS",
        OneOf(
            Ref("SingleIdentifierGrammar"),  # Window name
            Bracketed(
                Ref("WindowSpecificationSegment"),
                parse_mode=ParseMode.GREEDY,
            ),
        ),
    )


class ValuesClauseSegment(BaseSegment):
    """A `VALUES` clause like in `INSERT`."""

    type = "values_clause"
    match_grammar: Matchable = Sequence(
        OneOf("VALUE", "VALUES"),
        Delimited(
            Sequence(
                # MySQL uses `ROW` in its value statement.
                # Currently SQLFluff doesn't differentiate between
                # the Values statement:
                # https://dev.mysql.com/doc/refman/8.0/en/values.html
                # and the Values() function (used in INSERT statements):
                # https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_values
                # TODO: split these out in future.
                Ref.keyword("ROW", optional=True),
                Bracketed(
                    Delimited(
                        "DEFAULT",
                        Ref("LiteralGrammar"),
                        Ref("ExpressionSegment"),
                    ),
                    parse_mode=ParseMode.GREEDY,
                ),
            ),
        ),
    )


class UnorderedSelectStatementSegment(BaseSegment):
    """A `SELECT` statement without any ORDER clauses or later.

    This is designed for use in the context of set operations; for
    other use cases, we should use the main SelectStatementSegment.
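
    For example (illustrative only), in:

    ```
    SELECT a FROM x UNION SELECT a FROM y ORDER BY a
    ```

    each side of the UNION is parsed using this segment, so that the
    trailing ORDER BY binds to the set expression as a whole.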
""" type = "select_statement" match_grammar: Matchable = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("OverlapsClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), Ref("OrderByClauseSegment"), Ref("LimitClauseSegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement.""" type = "select_statement" # Inherit most of the parse grammar from the unordered version. match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("OffsetClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ], # Overwrite the terminators, because we want to remove some. replace_terminators=True, terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ], ) ansi_dialect.add( # Things that behave like select statements SelectableGrammar=OneOf( OptionallyBracketed(Ref("WithCompoundStatementSegment")), OptionallyBracketed(Ref("WithCompoundNonSelectStatementSegment")), Ref("NonWithSelectableGrammar"), Bracketed(Ref("SelectableGrammar")), ), # Things that behave like select statements, which can form part of with # expressions. NonWithSelectableGrammar=OneOf( Ref("SetExpressionSegment"), OptionallyBracketed(Ref("SelectStatementSegment")), Ref("NonSetSelectableGrammar"), ), # Things that do not behave like select statements, which can form part of with # expressions. NonWithNonSelectableGrammar=OneOf( Ref("UpdateStatementSegment"), Ref("InsertStatementSegment"), Ref("DeleteStatementSegment"), Ref("MergeStatementSegment"), ), # Things that behave like select statements, which can form part of set expressions. NonSetSelectableGrammar=OneOf( Ref("ValuesClauseSegment"), Ref("UnorderedSelectStatementSegment"), # If it's bracketed, we can have the full select statement here, # otherwise we can't because any order by clauses should belong # to the set expression. Bracketed(Ref("SelectStatementSegment")), Bracketed(Ref("WithCompoundStatementSegment")), Bracketed(Ref("NonSetSelectableGrammar")), Ref("BracketedSetExpressionGrammar"), ), # Added as part of `NonSetSelectableGrammar` where a nested `SetExpressionSegment` # could be used. Some dialects don't support an "ordered" set, but some may. BracketedSetExpressionGrammar=Bracketed(Ref("UnorderedSetExpressionSegment")), ) class CTEColumnList(BaseSegment): """Bracketed column list portion of a CTE definition.""" type = "cte_column_list" match_grammar = Bracketed( Ref("SingleIdentifierListSegment"), ) class CTEDefinitionSegment(BaseSegment): """A CTE Definition from a WITH statement. `tab (col1,col2) AS (SELECT a,b FROM x)` """ type = "common_table_expression" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), Ref("CTEColumnList", optional=True), Ref.keyword("AS", optional=True), Bracketed( # Ephemeral here to subdivide the query. Ref("SelectableGrammar"), parse_mode=ParseMode.GREEDY, ), ) def get_identifier(self) -> IdentifierSegment: """Get the identifier of this CTE. 
Note: it blindly gets the first identifier it finds which given the structure of a CTE definition is usually the right one. """ _identifier = self.get_child("identifier") # There will always be one, given the grammar above. assert _identifier return cast(IdentifierSegment, _identifier) class WithCompoundStatementSegment(BaseSegment): """A `SELECT` statement preceded by a selection of `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` """ type = "with_compound_statement" # match grammar match_grammar: Matchable = Sequence( "WITH", Ref.keyword("RECURSIVE", optional=True), Conditional(Indent, indented_ctes=True), Delimited( Ref("CTEDefinitionSegment"), terminators=["SELECT"], allow_trailing=True, ), Conditional(Dedent, indented_ctes=True), Ref("NonWithSelectableGrammar"), ) class WithCompoundNonSelectStatementSegment(BaseSegment): """A `UPDATE/INSERT/DELETE/MERGE` statement preceded by `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` """ type = "with_compound_statement" # match grammar match_grammar: Matchable = Sequence( "WITH", Ref.keyword("RECURSIVE", optional=True), Conditional(Indent, indented_ctes=True), Delimited( Ref("CTEDefinitionSegment"), terminators=["SELECT"], allow_trailing=True, ), Conditional(Dedent, indented_ctes=True), Ref("NonWithNonSelectableGrammar"), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Ref("UnionGrammar"), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), "MINUS", exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class UnorderedSetExpressionSegment(BaseSegment): """A set expression with either Union, Minus, Except or Intersect.""" type = "set_expression" # match grammar match_grammar: Matchable = Sequence( Ref("NonSetSelectableGrammar"), AnyNumberOf( Sequence( Ref("SetOperatorSegment"), Ref("NonSetSelectableGrammar"), ), min_times=1, ), ) class SetExpressionSegment(BaseSegment): """A set expression with either Union, Minus, Except or Intersect.""" type = "set_expression" # match grammar match_grammar: Matchable = UnorderedSetExpressionSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ], ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement.""" type = "insert_statement" match_grammar: Matchable = Sequence( "INSERT", # Maybe OVERWRITE is just snowflake? # (It's also Hive but that has full insert grammar implementation) Ref.keyword("OVERWRITE", optional=True), "INTO", Ref("TableReferenceSegment"), OneOf( # As SelectableGrammar can be bracketed too, the parse gets confused, # so we need slightly odd syntax here to allow those to parse (rather # than just add optional=True to BracketedColumnReferenceListGrammar). 
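            # For illustration (not exhaustive), the three branches below
            # correspond to forms like:
            #   INSERT INTO t SELECT * FROM u
            #   INSERT INTO t (a, b) VALUES (1, 2)
            #   INSERT INTO t DEFAULT VALUES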
Ref("SelectableGrammar"), Sequence( Ref("BracketedColumnReferenceListGrammar"), Ref("SelectableGrammar"), ), # This is part of ANSI SQL since SQL-92 Ref("DefaultValuesGrammar"), ), ) class MergeStatementSegment(BaseSegment): """A `MERGE` statement.""" type = "merge_statement" match_grammar = Sequence( Ref("MergeIntoLiteralGrammar"), Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), ), Dedent, "USING", Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), Sequence( Bracketed( Ref("SelectableGrammar"), ), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, Conditional(Indent, indented_using_on=True), Ref("JoinOnConditionSegment"), Conditional(Dedent, indented_using_on=True), Ref("MergeMatchSegment"), ) class MergeMatchSegment(BaseSegment): """Contains dialect specific merge operations. Hookpoint for dialect specific behavior e.g. UpdateClause / DeleteClause, multiple MergeMatchedClauses """ type = "merge_match" match_grammar: Matchable = AnyNumberOf( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment"), min_times=1, ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar: Matchable = Sequence( "WHEN", "MATCHED", Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar: Matchable = Sequence( "WHEN", "NOT", "MATCHED", Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, Ref("MergeInsertClauseSegment"), Dedent, ) class MergeUpdateClauseSegment(BaseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", Indent, Ref("SetClauseListSegment"), Dedent, ) class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar: Matchable = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), ) class MergeDeleteClauseSegment(BaseSegment): """`DELETE` clause within the `MERGE` statement.""" type = "merge_delete_clause" match_grammar: Matchable = Ref.keyword("DELETE") class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.""" type = "transaction_statement" match_grammar: Matchable = Sequence( # COMMIT [ WORK ] [ AND [ NO ] CHAIN ] # ROLLBACK [ WORK ] [ AND [ NO ] CHAIN ] # BEGIN | END TRANSACTION | WORK # NOTE: "TO SAVEPOINT" is not yet supported # https://docs.snowflake.com/en/sql-reference/sql/begin.html # https://www.postgresql.org/docs/current/sql-end.html OneOf("START", "BEGIN", "COMMIT", "ROLLBACK", "END"), OneOf("TRANSACTION", "WORK", optional=True), Sequence("NAME", Ref("SingleIdentifierGrammar"), optional=True), Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True), ) class ColumnConstraintSegment(BaseSegment): """A column option; each CREATE TABLE column can have 0 or more.""" type = "column_constraint_segment" # Column constraint from # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar: Matchable = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( 
Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # DEFAULT "DEFAULT", Ref("ColumnConstraintDefaultGrammar"), ), Sequence( Ref("PrimaryKeyGrammar"), Ref("NotEnforcedGrammar", optional=True) ), Ref("UniqueKeyGrammar"), # UNIQUE Ref("AutoIncrementGrammar"), Sequence( Ref("ReferenceDefinitionGrammar"), Ref("NotEnforcedGrammar", optional=True), ), # REFERENCES reftable [ ( refcolumn) ]x Ref("CommentClauseSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment") ), # https://www.sqlite.org/datatype3.html#collation ), ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class IndexColumnDefinitionSegment(BaseSegment): """A column definition for CREATE INDEX.""" type = "index_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf("ASC", "DESC", optional=True), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE.""" type = "table_constraint" # Later add support for CHECK constraint, others? # e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1) match_grammar: Matchable = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), ) class TableEndClauseSegment(BaseSegment): """Allow for additional table endings. (like WITHOUT ROWID for SQLite) """ type = "table_end_clause_segment" match_grammar: Matchable = Nothing() class ArrayExpressionSegment(BaseSegment): """Expression to construct a ARRAY from a subquery. (Yes in BigQuery for example) NOTE: This differs from an array _literal_ in that it takes the form of an expression. 
""" type = "array_expression" match_grammar: Matchable = Nothing() class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement.""" type = "create_table_statement" # https://crate.io/docs/sql-99/en/latest/chapters/18.html # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), ) class CommentClauseSegment(BaseSegment): """A comment clause. e.g. COMMENT 'view/table/column description' """ type = "comment_clause" match_grammar: Matchable = Sequence("COMMENT", Ref("QuotedLiteralSegment")) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement.""" type = "create_schema_statement" match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class SetSchemaStatementSegment(BaseSegment): """A `SET SCHEMA` statement.""" type = "set_schema_statement" match_grammar: Matchable = Sequence( "SET", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class DropSchemaStatementSegment(BaseSegment): """A `DROP SCHEMA` statement.""" type = "drop_schema_statement" match_grammar: Matchable = Sequence( "DROP", "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropTypeStatementSegment(BaseSegment): """A `DROP TYPE` statement.""" type = "drop_type_statement" match_grammar: Matchable = Sequence( "DROP", "TYPE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class CreateDatabaseStatementSegment(BaseSegment): """A `CREATE DATABASE` statement.""" type = "create_database_statement" match_grammar: Matchable = Sequence( "CREATE", "DATABASE", Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), ) class DropDatabaseStatementSegment(BaseSegment): """A `DROP DATABASE` statement.""" type = "drop_database_statement" match_grammar: Matchable = Sequence( "DROP", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` statement.""" type = "create_index_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), ) ), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE` statement.""" type = "alter_table_statement" # Based loosely on: # https://dev.mysql.com/doc/refman/8.0/en/alter-table.html # TODO: Flesh this out with more detail. 
    match_grammar: Matchable = Sequence(
        "ALTER",
        "TABLE",
        Ref("TableReferenceSegment"),
        Delimited(
            Ref("AlterTableOptionsGrammar"),
        ),
    )


class CreateViewStatementSegment(BaseSegment):
    """A `CREATE VIEW` statement."""

    type = "create_view_statement"
    # https://crate.io/docs/sql-99/en/latest/chapters/18.html#create-view-statement
    # https://dev.mysql.com/doc/refman/8.0/en/create-view.html
    # https://www.postgresql.org/docs/12/sql-createview.html
    match_grammar: Matchable = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "VIEW",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        # Optional list of column names
        Ref("BracketedColumnReferenceListGrammar", optional=True),
        "AS",
        OptionallyBracketed(Ref("SelectableGrammar")),
        Ref("WithNoSchemaBindingClauseSegment", optional=True),
    )


class DropTableStatementSegment(BaseSegment):
    """A `DROP TABLE` statement."""

    type = "drop_table_statement"

    match_grammar: Matchable = Sequence(
        "DROP",
        Ref("TemporaryGrammar", optional=True),
        "TABLE",
        Ref("IfExistsGrammar", optional=True),
        Delimited(Ref("TableReferenceSegment")),
        Ref("DropBehaviorGrammar", optional=True),
    )


class DropViewStatementSegment(BaseSegment):
    """A `DROP VIEW` statement."""

    type = "drop_view_statement"

    match_grammar: Matchable = Sequence(
        "DROP",
        "VIEW",
        Ref("IfExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        Ref("DropBehaviorGrammar", optional=True),
    )


class DropUserStatementSegment(BaseSegment):
    """A `DROP USER` statement."""

    type = "drop_user_statement"

    match_grammar: Matchable = Sequence(
        "DROP",
        "USER",
        Ref("IfExistsGrammar", optional=True),
        Ref("RoleReferenceSegment"),
    )


class TruncateStatementSegment(BaseSegment):
    """`TRUNCATE TABLE` statement."""

    type = "truncate_table"

    match_grammar: Matchable = Sequence(
        "TRUNCATE",
        Ref.keyword("TABLE", optional=True),
        Ref("TableReferenceSegment"),
    )


class DropIndexStatementSegment(BaseSegment):
    """A `DROP INDEX` statement."""

    type = "drop_index_statement"
    # DROP INDEX <index name> [IF EXISTS] {RESTRICT | CASCADE}
    match_grammar: Matchable = Sequence(
        "DROP",
        "INDEX",
        Ref("IfExistsGrammar", optional=True),
        Ref("IndexReferenceSegment"),
        Ref("DropBehaviorGrammar", optional=True),
    )


class AccessStatementSegment(BaseSegment):
    """A `GRANT` or `REVOKE` statement.

    In order to help reduce code duplication, we decided to implement other
    dialect-specific grants (like Snowflake's) here too, which will help with
    maintainability. We also note that this causes the grammar to be less
    "correct", but the benefits outweigh the cons in our opinion.
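
    For example (an illustrative, non-exhaustive sketch), this single
    segment covers statements like:

    ```
    GRANT SELECT ON TABLE my_table TO some_role
    REVOKE SELECT ON TABLE my_table FROM some_role
    ```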
Grant specific information: * https://www.postgresql.org/docs/9.0/sql-grant.html * https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Revoke specific information: * https://www.postgresql.org/docs/9.0/sql-revoke.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-role.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege-share.html """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", ), ), Sequence("APPLY", "MASKING", "POLICY"), Sequence("EXECUTE", "TASK"), Sequence("MANAGE", "GRANTS"), Sequence("MONITOR", OneOf("EXECUTION", "USAGE")), ) _schema_object_names = [ "TABLE", "VIEW", "STAGE", "FUNCTION", "PROCEDURE", "ROUTINE", "SEQUENCE", "STREAM", "TASK", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("MATERIALIZED", "VIEW"), Sequence("EXTERNAL", "TABLE"), Sequence("FILE", "FORMAT"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( Sequence( "CREATE", OneOf( "SCHEMA", Sequence("MASKING", "POLICY"), "PIPE", _schema_object_types, ), ), Sequence("IMPORTED", "PRIVILEGES"), "APPLY", "CONNECT", "CREATE", "DELETE", "EXECUTE", "INSERT", "MODIFY", "MONITOR", "OPERATE", "OWNERSHIP", "READ", "REFERENCE_USAGE", "REFERENCES", "SELECT", "TEMP", "TEMPORARY", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", "USE_ANY_ROLE", "WRITE", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on. # This list will contain ansi sql objects as well as dialect specific ones. _objects = OneOf( "ACCOUNT", Sequence( OneOf( Sequence("RESOURCE", "MONITOR"), "WAREHOUSE", "DATABASE", "DOMAIN", "INTEGRATION", "LANGUAGE", "SCHEMA", "ROLE", "TABLESPACE", "TYPE", Sequence( "FOREIGN", OneOf("SERVER", Sequence("DATA", "WRAPPER")), ), Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), Sequence("FUTURE", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"), Sequence( "FUTURE", _schema_object_types_plural, "IN", OneOf("DATABASE", "SCHEMA"), ), optional=True, ), Delimited( Ref("ObjectReferenceSegment"), Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), Ref("WildcardIdentifierSegment"), terminators=["TO", "FROM"], ), ), Sequence("LARGE", "OBJECT", Ref("NumericLiteralSegment")), ) match_grammar: Matchable = OneOf( # Based on https://www.postgresql.org/docs/13/sql-grant.html # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), # In the case where a role is granted non-explicitly, # e.g. 
                    # GRANT ROLE_NAME TO OTHER_ROLE_NAME
                    # See https://www.postgresql.org/docs/current/sql-grant.html
                    Ref("ObjectReferenceSegment"),
                ),
                "TO",
                OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True),
                Delimited(
                    OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment"), "PUBLIC"),
                ),
                OneOf(
                    Sequence("WITH", "GRANT", "OPTION"),
                    Sequence("WITH", "ADMIN", "OPTION"),
                    Sequence("COPY", "CURRENT", "GRANTS"),
                    optional=True,
                ),
                Sequence(
                    "GRANTED",
                    "BY",
                    OneOf(
                        "CURRENT_USER",
                        "SESSION_USER",
                        Ref("ObjectReferenceSegment"),
                    ),
                    optional=True,
                ),
            ),
            # Based on https://www.postgresql.org/docs/12/sql-revoke.html
            Sequence(
                "REVOKE",
                Sequence("GRANT", "OPTION", "FOR", optional=True),
                OneOf(
                    Sequence(
                        Delimited(
                            OneOf(_global_permissions, _permissions),
                            terminators=["ON"],
                        ),
                        "ON",
                        _objects,
                    ),
                    Sequence("ROLE", Ref("ObjectReferenceSegment")),
                    Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")),
                    Ref("ObjectReferenceSegment"),
                ),
                "FROM",
                OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True),
                Delimited(
                    Ref("ObjectReferenceSegment"),
                ),
                Ref("DropBehaviorGrammar", optional=True),
            ),
        )


class DeleteStatementSegment(BaseSegment):
    """A `DELETE` statement.

    DELETE FROM <table name> [ WHERE <search condition> ]
    """

    type = "delete_statement"
    # match grammar. This one makes sense in the context of knowing that it's
    # definitely a statement, we just don't know what type yet.
    match_grammar: Matchable = Sequence(
        "DELETE",
        Ref("FromClauseSegment"),
        Ref("WhereClauseSegment", optional=True),
    )


class UpdateStatementSegment(BaseSegment):
    """An `Update` statement.

    UPDATE <table name>
    SET <set clause list> [ WHERE <search condition> ]
    """

    type = "update_statement"
    match_grammar: Matchable = Sequence(
        "UPDATE",
        Indent,
        Ref("TableReferenceSegment"),
        # SET is not a reserved word in all dialects (e.g. RedShift)
        # So specifically exclude as an allowed implicit alias to avoid parsing errors
        Ref("AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True),
        Dedent,
        Ref("SetClauseListSegment"),
        Ref("FromClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
    )


class SetClauseListSegment(BaseSegment):
    """SQL 1992 set clause list.

    <set clause list> ::= <set clause> [ { <comma> <set clause> }... ]
    <set clause> ::= <object column> <equals operator> <update source>
    <update source> ::= <value expression> | <null specification> | DEFAULT
    <object column> ::= <column name>
    """

    type = "set_clause_list"
    match_grammar: Matchable = Sequence(
        "SET",
        Indent,
        Delimited(Ref("SetClauseSegment")),
        Dedent,
    )


class SetClauseSegment(BaseSegment):
    """SQL 1992 set clause.

    <set clause> ::= <object column> <equals operator> <update source>
    <update source> ::= <value expression> | <null specification> | DEFAULT
    <object column> ::= <column name>
    """

    type = "set_clause"
    match_grammar: Matchable = Sequence(
        Ref("ColumnReferenceSegment"),
        Ref("EqualsSegment"),
        OneOf(
            Ref("LiteralGrammar"),
            Ref("BareFunctionSegment"),
            Ref("FunctionSegment"),
            Ref("ColumnReferenceSegment"),
            Ref("ExpressionSegment"),
            Ref("ValuesClauseSegment"),
            "DEFAULT",
        ),
    )


class CreateCastStatementSegment(BaseSegment):
    """A `CREATE CAST` statement.

    https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_63_user_defined_cast_definition
    """

    type = "create_cast_statement"
    match_grammar: Matchable = Sequence(
        "CREATE",
        "CAST",
        Bracketed(
            Ref("DatatypeSegment"),
            "AS",
            Ref("DatatypeSegment"),
        ),
        "WITH",
        Ref.keyword("SPECIFIC", optional=True),
        OneOf(
            "ROUTINE",
            "FUNCTION",
            "PROCEDURE",
            Sequence(
                OneOf("INSTANCE", "STATIC", "CONSTRUCTOR", optional=True),
                "METHOD",
            ),
        ),
        Ref("FunctionNameSegment"),
        Ref("FunctionParameterListGrammar", optional=True),
        Sequence("FOR", Ref("ObjectReferenceSegment"), optional=True),
        Sequence("AS", "ASSIGNMENT", optional=True),
    )


class DropCastStatementSegment(BaseSegment):
    """A `DROP CAST` statement.

    https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_64_drop_user_defined_cast_statement
    """

    type = "drop_cast_statement"
    match_grammar: Matchable = Sequence(
        "DROP",
        "CAST",
        Bracketed(
            Ref("DatatypeSegment"),
            "AS",
            Ref("DatatypeSegment"),
        ),
        Ref("DropBehaviorGrammar", optional=True),
    )


class FunctionDefinitionGrammar(BaseSegment):
    """This is the body of a `CREATE FUNCTION AS` statement."""

    type = "function_definition"
    match_grammar: Matchable = Sequence(
        "AS",
        Ref("QuotedLiteralSegment"),
        Sequence(
            "LANGUAGE",
            Ref("NakedIdentifierSegment"),
            optional=True,
        ),
    )


class CreateFunctionStatementSegment(BaseSegment):
    """A `CREATE FUNCTION` statement.

    This version in the ANSI dialect should be a "common subset" of the
    structure of the code for those dialects.

    postgres: https://www.postgresql.org/docs/9.1/sql-createfunction.html
    snowflake: https://docs.snowflake.com/en/sql-reference/sql/create-function.html
    bigquery: https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions
    """

    type = "create_function_statement"

    match_grammar: Matchable = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        Ref("TemporaryGrammar", optional=True),
        "FUNCTION",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("FunctionNameSegment"),
        Ref("FunctionParameterListGrammar"),
        Sequence(
            # Optional function return type
            "RETURNS",
            Ref("DatatypeSegment"),
            optional=True,
        ),
        Ref("FunctionDefinitionGrammar"),
    )


class FunctionParameterListGrammar(BaseSegment):
    """The parameters for a function, i.e.
    `(string, number)`."""

    type = "function_parameter_list"
    # Function parameter list
    match_grammar: Matchable = Bracketed(
        Delimited(
            Ref("FunctionParameterGrammar"),
            optional=True,
        ),
    )


class DropFunctionStatementSegment(BaseSegment):
    """A `DROP FUNCTION` statement."""

    type = "drop_function_statement"

    match_grammar = Sequence(
        "DROP",
        "FUNCTION",
        Ref("IfExistsGrammar", optional=True),
        Ref("FunctionNameSegment"),
    )


class CreateModelStatementSegment(BaseSegment):
    """A BigQuery `CREATE MODEL` statement."""

    type = "create_model_statement"
    # https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create
    match_grammar: Matchable = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "MODEL",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
        Sequence(
            "OPTIONS",
            Bracketed(
                Delimited(
                    Sequence(
                        Ref("ParameterNameSegment"),
                        Ref("EqualsSegment"),
                        OneOf(
                            # This covers many but not all of the extensive list
                            # of possible 'CREATE MODEL' options.
                            Ref("LiteralGrammar"),  # Single value
                            Bracketed(
                                # E.g. input_label_cols: list of column names
                                Delimited(Ref("QuotedLiteralSegment")),
                                bracket_type="square",
                                optional=True,
                            ),
                        ),
                    ),
                )
            ),
            optional=True,
        ),
        "AS",
        Ref("SelectableGrammar"),
    )


class CreateUserStatementSegment(BaseSegment):
    """A `CREATE USER` statement.

    A very simple create user syntax which can be extended
    by other dialects.
    """

    type = "create_user_statement"
    match_grammar: Matchable = Sequence(
        "CREATE",
        "USER",
        Ref("RoleReferenceSegment"),
    )


class CreateRoleStatementSegment(BaseSegment):
    """A `CREATE ROLE` statement.

    A very simple create role syntax which can be extended
    by other dialects.
    """

    type = "create_role_statement"
    match_grammar: Matchable = Sequence(
        "CREATE",
        "ROLE",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("RoleReferenceSegment"),
    )


class DropRoleStatementSegment(BaseSegment):
    """A `DROP ROLE` statement with CASCADE option."""

    type = "drop_role_statement"

    match_grammar = Sequence(
        "DROP",
        "ROLE",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
    )


class DropModelStatementSegment(BaseSegment):
    """A `DROP MODEL` statement."""

    type = "drop_model_statement"
    # DROP MODEL [IF EXISTS]
    # https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model
    match_grammar: Matchable = Sequence(
        "DROP",
        "MODEL",
        Ref("IfExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
    )


class MLTableExpressionSegment(BaseSegment):
    """An ML table expression."""

    type = "ml_table_expression"
    # E.g.
    # ML.WEIGHTS(MODEL `project.dataset.model`)
    match_grammar: Matchable = Sequence(
        "ML",
        Ref("DotSegment"),
        Ref("SingleIdentifierGrammar"),
        Bracketed(
            Sequence("MODEL", Ref("ObjectReferenceSegment")),
            Sequence(
                Ref("CommaSegment"),
                Bracketed(
                    Ref("SelectableGrammar"),
                ),
                optional=True,
            ),
        ),
    )


class StatementSegment(BaseSegment):
    """A generic statement segment, matching any of its child subsegments."""

    type = "statement"
    match_grammar: Matchable = OneOf(
        Ref("SelectableGrammar"),
        Ref("MergeStatementSegment"),
        Ref("InsertStatementSegment"),
        Ref("TransactionStatementSegment"),
        Ref("DropTableStatementSegment"),
        Ref("DropViewStatementSegment"),
        Ref("CreateUserStatementSegment"),
        Ref("DropUserStatementSegment"),
        Ref("TruncateStatementSegment"),
        Ref("AccessStatementSegment"),
        Ref("CreateTableStatementSegment"),
        Ref("CreateRoleStatementSegment"),
        Ref("DropRoleStatementSegment"),
        Ref("AlterTableStatementSegment"),
        Ref("CreateSchemaStatementSegment"),
        Ref("SetSchemaStatementSegment"),
        Ref("DropSchemaStatementSegment"),
        Ref("DropTypeStatementSegment"),
        Ref("CreateDatabaseStatementSegment"),
        Ref("DropDatabaseStatementSegment"),
        Ref("CreateIndexStatementSegment"),
        Ref("DropIndexStatementSegment"),
        Ref("CreateViewStatementSegment"),
        Ref("DeleteStatementSegment"),
        Ref("UpdateStatementSegment"),
        Ref("CreateCastStatementSegment"),
        Ref("DropCastStatementSegment"),
        Ref("CreateFunctionStatementSegment"),
        Ref("DropFunctionStatementSegment"),
        Ref("CreateModelStatementSegment"),
        Ref("DropModelStatementSegment"),
        Ref("DescribeStatementSegment"),
        Ref("UseStatementSegment"),
        Ref("ExplainStatementSegment"),
        Ref("CreateSequenceStatementSegment"),
        Ref("AlterSequenceStatementSegment"),
        Ref("DropSequenceStatementSegment"),
        Ref("CreateTriggerStatementSegment"),
        Ref("DropTriggerStatementSegment"),
        terminators=[Ref("DelimiterGrammar")],
    )

    def get_table_references(self) -> set[str]:
        """Use parsed tree to extract table references."""
        table_refs = {
            tbl_ref.raw for tbl_ref in self.recursive_crawl("table_reference")
        }
        cte_refs = {
            cast(CTEDefinitionSegment, cte_def).get_identifier().raw
            for cte_def in self.recursive_crawl("common_table_expression")
        }
        # External references are any table references which aren't
        # also cte aliases.
        return table_refs - cte_refs


class WithNoSchemaBindingClauseSegment(BaseSegment):
    """WITH NO SCHEMA BINDING clause for Redshift's Late Binding Views.

    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_VIEW.html
    """

    type = "with_no_schema_binding_clause"
    match_grammar: Matchable = Sequence(
        "WITH",
        "NO",
        "SCHEMA",
        "BINDING",
    )


class WithDataClauseSegment(BaseSegment):
    """WITH [NO] DATA clause for Postgres' MATERIALIZED VIEWS.

    https://www.postgresql.org/docs/9.3/sql-creatematerializedview.html
    """

    type = "with_data_clause"
    match_grammar: Matchable = Sequence("WITH", Sequence("NO", optional=True), "DATA")


class DescribeStatementSegment(BaseSegment):
    """A `Describe` statement.

    DESCRIBE <object type> <object name>
    """

    type = "describe_statement"
    match_grammar: Matchable = Sequence(
        "DESCRIBE",
        Ref("NakedIdentifierSegment"),
        Ref("ObjectReferenceSegment"),
    )


class UseStatementSegment(BaseSegment):
    """A `USE` statement."""

    type = "use_statement"
    match_grammar: Matchable = Sequence(
        "USE",
        Ref("DatabaseReferenceSegment"),
    )


class ExplainStatementSegment(BaseSegment):
    """An `Explain` statement.
EXPLAIN explainable_stmt """ type = "explain_statement" explainable_stmt: Matchable = OneOf( Ref("SelectableGrammar"), Ref("InsertStatementSegment"), Ref("UpdateStatementSegment"), Ref("DeleteStatementSegment"), ) match_grammar: Matchable = Sequence( "EXPLAIN", explainable_stmt, ) class CreateSequenceOptionsSegment(BaseSegment): """Options for Create Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm """ type = "create_sequence_options_segment" match_grammar: Matchable = OneOf( Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment") ), Ref("SequenceMinValueGrammar"), Ref("SequenceMaxValueGrammar"), OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"), OneOf("CYCLE", "NOCYCLE"), Ref("OrderNoOrderGrammar"), ) class CreateSequenceStatementSegment(BaseSegment): """Create Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm """ type = "create_sequence_statement" match_grammar: Matchable = Sequence( "CREATE", "SEQUENCE", Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("CreateSequenceOptionsSegment"), optional=True), ) class AlterSequenceOptionsSegment(BaseSegment): """Options for Alter Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm """ type = "alter_sequence_options_segment" match_grammar: Matchable = OneOf( Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), Ref("SequenceMinValueGrammar"), Ref("SequenceMaxValueGrammar"), OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"), OneOf("CYCLE", "NOCYCLE"), Ref("OrderNoOrderGrammar"), ) class AlterSequenceStatementSegment(BaseSegment): """Alter Sequence Statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm """ type = "alter_sequence_statement" match_grammar: Matchable = Sequence( "ALTER", "SEQUENCE", Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("AlterSequenceOptionsSegment")), ) class DropSequenceStatementSegment(BaseSegment): """Drop Sequence Statement. https://docs.oracle.com/cd/E11882_01/server.112/e41084/statements_9001.htm """ type = "drop_sequence_statement" match_grammar: Matchable = Sequence( "DROP", "SEQUENCE", Ref("SequenceReferenceSegment") ) class DatePartFunctionNameSegment(BaseSegment): """DATEADD function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar: Matchable = Ref("DatePartFunctionName") class CreateTriggerStatementSegment(BaseSegment): """Create Trigger Statement. 
    https://www.postgresql.org/docs/14/sql-createtrigger.html
    Edited as per the notes in the above link regarding what doesn't match ANSI.
    """

    type = "create_trigger"

    match_grammar: Matchable = Sequence(
        "CREATE",
        "TRIGGER",
        Ref("TriggerReferenceSegment"),
        OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF"), optional=True),
        Delimited(
            "INSERT",
            "DELETE",
            Sequence(
                "UPDATE",
                "OF",
                Delimited(
                    Ref("ColumnReferenceSegment"),
                    terminators=["OR", "ON"],
                ),
            ),
            delimiter="OR",
            terminators=["ON"],
        ),
        "ON",
        Ref("TableReferenceSegment"),
        AnyNumberOf(
            Sequence(
                "REFERENCING",
                "OLD",
                "ROW",
                "AS",
                Ref("ParameterNameSegment"),
                "NEW",
                "ROW",
                "AS",
                Ref("ParameterNameSegment"),
            ),
            Sequence("FROM", Ref("TableReferenceSegment")),
            OneOf(
                Sequence("NOT", "DEFERRABLE"),
                Sequence(
                    Ref.keyword("DEFERRABLE", optional=True),
                    OneOf(
                        Sequence("INITIALLY", "IMMEDIATE"),
                        Sequence("INITIALLY", "DEFERRED"),
                    ),
                ),
            ),
            Sequence(
                "FOR", Ref.keyword("EACH", optional=True), OneOf("ROW", "STATEMENT")
            ),
            Sequence("WHEN", Bracketed(Ref("ExpressionSegment"))),
        ),
        Sequence(
            "EXECUTE",
            "PROCEDURE",
            Ref("FunctionNameIdentifierSegment"),
            Ref("FunctionContentsSegment"),
            optional=True,
        ),
    )


class DropTriggerStatementSegment(BaseSegment):
    """Drop Trigger Statement.

    Taken from the specification in
    https://www.postgresql.org/docs/14/sql-droptrigger.html
    Edited as per the notes in the above link regarding what doesn't match ANSI.
    """

    type = "drop_trigger"

    match_grammar: Matchable = Sequence(
        "DROP",
        "TRIGGER",
        Ref("IfExistsGrammar", optional=True),
        Ref("TriggerReferenceSegment"),
    )


class SamplingExpressionSegment(BaseSegment):
    """A sampling expression."""

    type = "sample_expression"
    match_grammar: Matchable = Sequence(
        "TABLESAMPLE",
        OneOf("BERNOULLI", "SYSTEM"),
        Bracketed(Ref("NumericLiteralSegment")),
        Sequence(
            OneOf("REPEATABLE"),
            Bracketed(Ref("NumericLiteralSegment")),
            optional=True,
        ),
    )


class TemporalQuerySegment(BaseSegment):
    """A segment that allows Temporal Queries to be run.

    https://learn.microsoft.com/en-us/sql/relational-databases/tables/temporal-tables
    """

    type = "temporal_query"

    match_grammar: Matchable = Nothing()


class LocalAliasSegment(BaseSegment):
    """The `LOCAL.ALIAS` syntax allows using a column's alias name within clauses.

    A hookpoint for other dialects, e.g. Exasol.
    """

    type = "local_alias_segment"

    match_grammar: Matchable = Nothing()


class PathSegment(BaseSegment):
    """A reference to a path."""

    type = "path_segment"

    match_grammar: Matchable = OneOf(
        Sequence(
            Ref("SlashSegment"),
            Delimited(
                TypedParser("word", WordSegment, type="path_segment"),
                delimiter=Ref("SlashSegment"),
                allow_gaps=False,
            ),
        ),
        Ref("QuotedLiteralSegment"),
    )


class WithFillSegment(BaseSegment):
    """Prefix for WITH FILL clause.

    A hookpoint for other dialects, e.g. ClickHouse.
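
    e.g. (illustrative only, ClickHouse syntax)
    `ORDER BY x WITH FILL FROM 1 TO 10`.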
""" type = "with_fill" match_grammar: Matchable = Nothing() sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_ansi_keywords.py000066400000000000000000000172311503426445100244730ustar00rootroot00000000000000"""A list of all SQL key words.""" ansi_reserved_keywords = """SELECT JOIN ON USING CROSS INNER LEFT RIGHT OUTER INTERVAL CASE FULL NOT NULL UNION IGNORE RESPECT PARTITION ORDER ROWS SET NATURAL """ ansi_unreserved_keywords = """ABORT ABS ABSOLUTE ACCESS ACCOUNT ACCOUNTS ACTION ADA ADD ADMIN AFTER AGGREGATE ALIAS ALL ALLOCATE ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APPLY ARE ARRAY AS ASC ASENSITIVE ASSERTION ASSIGNMENT ASYMMETRIC AT ATOMIC ATTRIBUTE ATTRIBUTES AUDIT AUTHORIZATION AUTO_INCREMENT AVG AVG_ROW_LENGTH BACKUP BACKWARD BEFORE BEGIN BERNOULLI BETWEEN BIGINT BINARY BINDING BIT BIT_LENGTH BITVAR BLOB BOOL BOOLEAN BOTH BREADTH BREAK BROWSE BULK BY CACHE CALL CALLED CARDINALITY CASCADE CASCADED CAST CATALOG CATALOG_NAME CEIL CEILING CHAIN CHANGE CHAR CHARACTER CHARACTERISTICS CHARACTER_LENGTH CHARACTERS CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA CHAR_LENGTH CHECK CHECKED CHECKPOINT CHECKSUM CLASS CLASS_ORIGIN CLOB CLOSE CLUSTER CLUSTERED COALESCE COBOL COLLATE COLLATION COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA COLLECT COLUMN COLUMN_NAME COLUMNS COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT COMMIT COMMITTED COMPLETION COMPRESS COMPUTE CONDITION CONDITION_NUMBER CONNECT CONNECTION CONNECTION_NAME CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINTS CONSTRAINT_SCHEMA CONSTRUCTOR CONTAINS CONTAINSTABLE CONTINUE CONVERSION CONVERT COPY CORR CORRESPONDING COUNT COVAR_POP COVAR_SAMP CREATE CREATEDB CREATEROLE CREATEUSER CSV CUBE CUME_DIST CURRENT CURRENT_DATE CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER CURSOR CURSOR_NAME CYCLE DATA DATABASE DATABASES DATE DATETIME DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY DAYS DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAYOFMONTH DAYOFWEEK DAYOFYEAR DAY_SECOND DBCC DEALLOCATE DEC DECIMAL DECLARE DEFAULT DEFAULTS DEFERRABLE DEFERRED DEFINED DEFINER DEGREE DELAYED DELAY_KEY_WRITE DELETE DELIMITER DELIMITERS DENSE_RANK DENY DEPTH DEREF DERIVED DESC DESCRIBE DESCRIPTOR DESTROY DESTRUCTOR DETERMINISTIC DIAGNOSTICS DICTIONARY DISABLE DISCONNECT DISK DISPATCH DISTINCT DISTINCTROW DISTRIBUTED DIV DO DOMAIN DOUBLE DROP DUMMY DUMP DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH ELEMENT ELSE ELSEIF ENABLE ENCLOSED ENCODING ENCRYPTED END END-EXEC ENUM EQUALS ERRLVL ESCAPE ESCAPED EVERY EXCEPT EXCEPTION EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXECUTION EXISTING EXISTS EXIT EXP EXPLAIN EXTENSION EXTERNAL EXTRACT FALSE FETCH FIELDS FILE FILLFACTOR FILTER FINAL FIRST FLOAT FLOAT4 FLOAT8 FLOOR FLUSH FOLLOWING FOR FORCE FOREIGN FORMAT FORTRAN FORWARD FOUND FREE FREETEXT FREETEXTTABLE FREEZE FROM FULLTEXT FUNCTION FUNCTIONS FUSION FUTURE G GENERAL GENERATED GET GLOBAL GO GOTO GRANT GRANTED GRANTS GREATEST GROUP GROUPING HANDLER HAVING HEADER HEAP HIERARCHY HIGH_PRIORITY HOLD HOLDLOCK HOST HOSTS HOUR HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IDENTIFIED IDENTITY IDENTITYCOL IDENTITY_INSERT IF ILIKE IMMEDIATE IMMUTABLE IMPLEMENTATION IMPLICIT IMPORTED IN INCLUDE INCLUDING INCREMENT INDEX INDICATOR INFILE INFIX INHERIT INHERITS INITIAL INITIALIZE INITIALLY INOUT INPUT INSENSITIVE INSERT INSERT_ID INSTANCE INSTANTIABLE INSTEAD INT INT1 INT2 INT3 INT4 INT8 INTEGER INTEGRATION INTEGRATIONS INTERSECT INTERSECTION INTO INVOKER IS ISAM ISNULL ISOLATION 
ITERATE JSON K KEY KEY_MEMBER KEYS KEY_TYPE KILL LANCOMPILER LANGUAGE LARGE LAST LAST_INSERT_ID LATERAL LEADING LEAST LEAVE LENGTH LESS LEVEL LIKE LIMIT LINENO LINES LISTEN LN LOAD LOCAL LOCALTIME LOCALTIMESTAMP LOCATION LOCATOR LOCK LOCKS LOGIN LOGS LONG LONGBLOB LONGTEXT LOOP LOWER LOW_PRIORITY M MANAGE MAP MASKING MATCH MATCHED MATERIALIZED MAX MAXEXTENTS MAX_ROWS MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MEMBER MERGE MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METHOD MIDDLEINT MILLISECOND MIN MIN_ROWS MINUS MINUTE MINUTE_MICROSECOND MINUTE_SECOND MINVALUE ML MLSLABEL MOD MODE MODEL MODIFIES MODIFY MODULE MONITOR MONTH MONTHNAME MORE MOVE MULTISET MUMPS MYISAM NAME NAMES NAN NATIONAL NCHAR NCLOB NESTING NEW NEXT NO NOAUDIT NOCACHE NOCHECK NOCOMPRESS NOCREATEDB NOCREATEROLE NOCREATEUSER NOCYCLE NOINHERIT NOLOGIN NONCLUSTERED NONE NOORDER NORMALIZE NORMALIZED NOSUPERUSER NOTHING NOTIFY NOTNULL NOWAIT NO_WRITE_TO_BINLOG NULLABLE NULLIF NULLS NUMBER NUMERIC OBJECT OBJECTS OCTET_LENGTH OCTETS OF OFF OFFLINE OFFSET OFFSETS OIDS OLD ONLINE ONLY OPEN OPENDATASOURCE OPENQUERY OPENROWSET OPENXML OPERATE OPERATION OPERATOR OPTIMIZE OPTION OPTIONALLY OPTIONS OR ORDERING ORDINALITY OTHERS OUT OUTFILE OUTPUT OVER OVERLAPS OVERLAY OVERRIDING OVERWRITE OWNER OWNERSHIP PACK_KEYS PAD PARAMETER PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION PARAMETERS PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PARTIAL PASCAL PASSWORD PATH PCTFREE PERCENT PERCENTILE_CONT PERCENTILE_DISC PERCENT_RANK PIPE PLACING PLAN PLI POLICY POSITION POSTFIX POWER PRECEDING PRECISION PREFIX PREORDER PREPARE PREPARED PRESERVE PRIMARY PRINT PRIOR PRIVILEGES PROC PROCEDURAL PROCEDURE PROCEDURES PROCESS PROCESSLIST PUBLIC PURGE QUALIFY QUARTER QUOTE RAID0 RAISERROR RANGE RANK RAW READ READS READTEXT REAL RECHECK RECONFIGURE RECURSIVE REF REFERENCE_USAGE REFERENCES REFERENCING REGEXP REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY REINDEX RELATIVE RELEASE RELOAD RENAME REPEAT REPEATABLE REPLACE REPLICATION REQUIRE RESET RESIGNAL RESOURCE RESTART RESTORE RESTRICT RESULT RETURN RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNS REVOKE RLIKE ROLE ROLES ROLLBACK ROLLUP ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA ROUTINES ROW ROWCOUNT ROW_COUNT ROWGUIDCOL ROWID ROWNUM ROW_NUMBER RULE SAVE SAVEPOINT SCALE SCHEMA SCHEMA_NAME SCHEMAS SCOPE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA SCROLL SEARCH SECOND SECOND_MICROSECOND SECTION SECURITY SELF SENSITIVE SEPARATOR SEQUENCE SEQUENCES SERIALIZABLE SERVER SERVER_NAME SESSION SESSION_USER SETOF SETS SETUSER SHARE SHARES SHOW SHUTDOWN SIGNAL SIMILAR SIMPLE SIZE SMALLINT SOME SONAME SOURCE SPACE SPATIAL SPECIFIC SPECIFIC_NAME SPECIFICTYPE SQL SQL_BIG_RESULT SQL_BIG_SELECTS SQL_BIG_TABLES SQLCA SQL_CALC_FOUND_ROWS SQLCODE SQLERROR SQLEXCEPTION SQL_LOG_OFF SQL_LOG_UPDATE SQL_LOW_PRIORITY_UPDATES SQL_SELECT_LIMIT SQL_SMALL_RESULT SQLSTATE SQLWARNING SQL_WARNINGS SQRT SSL STABLE STAGE STAGES START STARTING STARTS STATE STATEMENT STATIC STATISTICS STDDEV_POP STDDEV_SAMP STDIN STDOUT STORAGE STRAIGHT_JOIN STREAM STREAMS STRICT STRING STRUCTURE STYLE SUBCLASS_ORIGIN SUBLIST SUBMULTISET SUBSTRING SUCCESSFUL SUM SUPERUSER SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM SYSTEM_USER TABLE TABLE_NAME TABLES TABLESAMPLE TABLESPACE TASK TASKS TEMP TEMPLATE TEMPORARY TERMINATE TERMINATED TEXT TEXTSIZE THAN THEN TIES TIME TIMESTAMP TIMEZONE_HOUR TIMEZONE_MINUTE TINYBLOB TINYINT TINYTEXT TO TOAST TOP 
TOP_LEVEL_COUNT TRAILING TRAN TRANSACTION TRANSACTION_ACTIVE TRANSACTIONS TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSFORM TRANSFORMS TRANSIENT TRANSLATE TRANSLATION TREAT TRIGGER TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA TRIM TRUE TRUNCATE TRUSTED TSEQUAL TYPE UESCAPE UID UNBOUNDED UNCOMMITTED UNDER UNDO UNENCRYPTED UNIQUE UNKNOWN UNLISTEN UNLOCK UNNAMED UNNEST UNSIGNED UNTIL UPDATE UPDATETEXT UPPER USAGE USE USE_ANY_ROLE USER USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA USERS UTC_DATE UTC_TIME UTC_TIMESTAMP VACUUM VALID VALIDATE VALIDATOR VALUE VALUES VARBINARY VARCHAR VARCHAR2 VARCHARACTER VARIABLE VARIABLES VAR_POP VAR_SAMP VARYING VERBOSE VERSION VIEW VIEWS VOLATILE WAITFOR WAREHOUSE WAREHOUSES WEEK WEEKDAY WHEN WHENEVER WHERE WHILE WIDTH_BUCKET WINDOW WITH WITHIN WITHOUT WORK WRAPPER WRITE WRITETEXT X509 XML XOR YAML YEAR YEAR_MONTH ZEROFILL ZONE""" sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_athena.py000066400000000000000000000606431503426445100230570ustar00rootroot00000000000000"""The AWS Athena dialect. https://docs.aws.amazon.com/athena/latest/ug/what-is.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_trino as trino from sqlfluff.dialects.dialect_athena_keywords import ( athena_reserved_keywords, athena_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") athena_dialect = ansi_dialect.copy_as( "athena", formatted_name="AWS Athena", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, ``""`` or |back_quotes|, Identifiers: ``""`` or |back_quotes| The dialect for `Athena `_ on Amazon Web Services (AWS).""", ) athena_dialect.sets("unreserved_keywords").update(athena_unreserved_keywords) athena_dialect.sets("reserved_keywords").update(athena_reserved_keywords) athena_dialect.insert_lexer_matchers( # Array Operations: https://prestodb.io/docs/0.217/functions/array.html [ StringLexer("right_arrow", "->", CodeSegment), ], before="like_operator", ) athena_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) athena_dialect.add( StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), RightArrowOperator=StringParser("->", SymbolSegment, type="binary_operator"), JsonfileKeywordSegment=StringParser("JSONFILE", KeywordSegment, type="file_format"), RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), OrcKeywordSegment=StringParser("ORCFILE", KeywordSegment, type="file_format"), ParquetKeywordSegment=StringParser( "PARQUETFILE", KeywordSegment, type="file_format" ), AvroKeywordSegment=StringParser("AVROFILE", KeywordSegment, type="file_format"), IonKeywordSegment=StringParser("IONFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), PropertyGrammar=Sequence( Ref("QuotedLiteralSegment"), 
Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), LocationGrammar=Sequence("LOCATION", Ref("QuotedLiteralSegment")), BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))), CTASPropertyGrammar=Sequence( OneOf( "format", "partitioned_by", "bucketed_by", "bucket_count", "write_compression", "orc_compression", "parquet_compression", "compression_level", "field_delimiter", "is_external", "table_type", "external_location", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), CTASIcebergPropertyGrammar=Sequence( OneOf( "format", "partitioned_by", "bucketed_by", "bucket_count", "write_compression", "orc_compression", "parquet_compression", "compression_level", "field_delimiter", "is_external", "table_type", # Iceberg-specific properties "location", "partitioning", "vacuum_max_snapshot_age_seconds", "vacuum_min_snapshots_to_keep", "optimize_rewrite_min_data_file_size_bytes", "optimize_rewrite_max_data_file_size_bytes", "optimize_rewrite_data_file_threshold", "optimize_rewrite_delete_file_threshold", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), BracketedCTASPropertyGrammar=Bracketed( OneOf( Delimited( Ref("CTASPropertyGrammar"), ), Delimited( Ref("CTASIcebergPropertyGrammar"), ), ), ), UnloadPropertyGrammar=Sequence( OneOf( "format", "partitioned_by", "compression", "field_delimiter", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), BracketedUnloadPropertyGrammar=Bracketed(Delimited(Ref("UnloadPropertyGrammar"))), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), SerdePropertiesGrammar=Sequence( "WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar") ), TerminatedByGrammar=Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment")), FileFormatGrammar=OneOf( "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", "ION", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), StoredAsGrammar=Sequence("STORED", "AS", Ref("FileFormatGrammar")), StoredByGrammar=Sequence( "STORED", "BY", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), StorageFormatGrammar=OneOf( Sequence( Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), ), Ref("StoredByGrammar"), ), CommentGrammar=Sequence("COMMENT", Ref("QuotedLiteralSegment")), PartitionSpecGrammar=Sequence( "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", casefold=str.lower, ), ) athena_dialect.replace( LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ParameterSegment"), ] ), AccessorGrammar=Sequence( AnyNumberOf( Ref("ArrayAccessorSegment"), optional=True, ), AnyNumberOf( Sequence( Ref("ObjectReferenceDelimiterGrammar"), Ref("ObjectReferenceSegment"), ), optional=True, ), ), QuotedLiteralSegment=OneOf( TypedParser("single_quote", LiteralSegment, type="quoted_literal"), TypedParser("double_quote", LiteralSegment, type="quoted_literal"), TypedParser("back_quote", LiteralSegment, type="quoted_literal"), ), TrimParametersGrammar=Nothing(), NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", 
casefold=str.lower, ) ), QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.lower ), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[ Ref("BackQuotedIdentifierSegment"), ] ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add arrow operators for functions (e.g. filter) Ref("RightArrowOperator"), ), PostFunctionGrammar=ansi_dialect.get_grammar("PostFunctionGrammar").copy( # UNNEST can optionally have a WITH ORDINALITY clause insert=[ Sequence("WITH", "ORDINALITY", optional=True), Ref("WithinGroupClauseSegment"), ] ), FunctionContentsGrammar=ansi_dialect.get_grammar("FunctionContentsGrammar").copy( insert=[ Ref("ListaggOverflowClauseSegment"), ] ), AlterTableDropColumnGrammar=Sequence( "DROP", Ref.keyword("COLUMN"), Ref("SingleIdentifierGrammar"), ), ) class WithinGroupClauseSegment(trino.WithinGroupClauseSegment): """An WITHIN GROUP clause for window functions. These are based on Trino. https://docs.aws.amazon.com/athena/latest/ug/functions-env3.html https://trino.io/docs/current/functions/aggregate.html#listagg """ class ListaggOverflowClauseSegment(trino.ListaggOverflowClauseSegment): """ON OVERFLOW clause of listagg function. These are based on Trino. https://docs.aws.amazon.com/athena/latest/ug/functions-env3.html https://trino.io/docs/current/functions/aggregate.html#listagg """ class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Ref("ArrayTypeSchemaSegment", optional=True), ) class ArrayTypeSchemaSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type_schema" match_grammar = Bracketed( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class MapTypeSegment(BaseSegment): """Expression to construct a MAP datatype.""" type = "map_type" match_grammar = Sequence( "MAP", Ref("MapTypeSchemaSegment", optional=True), ) class MapTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a MAP datatype.""" type = "map_type_schema" match_grammar = Bracketed( Sequence( Ref("PrimitiveTypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("NakedIdentifierSegment"), Ref("ColonSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), bracket_pairs_set="angle_bracket_pairs", ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class PrimitiveTypeSegment(BaseSegment): """Support Athena subset of Hive types. 
Primary Source: https://docs.aws.amazon.com/athena/latest/ug/data-types.html Additional Details: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types """ type = "primitive_type" match_grammar = OneOf( "BOOLEAN", "TINYINT", "SMALLINT", "INTEGER", # used in DML queries "INT", # used in DDL queries "BIGINT", "DOUBLE", "FLOAT", # used in DDL "REAL", # used "in SQL functions like SELECT CAST" Sequence( OneOf("DECIMAL", "CHAR", "VARCHAR"), Ref("BracketedArguments", optional=True), ), "STRING", "BINARY", "DATE", "TIMESTAMP", "VARBINARY", "JSON", "TIME", "IPADDRESS", "HyperLogLog", "P4HyperLogLog", ) class DatatypeSegment(BaseSegment): """Support complex Athena data types. Complex data types are typically used in either DDL statements or as the target type in casts. """ type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("StructTypeSegment"), Ref("ArrayTypeSegment"), Ref("MapTypeSegment"), Sequence( "ROW", Bracketed( Delimited( AnyNumberOf( Sequence( Ref("NakedIdentifierSegment"), Ref("DatatypeSegment"), ), Ref("LiteralGrammar"), ) ) ), ), Ref("TimeWithTZGrammar"), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("MsckRepairTableStatementSegment"), Ref("UnloadStatementSegment"), Ref("PrepareStatementSegment"), Ref("ExecuteStatementSegment"), Ref("ShowStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. Inspired on Hive Dialect with adjustments based on: https://docs.aws.amazon.com/pt_br/athena/latest/ug/create-table.html """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment", optional=True), Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Ref("CommentGrammar", optional=True), # `STORED AS` can be called before or after the additional table # properties below Ref("StoredAsGrammar", optional=True), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( OneOf( # External tables expect types... Ref("ColumnDefinitionSegment"), # Iceberg tables don't expect types. Ref("SingleIdentifierGrammar"), # Iceberg tables also allow partition transforms Ref("FunctionSegment"), ), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), # Second call of `STORED AS` to match when appears after Ref("StoredAsGrammar", optional=True), Ref("StorageFormatGrammar", optional=True), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Sequence( Sequence("WITH", Ref("BracketedCTASPropertyGrammar"), optional=True), "AS", OptionallyBracketed( Ref("SelectableGrammar"), ), Sequence("WITH", "NO", "DATA", optional=True), ), ), ) class MsckRepairTableStatementSegment(BaseSegment): """An `MSCK REPAIR TABLE` statement. 
The `MSCK REPAIR TABLE` command scans a file system such as Amazon S3 for Hive compatible partitions that were added to the file system after the table was created. https://docs.aws.amazon.com/athena/latest/ug/msck-repair-table.html """ type = "msck_repair_table_statement" match_grammar = Sequence( "MSCK", "REPAIR", "TABLE", Ref("TableReferenceSegment"), ) class RowFormatClauseSegment(BaseSegment): """`ROW FORMAT` clause in a CREATE statement.""" type = "row_format_clause" match_grammar = Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Sequence( "FIELDS", Ref("TerminatedByGrammar"), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "COLLECTION", "ITEMS", Ref("TerminatedByGrammar"), optional=True ), Sequence("MAP", "KEYS", Ref("TerminatedByGrammar"), optional=True), Sequence("LINES", Ref("TerminatedByGrammar"), optional=True), Sequence( "NULL", "DEFINED", "AS", Ref("QuotedLiteralSegment"), optional=True ), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause within in `WITH`, `SELECT`, `INSERT`.""" match_grammar = Sequence( "VALUES", Delimited(Ref("ExpressionSegment")), ) class InsertStatementSegment(BaseSegment): """`INSERT INTO` statement. https://docs.aws.amazon.com/athena/latest/ug/insert-into.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Sequence("DEFAULT", "VALUES"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectableGrammar")), ), ), ), ) class UnloadStatementSegment(BaseSegment): """An `UNLOAD` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html """ type = "unload_statement" match_grammar = Sequence( "UNLOAD", Bracketed(Ref("SelectableGrammar")), "TO", Ref("QuotedLiteralSegment"), Sequence("WITH", Ref("BracketedUnloadPropertyGrammar"), optional=True), ) class PrepareStatementSegment(BaseSegment): """A `prepare` statement. https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html """ type = "prepare_statement" match_grammar = Sequence( "PREPARE", Ref("TableReferenceSegment"), "FROM", OptionallyBracketed( OneOf( Ref("SelectableGrammar"), Ref("UnloadStatementSegment"), Ref("InsertStatementSegment"), ), ), ) class ExecuteStatementSegment(BaseSegment): """An `execute` statement. https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html """ type = "execute_statement" match_grammar = Sequence( "EXECUTE", Ref("TableReferenceSegment"), OneOf( Sequence( "USING", Delimited( Ref("LiteralGrammar"), ), ), optional=True, ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. Full Apache Hive `INTERVAL` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals """ type = "interval_expression" match_grammar = Sequence( Ref.keyword("INTERVAL", optional=True), OneOf( Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("DatetimeUnitSegment"), Sequence("TO", Ref("DatetimeUnitSegment"), optional=True), ), ), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """A `GROUP BY` clause like in `SELECT`. 
https://docs.aws.amazon.com/athena/latest/ug/select.html#:~:text=%5B-,GROUP,-BY%20%5B%20ALL%20%7C%20DISTINCT%20%5D%20grouping_expressions """ match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), # Can `GROUP BY 1` Ref("ExpressionSegment"), # Can `GROUP BY coalesce(col, 1)` ), terminators=[ Sequence("ORDER", "BY"), "LIMIT", "OFFSET", "HAVING", Ref("SetOperatorSegment"), ], ), Dedent, ) class ShowStatementSegment(BaseSegment): """A `show` execute statement. Full Apache Hive `SHOW` reference: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Show Athena supported subset: https://docs.aws.amazon.com/athena/latest/ug/ddl-reference.html """ type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( Sequence( "COLUMNS", OneOf("FROM", "IN"), OneOf( Sequence( Ref("DatabaseReferenceSegment"), Ref("TableReferenceSegment") ), Sequence( Ref("TableReferenceSegment"), Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), ), ), ), Sequence( "CREATE", OneOf("TABLE", "VIEW"), Ref("TableReferenceSegment"), ), Sequence( OneOf("DATABASES", "SCHEMAS"), Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "PARTITIONS", Ref("TableReferenceSegment"), ), Sequence( "TABLES", Sequence("IN", Ref("DatabaseReferenceSegment"), optional=True), Ref("QuotedLiteralSegment", optional=True), ), Sequence( "TBLPROPERTIES", Ref("TableReferenceSegment"), Bracketed(Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "VIEWS", Sequence("IN", Ref("DatabaseReferenceSegment"), optional=True), Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_athena_keywords.py000066400000000000000000000124551503426445100250040ustar00rootroot00000000000000"""A list of all Athena keywords. 
Presto List (for Athena v2): https://prestodb.io/docs/0.217/language/reserved.html Trino List (for Athena v3): https://trino.io/docs/current/language/reserved.html Hive List: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL """ athena_reserved_keywords = [ "ALL", "ALTER", "AND", "ARRAY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH", "BY", "CACHE", "CASE", "CAST", "CHAR", "COLUMN", "COMMIT", "CONF", "CONSTRAINT", "CREATE", "CROSS", "CUBE", "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURRENT", "CURSOR", "DATABASE", "DECIMAL", "DELETE", "DESCRIBE", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "EXCHANGE", "EXISTS", "EXTENDED", "EXTERNAL", "EXTRACT", "FALSE", "FETCH", "FLOAT", "FLOOR", "FOLLOWING", "FOR", "FOREIGN", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP", "GROUPING", "HAVING", "IF", "IMPORT", "IN", "INNER", "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LATERAL", "LEFT", "LESS", "LIKE", "LOCAL", "MACRO", "MAP", "MORE", "NONE", "NOT", "NULL", "NUMERIC", "OF", "ON", "ONLY", "OR", "ORDER", "OUT", "OUTER", "OVER", "PARTIALSCAN", "PARTITION", "PERCENT", "PRECEDING", "PRECISION", "PREPARE", "PRESERVE", "PRIMARY", "PROCEDURE", "RANGE", "READS", "REDUCE", "REFERENCES", "REGEXP", "REVOKE", "RIGHT", "RLIKE", "ROLLBACK", "ROLLUP", "SELECT", "SET", "SMALLINT", "START", "SYNC", "TABLE", "TABLESAMPLE", "THEN", "TO", "TRANSFORM", "TRIGGER", "TRUE", "TRUNCATE", "UNBOUNDED", "UNION", "UNIQUEJOIN", "UPDATE", "USING", "UTC_TMESTAMP", "VALUES", "VARCHAR", "WHEN", "WHERE", "WITH", ] athena_unreserved_keywords = [ "ABORT", "ADD", "ADMIN", "AFTER", "ANALYZE", "ARCHIVE", "ASC", "AUTOCOMMIT", "BEFORE", "BUCKET_COUNT", "BUCKET", "BUCKETED_BY", "BUCKETS", "CASCADE", "CHANGE", "CLUSTER", "CLUSTERED", "CLUSTERSTATUS", "COLLECTION", "COLUMNS", "COMMENT", "COMPACT", "COMPACTIONS", "COMPRESSION", "COMPRESSION_LEVEL", "COMPUTE", "CONCATENATE", "CONTINUE", "DATA", "DATABASES", "DATE", "DATETIME", "DAY", "DAYOFWEEK", "DBPROPERTIES", "DEFERRED", "DEFINED", "DELIMITED", "DEPENDENCY", "DESC", "DIRECTORIES", "DIRECTORY", "DISABLE", "DISTRIBUTE", "ELEM_TYPE", "ENABLE", "ERROR", "ESCAPED", "EXCLUSIVE", "EXPLAIN", "EXPORT", "EXTERNAL_LOCATION", "FIELD_DELIMITER", "FIELDS", "FILE", "FILEFORMAT", "FIRST", "FORMAT", "FORMATTED", "FUNCTIONS", "HOLD_DDLTIME", "HOUR", "HYPERLOGLOG", "IDXPROPERTIES", "IGNORE", "INDEX", "INDEXES", "INPATH", "INPUTDRIVER", "INPUTFORMAT", "IPADDRESS", "IS_EXTERNAL", "ISOLATION", "ITEMS", "JAR", "KEY_TYPE", "KEY", "KEYS", "LAST", "LEVEL", "LIMIT", "LINES", "LOAD", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "LONG", "MAPJOIN", "MATERIALIZED", "METADATA", "MINUS", "MINUTE", "MONTH", "MSCK", "NO_DROP", "NORELY", "NOSCAN", "NOVALIDATE", "NULLS", "OFFLINE", "OFFSET", "OPTIMIZE_REWRITE_MIN_DATA_FILE_SIZE_BYTES", "OPTIMIZE_REWRITE_MAX_DATA_FILE_SIZE_BYTES", "OPTIMIZE_REWRITE_DATA_FILE_THRESHOLD", "OPTIMIZE_REWRITE_DELETE_FILE_THRESHOLD", "OPTION", "ORC_COMPRESSION", "OUTPUTDRIVER", "OUTPUTFORMAT", "OVERFLOW", "OVERWRITE", "OWNER", "P4HYPERLOGLOG", "PARQUET_COMPRESSION", "PARTITIONED_BY", "PARTITIONED", "PARTITIONING", "PARTITIONS", "PLUS", "PRETTY", "PRINCIPALS", "PROTECTION", "PURGE", "QDIGEST", "READ", "READONLY", "REBUILD", "RECORDREADER", "RECORDWRITER", "REGEXP", "RELOAD", "RELY", "RENAME", "REPAIR", "REPLACE", "REPLICATION", "RESTRICT", "REWRITE", "RLIKE", "ROLE", "ROLES", "ROW", "ROWS", "SCHEMA", "SCHEMAS", "SECOND", "SEMI", "SERDE", "SERDEPROPERTIES", "SERVER", "SETS", "SHARED", "SHOW_DATABASE", "SHOW", "SKEWED", "SNAPSHOT", "SORT", "SORTED", 
"SSL", "STATISTICS", "STORED", "STREAMTABLE", "STRING", "STRUCT", "TABLE_TYPE", "TABLES", "TBLPROPERTIES", "TEMPORARY", "TERMINATED", "TIME", "TIMESTAMP", "TIMESTAMPTZ", "TINYINT", "TOUCH", "TRANSACTION", "TRANSACTIONS", "UNARCHIVE", "UNDO", "UNIONTYPE", "UNLOAD", "UNLOCK", "UNSET", "UNSIGNED", "URI", "USE", "USER", "UTC", "UTCTIMESTAMP", "VACUUM_MAX_SNAPSHOT_AGE_SECONDS", "VACUUM_MIN_SNAPSHOTS_TO_KEEP", "VALIDATE", "VALUE_TYPE", "VIEW", "VIEWS", "WINDOW", "WHILE", "WRITE_COMPRESSION", "YEAR", "ZONE", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_bigquery.py000066400000000000000000003256351503426445100234530ustar00rootroot00000000000000"""The BigQuery dialect. This inherits from the ansi dialect, with changes as specified by https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax and https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals """ from collections.abc import Generator from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseFileSegment, BaseSegment, Bracketed, BracketedSegment, CodeSegment, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralSegment, Matchable, MultiStringParser, Nothing, OneOf, OptionallyBracketed, ParseMode, RawSegment, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_bigquery_keywords import ( bigquery_reserved_keywords, bigquery_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") bigquery_dialect = ansi_dialect.copy_as( "bigquery", formatted_name="Google BigQuery", docstring="""**Default Casing**: BigQuery resolves unquoted column identifiers case insensitively, and table/dataset identifiers case sensitively (by default, unless :code:`is_case_insensitive` is set for the latter). Unless specified, columns are returned in the case which they were defined in, which means columns can be re-cased in the result set without aliasing e.g. if a table is defined with :code:`CREATE TEMPORARY TABLE foo (col1 int, COL2 int)` then :code:`SELECT * FROM foo` returns :code:`col1` and :code:`COL2` in the result, but :code:`SELECT COL1, col2 FROM foo` returns :code:`COL1` and :code:`col2` in the result. **Quotes**: String Literals: ``''``, ``""``, ``@`` or ``@@`` (with the quoted options, also supporting variants prefixes with ``r``/``R`` (for raw/regex expressions) or ``b``/``B`` (for byte strings)), Identifiers: ``""`` or |back_quotes|. The dialect for `BigQuery `_ on Google Cloud Platform (GCP).""", ) bigquery_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ StringLexer("right_arrow", "=>", CodeSegment), StringLexer("question_mark", "?", CodeSegment), RegexLexer( "at_sign_literal", r"@[a-zA-Z_][\w]*", LiteralSegment, segment_kwargs={"trim_chars": ("@",)}, ), RegexLexer( "double_at_sign_literal", r"@@[a-zA-Z_][\w\.]*", LiteralSegment, segment_kwargs={"trim_chars": ("@@",)}, ), StringLexer("pipe_operator", "|>", CodeSegment), ], before="equals", ) bigquery_dialect.patch_lexer_matchers( [ # Quoted literals can have r or b (case insensitive) prefixes, in any order, to # indicate a raw/regex string or byte sequence, respectively. Allow escaped # quote characters inside strings by allowing \" with an optional even multiple # of backslashes in front of it. 
# https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals # Triple quoted variant first, then single quoted RegexLexer( "single_quote", r"([rR]?[bB]?|[bB]?[rR]?)?('''((?(?:(?(?:(?((?((?", SymbolSegment, type="end_angle_bracket"), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), DashSegment=StringParser("-", SymbolSegment, type="dash"), PipeOperatorSegment=StringParser("|>", SymbolSegment, type="pipe_operator"), SelectClauseElementListGrammar=Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), QuestionMarkSegment=StringParser("?", SymbolSegment, type="question_mark"), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", ), DoubleAtSignLiteralSegment=TypedParser( "double_at_sign_literal", LiteralSegment, type="double_at_sign_literal", ), # Add a Full equivalent which also allow keywords NakedIdentifierFullSegment=RegexParser( r"[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier_all", ), NakedIdentifierPart=RegexParser( # The part of a an identifier after a hyphen. # NOTE: This one can match an "all numbers" variant. # https://cloud.google.com/resource-manager/docs/creating-managing-projects r"[A-Z0-9_]+", IdentifierSegment, type="naked_identifier", casefold=str.upper, ), NakedCSIdentifierPart=RegexParser( # Same as NakedIdentifierPart, but case-sensitive. r"[A-Z0-9_]+", IdentifierSegment, type="naked_identifier", ), NakedCSIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), QuotedCSIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), ), SingleCSIdentifierGrammar=OneOf( Ref("NakedCSIdentifierSegment"), Ref("QuotedCSIdentifierSegment"), terminators=[Ref("DotSegment")], ), SingleIdentifierFullGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("NakedIdentifierFullSegment"), ), DefaultDeclareOptionsGrammar=Sequence( "DEFAULT", OneOf( Ref("LiteralGrammar"), Bracketed(Ref("SelectStatementSegment")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ArrayLiteralSegment"), Ref("TupleSegment"), Ref("BaseExpressionElementGrammar"), terminators=[ Ref("SemicolonSegment"), ], ), ), ExtendedDatetimeUnitSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("extended_datetime_units"), CodeSegment, type="date_part", ) ), ProcedureNameIdentifierSegment=OneOf( # In BigQuery struct() has a special syntax, so we don't treat it as a function RegexParser( r"[A-Z_][A-Z0-9_]*", CodeSegment, type="procedure_name_identifier", anti_template=r"STRUCT", ), RegexParser( r"`[^`]*`", CodeSegment, type="procedure_name_identifier", ), ), ProcedureParameterGrammar=OneOf( Sequence( OneOf("IN", "OUT", "INOUT", optional=True), Ref("ParameterNameSegment", optional=True), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), ) bigquery_dialect.replace( # Override to allow _01 type identifiers which are valid in BigQuery # The strange regex here it to make sure we don't accidentally match numeric # literals. We also use a regex to explicitly exclude disallowed keywords. 
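    # For instance (our illustration): `_01` and `_my_col` match as naked
    # identifiers under the regex below, `1st` does not (it starts with a
    # digit), and a bare `SELECT` is rejected by the anti_template because
    # it is a reserved keyword.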
NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), FunctionContentsExpressionGrammar=OneOf( Ref("DatetimeUnitSegment"), Ref("DatePartWeekSegment"), Sequence( Ref("ExpressionSegment"), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), ), Sequence(Ref("ExpressionSegment"), "HAVING", OneOf("MIN", "MAX")), Ref("NamedArgumentSegment"), ), TrimParametersGrammar=Nothing(), # BigQuery allows underscore in parameter names, and also anything if quoted in # backticks ParameterNameSegment=OneOf( RegexParser(r"[A-Z_][A-Z0-9_]*", CodeSegment, type="parameter"), RegexParser(r"`[^`]*`", CodeSegment, type="parameter"), ), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "DATETIME", "TIME", "TIMESTAMP"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), JoinLikeClauseGrammar=Sequence( AnyNumberOf( Ref("FromPivotExpressionSegment"), Ref("FromUnpivotExpressionSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), ConditionalCrossJoinKeywordsGrammar=Nothing(), NaturalJoinKeywordsGrammar=Nothing(), UnconditionalCrossJoinKeywordsGrammar=Ref.keyword("CROSS"), MergeIntoLiteralGrammar=Sequence("MERGE", Ref.keyword("INTO", optional=True)), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), BracketedSetExpressionGrammar=Bracketed(Ref("SetExpressionSegment")), NotEnforcedGrammar=Sequence("NOT", "ENFORCED"), ReferenceMatchGrammar=Nothing(), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), "FETCH", Ref("PipeOperatorSegment"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "FETCH", "OFFSET", Ref("PipeOperatorSegment"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "FETCH", Ref("PipeOperatorSegment"), ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", "WINDOW", "FETCH", Ref("PipeOperatorSegment"), "ASC", "DESC", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", "FETCH", Ref("PipeOperatorSegment"), ), ) # Set Keywords bigquery_dialect.sets("unreserved_keywords").clear() bigquery_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", bigquery_unreserved_keywords ) bigquery_dialect.sets("reserved_keywords").clear() bigquery_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", bigquery_reserved_keywords ) # Add additional datetime units # https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#extract bigquery_dialect.sets("datetime_units").update( [ "MICROSECOND", "MILLISECOND", "SECOND", "MINUTE", "HOUR", "DAY", "DAYOFWEEK", "DAYOFYEAR", "WEEK", "ISOWEEK", "MONTH", "QUARTER", "YEAR", "ISOYEAR", ] ) # Add additional datetime units only recognised in some functions (e.g. 
extract) bigquery_dialect.sets("extended_datetime_units").update(["DATE", "DATETIME", "TIME"]) bigquery_dialect.sets("date_part_function_name").clear() bigquery_dialect.sets("date_part_function_name").update( [ "DATE_DIFF", "DATE_TRUNC", "DATETIME_DIFF", "DATETIME_TRUNC", "EXTRACT", "LAST_DAY", "TIME_DIFF", "TIME_TRUNC", "TIMESTAMP_DIFF", "TIMESTAMP_TRUNC", ] ) # In BigQuery, UNNEST() returns a "value table". # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#value_tables bigquery_dialect.sets("value_table_functions").update(["UNNEST"]) # Bracket pairs (a set of tuples). Note that BigQuery inherits the default # "bracket_pairs" set from ANSI. Here, we're adding a different set of bracket # pairs that are only available in specific contexts where they are # applicable. This limits the scope where BigQuery allows angle brackets, # eliminating many potential parsing errors with the "<" and ">" operators. bigquery_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", optional=True, ), ) class ForSystemTimeAsOfSegment(BaseSegment): """A `FOR SYSTEM_TIME AS OF` syntax. https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#for_system_time_as_of """ type = "for_system_time_as_of_segment" match_grammar = Sequence( "FOR", OneOf("SYSTEM_TIME", Sequence("SYSTEM", "TIME")), "AS", "OF", Ref("ExpressionSegment"), ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class SetOperatorSegment(BaseSegment): """A set operator UNION, INTERSECT or EXCEPT. 
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#set_operators """ type = "set_operator" match_grammar = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL")), Sequence("INTERSECT", "DISTINCT"), Sequence("EXCEPT", "DISTINCT"), Sequence( OneOf( "INNER", Sequence( OneOf("FULL", "LEFT"), Ref.keyword("OUTER", optional=True), ), "OUTER", optional=True, ), OneOf( Sequence("UNION", OneOf("ALL", "DISTINCT")), Sequence("INTERSECT", "DISTINCT"), Sequence("EXCEPT", "DISTINCT"), ), Sequence( OneOf( Sequence( "BY", "NAME", Sequence( "ON", Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), Sequence( Ref.keyword("STRICT", optional=True), "CORRESPONDING", Sequence( "BY", Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), ) ), ), ) class SelectStatementSegment(ansi.SelectStatementSegment): """Enhance `SELECT` statement to include QUALIFY.""" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), terminators=[ Ref("PipeOperatorSegment"), ], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance unordered `SELECT` statement to include QUALIFY.""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), terminators=[ Ref("PipeOperatorSegment"), ], ) class MultiStatementSegment(BaseSegment): """Overriding StatementSegment to allow for additional segment parsing.""" type = "multi_statement_segment" match_grammar: Matchable = OneOf( Ref("ForInStatementSegment"), Ref("RepeatStatementSegment"), Ref("WhileStatementSegment"), Ref("LoopStatementSegment"), Ref("IfStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("BeginStatementSegment"), ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. """ # NB: We don't need a match_grammar here because we're # going straight into instantiating it directly usually. 
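    # Rough sketch of what this grammar accepts (our illustration): a first
    # statement, any number of delimiter-separated statements after it, and
    # an optional trailing delimiter, e.g.:
    #   DECLARE x INT64; SET x = 1; SELECT x;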
match_grammar = Sequence( Sequence( OneOf( Ref("MultiStatementSegment"), Ref("StatementSegment"), ), ), AnyNumberOf( Ref("DelimiterGrammar"), OneOf( Ref("MultiStatementSegment"), Ref("StatementSegment"), ), ), Ref("DelimiterGrammar", optional=True), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("DeclareStatementSegment"), Ref("SetStatementSegment"), Ref("ExportStatementSegment"), Ref("LoadDataStatementSegment"), Ref("CreateExternalTableStatementSegment"), Ref("CreateSnapshotTableStatementSegment"), Ref("ExecuteImmediateSegment"), Ref("AssertStatementSegment"), Ref("CallStatementSegment"), Ref("ReturnStatementSegment"), Ref("BreakStatementSegment"), Ref("LeaveStatementSegment"), Ref("ContinueStatementSegment"), Ref("RaiseStatementSegment"), Ref("AlterViewStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("CreateMaterializedViewStatementSegment"), Ref("CreateMaterializedViewAsReplicaOfStatementSegment"), Ref("AlterMaterializedViewStatementSegment"), Ref("DropMaterializedViewStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("UndropSchemaStatementSegment"), Ref("AlterOrganizationStatementSegment"), Ref("AlterProjectStatementSegment"), Ref("CreateSearchIndexStatementSegment"), Ref("DropSearchIndexStatementSegment"), Ref("CreateVectorIndexStatementSegment"), Ref("DropVectorIndexStatementSegment"), Ref("CreateRowAccessPolicyStatementSegment"), Ref("DropRowAccessPolicyStatementSegment"), Ref("AlterBiCapacityStatementSegment"), Ref("CreateCapacityStatementSegment"), Ref("AlterCapacityStatementSegment"), Ref("DropCapacityStatementSegment"), Ref("CreateReservationStatementSegment"), Ref("AlterReservationStatementSegment"), Ref("DropReservationStatementSegment"), Ref("CreateAssignmentStatementSegment"), Ref("DropAssignmentStatementSegment"), Ref("DropTableFunctionStatementSegment"), Ref("CreateTableFunctionStatementSegment"), Ref("PipeStatementSegment"), ], ) class AssertStatementSegment(BaseSegment): """ASSERT segment. https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements """ type = "assert_statement" match_grammar: Matchable = Sequence( "ASSERT", Ref("ExpressionSegment"), Sequence( "AS", Ref("QuotedLiteralSegment"), optional=True, ), ) class ForInStatementsSegment(BaseSegment): """Statements within a FOR..IN...DO...END FOR statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#for-in """ type = "for_in_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "FOR")], reset_terminators=True, ) class ForInStatementSegment(BaseSegment): """FOR..IN...DO...END FOR statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#for-in """ type = "for_in_statement" match_grammar = Sequence( "FOR", Ref("SingleIdentifierGrammar"), "IN", Indent, Ref("SelectableGrammar"), Dedent, "DO", Indent, Ref("ForInStatementsSegment"), Dedent, "END", "FOR", ) class RepeatStatementsSegment(BaseSegment): """Statements within a REPEAT...UNTIL... END REPEAT statement. 
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#repeat """ type = "repeat_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=["UNTIL"], reset_terminators=True, ) class RepeatStatementSegment(BaseSegment): """REPEAT...END REPEAT statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#repeat """ type = "repeat_statement" match_grammar = Sequence( "REPEAT", Indent, Ref("RepeatStatementsSegment"), "UNTIL", Ref("ExpressionSegment"), Dedent, "END", "REPEAT", ) class IfStatementsSegment(BaseSegment): """Statements within a IF... END IF statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#if """ type = "if_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[ "ELSE", "ELSEIF", Sequence("END", "IF"), ], reset_terminators=True, ) class IfStatementSegment(BaseSegment): """IF...END IF statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#if """ type = "if_statement" match_grammar = Sequence( "IF", Ref("ExpressionSegment"), "THEN", Indent, Ref("IfStatementsSegment"), Dedent, AnyNumberOf( Sequence( "ELSEIF", Ref("ExpressionSegment"), "THEN", Indent, Ref("IfStatementsSegment"), Dedent, ), ), Sequence( "ELSE", Indent, Ref("IfStatementsSegment"), Dedent, optional=True, ), "END", "IF", ) class LoopStatementsSegment(BaseSegment): """Statements within a LOOP... END LOOP statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#loop """ type = "loop_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "LOOP")], reset_terminators=True, ) class LoopStatementSegment(BaseSegment): """LOOP...END LOOP statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#loop """ type = "loop_statement" match_grammar = Sequence( "LOOP", Indent, Ref("LoopStatementsSegment"), Dedent, "END", "LOOP", ) class WhileStatementsSegment(BaseSegment): """Statements within a WHILE... END WHILE statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#while """ type = "while_statements" match_grammar = AnyNumberOf( Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "WHILE")], reset_terminators=True, ) class WhileStatementSegment(BaseSegment): """WHILE...END WHILE statement. 
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#while """ type = "while_statement" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), "DO", Indent, Ref("WhileStatementsSegment"), Dedent, "END", "WHILE", ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = Sequence( # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax OneOf("DISTINCT", "ALL", optional=True), Sequence("AS", OneOf("STRUCT", "VALUE"), optional=True), ) # BigQuery allows functions in INTERVAL class IntervalExpressionSegment(ansi.IntervalExpressionSegment): """An interval with a function as value segment.""" match_grammar = Sequence( "INTERVAL", Ref("ExpressionSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment"), Sequence( Ref("DatetimeUnitSegment"), "TO", Ref("DatetimeUnitSegment"), ), ), ) bigquery_dialect.replace( QuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), casefold=str.upper, ), # Add ParameterizedSegment to the ansi NumericLiteralSegment NumericLiteralSegment=OneOf( TypedParser("numeric_literal", LiteralSegment, type="numeric_literal"), Ref("ParameterizedSegment"), ), QuotedLiteralSegment=OneOf( Ref("SingleQuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment"), ), # Add elements to the ansi LiteralGrammar LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ParameterizedSegment"), Ref("SystemVariableSegment"), ] ), PostTableExpressionGrammar=Sequence( Ref("ForSystemTimeAsOfSegment", optional=True), Sequence( "WITH", "OFFSET", Sequence("AS", Ref("SingleIdentifierGrammar"), optional=True), optional=True, ), ), FunctionNameIdentifierSegment=OneOf( # In BigQuery struct() and array() have a special syntax, # so we don't treat them as functions RegexParser( r"[A-Z_][A-Z0-9_]*", CodeSegment, type="function_name_identifier", anti_template=r"^(STRUCT|ARRAY)$", ), RegexParser( r"`[^`]*`", CodeSegment, type="function_name_identifier", ), ), ) class ExtractFunctionNameSegment(BaseSegment): """EXTRACT function name segment. Need to be able to specify this as type `function_name_identifier` within a `function_name` so that linting rules identify it properly. """ type = "function_name" match_grammar: Matchable = StringParser( "EXTRACT", CodeSegment, type="function_name_identifier", ) class ArrayFunctionNameSegment(BaseSegment): """ARRAY function name segment. Need to be able to specify this as type `function_name_identifier` within a `function_name` so that linting rules identify it properly. """ type = "function_name" match_grammar: Matchable = StringParser( "ARRAY", CodeSegment, type="function_name_identifier", ) class DatePartWeekSegment(BaseSegment): """WEEK() in EXTRACT, DATE_DIFF, DATE_TRUNC, LAST_DAY. https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#extract https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_diff https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_trunc https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#last_day """ type = "date_part_week" match_grammar: Matchable = Sequence( "WEEK", Bracketed( OneOf( "SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", ), ), ) class NormalizeFunctionNameSegment(BaseSegment): """NORMALIZE function name segment. 
Need to be able to specify this as type `function_name_identifier` within a `function_name` so that linting rules identify it properly. """ type = "function_name" match_grammar: Matchable = OneOf( StringParser( "NORMALIZE", CodeSegment, type="function_name_identifier", ), StringParser( "NORMALIZE_AND_CASEFOLD", CodeSegment, type="function_name_identifier", ), ) class FunctionNameSegment(ansi.FunctionNameSegment): """Describes the name of a function. This includes any prefix bits, e.g. project, schema or the SAFE keyword. """ match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( # BigQuery Function names can be prefixed by the keyword SAFE to # return NULL instead of error. # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#safe_prefix OneOf("SAFE", Ref("SingleIdentifierGrammar")), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("BracketedSegment")], ), # BigQuery allows whitespaces between the `.` of a function reference or # SAFE prefix. Keeping the explicit `allow_gaps=True` here to # make the distinction from `ansi.FunctionNameSegment` clear. allow_gaps=True, ) class DateTimeFunctionContentsSegment(ansi.DateTimeFunctionContentsSegment): """Datetime function contents segment.""" match_grammar = Sequence( Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref("DatePartWeekSegment"), Ref("FunctionContentsGrammar"), ), ) ) class ExtractFunctionContentsSegment(BaseSegment): """Extract Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( OneOf( Ref("DatetimeUnitSegment"), Ref("DatePartWeekSegment"), Ref("ExtendedDatetimeUnitSegment"), ), "FROM", Ref("ExpressionSegment"), ), ) class NormalizeFunctionContentsSegment(BaseSegment): """Normalize Function Contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("ExpressionSegment"), Sequence( Ref("CommaSegment"), OneOf("NFC", "NFKC", "NFD", "NFKD"), optional=True, ), ), ) class FunctionSegment(ansi.FunctionSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now we treat them the same because they look the same for our purposes. """ match_grammar = Sequence( OneOf( Sequence( # BigQuery EXTRACT allows optional TimeZone Ref("ExtractFunctionNameSegment"), Ref("ExtractFunctionContentsSegment"), ), Sequence( # BigQuery NORMALIZE allows optional normalization_mode # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#normalize Ref("NormalizeFunctionNameSegment"), Ref("NormalizeFunctionContentsSegment"), ), Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Ref( "DatePartFunctionNameSegment", exclude=Ref("ExtractFunctionNameSegment"), ), Ref("DateTimeFunctionContentsSegment"), ), Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("NormalizeFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Ref("FunctionContentsSegment"), ), # Functions returning ARRAYS in BigQuery can have optional # Array Accessor clauses Ref("ArrayAccessorSegment", optional=True), # Functions returning STRUCTs in BigQuery can have the fields # elements referenced (e.g. ".a"), including wildcards (e.g. ".*") # or multiple nested fields (e.g. 
".a.b", or ".a.b.c") Ref("SemiStructuredAccessorSegment", optional=True), Ref("PostFunctionGrammar", optional=True), ), ), allow_gaps=False, ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement.""" match_grammar = Sequence( AnyNumberOf( Sequence( OneOf("DETERMINISTIC", Sequence("NOT", "DETERMINISTIC")), optional=True, ), Sequence( "LANGUAGE", Ref("NakedIdentifierSegment"), Sequence( "OPTIONS", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Anything(), ), ) ), optional=True, ), ), # There is some syntax not implemented here, Sequence( "AS", OneOf( Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Bracketed( OneOf(Ref("ExpressionSegment"), Ref("SelectStatementSegment")) ), ), ), Ref("OptionsSegment", optional=True), ) ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Bigquery.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional EXCEPT or REPLACE clause # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_replace Ref("ExceptClauseSegment", optional=True), Ref("ReplaceClauseSegment", optional=True), ] ) class ExceptClauseSegment(BaseSegment): """SELECT EXCEPT clause.""" type = "select_except_clause" match_grammar = Sequence( "EXCEPT", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """A `BEGIN`, `COMMIT`, or `ROLLBACK` statement.""" match_grammar = Sequence( OneOf("BEGIN", "COMMIT", "ROLLBACK"), Ref.keyword("TRANSACTION", optional=True), terminators=[Ref("DelimiterGrammar")], ) class BeginStatementSegment(BaseSegment): """A `BEGIN...EXCEPTION...END` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#beginexceptionend """ type = "begin_statement" match_grammar = Sequence( Sequence( Ref("SingleIdentifierFullGrammar"), Ref("ColonSegment"), optional=True ), "BEGIN", Sequence( Indent, AnyNumberOf( Sequence( OneOf(Ref("StatementSegment"), Ref("MultiStatementSegment")), Ref("DelimiterGrammar"), ), # We can't terminate on `END` due to possible nesting terminators=["EXCEPTION"], reset_terminators=True, min_times=1, ), Dedent, Sequence( "EXCEPTION", "WHEN", "ERROR", "THEN", Indent, AnyNumberOf( Sequence( OneOf(Ref("StatementSegment"), Ref("MultiStatementSegment")), Ref("DelimiterGrammar"), ), min_times=1, # We can't terminate on `END` due to possible nesting reset_terminators=True, ), Dedent, optional=True, ), optional=True, ), "END", Ref("SingleIdentifierFullGrammar", optional=True), ) class ReplaceClauseSegment(BaseSegment): """SELECT REPLACE clause.""" type = "select_replace_clause" match_grammar = Sequence( "REPLACE", Bracketed( Delimited( Sequence( Ref("BaseExpressionElementGrammar"), "AS", Ref("SingleIdentifierGrammar"), ) ) ), ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. In particular here, this enabled the support for the STRUCT datatypes. 
""" match_grammar = OneOf( # Parameter type Sequence( Ref("DatatypeIdentifierSegment"), # Simple type # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#parameterized_data_types Ref("BracketedArguments", optional=True), ), Sequence("ANY", "TYPE"), # SQL UDFs can specify this "type" Ref("ArrayTypeSegment"), Ref("StructTypeSegment"), ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( # Comma-separated list of field names/types Sequence( OneOf( # ParameterNames can look like Datatypes so can't use # Optional=True here and instead do a OneOf in order # with DataType only first, followed by both. Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment"), ), ), AnyNumberOf(Ref("ColumnConstraintSegment")), Ref("OptionsSegment", optional=True), ), ), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ) class ArrayFunctionContentsSegment(BaseSegment): """Array function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("SelectableGrammar"), ), ) class ArrayExpressionSegment(ansi.ArrayExpressionSegment): """Expression to construct a ARRAY from a subquery. https://cloud.google.com/bigquery/docs/reference/standard-sql/array_functions#array """ match_grammar = Sequence( Ref("ArrayFunctionNameSegment"), Ref("ArrayFunctionContentsSegment"), ) class NamedArgumentSegment(BaseSegment): """Named argument to a function. https://cloud.google.com/bigquery/docs/reference/standard-sql/geography_functions#st_geogfromgeojson """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment.""" type = "semi_structured_expression" match_grammar = Sequence( AnyNumberOf( Sequence( Ref("DotSegment"), OneOf( Ref("SingleIdentifierGrammar"), Ref("StarSegment"), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, min_times=1, ), allow_gaps=True, ) class SplittableObjectReferenceGrammar(ansi.ObjectReferenceSegment): """An extended object reference grammar for BigQuery. This class customizes the splitting of object references (such as table or column names) to handle BigQuery's syntax, where object names may be quoted and can refer to columns, tables, datasets, or projects. In BigQuery, object references can be multi-part (e.g., `project.dataset.table.column`) and may include quoted identifiers that contain keywords or special characters. """ @classmethod def _iter_reference_parts( cls, elem: RawSegment ) -> Generator[ansi.ObjectReferenceSegment.ObjectReferencePart, None, None]: """Extract the elements of a reference and yield.""" # trim on quotes and split out any dots. for part in elem.raw_trimmed().split("."): yield cls.ObjectReferencePart(part, [elem]) class ColumnReferenceSegment(SplittableObjectReferenceGrammar): """A reference to column, field or alias. We override this for BigQuery to allow keywords in structures (using Full segments) and to properly return references for objects. 
Ref: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical "A reserved keyword must be a quoted identifier if it is a standalone keyword or the first component of a path expression. It may be unquoted as the second or later component of a path expression." """ type = "column_reference" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), Sequence( Ref("ObjectReferenceDelimiterGrammar"), Delimited( Ref("SingleIdentifierFullGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), BracketedSegment, ], allow_gaps=False, ), allow_gaps=False, optional=True, ), allow_gaps=False, ) def extract_possible_references(self, level): """Extract possible references of a given level. Overrides the parent-class function. BigQuery's support for things like the following: - Functions that take a table as a parameter (e.g. TO_JSON_STRING) https://cloud.google.com/bigquery/docs/reference/standard-sql/ json_functions#to_json_string - STRUCT means that, without schema information (which SQLFluff does not have), references to data are often ambiguous. """ level = self._level_to_int(level) refs = list(self.iter_raw_references()) if level == self.ObjectReferenceLevel.SCHEMA.value and len(refs) >= 3: return [refs[0]] # pragma: no cover if level == self.ObjectReferenceLevel.TABLE.value: # One part: Could be a table, e.g. TO_JSON_STRING(t) # Two parts: Could be dataset.table or table.column. # Three parts: Could be table.column.struct or dataset.table.column. # Four parts: dataset.table.column.struct # Five parts: project.dataset.table.column.struct # So... return the first 3 parts. return refs[:3] if ( level == self.ObjectReferenceLevel.OBJECT.value and len(refs) >= 3 ): # pragma: no cover # Ambiguous case: The object (i.e. column) could be the first or # second part, so return both. return [refs[1], refs[2]] return super().extract_possible_references(level) # pragma: no cover def extract_possible_multipart_references(self, levels): """Extract possible multipart references, e.g. schema.table.""" levels_tmp = [self._level_to_int(level) for level in levels] min_level = min(levels_tmp) max_level = max(levels_tmp) refs = list(self.iter_raw_references()) if max_level == self.ObjectReferenceLevel.SCHEMA.value and len(refs) >= 3: return [tuple(refs[0 : max_level - min_level + 1])] # Note we aren't handling other possible cases. We'll add these as # needed. return super().extract_possible_multipart_references(levels) class TableReferenceSegment(SplittableObjectReferenceGrammar): """A reference to an object that may contain embedded hyphens.""" type = "table_reference" match_grammar: Matchable = Delimited( Sequence( Ref("SingleCSIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DashSegment"), Ref("NakedCSIdentifierPart"), allow_gaps=False, ), optional=True, ), allow_gaps=False, ), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ], allow_gaps=False, ) def iter_raw_references(self): """Generate a list of reference strings and elements. Each reference is an ObjectReferencePart. 
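For an illustrative name such as my-project.dataset.tbl, the intent is to yield three parts (my-project, dataset and tbl), re-joining the dash-separated pieces into a single part.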
Overrides the base class because hyphens (DashSegment) causes one logical part of the name to be split across multiple elements, e.g. "table-a" is parsed as three segments. """ # For each descendant element, group them, using "dot" elements as a # delimiter. parts = [] elems_for_parts = [] def flush(): nonlocal parts, elems_for_parts result = self.ObjectReferencePart("".join(parts), elems_for_parts) parts = [] elems_for_parts = [] return result for elem in self.recursive_crawl( "identifier", "literal", "dash", "dot", "star" ): if not elem.is_type("dot"): if elem.is_type("identifier"): # Found an identifier (potentially with embedded dots). elem_subparts = elem.raw_trimmed().split(".") for idx, part in enumerate(elem_subparts): # Save each part of the segment. parts.append(part) elems_for_parts.append(elem) if idx != len(elem_subparts) - 1: # For each part except the last, flush. yield flush() else: # For non-identifier segments, save the whole segment. parts.append(elem.raw_trimmed()) elems_for_parts.append(elem) else: yield flush() # Flush any leftovers. if parts: yield flush() class SystemVariableSegment(BaseSegment): """BigQuery supports usage of system-level variables, which are prefixed with @@. These are also used in exception blocks in the @@error object. https://cloud.google.com/bigquery/docs/reference/system-variables https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#beginexceptionend """ type = "system_variable" match_grammar = Ref("DoubleAtSignLiteralSegment") class DeclareStatementSegment(BaseSegment): """Declaration of a variable. https://cloud.google.com/bigquery/docs/reference/standard-sql/scripting#declare """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Delimited(Ref("SingleIdentifierFullGrammar")), OneOf( Ref("DefaultDeclareOptionsGrammar"), Sequence( Ref("DatatypeSegment"), Ref("DefaultDeclareOptionsGrammar", optional=True), ), ), ) class SetStatementSegment(BaseSegment): """Setting an already declared variable. https://cloud.google.com/bigquery/docs/reference/standard-sql/scripting#set """ type = "set_segment" match_grammar = Sequence( "SET", OneOf( Ref("NakedIdentifierSegment"), Bracketed(Delimited(Ref("NakedIdentifierSegment"))), Ref("SystemVariableSegment"), ), Ref("EqualsSegment"), Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Ref("SelectStatementSegment")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Ref("SelectStatementSegment")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ) ) ), Ref("ArrayLiteralSegment"), Ref("ExpressionSegment"), ), ), ) class ExecuteImmediateSegment(BaseSegment): """An EXECUTE IMMEDIATE statement. 
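e.g. (illustrative): EXECUTE IMMEDIATE 'SELECT ? + ?' INTO y USING 1, 3;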
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#execute_immediate """ type = "execute_immediate" match_grammar = Sequence( "EXECUTE", "IMMEDIATE", OptionallyBracketed( OneOf( Ref("QuotedLiteralSegment"), # String Ref("SingleIdentifierFullGrammar"), # Variable Ref("FunctionSegment"), # Function Ref("CaseExpressionSegment"), # Conditional Expression Ref("ExpressionSegment"), # Expression Bracketed(Ref("SelectableGrammar")), # Expression Subquery ) ), Sequence("INTO", Delimited(Ref("SingleIdentifierFullGrammar")), optional=True), Sequence( "USING", Delimited( Sequence( Ref("BaseExpressionElementGrammar"), # The `AS` is required when using an alias in this context Sequence("AS", Ref("SingleIdentifierFullGrammar"), optional=True), ), ), optional=True, ), ) class PartitionBySegment(BaseSegment): """PARTITION BY partition_expression.""" type = "partition_by_segment" match_grammar = Sequence( "PARTITION", "BY", Ref("ExpressionSegment"), ) class ClusterBySegment(BaseSegment): """CLUSTER BY clustering_column_list.""" type = "cluster_by_segment" match_grammar = Sequence( "CLUSTER", "BY", Delimited(Ref("ExpressionSegment")), ) class DefaultCollateSegment(BaseSegment): """DEFAULT COLLATE clause for a table or a dataset. https://cloud.google.com/bigquery/docs/reference/standard-sql/collation-concepts#default_collation """ type = "default_collate" match_grammar: Matchable = Sequence( "DEFAULT", "COLLATE", Ref("LiteralGrammar"), ) class GrantToSegment(BaseSegment): """GRANT TO (grantee_list). https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement """ type = "grant_to_segment" match_grammar: Matchable = Sequence( "GRANT", "TO", Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ) class OptionsSegment(BaseSegment): """OPTIONS clause for a table.""" type = "options_segment" match_grammar = Sequence( "OPTIONS", Bracketed( Delimited( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("BaseExpressionElementGrammar"), ) ) ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint segment. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#table_constraints """ type = "table_constraint" match_grammar = OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), "NOT", "ENFORCED", ), Sequence( Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), "REFERENCES", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar"), "NOT", "ENFORCED", ), ) class ColumnDefinitionSegment(ansi.ColumnDefinitionSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE. Override ANSI support to allow passing of column options """ match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type AnyNumberOf(Ref("ColumnConstraintSegment")), Ref("OptionsSegment", optional=True), ) class ViewColumnDefinitionSegment(ansi.ColumnDefinitionSegment): """A column definition, for view_column_name_list of CREATE VIEW statement.""" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("OptionsSegment", optional=True), ) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. 
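e.g. (illustrative): CREATE SCHEMA IF NOT EXISTS my_project.my_dataset DEFAULT COLLATE 'und:ci' OPTIONS (location = 'us');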
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement """ match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DefaultCollateSegment", optional=True), Ref("OptionsSegment", optional=True), ) class CreateTableFunctionStatementSegment(BaseSegment): """A `CREATE TABLE FUNCTION` statement.""" # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement type = "create_table_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Column list for input parameters Sequence( Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, optional=True, ) ), optional=True, ), # Column list for the schema of the table that the function returns Sequence( "RETURNS", "TABLE", Bracketed( Delimited( # Comma-separated list of field names/types Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment"), ), ), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), ) class DropTableFunctionStatementSegment(BaseSegment): """A `DROP TABLE FUNCTION` statement.""" type = "drop_table_function_statement" match_grammar: Matchable = Sequence( "DROP", "TABLE", "FUNCTION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """`CREATE TABLE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( OneOf("COPY", "LIKE", "CLONE"), Ref("TableReferenceSegment"), Ref("ForSystemTimeAsOfSegment", optional=True), optional=True, ), # Column list Sequence( Bracketed( Delimited( OneOf( Ref("ColumnDefinitionSegment"), Ref("TableConstraintSegment"), ), allow_trailing=True, ) ), optional=True, ), Ref("DefaultCollateSegment", optional=True), Ref("PartitionBySegment", optional=True), Ref("ClusterBySegment", optional=True), Ref("OptionsSegment", optional=True), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement.""" match_grammar = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_collate_statement Sequence( "SET", OneOf( Ref("OptionsSegment"), Ref("DefaultCollateSegment"), ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_add_column_statement Delimited( Sequence( "ADD", "COLUMN", Ref("IfNotExistsGrammar", optional=True), Ref("ColumnDefinitionSegment"), ), allow_trailing=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_add_foreign_key_statement # 
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_add_primary_key_statement Delimited( OneOf( Sequence( "ADD", Sequence( "CONSTRAINT", Ref("IfNotExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), optional=True, ), "FOREIGN", "KEY", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), ), "REFERENCES", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), ), "NOT", "ENFORCED", ), Sequence( "ADD", "PRIMARY", "KEY", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), ), "NOT", "ENFORCED", ), ), allow_trailing=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_to_statement Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_column_statement Delimited( Sequence( "RENAME", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name "TO", Ref("SingleIdentifierGrammar"), # Column name ), allow_trailing=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_drop_column_statement Delimited( Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_drop_constraint_statement Delimited( Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Constraint name ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_drop_primary_key_statement Delimited( Sequence( "DROP", "PRIMARY", "KEY", Ref("IfExistsGrammar", optional=True), ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_column_set_options_statement Delimited( Sequence( "ALTER", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name OneOf( Sequence( "SET", OneOf( Ref("OptionsSegment"), Sequence( "DATA", "TYPE", Ref("DatatypeSegment"), ), Sequence( "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), ), ), ), ), Sequence("DROP", OneOf("DEFAULT", Sequence("NOT", "NULL"))), ), ), ), ), ) class AlterSchemaStatementSegment(BaseSegment): """A `ALTER SCHEMA` statement.""" type = "alter_schema_statement" match_grammar: Matchable = Sequence( "ALTER", "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "SET", OneOf( # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_collate_statement Ref("DefaultCollateSegment"), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_set_options_statement Ref("OptionsSegment"), ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_add_replica_statement Sequence( "ADD", "REPLICA", Ref("BaseExpressionElementGrammar"), Ref("OptionsSegment", optional=True), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_drop_replica_statement Sequence( "DROP", "REPLICA", Ref("BaseExpressionElementGrammar"), ), ), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement.
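e.g. (illustrative): CREATE EXTERNAL TABLE my_dataset.ext_sales WITH PARTITION COLUMNS OPTIONS (format = 'PARQUET', uris = ['gs://my-bucket/sales/*']);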
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "EXTERNAL", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, ), optional=True, ), # Although not specified in the BigQuery documentation optional arguments for # CREATE EXTERNAL TABLE statements can be ordered arbitrarily. AnyNumberOf( # connection names have the same rules as table names in BigQuery Sequence("WITH", "CONNECTION", Ref("TableReferenceSegment"), optional=True), Sequence( "WITH", "PARTITION", "COLUMNS", Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, ), optional=True, ), optional=True, ), Ref("OptionsSegment", optional=True), ), ) class CreateSnapshotTableStatementSegment(BaseSegment): """A `CREATE SNAPSHOT TABLE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement """ type = "create_snapshot_table_statement" match_grammar = Sequence( "CREATE", "SNAPSHOT", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), "CLONE", Ref("TableReferenceSegment"), Ref("ForSystemTimeAsOfSegment", optional=True), Ref("OptionsSegment", optional=True), ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#view_option_list """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Bracketed( Delimited( Ref("ViewColumnDefinitionSegment"), ), optional=True, ), Ref("OptionsSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_column_set_options_statement """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "SET", Ref("OptionsSegment"), ), Delimited( Sequence( "ALTER", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name "SET", Ref("OptionsSegment"), ), ), ), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("PartitionBySegment", optional=True), Ref("ClusterBySegment", optional=True), Ref("OptionsSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class CreateMaterializedViewAsReplicaOfStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW AS REPLICA OF` statement. 
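e.g. (illustrative): CREATE MATERIALIZED VIEW my_project.my_dataset.mv_replica AS REPLICA OF source_project.source_dataset.mv;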
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_as_replica_of_statement """ type = "create_materialized_view_as_replica_of_statement" match_grammar = Sequence( "CREATE", "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), Ref("OptionsSegment", optional=True), "AS", "REPLICA", "OF", Ref("TableReferenceSegment"), ) class AlterMaterializedViewStatementSegment(BaseSegment): """A `ALTER MATERIALIZED VIEW SET OPTIONS` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement """ type = "alter_materialized_view_set_options_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class DropTableStatementSegment(BaseSegment): """A `DROP [SNAPSHOT | EXTERNAL] TABLE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement """ type = "drop_table_statement" match_grammar: Matchable = Sequence( "DROP", OneOf("SNAPSHOT", "EXTERNAL", optional=True), "TABLE", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP [TABLE] FUNCTION` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function """ type = "drop_function_statement" match_grammar = Sequence( "DROP", Sequence("TABLE", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Ref("ProcedureNameSegment"), ) class UndropSchemaStatementSegment(BaseSegment): """A `UNDROP SCHEMA` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#undrop_schema_statement """ type = "undrop_schema_statement" match_grammar: Matchable = Sequence( "UNDROP", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class DropMaterializedViewStatementSegment(BaseSegment): """A `DROP MATERIALIZED VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class ParameterizedSegment(BaseSegment): """BigQuery allows named and argument based parameters to prevent SQL Injection. https://cloud.google.com/bigquery/docs/parameterized-queries """ type = "parameterized_expression" match_grammar = OneOf(Ref("AtSignLiteralSegment"), Ref("QuestionMarkSegment")) class PivotForClauseSegment(BaseSegment): """The FOR part of a PIVOT expression. 
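In the illustrative expression PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2')), this segment matches the quarter part between FOR and IN.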
Needed to avoid BaseExpressionElementGrammar swallowing up the IN part """ type = "pivot_for_clause" match_grammar = Sequence( Ref("BaseExpressionElementGrammar"), terminators=["IN"], parse_mode=ParseMode.GREEDY, ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression. https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#pivot_operator """ type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Delimited( Sequence( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), "FOR", Ref("PivotForClauseSegment"), "IN", Bracketed( Delimited( Sequence( Ref("LiteralGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) ), ), ) class UnpivotAliasExpressionSegment(BaseSegment): """In BigQuery, UNPIVOT aliases can be single or double quoted, or numeric.""" type = "alias_expression" match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), Dedent, ) class FromUnpivotExpressionSegment(BaseSegment): """An UNPIVOT expression. https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#unpivot_operator """ type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Sequence( OneOf("INCLUDE", "EXCLUDE"), "NULLS", optional=True, ), OneOf( # single column unpivot Bracketed( Ref("SingleIdentifierGrammar"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed( Delimited( Sequence( Delimited(Ref("SingleIdentifierGrammar")), Ref("UnpivotAliasExpressionSegment", optional=True), ), ), ), ), # multi column unpivot Bracketed( Bracketed( Delimited( Ref("SingleIdentifierGrammar"), min_delimiters=1, ), ), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed( Delimited( Sequence( Bracketed( Delimited( Ref("SingleIdentifierGrammar"), min_delimiters=1, ), ), Ref("UnpivotAliasExpressionSegment", optional=True), ), ), ), ), ), ) class InsertStatementSegment(ansi.InsertStatementSegment): """A `INSERT` statement. N.B. not a complete implementation. """ match_grammar = Sequence( "INSERT", Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("SelectableGrammar"), ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A sampling expression. https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#tablesample_operator """ match_grammar = Sequence( "TABLESAMPLE", "SYSTEM", Bracketed(Ref("NumericLiteralSegment"), "PERCENT") ) class MergeMatchSegment(ansi.MergeMatchSegment): """Contains BigQuery specific merge operations. Overriding ANSI to allow `NOT MATCHED BY SOURCE` statements """ type = "merge_match" match_grammar: Matchable = AnyNumberOf( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedByTargetClauseSegment"), Ref("MergeNotMatchedBySourceClauseSegment"), min_times=1, ) class MergeNotMatchedByTargetClauseSegment(ansi.MergeNotMatchedClauseSegment): """The `WHEN NOT MATCHED [BY TARGET]` clause within a `MERGE` statement. Overriding ANSI to allow optional `NOT MATCHED [BY TARGET]` statements """ type = "not_matched_by_target_clause" match_grammar: Matchable = Sequence( "WHEN", "NOT", "MATCHED", Sequence("BY", "TARGET", optional=True), Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, Ref("MergeInsertClauseSegment"), Dedent, ) class MergeNotMatchedBySourceClauseSegment(ansi.MergeMatchedClauseSegment): """The `WHEN NOT MATCHED BY SOURCE` clause within a `MERGE` statement.
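e.g. (illustrative): WHEN NOT MATCHED BY SOURCE AND target.is_stale THEN DELETE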
It inherits from `ansi.MergeMatchedClauseSegment` because NotMatchedBySource clause is conceptually more close to a Matched clause than to NotMatched clause, i.e. it gets combined with an UPDATE or DELETE, not with an INSERT. """ type = "merge_when_matched_clause" match_grammar: Matchable = Sequence( "WHEN", "NOT", "MATCHED", "BY", "SOURCE", Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement. Overriding ANSI to allow `INSERT ROW` statements """ match_grammar: Matchable = OneOf( Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), ), Sequence("INSERT", "ROW"), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#delete_statement """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar: Matchable = Sequence( "DELETE", Ref.keyword("FROM", optional=True), Ref("TableReferenceSegment"), Ref("AliasExpressionSegment", optional=True), Ref("WhereClauseSegment", optional=True), ) class ExportStatementSegment(BaseSegment): """`EXPORT` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement """ type = "export_statement" match_grammar: Matchable = Sequence( "EXPORT", "DATA", Sequence("WITH", "CONNECTION", Ref("ObjectReferenceSegment"), optional=True), "OPTIONS", Bracketed( Delimited( # String options # Note: adding as own type, rather than keywords as convention with # Bigquery, as per the docs, is to put Keywords in uppercase, and these # in lowercase. Sequence( OneOf( StringParser( "compression", CodeSegment, type="export_option", ), StringParser( "field_delimiter", CodeSegment, type="export_option", ), StringParser( "format", CodeSegment, type="export_option", ), StringParser( "header", CodeSegment, type="export_option", ), StringParser( "overwrite", CodeSegment, type="export_option", ), StringParser( "uri", CodeSegment, type="export_option", ), StringParser( "use_avro_logical_types", CodeSegment, type="export_option", ), ), Ref("EqualsSegment"), Ref("BaseExpressionElementGrammar"), ), # Bool options # Note: adding as own type, rather than keywords as convention with # Bigquery, as per the docs, is to put Keywords in uppercase, and these # in lowercase. Sequence( OneOf( StringParser( "header", CodeSegment, type="export_option", ), StringParser( "overwrite", CodeSegment, type="export_option", ), StringParser( "use_avro_logical_types", CodeSegment, type="export_option", ), ), Ref("EqualsSegment"), OneOf("TRUE", "FALSE"), ), ), ), "AS", Ref("SelectableGrammar"), ) class LoadDataStatementSegment(BaseSegment): """`LOAD DATA` statement. 
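e.g. (illustrative): LOAD DATA INTO my_dataset.tbl FROM FILES (format = 'CSV', uris = ['gs://my-bucket/data/*.csv']);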
https://cloud.google.com/bigquery/docs/reference/standard-sql/load-statements """ type = "load_data_statement" match_grammar: Matchable = Sequence( "LOAD", "DATA", OneOf("INTO", "OVERWRITE"), Sequence( Ref("TemporaryGrammar"), "TABLE", optional=True, ), Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("ColumnDefinitionSegment"), Ref("TableConstraintSegment"), allow_trailing=True, ) ), optional=True, ), Sequence( Sequence("OVERWRITE", optional=True), "PARTITIONS", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("BaseExpressionElementGrammar"), ) ) ), optional=True, ), Ref("PartitionBySegment", optional=True), Ref("ClusterBySegment", optional=True), Ref("OptionsSegment", optional=True), Sequence( "FROM", "FILES", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("BaseExpressionElementGrammar"), ) ) ), ), Sequence( "WITH", "PARTITION", "COLUMNS", Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), ), allow_trailing=True, ), optional=True, ), optional=True, ), Sequence("WITH", "CONNECTION", Ref("ObjectReferenceSegment"), optional=True), ) class ProcedureNameSegment(BaseSegment): """Procedure name, including any prefix bits, e.g. project or schema.""" type = "procedure_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), ), # Base procedure name OneOf( Ref("ProcedureNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), ), allow_gaps=False, ) class ProcedureParameterListSegment(BaseSegment): """The parameters for a procedure, i.e. `(string, number)`.""" # Procedure parameter list (based on FunctionsParameterListGrammar) type = "procedure_parameter_list" match_grammar = Bracketed( Delimited( Ref("ProcedureParameterGrammar"), optional=True, ) ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure """ type = "create_procedure_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "PROCEDURE", Ref("IfNotExistsGrammar", optional=True), Ref("ProcedureNameSegment"), Ref("ProcedureParameterListSegment"), Ref("OptionsSegment", optional=True), Ref("BeginStatementSegment", reset_terminators=True), ) class CallStatementSegment(BaseSegment): """A `CALL` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call """ type = "call_statement" match_grammar: Matchable = Sequence( "CALL", Ref("ProcedureNameSegment"), Bracketed( Delimited( Ref("ExpressionSegment"), optional=True, ), ), ) class ReturnStatementSegment(BaseSegment): """A `RETURN` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#return """ type = "return_statement" match_grammar: Matchable = Sequence( "RETURN", ) class BreakStatementSegment(BaseSegment): """A `BREAK` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#break """ type = "break_statement" match_grammar: Matchable = Sequence( "BREAK", ) class LeaveStatementSegment(BaseSegment): """A `LEAVE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#leave """ type = "leave_statement" match_grammar: Matchable = Sequence( "LEAVE", ) class ContinueStatementSegment(BaseSegment): """A `CONTINUE` statement.
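CONTINUE and ITERATE are synonyms in BigQuery scripting, so the grammar accepts either keyword, e.g. (illustrative) to restart an enclosing LOOP.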
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#continue """ type = "continue_statement" match_grammar: Matchable = OneOf( "CONTINUE", "ITERATE", ) class RaiseStatementSegment(BaseSegment): """A `RAISE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#raise """ type = "raise_statement" match_grammar: Matchable = Sequence( "RAISE", Sequence( "USING", "MESSAGE", Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True, ), ) class AlterOrganizationStatementSegment(BaseSegment): """A `ALTER ORGANIZATION` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_organization_set_options_statement """ type = "alter_organization_statement" match_grammar: Matchable = Sequence( "ALTER", "ORGANIZATION", "SET", Ref("OptionsSegment"), ) class AlterProjectStatementSegment(BaseSegment): """A `ALTER PROJECT` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_project_set_options_statement """ type = "alter_project_statement" match_grammar: Matchable = Sequence( "ALTER", "PROJECT", Ref("TableReferenceSegment"), # project_id "SET", Ref("OptionsSegment"), ) class CreateSearchIndexStatementSegment(BaseSegment): """A `CREATE SEARCH INDEX` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement """ type = "create_search_index_statement" match_grammar: Matchable = Sequence( "CREATE", "SEARCH", "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Bracketed( OneOf( Sequence("ALL", "COLUMNS"), Delimited( Ref("IndexColumnDefinitionSegment"), ), ) ), Ref("OptionsSegment", optional=True), ) class DropSearchIndexStatementSegment(BaseSegment): """A `DROP SEARCH INDEX` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index """ type = "drop_search_index_statement" match_grammar: Matchable = Sequence( "DROP", "SEARCH", "INDEX", Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), ) class CreateVectorIndexStatementSegment(BaseSegment): """A `CREATE VECTOR INDEX` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_vector_index_statement """ type = "create_vector_index_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VECTOR", "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), ), Ref("OptionsSegment"), ) class DropVectorIndexStatementSegment(BaseSegment): """A `DROP VECTOR INDEX` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_vector_index """ type = "drop_vector_index_statement" match_grammar: Matchable = Sequence( "DROP", "VECTOR", "INDEX", Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), ) class CreateRowAccessPolicyStatementSegment(BaseSegment): """A `CREATE ROW ACCESS POLICY` statement. 
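e.g. (illustrative): CREATE ROW ACCESS POLICY us_filter ON my_dataset.sales GRANT TO ('user:alice@example.com') FILTER USING (region = 'US');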
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement """ type = "create_row_access_policy_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "ROW", "ACCESS", "POLICY", Ref("IfNotExistsGrammar", optional=True), Ref("NakedIdentifierSegment"), # row_access_policy_name "ON", Ref("TableReferenceSegment"), Ref("GrantToSegment", optional=True), "FILTER", "USING", Bracketed( Ref("ExpressionSegment"), ), ) class DropRowAccessPolicyStatementSegment(BaseSegment): """A `DROP ROW ACCESS POLICY` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement """ type = "drop_row_access_policy_statement" match_grammar: Matchable = Sequence( "DROP", "ROW", "ACCESS", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("NakedIdentifierSegment"), "ON", Ref("TableReferenceSegment"), ) class AlterBiCapacityStatementSegment(BaseSegment): """A `ALTER BI_CAPACITY` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_bi_capacity_set_options_statement """ type = "alter_bi_capacity_statement" match_grammar: Matchable = Sequence( "ALTER", "BI_CAPACITY", Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class CreateCapacityStatementSegment(BaseSegment): """A `CREATE CAPACITY` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_capacity_statement """ type = "create_capacity_statement" match_grammar: Matchable = Sequence( "CREATE", "CAPACITY", Ref("TableReferenceSegment"), Ref("OptionsSegment"), ) class AlterCapacityStatementSegment(BaseSegment): """A `ALTER CAPACITY` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_capacity_set_options_statement """ type = "alter_capacity_statement" match_grammar: Matchable = Sequence( "ALTER", "CAPACITY", Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class DropCapacityStatementSegment(BaseSegment): """A `DROP CAPACITY` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_capacity_statement """ type = "drop_capacity_statement" match_grammar: Matchable = Sequence( "DROP", "CAPACITY", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class CreateReservationStatementSegment(BaseSegment): """A `CREATE RESERVATION` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_reservation_statement """ type = "create_reservation_statement" match_grammar: Matchable = Sequence( "CREATE", "RESERVATION", Ref("TableReferenceSegment"), Ref("OptionsSegment"), ) class AlterReservationStatementSegment(BaseSegment): """A `ALTER RESERVATION` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_reservation_set_options_statement """ type = "alter_reservation_statement" match_grammar: Matchable = Sequence( "ALTER", "RESERVATION", Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class DropReservationStatementSegment(BaseSegment): """A `DROP RESERVATION` statement. 
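e.g. (illustrative): DROP RESERVATION IF EXISTS admin_project.region-us.prod;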
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_reservation_statement """ type = "drop_reservation_statement" match_grammar: Matchable = Sequence( "DROP", "RESERVATION", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class CreateAssignmentStatementSegment(BaseSegment): """A `CREATE ASSIGNMENT` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_assignment_statement """ type = "create_assignment_statement" match_grammar: Matchable = Sequence( "CREATE", "ASSIGNMENT", Ref("TableReferenceSegment"), Ref("OptionsSegment"), ) class DropAssignmentStatementSegment(BaseSegment): """A `DROP ASSIGNMENT` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_assignment_statement """ type = "drop_assignment_statement" match_grammar: Matchable = Sequence( "DROP", "ASSIGNMENT", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class PipeStatementSegment(BaseSegment): """A `PIPE` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "pipe_statement" match_grammar: Matchable = OneOf( Sequence( Ref("FromClauseSegment"), AnyNumberOf(Ref("PipeOperatorClauseSegment")), ), Sequence( Ref("SelectableGrammar"), Ref("AliasExpressionSegment", optional=True), AnyNumberOf(Ref("PipeOperatorClauseSegment"), min_times=1), ), ) class PipeOperatorClauseSegment(BaseSegment): """A `PIPE` operator clause. https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "pipe_operator_clause" match_grammar: Matchable = Sequence( Ref("PipeOperatorSegment"), OneOf( Ref("SelectClauseSegment"), Ref("ExtendClauseSegment"), Ref("SetClauseListSegment"), Ref("DropColumnClauseSegment"), Ref("RenameColumnClauseSegment"), Ref("AliasExpressionSegment"), Ref("WhereClauseSegment"), Ref("LimitClauseSegment"), Ref("OrderByClauseSegment"), Ref("AggregateClauseSegment"), Ref("SetOperatorClauseSegment"), Ref("JoinClauseSegment"), Ref("CallOperatorSegment"), Ref("SamplingExpressionSegment"), Ref("PivotOperatorSegment"), Ref("UnpivotOperatorSegment"), ), ) class ExtendClauseSegment(BaseSegment): """An `EXTEND` clause. https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "extend_clause" match_grammar: Matchable = Sequence( "EXTEND", Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), ) class DropColumnClauseSegment(BaseSegment): """A `DROP COLUMN` clause. https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "drop_column_clause" match_grammar: Matchable = Sequence( "DROP", Delimited(Ref("ColumnReferenceSegment")), ) class RenameColumnClauseSegment(BaseSegment): """A `RENAME COLUMN` clause. 
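e.g. (illustrative, within a pipe query): |> RENAME old_name AS new_name, misspelt AS corrected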
https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "rename_column_clause" match_grammar: Matchable = Sequence( "RENAME", Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("AliasExpressionSegment"), ), ), ) class GroupAndOrderByClauseSegment(BaseSegment): """A `GROUP [AND ORDER] BY` clause.""" type = "group_and_orderby_clause" match_grammar: Matchable = Sequence( "GROUP", Sequence("AND", "ORDER", optional=True), "BY", Indent, OneOf( "ALL", Ref("GroupingSetsClauseSegment"), Ref("CubeRollupClauseSegment"), # We could replace this next bit with a GroupingExpressionList # reference (renaming that to a more generic name), to avoid # repeating this bit of code, but I would rather keep it flat # to avoid changing regular `GROUP BY` clauses. Sequence( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), Ref("AliasExpressionSegment", optional=True), Sequence( OneOf("ASC", "DESC"), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), optional=True, ), ), ), ), ), Dedent, ) class AggregateClauseSegment(BaseSegment): """An `AGGREGATE` clause. https://cloud.google.com/bigquery/docs/reference/standard-sql/pipe-syntax """ type = "aggregate_clause" match_grammar: Matchable = Sequence( "AGGREGATE", Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), Sequence( OneOf("ASC", "DESC"), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), optional=True, ), ), ), Ref("GroupAndOrderByClauseSegment", optional=True), ) class SetOperatorClauseSegment(BaseSegment): """A UNION, INTERSECT, or EXCEPT clause.""" type = "set_operator_clause" match_grammar: Matchable = Sequence( Ref("SetOperatorSegment"), Delimited(Ref("NonSetSelectableGrammar")), ) class CallOperatorSegment(BaseSegment): """A Call operator.""" type = "call_operator" match_grammar: Matchable = Sequence( Ref("CallStatementSegment"), Ref("AliasExpressionSegment", optional=True), ) class PivotOperatorSegment(BaseSegment): """A Pivot operator.""" type = "pivot_operator" match_grammar: Matchable = Sequence( Ref("FromPivotExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ) class UnpivotOperatorSegment(BaseSegment): """An Unpivot operator.""" type = "unpivot_operator" match_grammar: Matchable = Sequence( Ref("FromUnpivotExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_bigquery_keywords.py000066400000000000000000000050541503426445100253700ustar00rootroot00000000000000"""A list of all BigQuery SQL key words.""" # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords bigquery_reserved_keywords = """AGGREGATE ALL AND ANY ARRAY AS ASC ASSERT_ROWS_MODIFIED AT BETWEEN BY CASE CAST CLONE COLLATE CONTAINS CORRESPONDING CREATE CROSS CUBE CURRENT DEFAULT DEFINE DESC DISTINCT ELSE END ENUM ESCAPE EXCEPT EXCLUDE EXISTS EXTEND FALSE FETCH FOLLOWING FOR FROM FULL GROUP GROUPING GROUPS HASH HAVING IF IGNORE IN INCLUDE INNER INTERSECT INTERVAL INTO IS JOIN LATERAL LEFT LIKE LIMIT LOOKUP MERGE NEW NO NOT NULL NULLS OF ON OR ORDER OUTER OVER PARTITION PIVOT PRECEDING PRIMARY PROTO RANGE RECURSIVE RESPECT RIGHT ROLLUP ROWS SELECT SET SOME STRUCT TABLESAMPLE THEN TO TREAT TRUE UNBOUNDED UNION UNNEST UNPIVOT USING WHEN WHERE WINDOW WITH WITHIN""" # Note BigQuery doesn't have a list of Unreserved Keywords 
# so these are just ones we need to allow parsing to work bigquery_unreserved_keywords = """ACCESS ACCOUNT ADD ADMIN AFTER ALTER APPLY ASSIGNMENT ASSERT AUTO_INCREMENT BEGIN BERNOULLI BI_CAPACITY BINARY BINDING BREAK CACHE CALL CAPACITY CASCADE CHAIN CHARACTER CHECK CLONE CLUSTER COLUMN COLUMNS COMMENT COMMIT CONCURRENTLY CONTINUE CONNECT CONNECTION CONSTRAINT COPY CURRENT_USER CYCLE DATA DATABASE DATE DATETIME DECLARE DELETE DESCRIBE DETERMINISTIC DO DOMAIN DOUBLE DROP ELSEIF ENFORCED ERROR EXCEPTION EXECUTE EXECUTION EXPLAIN EXPORT EXTENSION EXTERNAL FILE FILES FILTER FIRST FOREIGN FORMAT FRIDAY FUNCTION FUTURE GRANT GRANTED GRANTS HOUR ILIKE IMMEDIATE IMPORTED IN INCREMENT INDEX INOUT INSERT INTEGRATION KEY ITERATE LANGUAGE LARGE LAST LEAVE LOAD LOOP MANAGE MASKING MATCHED MATERIALIZED MAX MAXVALUE MESSAGE MIN MINUS MINVALUE ML MODEL MODIFY MONDAY MONITOR NAME NAN NFC NFKC NFD NFKD NOCACHE NOCYCLE NOORDER OBJECT OFFSET OPERATE OPTION OPTIONS ORDINAL ORGANIZATION OUT OVERLAPS OVERWRITE OWNERSHIP PARTITIONS PERCENT PIPE POLICY PRECISION PRIMARY PRIOR PRIVILEGES PROCEDURE PROJECT PUBLIC QUALIFY QUARTER RAISE READ REFERENCE_USAGE REFERENCES RENAME REPEAT REPEATABLE REPLACE REPLICA RESERVATION RESOURCE RESTRICT RETURN RETURNS REVOKE RLIKE ROLE ROLLBACK ROW ROUTINE SAFE SATURDAY SCHEMA SCHEMAS SEARCH SECOND SEPARATOR SERVER SEQUENCE SESSION_USER SETS SHARE SNAPSHOT SOURCE STAGE START STREAM STRICT SUNDAY SYSTEM SYSTEM_TIME TABLE TABLESPACE TARGET TASK TEMP TEMPORARY THURSDAY TIME TIMESTAMP TRANSACTION TRANSIENT TRIGGER TRUNCATE TUESDAY TYPE UNDROP UNIQUE UNSIGNED UNTIL UPDATE USAGE USE USE_ANY_ROLE USER VALUE VALUES VARYING VECTOR VERSION VIEW WAREHOUSE WEDNESDAY WEEK WHILE WITHOUT WORK WRAPPER WRITE ZONE""" sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_clickhouse.py000066400000000000000000002044611503426445100237460ustar00rootroot00000000000000"""The clickhouse dialect. https://clickhouse.com/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseSegment, Bracketed, CodeSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_clickhouse_keywords import ( FORMAT_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") clickhouse_dialect = ansi_dialect.copy_as( "clickhouse", formatted_name="ClickHouse", docstring="""**Default Casing**: Clickhouse is case sensitive throughout, regardless of quoting. An unquoted reference to an object using the wrong case will raise an :code:`UNKNOWN_IDENTIFIER` error. **Quotes**: String Literals: ``''``, Identifiers: ``""`` or |back_quotes|. Note as above, that because identifiers are always resolved case sensitively, the only reason for quoting identifiers is when they contain invalid characters or reserved keywords. 
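For example (illustrative), a column created as myCol cannot be referenced as MYCOL, and quoting it as "myCol" does not change how it resolves.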
The dialect for `ClickHouse <https://clickhouse.com/>`_.""", ) clickhouse_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) clickhouse_dialect.insert_lexer_matchers( # https://clickhouse.com/docs/en/sql-reference/functions#higher-order-functions---operator-and-lambdaparams-expr-function [StringLexer("lambda", r"->", SymbolSegment)], before="newline", ) clickhouse_dialect.patch_lexer_matchers( [ RegexLexer( "double_quote", r'"([^"\\]|""|\\.)*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"\\]|""|\\.)*)"', 1), "escape_replacements": [(r'(""|\\")', '"')], }, ), RegexLexer( "back_quote", r"`(?:[^`\\]|``|\\.)*`", CodeSegment, segment_kwargs={ "quoted_value": (r"`((?:[^`\\]|``|\\.)*)`", 1), "escape_replacements": [(r"(``|\\`)", "`")], }, ), ] ) clickhouse_dialect.add( BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", ), LambdaFunctionSegment=TypedParser("lambda", SymbolSegment, type="lambda"), ) clickhouse_dialect.replace( BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add Lambda Function Ref("LambdaFunctionSegment"), ), # https://clickhouse.com/docs/en/sql-reference/statements/select/join/#supported-types-of-join JoinTypeKeywordsGrammar=Sequence( Ref.keyword("GLOBAL", optional=True), OneOf( # This case INNER [ANY,ALL] JOIN Sequence("INNER", OneOf("ALL", "ANY", optional=True)), # This case [ANY,ALL] INNER JOIN Sequence(OneOf("ALL", "ANY", optional=True), "INNER"), # This case FULL ALL OUTER JOIN Sequence( "FULL", Ref.keyword("ALL", optional=True), Ref.keyword("OUTER", optional=True), ), # This case ALL FULL OUTER JOIN Sequence( Ref.keyword("ALL", optional=True), "FULL", Ref.keyword("OUTER", optional=True), ), # This case LEFT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN Sequence( "LEFT", OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), "ASOF", optional=True, ), Ref.keyword("OUTER", optional=True), ), # This case [ANTI,SEMI,ANY,ASOF] LEFT JOIN Sequence( OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), "ASOF", ), "LEFT", ), # This case RIGHT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN Sequence( "RIGHT", OneOf( "OUTER", "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), optional=True, ), Ref.keyword("OUTER", optional=True), ), # This case [OUTER,ANTI,SEMI,ANY] RIGHT JOIN Sequence( OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), optional=True, ), "RIGHT", ), # This case ASOF JOIN "ASOF", # This case ANY JOIN "ANY", # This case ALL JOIN "ALL", ), ), JoinUsingConditionGrammar=Sequence( "USING", Conditional(Indent, indented_using_on=False), Delimited( OneOf( Bracketed( Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Delimited(Ref("SingleIdentifierGrammar")), ), ), Conditional(Dedent, indented_using_on=False), ), ConditionalCrossJoinKeywordsGrammar=Nothing(), UnconditionalCrossJoinKeywordsGrammar=Sequence( Ref.keyword("GLOBAL", optional=True), Ref.keyword("CROSS"), ), HorizontalJoinKeywordsGrammar=Sequence( Ref.keyword("GLOBAL", optional=True), Ref.keyword("PASTE"), ), NaturalJoinKeywordsGrammar=Nothing(), JoinLikeClauseGrammar=Sequence( AnyNumberOf( Ref("ArrayJoinClauseSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), QuotedLiteralSegment=OneOf( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), # Drop casefold from ANSI, clickhouse is always case sensitive, even when
unquoted. NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[a-zA-Z_][0-9a-zA-Z_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), InOperatorGrammar=Sequence( Ref.keyword("GLOBAL", optional=True), Ref.keyword("NOT", optional=True), "IN", OneOf( Ref("FunctionSegment"), # E.g. IN tuple(1, 2) Ref("ArrayLiteralSegment"), # E.g. IN [1, 2] Ref("TupleSegment"), # E.g. IN (1, 2) Ref("SingleIdentifierGrammar"), # E.g. IN TABLE, IN CTE Bracketed( OneOf( Delimited( Ref("Expression_A_Grammar"), ), Ref("SelectableGrammar"), ), parse_mode=ParseMode.GREEDY, ), ), ), SelectClauseTerminatorGrammar=ansi_dialect.get_grammar( "SelectClauseTerminatorGrammar" ).copy( insert=[ Ref.keyword("PREWHERE"), Ref.keyword("INTO"), Ref.keyword("FORMAT"), ], before=Ref.keyword("WHERE"), ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar("FromClauseTerminatorGrammar") .copy( insert=[ Ref.keyword("PREWHERE"), Ref.keyword("INTO"), Ref.keyword("FORMAT"), ], before=Ref.keyword("WHERE"), ) .copy(insert=[Ref("SettingsClauseSegment")]), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIME", "TIMESTAMP"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), AlterTableDropColumnGrammar=Sequence( Ref("OnClusterClauseSegment", optional=True), "DROP", Ref.keyword("COLUMN"), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), ) # Set the datetime units clickhouse_dialect.sets("datetime_units").clear() clickhouse_dialect.sets("datetime_units").update( [ # https://github.com/ClickHouse/ClickHouse/blob/1cdccd527f0cbf5629b21d29970e28d5156003dc/src/Parsers/parseIntervalKind.cpp#L8 "NANOSECOND", "NANOSECONDS", "SQL_TSI_NANOSECOND", "NS", "MICROSECOND", "MICROSECONDS", "SQL_TSI_MICROSECOND", "MCS", "MILLISECOND", "MILLISECONDS", "SQL_TSI_MILLISECOND", "MS", "SECOND", "SECONDS", "SQL_TSI_SECOND", "SS", "S", "MINUTE", "MINUTES", "SQL_TSI_MINUTE", "MI", "N", "HOUR", "HOURS", "SQL_TSI_HOUR", "HH", "H", "DAY", "DAYS", "SQL_TSI_DAY", "DD", "D", "WEEK", "WEEKS", "SQL_TSI_WEEK", "WK", "WW", "MONTH", "MONTHS", "SQL_TSI_MONTH", "MM", "M", "QUARTER", "QUARTERS", "SQL_TSI_QUARTER", "QQ", "Q", "YEAR", "YEARS", "SQL_TSI_YEAR", "YYYY", "YY", ] ) class IntoOutfileClauseSegment(BaseSegment): """An `INTO OUTFILE` clause like in `SELECT`.""" type = "into_outfile_clause" match_grammar: Matchable = Sequence( "INTO", "OUTFILE", Ref("QuotedLiteralSegment"), Ref("FormatClauseSegment", optional=True), ) class FormatClauseSegment(BaseSegment): """A `FORMAT` clause like in `SELECT`.""" type = "format_clause" match_grammar: Matchable = Sequence( "FORMAT", OneOf(*[Ref.keyword(allowed_format) for allowed_format in FORMAT_KEYWORDS]), Ref("SettingsClauseSegment", optional=True), ) class MergeTreesOrderByClauseSegment(BaseSegment): """A `ORDER BY` clause for the MergeTree family engine.""" type = "merge_tree_order_by_clause" match_grammar: Matchable = Sequence( "ORDER", "BY", OneOf( Sequence( "TUPLE", Bracketed(), # tuple() not tuple ), Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ), Ref("ColumnReferenceSegment"), ), ) class PreWhereClauseSegment(BaseSegment): """A `PREWHERE` clause like in `SELECT` or `INSERT`.""" type = "prewhere_clause" match_grammar: 
Matchable = Sequence( "PREWHERE", # NOTE: The indent here is implicit to allow # constructions like: # # PREWHERE a # AND b # # to be valid without forcing an indent between # "PREWHERE" and "a". ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class SettingsClauseSegment(BaseSegment): """A `SETTINGS` clause for engines or query-level settings.""" type = "settings_clause" match_grammar: Matchable = Sequence( "SETTINGS", Delimited( Sequence( Ref("NakedIdentifierSegment"), Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), Ref("BooleanLiteralGrammar"), ), optional=True, ), ), optional=True, ) class SelectStatementSegment(ansi.SelectStatementSegment): """Enhance the `SELECT` statement to include PREWHERE, FORMAT, INTO OUTFILE and SETTINGS clauses.""" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("PreWhereClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), ).copy( insert=[ Ref("FormatClauseSegment", optional=True), Ref("IntoOutfileClauseSegment", optional=True), Ref("SettingsClauseSegment", optional=True), ], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance the unordered `SELECT` statement to include PREWHERE.""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("PreWhereClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), ) class WithFillSegment(ansi.WithFillSegment): """Enhances `ORDER BY` clauses to include WITH FILL. https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier """ match_grammar: Matchable = Sequence( "WITH", "FILL", Sequence("FROM", Ref("ExpressionSegment"), optional=True), Sequence("TO", Ref("ExpressionSegment"), optional=True), Sequence( "STEP", OneOf( Ref("NumericLiteralSegment"), Ref("IntervalExpressionSegment"), ), optional=True, ), ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( Delimited( OneOf( # Datatypes like Nullable allow optional datatypes here. Ref("DatatypeSegment"), ), # The brackets might be empty for some cases... optional=True, ), ) class DatatypeSegment(BaseSegment): """Support complex Clickhouse data types. Complex data types are typically used in either DDL statements or as the target type in casts.
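Illustrative examples of the forms covered below: Nullable(String), LowCardinality(FixedString(2)), Map(String, Array(UInt8)), Tuple(id UInt64, name String) and Enum8('a' = 1, 'b' = 2).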
""" type = "data_type" match_grammar = OneOf( # Nullable(Type) Sequence( StringParser("NULLABLE", CodeSegment, type="data_type_identifier"), Bracketed(Ref("DatatypeSegment")), ), # LowCardinality(Type) Sequence( StringParser("LOWCARDINALITY", CodeSegment, type="data_type_identifier"), Bracketed(Ref("DatatypeSegment")), ), # DateTime64(precision, 'timezone') Sequence( StringParser("DATETIME64", CodeSegment, type="data_type_identifier"), Bracketed( Delimited( OneOf( Ref("NumericLiteralSegment"), # precision Ref("QuotedLiteralSegment"), # timezone ), delimiter=Ref("CommaSegment"), optional=True, ) ), ), # DateTime('timezone') Sequence( StringParser("DATETIME", CodeSegment, type="data_type_identifier"), Bracketed( Ref("QuotedLiteralSegment"), # timezone optional=True, ), ), # FixedString(length) Sequence( StringParser("FIXEDSTRING", CodeSegment, type="data_type_identifier"), Bracketed(Ref("NumericLiteralSegment")), # length ), # Array(Type) Sequence( StringParser("ARRAY", CodeSegment, type="data_type_identifier"), Bracketed(Ref("DatatypeSegment")), ), # Map(KeyType, ValueType) Sequence( StringParser("MAP", CodeSegment, type="data_type_identifier"), Bracketed( Delimited( Ref("DatatypeSegment"), delimiter=Ref("CommaSegment"), ) ), ), # Tuple(Type1, Type2) or Tuple(name1 Type1, name2 Type2) Sequence( StringParser("TUPLE", CodeSegment, type="data_type_identifier"), Bracketed( Delimited( OneOf( # Named tuple element: name Type Sequence( OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), Ref("DatatypeSegment"), ), # Regular tuple element: just Type Ref("DatatypeSegment"), ), delimiter=Ref("CommaSegment"), ) ), ), # Nested(name1 Type1, name2 Type2) Sequence( StringParser("NESTED", CodeSegment, type="data_type_identifier"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), delimiter=Ref("CommaSegment"), ) ), ), # JSON data type StringParser("JSON", CodeSegment, type="data_type_identifier"), # Enum8('val1' = 1, 'val2' = 2) Sequence( OneOf( StringParser("ENUM8", CodeSegment, type="data_type_identifier"), StringParser("ENUM16", CodeSegment, type="data_type_identifier"), ), Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), delimiter=Ref("CommaSegment"), ) ), ), # double args Sequence( OneOf( StringParser("DECIMAL", CodeSegment, type="data_type_identifier"), StringParser("NUMERIC", CodeSegment, type="data_type_identifier"), ), Ref("BracketedArguments", optional=True), ), # single args Sequence( OneOf( StringParser("DECIMAL32", CodeSegment, type="data_type_identifier"), StringParser("DECIMAL64", CodeSegment, type="data_type_identifier"), StringParser("DECIMAL128", CodeSegment, type="data_type_identifier"), StringParser("DECIMAL256", CodeSegment, type="data_type_identifier"), ), Bracketed(Ref("NumericLiteralSegment")), # scale ), Ref("TupleTypeSegment"), Ref("DatatypeIdentifierSegment"), Ref("NumericLiteralSegment"), Sequence( StringParser("DATETIME64", CodeSegment, type="data_type_identifier"), Bracketed( Delimited( Ref("NumericLiteralSegment"), # precision Ref("QuotedLiteralSegment", optional=True), # timezone # The brackets might be empty as well optional=True, ), optional=True, ), ), Sequence( StringParser("ARRAY", CodeSegment, type="data_type_identifier"), Bracketed(Ref("DatatypeSegment")), ), ) class TupleTypeSegment(ansi.StructTypeSegment): """Expression to construct a Tuple datatype.""" match_grammar = Sequence( "TUPLE", Ref("TupleTypeSchemaSegment"), # Tuple() can't be 
empty ) class TupleTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a Tuple datatype.""" type = "tuple_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), bracket_pairs_set="bracket_pairs", ), bracket_pairs_set="bracket_pairs", bracket_type="round", ) class ArrayJoinClauseSegment(BaseSegment): """[LEFT] ARRAY JOIN does not support Join conditions and doesn't work as real JOIN. https://clickhouse.com/docs/en/sql-reference/statements/select/array-join """ type = "array_join_clause" match_grammar: Matchable = Sequence( Ref.keyword("LEFT", optional=True), "ARRAY", Ref("JoinKeywordsGrammar"), Indent, Delimited( Ref("SelectClauseElementSegment"), ), Dedent, ) class CTEDefinitionSegment(ansi.CTEDefinitionSegment): """A CTE Definition from a WITH statement. Overridden from ANSI to allow expression CTEs. https://clickhouse.com/docs/en/sql-reference/statements/select/with/ """ type = "common_table_expression" match_grammar: Matchable = OneOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("CTEColumnList", optional=True), "AS", Bracketed( # Ephemeral here to subdivide the query. Ref("SelectableGrammar"), parse_mode=ParseMode.GREEDY, ), ), Sequence( Ref("ExpressionSegment"), "AS", Ref("SingleIdentifierGrammar"), ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause.""" type = "alias_expression" match_grammar: Matchable = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), # Column alias in VALUES clause Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Ref("SingleQuotedIdentifierSegment"), exclude=OneOf( "LATERAL", "WINDOW", "KEYS", ), ), Dedent, ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Clickhouse.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ Ref("ExceptClauseSegment", optional=True), ] ) class ExceptClauseSegment(BaseSegment): """A Clickhouse SELECT EXCEPT clause. https://clickhouse.com/docs/en/sql-reference/statements/select#except """ type = "select_except_clause" match_grammar = Sequence( "EXCEPT", OneOf( Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), Ref("SingleIdentifierGrammar"), ), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns. Overridden from ANSI to allow DISTINCT ON () https://clickhouse.com/docs/en/sql-reference/statements/select/distinct """ match_grammar = OneOf( Sequence( "DISTINCT", Sequence( "ON", Bracketed(Delimited(Ref("ExpressionSegment"))), optional=True, ), ), "ALL", ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. Overridden from ANSI to allow FINAL modifier. 
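
    For example, the `FINAL` modifier should parse in a query like this
    (illustrative; the table name is a placeholder):

    .. code-block:: sql

        SELECT * FROM my_table FINAL;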
    https://clickhouse.com/docs/en/sql-reference/statements/select/from#final-modifier
    """

    type = "from_expression_element"
    match_grammar: Matchable = Sequence(
        Ref("PreTableFunctionKeywordsGrammar", optional=True),
        OptionallyBracketed(Ref("TableExpressionSegment")),
        Ref(
            "AliasExpressionSegment",
            exclude=OneOf(
                Ref("FromClauseTerminatorGrammar"),
                Ref("SamplingExpressionSegment"),
                Ref("JoinLikeClauseGrammar"),
                "FINAL",
                Ref("JoinClauseSegment"),
            ),
            optional=True,
        ),
        Ref.keyword("FINAL", optional=True),
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays
        Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True),
        Ref("SamplingExpressionSegment", optional=True),
        Ref("PostTableExpressionGrammar", optional=True),
    )


class TableEngineFunctionSegment(BaseSegment):
    """A ClickHouse `ENGINE` clause function.

    With this segment we attempt to match all possible engines.
    """

    type = "table_engine_function"
    match_grammar: Matchable = Sequence(
        Sequence(
            Ref(
                "FunctionNameSegment",
                exclude=OneOf(
                    Ref("DatePartFunctionNameSegment"),
                    Ref("ValuesClauseSegment"),
                ),
            ),
            Ref("FunctionContentsSegment", optional=True),
        ),
    )


class OnClusterClauseSegment(BaseSegment):
    """An `ON CLUSTER` clause."""

    type = "on_cluster_clause"
    match_grammar = Sequence(
        "ON",
        "CLUSTER",
        OneOf(
            Ref("SingleIdentifierGrammar"),
            # Support for placeholders like '{cluster}'
            Ref("QuotedLiteralSegment"),
        ),
    )


class TableEngineSegment(BaseSegment):
    """An `ENGINE` used in `CREATE TABLE`."""

    type = "engine"
    match_grammar = Sequence(
        "ENGINE",
        Ref("EqualsSegment", optional=True),
        Sequence(
            Ref("TableEngineFunctionSegment"),
            AnySetOf(
                Ref("MergeTreesOrderByClauseSegment"),
                Sequence(
                    "PARTITION",
                    "BY",
                    Ref("ExpressionSegment"),
                ),
                Sequence(
                    "PRIMARY",
                    "KEY",
                    Ref("ExpressionSegment"),
                ),
                Sequence(
                    "SAMPLE",
                    "BY",
                    Ref("ExpressionSegment"),
                ),
            ),
            Ref("SettingsClauseSegment", optional=True),
        ),
    )


class DatabaseEngineFunctionSegment(BaseSegment):
    """A ClickHouse database `ENGINE` clause function.

    With this segment we attempt to match all possible engines.
    """

    type = "engine_function"
    match_grammar: Matchable = Sequence(
        Sequence(
            OneOf(
                "ATOMIC",
                "MYSQL",
                "MATERIALIZEDMYSQL",
                "LAZY",
                "POSTGRESQL",
                "MATERIALIZEDPOSTGRESQL",
                "REPLICATED",
                "SQLITE",
            ),
            Ref("FunctionContentsSegment", optional=True),
        ),
    )


class DatabaseEngineSegment(BaseSegment):
    """An `ENGINE` used in `CREATE DATABASE`."""

    type = "database_engine"
    match_grammar = Sequence(
        "ENGINE",
        Ref("EqualsSegment"),
        Sequence(
            Ref("DatabaseEngineFunctionSegment"),
            AnySetOf(
                Ref("MergeTreesOrderByClauseSegment"),
                Sequence(
                    "PARTITION",
                    "BY",
                    Ref("ExpressionSegment"),
                    optional=True,
                ),
                Sequence(
                    "PRIMARY",
                    "KEY",
                    Ref("ExpressionSegment"),
                    optional=True,
                ),
                Sequence(
                    "SAMPLE",
                    "BY",
                    Ref("ExpressionSegment"),
                    optional=True,
                ),
            ),
            Ref("SettingsClauseSegment", optional=True),
        ),
    )


class ColumnTTLSegment(BaseSegment):
    """A TTL clause for columns as used in CREATE TABLE.

    Specified in
    https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-column-ttl
    """

    type = "column_ttl_segment"
    match_grammar = Sequence(
        "TTL",
        Ref("ExpressionSegment"),
    )


class TableTTLSegment(BaseSegment):
    """A TTL clause for tables as used in CREATE TABLE.
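
    For example, a clause of roughly this shape (illustrative; column and
    volume names are placeholders):

    .. code-block:: sql

        TTL created_at + INTERVAL 3 MONTH DELETE,
            created_at + INTERVAL 1 YEAR TO VOLUME 'cold'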
Specified in https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl """ type = "table_ttl_segment" match_grammar = Sequence( "TTL", Delimited( Sequence( Ref("ExpressionSegment"), OneOf( "DELETE", Sequence( "TO", "VOLUME", Ref("QuotedLiteralSegment"), ), Sequence( "TO", "DISK", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), ) ), ) class ColumnConstraintSegment(BaseSegment): """ClickHouse specific column constraints. As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/table#constraints """ type = "column_constraint_segment" match_grammar = AnySetOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( OneOf( "DEFAULT", "MATERIALIZED", "ALIAS", ), OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ), Sequence( "EPHEMERAL", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), optional=True, ), ), Ref("PrimaryKeyGrammar"), Sequence( "CODEC", Ref("FunctionContentsGrammar"), optional=True, ), Ref("ColumnTTLSegment"), ), ) ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/database """ type = "create_database_statement" match_grammar = Sequence( "CREATE", "DATABASE", Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), AnySetOf( Ref("OnClusterClauseSegment", optional=True), Ref("DatabaseEngineSegment", optional=True), Sequence( "COMMENT", Ref("SingleIdentifierGrammar"), optional=True, ), ), AnyNumberOf( "TABLE", "OVERRIDE", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("ColumnConstraintSegment"), ), optional=True, ), optional=True, ), ) class RenameStatementSegment(BaseSegment): """A `RENAME TABLE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/rename/ """ type = "rename_table_statement" match_grammar = Sequence( "RENAME", OneOf( Sequence( "TABLE", Delimited( Sequence( Ref("TableReferenceSegment"), "TO", Ref("TableReferenceSegment"), ) ), ), Sequence( "DATABASE", Delimited( Sequence( Ref("DatabaseReferenceSegment"), "TO", Ref("DatabaseReferenceSegment"), ) ), ), Sequence( "DICTIONARY", Delimited( Sequence( Ref("ObjectReferenceSegment"), "TO", Ref("ObjectReferenceSegment"), ) ), ), ), Ref("OnClusterClauseSegment", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/table/ """ type = "create_table_statement" match_grammar: Matchable = OneOf( Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), OneOf( # CREATE TABLE (...): Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("ColumnConstraintSegment"), ), ), # Column definition may be missing if using AS SELECT optional=True, ), Ref("TableEngineSegment"), # CREATE TABLE (...) 
AS SELECT: Sequence( "AS", Ref("SelectableGrammar"), optional=True, ), ), # CREATE TABLE AS other_table: Sequence( "AS", Ref("TableReferenceSegment"), Ref("TableEngineSegment", optional=True), ), # CREATE TABLE AS table_function(): Sequence( "AS", Ref("FunctionSegment"), ), ), AnySetOf( Sequence( "COMMENT", OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), ), Ref("TableTTLSegment"), optional=True, ), Ref("TableEndClauseSegment", optional=True), ), # CREATE TEMPORARY TABLE Sequence( "CREATE", Ref.keyword("TEMPORARY"), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # CREATE TEMPORARY TABLE (...): Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("ColumnConstraintSegment"), ), ), # Column definition may be missing if using AS SELECT optional=True, ), Ref("TableEngineSegment"), # CREATE TEMPORARY TABLE (...) AS SELECT: Sequence( "AS", Ref("SelectableGrammar"), optional=True, ), ), # CREATE TEMPORARY TABLE AS other_table: Sequence( "AS", Ref("TableReferenceSegment"), Ref("TableEngineSegment", optional=True), ), # CREATE TEMPORARY TABLE AS table_function(): Sequence( "AS", Ref("FunctionSegment"), ), # CREATE TEMPORARY TABLE AS Sequence( "AS", Ref("SelectableGrammar"), optional=True, ), ), AnySetOf( Sequence( "COMMENT", OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), ), Ref("TableTTLSegment"), optional=True, ), Ref("TableEndClauseSegment", optional=True), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. https://clickhouse.com/docs/en/sql-reference/statements/create/view """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), "AS", Ref("SelectableGrammar"), Ref("TableEndClauseSegment", optional=True), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. https://clickhouse.com/docs/en/sql-reference/statements/create/table/ """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), OneOf( Sequence( "TO", Ref("TableReferenceSegment"), # Add support for column list in TO clause Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), optional=True, ), Ref("TableEngineSegment", optional=True), ), Sequence( Ref("TableEngineSegment", optional=True), # Add support for PARTITION BY clause Sequence( "PARTITION", "BY", Ref("ExpressionSegment"), optional=True, ), # Add support for ORDER BY clause Ref("MergeTreesOrderByClauseSegment", optional=True), # Add support for TTL clause Ref("TableTTLSegment", optional=True), # Add support for SETTINGS clause Ref("SettingsClauseSegment", optional=True), Sequence("POPULATE", optional=True), ), ), "AS", Ref("SelectableGrammar"), Ref("TableEndClauseSegment", optional=True), ) class DropTableStatementSegment(ansi.DropTableStatementSegment): """A `DROP TABLE` statement. 
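
    For example (illustrative; identifiers are placeholders):

    .. code-block:: sql

        DROP TABLE IF EXISTS my_db.my_table ON CLUSTER my_cluster SYNC;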
    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_table_statement"
    match_grammar = Sequence(
        "DROP",
        Ref.keyword("TEMPORARY", optional=True),
        "TABLE",
        Ref("IfExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        Ref("OnClusterClauseSegment", optional=True),
        Ref.keyword("SYNC", optional=True),
    )


class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment):
    """A `DROP DATABASE` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_database_statement"
    match_grammar = Sequence(
        "DROP",
        "DATABASE",
        Ref("IfExistsGrammar", optional=True),
        Ref("DatabaseReferenceSegment"),
        Ref("OnClusterClauseSegment", optional=True),
        Ref.keyword("SYNC", optional=True),
    )


class DropDictionaryStatementSegment(BaseSegment):
    """A `DROP DICTIONARY` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_dictionary_statement"
    match_grammar = Sequence(
        "DROP",
        "DICTIONARY",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref.keyword("SYNC", optional=True),
    )


class DropUserStatementSegment(ansi.DropUserStatementSegment):
    """A `DROP USER` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_user_statement"
    match_grammar = Sequence(
        "DROP",
        "USER",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref("OnClusterClauseSegment", optional=True),
    )


class DropRoleStatementSegment(ansi.DropRoleStatementSegment):
    """A `DROP ROLE` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_role_statement"
    match_grammar = Sequence(
        "DROP",
        "ROLE",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref("OnClusterClauseSegment", optional=True),
    )


class DropQuotaStatementSegment(BaseSegment):
    """A `DROP QUOTA` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_quota_statement"
    match_grammar = Sequence(
        "DROP",
        "QUOTA",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref("OnClusterClauseSegment", optional=True),
    )


class DropSettingProfileStatementSegment(BaseSegment):
    """A `DROP [SETTINGS] PROFILE` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_setting_profile_statement"
    match_grammar = Sequence(
        "DROP",
        Delimited(
            Ref("NakedIdentifierSegment"),
            min_delimiters=0,
        ),
        "PROFILE",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref("OnClusterClauseSegment", optional=True),
    )


class DropViewStatementSegment(ansi.DropViewStatementSegment):
    """A `DROP VIEW` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_view_statement"
    match_grammar = Sequence(
        "DROP",
        "VIEW",
        Ref("IfExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        Ref("OnClusterClauseSegment", optional=True),
        Ref.keyword("SYNC", optional=True),
    )


class DropFunctionStatementSegment(ansi.DropFunctionStatementSegment):
    """A `DROP FUNCTION` statement.

    As specified in
    https://clickhouse.com/docs/en/sql-reference/statements/drop/
    """

    type = "drop_function_statement"
    match_grammar = Sequence(
        "DROP",
        "FUNCTION",
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Ref("OnClusterClauseSegment", optional=True),
    )


class SystemMergesSegment(BaseSegment):
    """A `SYSTEM ... MERGES` statement.
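
    For example, as part of a full statement (illustrative; the table name
    is a placeholder):

    .. code-block:: sql

        SYSTEM STOP MERGES my_table;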
https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_merges_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "MERGES", OneOf( Sequence( "ON", "VOLUME", Ref("ObjectReferenceSegment"), ), Ref("TableReferenceSegment"), ), ) class SystemTTLMergesSegment(BaseSegment): """A `SYSTEM ... TTL MERGES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_ttl_merges_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "TTL", "MERGES", Ref("TableReferenceSegment", optional=True), ) class SystemMovesSegment(BaseSegment): """A `SYSTEM ... MOVES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_moves_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "MOVES", Ref("TableReferenceSegment", optional=True), ) class SystemReplicaSegment(BaseSegment): """A `SYSTEM ... REPLICA` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replica_segment" match_grammar = OneOf( Sequence( "SYNC", "REPLICA", Ref("OnClusterClauseSegment", optional=True), Ref("TableReferenceSegment"), Sequence("STRICT", optional=True), ), Sequence( "DROP", "REPLICA", Ref("SingleIdentifierGrammar"), Sequence( "FROM", OneOf( Sequence( "DATABASE", Ref("ObjectReferenceSegment"), ), Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "ZKPATH", Ref("PathSegment"), ), ), optional=True, ), ), Sequence( "RESTART", "REPLICA", Ref("TableReferenceSegment"), ), Sequence( "RESTORE", "REPLICA", Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), ), ) class SystemFilesystemSegment(BaseSegment): """A `SYSTEM ... FILESYSTEM` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_filesystem_segment" match_grammar = Sequence( "DROP", "FILESYSTEM", "CACHE", ) class SystemReplicatedSegment(BaseSegment): """A `SYSTEM ... REPLICATED` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replicated_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "REPLICATED", "SENDS", Ref("TableReferenceSegment", optional=True), ) class SystemReplicationSegment(BaseSegment): """A `SYSTEM ... REPLICATION` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replication_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "REPLICATION", "QUEUES", Ref("TableReferenceSegment", optional=True), ) class SystemFetchesSegment(BaseSegment): """A `SYSTEM ... FETCHES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_fetches_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "FETCHES", Ref("TableReferenceSegment", optional=True), ) class SystemDistributedSegment(BaseSegment): """A `SYSTEM ... DISTRIBUTED` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_distributed_segment" match_grammar = Sequence( OneOf( Sequence( OneOf( "START", "STOP", ), "DISTRIBUTED", "SENDS", Ref("TableReferenceSegment"), ), Sequence( "FLUSH", "DISTRIBUTED", Ref("TableReferenceSegment"), ), ), # Ref("TableReferenceSegment"), ) class SystemModelSegment(BaseSegment): """A `SYSTEM ... MODEL` statement. 
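
    For example, as part of a full statement (illustrative):

    .. code-block:: sql

        SYSTEM RELOAD MODELS;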
https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_model_segment" match_grammar = Sequence( "RELOAD", OneOf( Sequence( "MODELS", Ref("OnClusterClauseSegment", optional=True), ), Sequence( "MODEL", AnySetOf( Ref("OnClusterClauseSegment", optional=True), Ref("PathSegment"), ), ), ), ) class SystemFileSegment(BaseSegment): """A `SYSTEM ... FILE` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_file_segment" match_grammar = Sequence( "SYNC", "FILE", "CACHE", ) class SystemUnfreezeSegment(BaseSegment): """A `SYSTEM ... UNFREEZE` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_unfreeze_segment" match_grammar = Sequence( "UNFREEZE", "WITH", "NAME", Ref("ObjectReferenceSegment"), ) class SystemStatementSegment(BaseSegment): """A `SYSTEM ...` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_statement" match_grammar: Matchable = Sequence( "SYSTEM", OneOf( Ref("SystemMergesSegment"), Ref("SystemTTLMergesSegment"), Ref("SystemMovesSegment"), Ref("SystemReplicaSegment"), Ref("SystemReplicatedSegment"), Ref("SystemReplicationSegment"), Ref("SystemFetchesSegment"), Ref("SystemDistributedSegment"), Ref("SystemFileSegment"), Ref("SystemFilesystemSegment"), Ref("SystemUnfreezeSegment"), Ref("SystemModelSegment"), ), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE` statement for ClickHouse. As specified in https://clickhouse.com/docs/en/sql-reference/statements/alter/ """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), OneOf( # ALTER TABLE ... DROP COLUMN [IF EXISTS] name Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name ), # ALTER TABLE ... ADD COLUMN [IF NOT EXISTS] name [type] Sequence( "ADD", "COLUMN", Ref("IfNotExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name OneOf( # Regular column with type Sequence( Ref("DatatypeSegment"), # Data type Sequence( "DEFAULT", Ref("ExpressionSegment"), optional=True, ), Sequence( "MATERIALIZED", Ref("ExpressionSegment"), optional=True, ), Sequence( "CODEC", Bracketed( Delimited( OneOf( Ref("FunctionSegment"), Ref("SingleIdentifierGrammar"), ), ), ), optional=True, ), ), # Alias column with type Sequence( Ref("DatatypeSegment"), # Data type "ALIAS", Ref("ExpressionSegment"), ), # Alias column without type Sequence( "ALIAS", Ref("ExpressionSegment"), ), # Default could also be used without type Sequence( "DEFAULT", Ref("ExpressionSegment"), ), # Materialized could also be used without type Sequence( "MATERIALIZED", Ref("ExpressionSegment"), ), ), OneOf( Sequence( "AFTER", Ref("SingleIdentifierGrammar"), # Column name ), "FIRST", optional=True, ), ), # ALTER TABLE ... ADD ALIAS name FOR column_name Sequence( "ADD", "ALIAS", Ref("IfNotExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Alias name "FOR", Ref("SingleIdentifierGrammar"), # Column name ), # ALTER TABLE ... RENAME COLUMN [IF EXISTS] name to new_name Sequence( "RENAME", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name "TO", Ref("SingleIdentifierGrammar"), # New column name ), # ALTER TABLE ... 
COMMENT COLUMN [IF EXISTS] name 'Text comment' Sequence( "COMMENT", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name Ref("QuotedLiteralSegment"), # Comment text ), # ALTER TABLE ... COMMENT 'Text comment' Sequence( "COMMENT", Ref("QuotedLiteralSegment"), # Comment text ), # ALTER TABLE ... MODIFY COMMENT 'Text comment' Sequence( "MODIFY", "COMMENT", Ref("QuotedLiteralSegment"), # Comment text ), # ALTER TABLE ... MODIFY COLUMN [IF EXISTS] name [TYPE] [type] Sequence( "MODIFY", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name OneOf( # Type modification with explicit TYPE keyword Sequence( "TYPE", Ref("DatatypeSegment"), # Data type Sequence( "DEFAULT", Ref("ExpressionSegment"), optional=True, ), Sequence( "MATERIALIZED", Ref("ExpressionSegment"), optional=True, ), Sequence( "ALIAS", Ref("ExpressionSegment"), optional=True, ), Sequence( "CODEC", Bracketed( Delimited( OneOf( Ref("FunctionSegment"), Ref("SingleIdentifierGrammar"), ), delimiter=Ref("CommaSegment"), ), ), optional=True, ), ), # Type modification without TYPE keyword Sequence( Ref("DatatypeSegment", optional=True), # Data type Sequence( "DEFAULT", Ref("ExpressionSegment"), optional=True, ), Sequence( "MATERIALIZED", Ref("ExpressionSegment"), optional=True, ), Sequence( "ALIAS", Ref("ExpressionSegment"), optional=True, ), Sequence( "CODEC", Bracketed( Delimited( OneOf( Ref("FunctionSegment"), Ref("SingleIdentifierGrammar"), ), delimiter=Ref("CommaSegment"), ), ), optional=True, ), ), # Alias modification Sequence( "ALIAS", Ref("ExpressionSegment"), ), # Remove alias Sequence( "REMOVE", "ALIAS", ), # Remove property Sequence( "REMOVE", OneOf( "ALIAS", "DEFAULT", "MATERIALIZED", "CODEC", "COMMENT", "TTL", ), ), # Modify setting Sequence( "MODIFY", "SETTING", Ref("SingleIdentifierGrammar"), # Setting name Ref("EqualsSegment"), Ref("LiteralGrammar"), # Setting value ), # Reset setting Sequence( "RESET", "SETTING", Ref("SingleIdentifierGrammar"), # Setting name ), optional=True, ), OneOf( Sequence( "AFTER", Ref("SingleIdentifierGrammar"), # Column name ), "FIRST", optional=True, ), ), # ALTER TABLE ... ALTER COLUMN name [TYPE] [type] Sequence( "ALTER", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name OneOf( # With TYPE keyword Sequence( "TYPE", Ref("DatatypeSegment"), # Data type ), # Without TYPE keyword Ref("DatatypeSegment"), # Data type ), OneOf( Sequence( "AFTER", Ref("SingleIdentifierGrammar"), # Column name ), "FIRST", optional=True, ), ), # ALTER TABLE ... REMOVE TTL Sequence( "REMOVE", "TTL", ), # ALTER TABLE ... MODIFY TTL expression Sequence( "MODIFY", "TTL", Ref("ExpressionSegment"), ), # ALTER TABLE ... MODIFY QUERY select_statement Sequence( "MODIFY", "QUERY", Ref("SelectStatementSegment"), ), # ALTER TABLE ... 
MATERIALIZE COLUMN col Sequence( "MATERIALIZE", "COLUMN", Ref("SingleIdentifierGrammar"), # Column name OneOf( Sequence( "IN", "PARTITION", Ref("SingleIdentifierGrammar"), ), Sequence( "IN", "PARTITION", "ID", Ref("QuotedLiteralSegment"), ), optional=True, ), ), ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CreateMaterializedViewStatementSegment"), Ref("DropDictionaryStatementSegment"), Ref("DropQuotaStatementSegment"), Ref("DropSettingProfileStatementSegment"), Ref("SystemStatementSegment"), Ref("RenameStatementSegment"), Ref("AlterTableStatementSegment"), ] ) class LimitClauseComponentSegment(BaseSegment): """A component of a `LIMIT` clause. https://clickhouse.com/docs/en/sql-reference/statements/select/limit """ type = "limit_clause_component" match_grammar = OptionallyBracketed( OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), ) ) class LimitClauseSegment(ansi.LimitClauseSegment): """Overriding LimitClauseSegment to allow for additional segment parsing.""" match_grammar: Matchable = Sequence( "LIMIT", Indent, Sequence( Ref("LimitClauseComponentSegment"), OneOf( Sequence( "OFFSET", Ref("LimitClauseComponentSegment"), ), Sequence( # LIMIT 1,2 only accepts constants # and can't be bracketed like that LIMIT (1, 2) # but can be bracketed like that LIMIT (1), (2) Ref("CommaSegment"), Ref("LimitClauseComponentSegment"), ), optional=True, ), Sequence( "BY", OneOf( Ref("BracketedColumnReferenceListGrammar"), Ref("ColumnReferenceSegment"), ), optional=True, ), ), Dedent, ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval https://clickhouse.com/docs/en/sql-reference/operators#operator-interval """ type = "interval_expression" match_grammar: Matchable = Sequence( "INTERVAL", OneOf( # The Numeric Version Sequence( Ref("NumericLiteralSegment"), Ref("DatetimeUnitSegment"), ), # The String version Ref("QuotedLiteralSegment"), # Combine version Sequence( Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment"), ), # With expression as value Sequence( Ref("ExpressionSegment"), Ref("DatetimeUnitSegment"), ), ), ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE. Supports ClickHouse specific options like CODEC, ALIAS, MATERIALIZED, etc. """ type = "column_definition" match_grammar = Sequence( OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), Ref("DatatypeSegment"), AnyNumberOf( OneOf( # DEFAULT expression Sequence( "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("ExpressionSegment"), ), ), # ALIAS expression Sequence( "ALIAS", Ref("ExpressionSegment"), ), # MATERIALIZED expression Sequence( "MATERIALIZED", Ref("ExpressionSegment"), ), # CODEC(...) 
Sequence( "CODEC", Bracketed( Delimited( OneOf( Ref("FunctionSegment"), Ref("SingleIdentifierGrammar"), ), delimiter=Ref("CommaSegment"), ), ), ), # COMMENT 'text' Sequence( "COMMENT", Ref("QuotedLiteralSegment"), ), # Column constraint Ref("ColumnConstraintSegment"), ), optional=True, ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_clickhouse_keywords.py000066400000000000000000000116411503426445100256710ustar00rootroot00000000000000"""A list of ClickHouse keywords.""" # https://clickhouse.com/docs/en/sql-reference/syntax/#keywords FORMAT_KEYWORDS = [ "TabSeparated", "TabSeparatedRaw", "TabSeparatedWithNames", "TabSeparatedWithNamesAndTypes", "TabSeparatedRawWithNames", "TabSeparatedRawWithNamesAndTypes", "Template", "TemplateIgnoreSpaces", "CSV", "CSVWithNames", "CSVWithNamesAndTypes", "CustomSeparated", "CustomSeparatedWithNames", "CustomSeparatedWithNamesAndTypes", "SQLInsert", "Values", "Vertical", "JSON", "JSONAsString", "JSONAsObject", "JSONStrings", "JSONColumns", "JSONColumnsWithMetadata", "JSONCompact", "JSONCompactStrings", "JSONCompactColumns", "JSONEachRow", "PrettyJSONEachRow", "JSONEachRowWithProgress", "JSONStringsEachRow", "JSONStringsEachRowWithProgress", "JSONCompactEachRow", "JSONCompactEachRowWithNames", "JSONCompactEachRowWithNamesAndTypes", "JSONCompactStringsEachRow", "JSONCompactStringsEachRowWithNames", "JSONCompactStringsEachRowWithNamesAndTypes", "JSONObjectEachRow", "BSONEachRow", "TSKV", "Pretty", "PrettyNoEscapes", "PrettyMonoBlock", "PrettyNoEscapesMonoBlock", "PrettyCompact", "PrettyCompactNoEscapes", "PrettyCompactMonoBlock", "PrettyCompactNoEscapesMonoBlock", "PrettySpace", "PrettySpaceNoEscapes", "PrettySpaceMonoBlock", "PrettySpaceNoEscapesMonoBlock", "Prometheus", "Protobuf", "ProtobufSingle", "ProtobufList", "Avro", "AvroConfluent", "Parquet", "ParquetMetadata", "Arrow", "ArrowStream", "ORC", "One", "Npy", "RowBinary", "RowBinaryWithNames", "RowBinaryWithNamesAndTypes", "RowBinaryWithDefaults", "Native", "Null", "XML", "CapnProto", "LineAsString", "Regexp", "RawBLOB", "MsgPack", "MySQLDump", "DWARF", "Markdown", "Form", ] UNRESERVED_KEYWORDS = [ # All keywords are unreserved. They are only treated as reserved according to # context. # See: https://clickhouse.com/docs/en/sql-reference/syntax/#keywords # This means that, for example, using `join` or `select` as table identifiers # without quotes is allowed. 
"ADD", "AFTER", "ALIAS", "ALL", "ALTER", "AND", "ANTI", "ANY", "ARRAY", "AS", "ASCENDING", "ASOF", "AST", "ASYNC", "ATOMIC", "ATTACH", "BETWEEN", "BOTH", "BY", "CHECK", "CLEAR", "CLUSTER", "CODEC", "COLLATE", "COLUMN", "COMMENT", "CONSTRAINT", "CREATE", "CUBE", "DATABASE", "DATABASES", "DATE", "DATETIME32", "DATETIME64", "DAY", "DEDUPLICATE", "DEFAULT", "DELAY", "DELETE", "DESC", "DESCENDING", "DESCRIBE", "DETACH", "DICTIONARIES", "DICTIONARY", "DISK", "DISTINCT", "DISTRIBUTED", "DROP", "ELSE", "END", "ENGINE", "EPHEMERAL", "EVENTS", "EXCEPT", "EXISTS", "EXPLAIN", "EXPRESSION", "EXTRACT", "FETCHES", "FILE", "FILESYSTEM", "FILL", "FINAL", "FIRST", "FLUSH", "FOR", "FORMAT", "FREEZE", "FROM", "FUNCTION", "GLOBAL", "GRANULARITY", "GROUP", "HAVING", "HIERARCHICAL", "HOUR", "ID", "IF", "ILIKE", "IN", "INDEX", "INF", "INJECTIVE", "INSERT", "INTERVAL", "INTO", "IS", "IS_OBJECT_ID", "KEY", "KILL", "LAST", "LAYOUT", "LAZY", "LEADING", "LIFETIME", "LIKE", "LIMIT", "LIVE", "LOCAL", "LOGS", "MATERIALIZE", "MATERIALIZED", "MATERIALIZEDMYSQL", "MATERIALIZEDPOSTGRESQL", "MAX", "MERGES", "MIN", "MINUTE", "MODEL", "MODELS", "MODIFY", "MONTH", "MOVE", "MOVES", "MUTATION", "MYSQL", "NAN_SQL", "NO", "NOT", "NULLS", "NULL_SQL", "OFFSET", "OPTIMIZE", "OR", "OUTFILE", "OVERRIDE", "PASTE", "POPULATE", "POSTGRESQL", "PREWHERE", "PRIMARY", "PROFILE", "PROJECTION", "QUARTER", "QUERY", "QUOTA", "QUEUES", "RANGE", "RELOAD", "REMOVE", "RENAME", "REPLACE", "REPLICA", "REPLICATED", "REPLICATION", "ROLLUP", "SAMPLE", "SECOND", "SEMI", "SEND", "SENDS", "SETTINGS", "SHOW", "SOURCE", "SQLITE", "START", "STEP", "STOP", "SUBSTRING", "SYNC", "SYNTAX", "SYSTEM", "TABLE", "TABLES", "TEMPORARY", "TEST", "THEN", "TIES", "TIMEOUT", "TIMESTAMP", "TO", "TOP", "TOTALS", "TRAILING", "TRIM", "TRUNCATE", "TTL", "TUPLE", "TYPE", "UNFREEZE", "UPDATE", "USE", "UUID", "VALUES", "VIEW", "VOLUME", "WATCH", "WEEK", "WHEN", "WHERE", "WITH", "YEAR", "ZKPATH", ] + FORMAT_KEYWORDS sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_databricks.py000066400000000000000000001415671503426445100237330ustar00rootroot00000000000000"""The Databricks Dialect. Functionally, it is quite similar to SparkSQL, however it's much less strict on keywords. It also has some extensions. 
""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, Matchable, OneOf, OptionallyBracketed, Ref, RegexLexer, RegexParser, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_sparksql as sparksql from sqlfluff.dialects.dialect_databricks_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) sparksql_dialect = load_raw_dialect("sparksql") databricks_dialect = sparksql_dialect.copy_as( "databricks", formatted_name="Databricks", docstring="The dialect for `Databricks `_.", ) databricks_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) databricks_dialect.sets("unreserved_keywords").update( sparksql_dialect.sets("reserved_keywords") ) databricks_dialect.sets("unreserved_keywords").difference_update(RESERVED_KEYWORDS) databricks_dialect.sets("reserved_keywords").clear() databricks_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) databricks_dialect.sets("date_part_function_name").update(["TIMEDIFF"]) databricks_dialect.insert_lexer_matchers( # Named Function Parameters: # https://docs.databricks.com/en/sql/language-manual/sql-ref-function-invocation.html#named-parameter-invocation [ StringLexer("right_arrow", "=>", CodeSegment), ], before="equals", ) databricks_dialect.insert_lexer_matchers( # Notebook Cell Delimiter: # https://learn.microsoft.com/en-us/azure/databricks/notebooks/notebook-export-import#sql-1 [ RegexLexer("command", r"(\r?\n){2}-- COMMAND ----------(\r?\n)", CodeSegment), ], before="newline", ) databricks_dialect.insert_lexer_matchers( # Databricks Notebook Start: # needed to insert "so early" to avoid magic + notebook # start to be interpreted as inline comments # https://learn.microsoft.com/en-us/azure/databricks/notebooks/notebooks-code#language-magic [ RegexLexer( "notebook_start", r"-- Databricks notebook source(\r?\n){1}", CommentSegment ), RegexLexer( "magic_single_line", r"(-- MAGIC %)([^\n]{2,})( [^%]{1})([^\n]*)", CodeSegment, ), RegexLexer("magic_line", r"(-- MAGIC)( [^%]{1})([^\n]*)", CodeSegment), RegexLexer("magic_start", r"(-- MAGIC %)([^\n]{2,})(\r?\n)", CodeSegment), ], before="inline_comment", ) databricks_dialect.add( CommandCellSegment=TypedParser("command", CodeSegment, type="statement_terminator"), DoubleQuotedUDFBody=TypedParser( "double_quote", CodeSegment, type="udf_body", trim_chars=('"',), ), SingleQuotedUDFBody=TypedParser( "single_quote", CodeSegment, type="udf_body", trim_chars=("'",), ), DollarQuotedUDFBody=TypedParser( "dollar_quote", CodeSegment, type="udf_body", trim_chars=("$",), ), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), # https://docs.databricks.com/en/sql/language-manual/sql-ref-principal.html PrincipalIdentifierSegment=OneOf( Ref("NakedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), PredictiveOptimizationGrammar=Sequence( OneOf("ENABLE", "DISABLE", "INHERIT"), "PREDICTIVE", "OPTIMIZATION", ), SetOwnerGrammar=Sequence( Ref.keyword("SET", optional=True), "OWNER", "TO", Ref("PrincipalIdentifierSegment"), ), SetTagsGrammar=Sequence( "SET", "TAGS", Ref("BracketedPropertyListGrammar"), ), UnsetTagsGrammar=Sequence( "UNSET", "TAGS", Ref("BracketedPropertyNameListGrammar"), ), ColumnDefaultGrammar=Sequence( "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), ), ), 
ConstraintOptionGrammar=Sequence( Sequence("ENABLE", "NOVALIDATE", optional=True), Sequence("NOT", "ENFORCED", optional=True), Sequence("DEFERRABLE", optional=True), Sequence("INITIALLY", "DEFERRED", optional=True), OneOf("NORELY", "RELY", optional=True), ), ForeignKeyOptionGrammar=Sequence( Sequence("MATCH", "FULL", optional=True), Sequence("ON", "UPDATE", "NO", "ACTION", optional=True), Sequence("ON", "DELETE", "NO", "ACTION", optional=True), ), DropConstraintGrammar=Sequence( "DROP", OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("IfExistsGrammar", optional=True), OneOf( "RESTRICT", "CASCADE", optional=True, ), ), Sequence( Ref("ForeignKeyGrammar"), Ref("IfExistsGrammar", optional=True), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ) ), ), Sequence( "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( "RESTRICT", "CASCADE", optional=True, ), ), ), ), AlterPartitionGrammar=Sequence( "PARTITION", Bracketed( Delimited( AnyNumberOf( OneOf( Ref("ColumnReferenceSegment"), Ref("SetClauseSegment"), ), min_times=1, ), ), ), ), RowFilterClauseGrammar=Sequence( "ROW", "FILTER", Ref("ObjectReferenceSegment"), "ON", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("LiteralGrammar"), ), optional=True, ), ), ), PropertiesBackTickedIdentifierSegment=RegexParser( r"`.+`", IdentifierSegment, type="properties_naked_identifier", ), LocationWithCredentialGrammar=Sequence( "LOCATION", Ref("QuotedLiteralSegment"), Sequence( "WITH", Bracketed( "CREDENTIAL", Ref("PrincipalIdentifierSegment"), ), optional=True, ), ), NotebookStart=TypedParser("notebook_start", CommentSegment, type="notebook_start"), MagicSingleLineGrammar=TypedParser( "magic_single_line", CodeSegment, type="magic_single_line" ), MagicLineGrammar=TypedParser("magic_line", CodeSegment, type="magic_line"), MagicStartGrammar=TypedParser("magic_start", CodeSegment, type="magic_start"), VariableNameIdentifierSegment=OneOf( Ref("NakedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), ) databricks_dialect.replace( DelimiterGrammar=OneOf(Ref("SemicolonSegment"), Ref("CommandCellSegment")), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-describe-volume.html DescribeObjectGrammar=sparksql_dialect.get_grammar("DescribeObjectGrammar").copy( insert=[ Sequence( "VOLUME", Ref("VolumeReferenceSegment"), ), ], at=0, ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("NamedArgumentSegment"), ), PropertiesNakedIdentifierSegment=RegexParser( r"[A-Z_][A-Z0-9_]*", IdentifierSegment, type="properties_naked_identifier", ), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-show-schemas.html # Differences between this and the SparkSQL version: # - Support for `FROM`|`IN` at the catalog level # - `LIKE` keyword is optional ShowDatabasesSchemasGrammar=Sequence( # SHOW { DATABASES | SCHEMAS } OneOf("DATABASES", "SCHEMAS"), Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( Ref.keyword("LIKE", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-show-functions.html # Differences between this and the SparkSQL version: # - Support for `FROM`|`IN` at the schema level # - `LIKE` keyword is optional ShowFunctionsGrammar=Sequence( # SHOW FUNCTIONS OneOf("USER", "SYSTEM", "ALL", optional=True), "FUNCTIONS", Sequence( Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( 
Ref.keyword("LIKE", optional=True), OneOf( # qualified function from a database Sequence( Ref("DatabaseReferenceSegment"), Ref("DotSegment"), Ref("FunctionNameSegment"), allow_gaps=False, ), # non-qualified function Ref("FunctionNameSegment"), # Regex/like string Ref("QuotedLiteralSegment"), ), optional=True, ), optional=True, ), ), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-show-tables.html # Differences between this and the SparkSQL version: # - `LIKE` keyword is optional ShowTablesGrammar=Sequence( # SHOW TABLES "TABLES", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( Ref.keyword("LIKE", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-show-views.html # Only difference between this and the SparkSQL version: # - `LIKE` keyword is optional ShowViewsGrammar=Sequence( # SHOW VIEWS "VIEWS", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( Ref.keyword("LIKE", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-show-volumes.html ShowObjectGrammar=sparksql_dialect.get_grammar("ShowObjectGrammar").copy( insert=[ Sequence( "VOLUMES", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( Ref.keyword("LIKE", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ) ], ), NotNullGrammar=Sequence( "NOT", "NULL", ), FunctionNameIdentifierSegment=OneOf( TypedParser("word", WordSegment, type="function_name_identifier"), Ref("BackQuotedIdentifierSegment"), ), PreTableFunctionKeywordsGrammar=OneOf("STREAM"), ) class IdentifierClauseSegment(BaseSegment): """An `IDENTIFIER` clause segment. https://docs.databricks.com/en/sql/language-manual/sql-ref-names-identifier-clause.html """ type = "identifier_clause_segment" match_grammar = Sequence( "IDENTIFIER", Bracketed(Ref("ExpressionSegment")), ) class ObjectReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object.""" # Allow whitespace match_grammar: Matchable = Delimited( OneOf(Ref("SingleIdentifierGrammar"), Ref("IdentifierClauseSegment")), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=False, ) class DatabaseReferenceSegment(ObjectReferenceSegment): """A reference to a database.""" type = "database_reference" class TableReferenceSegment(ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias.""" type = "table_reference" class SchemaReferenceSegment(ObjectReferenceSegment): """A reference to a schema.""" type = "schema_reference" class TableExpressionSegment(sparksql.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Enhance to allow for additional clauses allowed in Spark and Delta Lake. """ match_grammar = sparksql.TableExpressionSegment.match_grammar.copy( insert=[ Ref("IdentifierClauseSegment"), ], before=Ref("ValuesClauseSegment"), ) class CatalogReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a catalog. https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html """ type = "catalog_reference" class VolumeReferenceSegment(ansi.ObjectReferenceSegment): """Volume reference.""" type = "volume_reference" class AlterCatalogStatementSegment(BaseSegment): """An `ALTER CATALOG` statement. 
https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-alter-catalog.html """ type = "alter_catalog_statement" match_grammar = Sequence( "ALTER", "CATALOG", Ref("CatalogReferenceSegment"), OneOf( Ref("SetOwnerGrammar"), Ref("SetTagsGrammar"), Ref("UnsetTagsGrammar"), Ref("PredictiveOptimizationGrammar"), ), ) class CreateCatalogStatementSegment(BaseSegment): """A `CREATE CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-catalog.html """ type = "create_catalog_statement" match_grammar = Sequence( "CREATE", "CATALOG", Ref("IfNotExistsGrammar", optional=True), Ref("CatalogReferenceSegment"), Ref("CommentGrammar", optional=True), ) class DropCatalogStatementSegment(BaseSegment): """A `DROP CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-drop-catalog.html """ type = "drop_catalog_statement" match_grammar = Sequence( "DROP", "CATALOG", Ref("IfExistsGrammar", optional=True), Ref("CatalogReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class UseCatalogStatementSegment(BaseSegment): """A `USE CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html """ type = "use_catalog_statement" match_grammar = Sequence( "USE", "CATALOG", Ref("CatalogReferenceSegment"), ) class UseDatabaseStatementSegment(sparksql.UseDatabaseStatementSegment): """A `USE DATABASE` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-usedb.html """ type = "use_database_statement" match_grammar = Sequence( "USE", OneOf("DATABASE", "SCHEMA", optional=True), Ref("DatabaseReferenceSegment"), ) class AlterDatabaseStatementSegment(sparksql.AlterDatabaseStatementSegment): """An `ALTER DATABASE/SCHEMA` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-schema.html """ match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), OneOf( Sequence( "SET", Ref("DatabasePropertiesGrammar"), ), Ref("SetOwnerGrammar"), Ref("SetTagsGrammar"), Ref("UnsetTagsGrammar"), Ref("PredictiveOptimizationGrammar"), ), ) class AlterVolumeStatementSegment(BaseSegment): """Alter Volume Statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-volume.html """ type = "alter_volume_statement" match_grammar = Sequence( "ALTER", "VOLUME", Ref("VolumeReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("VolumeReferenceSegment"), ), Ref("SetOwnerGrammar"), Ref("SetTagsGrammar"), Ref("UnsetTagsGrammar"), ), ) class CreateVolumeStatementSegment(BaseSegment): """Create Volume Statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-volume.html """ type = "create_volume_statement" match_grammar = OneOf( # You can create a non-external volume without a location Sequence( "CREATE", "VOLUME", Ref("IfNotExistsGrammar", optional=True), Ref("VolumeReferenceSegment"), Ref("CommentGrammar", optional=True), ), # Or you can create an external volume that must have a location Sequence( "CREATE", "EXTERNAL", "VOLUME", Ref("IfNotExistsGrammar", optional=True), Ref("VolumeReferenceSegment"), Ref("LocationGrammar"), Ref("CommentGrammar", optional=True), ), ) class DropVolumeStatementSegment(BaseSegment): """Drop Volume Statement. 
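
    For example (illustrative; the volume name is a placeholder):

    .. code-block:: sql

        DROP VOLUME IF EXISTS my_catalog.my_schema.my_volume;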
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-drop-volume.html """ type = "drop_volume_statement" match_grammar = Sequence( "DROP", "VOLUME", Ref("IfExistsGrammar", optional=True), Ref("VolumeReferenceSegment"), ) class CreateDatabaseStatementSegment(sparksql.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-schema.html """ match_grammar = sparksql.CreateDatabaseStatementSegment.match_grammar.copy( insert=[ Sequence( Ref.keyword("MANAGED", optional=True), "LOCATION", Ref("QuotedLiteralSegment"), optional=True, ), ], at=5, remove=[ Ref("LocationGrammar", optional=True), ], ) class CreateViewStatementSegment(sparksql.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://docs.databricks.com/aws/en/sql/language-manual/sql-ref-syntax-ddl-create-view https://docs.databricks.com/aws/en/dlt-ref/dlt-sql-ref-create-materialized-view """ match_grammar = sparksql.CreateViewStatementSegment.match_grammar.copy( insert=[ Sequence( Ref.keyword("PRIVATE", optional=True), Ref.keyword("MATERIALIZED"), optional=True, ), ], before=Ref.keyword("MATERIALIZED", optional=True), remove=[ Ref.keyword("MATERIALIZED", optional=True), ], ) class MaskStatementSegment(BaseSegment): """A `MASK` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-column-mask.html """ type = "mask_statement" match_grammar = Sequence( "MASK", Ref("FunctionNameSegment"), Sequence( "USING", "COLUMNS", Bracketed( AnyNumberOf( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ), ), optional=True, ), ) class ColumnFieldDefinitionSegment(ansi.ColumnDefinitionSegment): """A column field definition, e.g. for CREATE TABLE or ALTER TABLE. This supports the iceberg syntax and allows for iceberg syntax such as ADD COLUMN a.b. """ match_grammar: Matchable = Sequence( Ref("ColumnReferenceSegment"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), Ref("ColumnDefaultGrammar", optional=True), # For default values ), ) class PropertyNameSegment(sparksql.PropertyNameSegment): """A property name segment. Databricks allows for back quoted segments.""" match_grammar = Sequence( OneOf( Delimited( OneOf( Ref("PropertiesNakedIdentifierSegment"), Ref("PropertiesBackTickedIdentifierSegment"), ), delimiter=Ref("DotSegment"), allow_gaps=False, ), Ref("SingleIdentifierGrammar"), ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE or ALTER TABLE. 
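
    For example, as added via `ALTER TABLE` (illustrative; identifiers are
    placeholders):

    .. code-block:: sql

        ALTER TABLE orders ADD CONSTRAINT orders_pk PRIMARY KEY (id);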
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-constraint.html """ match_grammar = Sequence( "CONSTRAINT", OneOf( Sequence( Ref("ObjectReferenceSegment", optional=True), Ref("PrimaryKeyGrammar"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref.keyword("TIMESERIES", optional=True), ), ), Ref("ConstraintOptionGrammar", optional=True), ), Sequence( Ref("ObjectReferenceSegment", optional=True), Indent, Ref("ForeignKeyGrammar"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), "REFERENCES", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ForeignKeyOptionGrammar"), Ref("ConstraintOptionGrammar"), optional=True, ), Dedent, ), Sequence( Ref("ObjectReferenceSegment"), "CHECK", Bracketed(Ref("ExpressionSegment")), Ref.keyword("ENFORCED", optional=True), ), ), ) class AlterTableStatementSegment(sparksql.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-table.html """ match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Indent, OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence( "ADD", OneOf("COLUMNS", "COLUMN"), Indent, Bracketed( Delimited( Sequence( Ref("ColumnFieldDefinitionSegment"), Ref("ColumnDefaultGrammar", optional=True), Ref("CommentGrammar", optional=True), Ref("FirstOrAfterGrammar", optional=True), Ref("MaskStatementSegment", optional=True), ), ), ), Dedent, ), Sequence( OneOf("ALTER", "CHANGE"), Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Ref("CommentGrammar"), Ref("FirstOrAfterGrammar"), Sequence( OneOf("SET", "DROP"), "NOT", "NULL", ), Sequence( "TYPE", Ref("DatatypeSegment"), ), Sequence( "SET", Ref("ColumnDefaultGrammar"), ), Sequence( "DROP", "DEFAULT", ), Sequence( "SYNC", "IDENTITY", ), Sequence( "SET", Ref("MaskStatementSegment"), ), Sequence( "DROP", "MASK", ), Ref("SetTagsGrammar"), Ref("UnsetTagsGrammar"), ), ), Sequence( "DROP", OneOf("COLUMN", "COLUMNS", optional=True), Ref("IfExistsGrammar", optional=True), OptionallyBracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "ADD", Ref("TableConstraintSegment"), ), Ref("DropConstraintGrammar"), Sequence( "DROP", "FEATURE", Ref("ObjectReferenceSegment"), Sequence( "TRUNCATE", "HISTORY", optional=True, ), ), Sequence( "ADD", Ref("IfNotExistsGrammar", optional=True), AnyNumberOf(Ref("AlterPartitionGrammar")), ), Sequence( "DROP", Ref("IfExistsGrammar", optional=True), AnyNumberOf(Ref("AlterPartitionGrammar")), ), Sequence( Ref("AlterPartitionGrammar"), "SET", Ref("LocationGrammar"), ), Sequence( Ref("AlterPartitionGrammar"), "RENAME", "TO", Ref("AlterPartitionGrammar"), ), Sequence( "RECOVER", "PARTITIONS", ), Sequence( "SET", Ref("RowFilterClauseGrammar"), ), Sequence( "DROP", "ROW", "FILTER", ), Sequence( "SET", Ref("TablePropertiesGrammar"), ), Ref("UnsetTablePropertiesGrammar"), Sequence( "SET", "SERDE", Ref("QuotedLiteralSegment"), Sequence( "WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True, ), ), Sequence( "SET", Ref("LocationGrammar"), ), Ref("SetOwnerGrammar"), Sequence( Sequence( "ALTER", "COLUMN", Ref("ColumnReferenceSegment"), optional=True, ), Ref("SetTagsGrammar"), ), Sequence( Sequence( "ALTER", "COLUMN", Ref("ColumnReferenceSegment"), optional=True, ), Ref("UnsetTagsGrammar"), ), 
Ref("ClusterByClauseSegment"), Ref("PredictiveOptimizationGrammar"), ), Dedent, ) class AlterViewStatementSegment(sparksql.AlterViewStatementSegment): """An `ALTER VIEW` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-view.html """ match_grammar = Sequence( "ALTER", Ref.keyword("MATERIALIZED", optional=True), "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence( "SET", Ref("TablePropertiesGrammar"), ), Ref("UnsetTablePropertiesGrammar"), Sequence( "AS", Ref("SelectStatementSegment"), ), Sequence( "WITH", "SCHEMA", OneOf( "BINDING", "COMPENSATION", Sequence( Ref.keyword("TYPE", optional=True), "EVOLUTION", ), ), ), Ref("SetOwnerGrammar"), Ref("SetTagsGrammar"), Ref("UnsetTagsGrammar"), Sequence( Indent, OneOf( Sequence( OneOf("ADD", "ALTER"), "SCHEDULE", Ref.keyword("REFRESH", optional=True), "CRON", Ref("QuotedLiteralSegment"), Sequence( "AT", "TIME", "ZONE", Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence( "DROP", "SCHEDULE", ), ), Dedent, ), ), ) class SetTimeZoneStatementSegment(BaseSegment): """A `SET TIME ZONE` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html """ type = "set_timezone_statement" match_grammar = Sequence( "SET", "TIME", "ZONE", OneOf("LOCAL", Ref("QuotedLiteralSegment"), Ref("IntervalExpressionSegment")), ) class OptimizeTableStatementSegment(BaseSegment): """An `OPTIMIZE` statement. https://docs.databricks.com/en/sql/language-manual/delta-optimize.html """ type = "optimize_table_statement" match_grammar = Sequence( "OPTIMIZE", Ref("TableReferenceSegment"), Sequence( "WHERE", Ref("ExpressionSegment"), optional=True, ), Sequence( "ZORDER", "BY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), ) class StatementSegment(sparksql.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = sparksql.StatementSegment.match_grammar.copy( # Segments defined in Databricks SQL dialect insert=[ # Unity Catalog Ref("AlterCatalogStatementSegment"), Ref("CreateCatalogStatementSegment"), Ref("DropCatalogStatementSegment"), Ref("UseCatalogStatementSegment"), Ref("AlterVolumeStatementSegment"), Ref("CreateVolumeStatementSegment"), Ref("DropVolumeStatementSegment"), Ref("CreateDatabaseStatementSegment"), Ref("SetTimeZoneStatementSegment"), Ref("OptimizeTableStatementSegment"), Ref("CreateDatabricksFunctionStatementSegment"), Ref("FunctionParameterListGrammarWithComments"), Ref("DeclareOrReplaceVariableStatementSegment"), Ref("CommentOnStatementSegment"), # Notebook grammar Ref("MagicCellStatementSegment"), ] ) class FunctionParameterListGrammarWithComments(BaseSegment): """The parameters for a function, i.e. 
`(column type COMMENT 'comment')`.""" type = "function_parameter_list_with_comments" match_grammar: Matchable = Bracketed( Delimited( Sequence( Ref("FunctionParameterGrammar"), AnyNumberOf( Sequence("DEFAULT", Ref("LiteralGrammar"), optional=True), Ref("CommentClauseSegment", optional=True), ), ), optional=True, ), ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement.""" match_grammar = Sequence( AnyNumberOf( Sequence( "LANGUAGE", OneOf(Ref.keyword("SQL"), Ref.keyword("PYTHON")), optional=True, ), Sequence( OneOf("DETERMINISTIC", Sequence("NOT", "DETERMINISTIC")), optional=True, ), Ref("CommentClauseSegment", optional=True), Sequence( OneOf(Sequence("CONTAINS", "SQL"), Sequence("READS", "SQL", "DATA")), optional=True, ), Sequence( OneOf( Sequence( "AS", OneOf( Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Ref("DollarQuotedUDFBody"), Bracketed( OneOf( Ref("ExpressionSegment"), Ref("SelectStatementSegment"), ) ), ), ), Sequence( "RETURN", OneOf( Ref("ExpressionSegment"), Ref("SelectStatementSegment"), Ref("WithCompoundStatementSegment"), ), ), ) ), ) ) class CreateDatabricksFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-sql-function.html """ type = "create_sql_function_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammarWithComments"), Sequence( "RETURNS", OneOf( Ref("DatatypeSegment"), Sequence( "TABLE", Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), ), ), optional=True, ), Ref("FunctionDefinitionGrammar"), ) class NamedArgumentSegment(BaseSegment): """Named argument to a function. https://docs.databricks.com/en/sql/language-manual/sql-ref-function-invocation.html#named-parameter-invocation """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class AliasExpressionSegment(sparksql.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. Note also that it's possible to specify just column aliases without aliasing the table as well: .. code-block:: sql SELECT * FROM VALUES (1,2) as t (a, b); SELECT * FROM VALUES (1,2) as (a, b); SELECT * FROM VALUES (1,2) as t; Note that in Spark SQL, identifiers are quoted using backticks (`my_table`) rather than double quotes ("my_table"). Quoted identifiers are allowed in aliases, but unlike ANSI which allows single quoted identifiers ('my_table') in aliases, this is not allowed in Spark and so the definition of this segment must depart from ANSI. """ match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( # maybe table alias and column aliases Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed(Ref("SingleIdentifierListSegment")), ), # just a table alias Ref("SingleIdentifierGrammar"), exclude=OneOf( "LATERAL", Ref("JoinTypeKeywords"), "WINDOW", "PIVOT", "KEYS", "FROM", "FOR", ), ), Dedent, ) class GroupByClauseSegment(sparksql.GroupByClauseSegment): """Enhance `GROUP BY` clause like in `SELECT` for `CUBE`, `ROLLUP`, and `ALL`. 
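    For example (an illustrative sketch; the table and column names are
    hypothetical):

    .. code-block:: sql

        SELECT brand, segment, SUM(quantity) FROM sales GROUP BY ALL;

        SELECT brand, segment, SUM(quantity) FROM sales
        GROUP BY CUBE (brand, segment);
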
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-qry-select-groupby.html """ match_grammar = Sequence( "GROUP", "BY", Indent, OneOf( "ALL", Delimited( Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), Sequence( Delimited( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), OneOf( Ref("WithCubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), ), ), ), Dedent, ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column constraint, e.g. for CREATE TABLE or ALTER TABLE. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-constraint.html """ match_grammar = Sequence( Ref("NotNullGrammar", optional=True), Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("ConstraintOptionGrammar", optional=True), ), Sequence( Ref("ForeignKeyGrammar", optional=True), "REFERENCES", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ForeignKeyOptionGrammar"), Ref("ConstraintOptionGrammar"), optional=True, ), ), ), optional=True, ), ) class CreateTableUsingStatementSegment(sparksql.CreateTableStatementSegment): """A `CREATE TABLE [USING]` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-using.html """ type = "create_table_using_statement" match_grammar = Sequence( OneOf( Sequence( Sequence( "CREATE", "OR", optional=True, ), "REPLACE", "TABLE", ), Sequence( "CREATE", Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), ), ), Ref("TableReferenceSegment"), Ref("TableSpecificationSegment", optional=True), Sequence( "USING", Ref("DataSourceSegment"), optional=True, ), AnyNumberOf(Ref("TableClausesSegment")), Sequence( "AS", OneOf( Ref("SelectStatementSegment"), Ref("ValuesClauseSegment"), ), optional=True, ), ) class TableSpecificationSegment(BaseSegment): """A table specification, e.g. for CREATE TABLE or ALTER TABLE. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-spec.html """ type = "table_specification_segment" match_grammar = Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( Ref("ColumnPropertiesSegment"), ), ), ), ) class ColumnPropertiesSegment(BaseSegment): """Properties for a column in a table specification. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-spec.html """ type = "column_properties_segment" match_grammar = OneOf( Ref("NotNullGrammar"), Ref("GeneratedColumnDefinitionSegment"), Sequence( "DEFAULT", Ref("ColumnConstraintDefaultGrammar"), ), Ref("CommentGrammar"), Ref("ColumnConstraintSegment"), Ref("MaskStatementSegment"), ) class TableClausesSegment(BaseSegment): """Clauses for a table specification. 
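    For example, the clauses matched here appear after the column list of
    ``CREATE TABLE`` (an illustrative sketch; the names and property values
    are hypothetical):

    .. code-block:: sql

        CREATE TABLE events (id INT, ts TIMESTAMP)
        USING DELTA
        PARTITIONED BY (ts)
        COMMENT 'Raw events'
        TBLPROPERTIES ('delta.appendOnly' = 'true');
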
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-spec.html """ type = "table_clauses_segment" match_grammar = OneOf( Ref("PartitionClauseSegment"), Ref("ClusterByClauseSegment"), Ref("LocationWithCredentialGrammar"), Ref("OptionsGrammar"), Ref("CommentGrammar"), Ref("TablePropertiesGrammar"), Sequence( "WITH", Ref("RowFilterClauseGrammar"), ), ) class GeneratedColumnDefinitionSegment(sparksql.GeneratedColumnDefinitionSegment): """A generated column definition, e.g. for CREATE TABLE or ALTER TABLE. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-table-using.html """ match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like DECIMAL(3, 2) OneOf( Sequence( "GENERATED", "ALWAYS", "AS", Bracketed( OneOf( Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), ), Sequence( "GENERATED", OneOf( "ALWAYS", Sequence("BY", "DEFAULT"), ), "AS", "IDENTITY", Bracketed( Sequence( Sequence( "START", "WITH", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "INCREMENT", "BY", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ), ), ), ) class DeclareOrReplaceVariableStatementSegment(BaseSegment): """A `DECLARE [OR REPLACE] VARIABLE` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-declare-variable.html """ type = "declare_or_replace_variable_statement" match_grammar = Sequence( Ref.keyword("DECLARE"), Ref("OrReplaceGrammar", optional=True), Ref.keyword("VARIABLE", optional=True), Ref("SingleIdentifierGrammar"), # Variable name Ref("DatatypeSegment", optional=True), # Variable type Sequence( OneOf("DEFAULT", Ref("EqualsSegment")), Ref("ExpressionSegment"), optional=True, ), ) class CommentOnStatementSegment(BaseSegment): """`COMMENT ON` statement. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-comment.html """ type = "comment_clause" match_grammar = Sequence( "COMMENT", "ON", OneOf( Sequence( "CATALOG", Ref("CatalogReferenceSegment"), ), Sequence( OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), ), Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "VOLUME", Ref("VolumeReferenceSegment"), ), # TODO: Split out individual items if they have references Sequence( OneOf( "CONNECTION", "PROVIDER", "RECIPIENT", "SHARE", ), Ref("ObjectReferenceSegment"), ), ), "IS", OneOf(Ref("QuotedLiteralSegment"), "NULL"), ) class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema.""" type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name Ref("FunctionNameIdentifierSegment", terminators=[Ref("BracketedSegment")]), allow_gaps=False, ) class MagicCellStatementSegment(BaseSegment): """Treat -- MAGIC %md/py/sh/... Cells as their own segments. N.B. This is a workaround to make Databricks notebooks with leading magic cells parsable by sqlfluff. 
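    For example, an exported Databricks notebook may start with cells such as
    the following (an illustrative sketch):

    .. code-block:: sql

        -- Databricks notebook source
        -- MAGIC %md
        -- MAGIC # Title of the notebook
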
https://learn.microsoft.com/en-us/azure/databricks/notebooks/notebooks-code#language-magic """ type = "magic_cell_segment" match_grammar = Sequence( Ref("NotebookStart", optional=True), OneOf( Sequence( Ref("MagicStartGrammar", optional=True), AnyNumberOf(Ref("MagicLineGrammar"), optional=True), ), Ref("MagicSingleLineGrammar", optional=True), ), terminators=[Ref("CommandCellSegment", optional=True)], reset_terminators=True, ) class SetVariableStatementSegment(BaseSegment): """A `SET VARIABLE` statement used to set session variables. https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-aux-set-variable.html """ type = "set_variable_statement" # set var v1=val, v2=val2; set_kv_pair = Sequence( Delimited( Ref("VariableNameIdentifierSegment"), Ref("EqualsSegment"), OneOf("DEFAULT", OptionallyBracketed(Ref("ExpressionSegment"))), ) ) # set var (v1,v2) = (values(100,200)) set_bracketed = Sequence( Bracketed( Ref("VariableNameIdentifierSegment"), ), Ref("EqualsSegment"), Bracketed( OneOf( Ref("SelectStatementSegment"), Ref("ValuesClauseSegment"), ) ), ) match_grammar = Sequence( "SET", OneOf( "VAR", "VARIABLE", ), OneOf( set_kv_pair, set_bracketed, ), allow_gaps=True, ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_databricks_keywords.py000066400000000000000000000013771503426445100256540ustar00rootroot00000000000000"""A list of databricks reserved keywords. https://docs.databricks.com/sql/language-manual/sql-ref-reserved-words.html """ RESERVED_KEYWORDS = [ "ANTI", "CROSS", "EXCEPT", "FULL", "INNER", "INTERSECT", "JOIN", "LATERAL", "LEFT", "MINUS", "NATURAL", "ON", "RIGHT", "SEMI", "UNION", "USING", ] UNRESERVED_KEYWORDS = [ "CATALOG", "COMPENSATION", "CRON", "ENFORCED", "EVOLUTION", "FEATURE", "IDENTIFIER", "MANAGED", "MASK", "NORELY", "OPTIMIZATION", "OPTIMIZE", "PREDICTIVE", "PRIVATE", "PROVIDER", "PYTHON", "RECIPIENT", "RELY", "SCHEDULE", "SQL", "TAGS", "TIMESERIES", "VOLUME", "VOLUMES", "ZORDER", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_db2.py000066400000000000000000000424721503426445100222660ustar00rootroot00000000000000"""The Db2 dialect. 
https://www.ibm.com/docs/en/i/7.4?topic=overview-db2-i """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_db2_keywords import UNRESERVED_KEYWORDS ansi_dialect = load_raw_dialect("ansi") db2_dialect = ansi_dialect.copy_as( "db2", formatted_name="IBM Db2", docstring="The dialect for IBM `Db2 `_.", ) db2_dialect.sets("reserved_keywords").remove("NATURAL") db2_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) db2_dialect.replace( # Db2 allows # in field names, and doesn't use it as a comment NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_#]*[A-Z#][A-Z0-9_#]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("NamedArgumentSegment"), ), ConditionalCrossJoinKeywordsGrammar=Nothing(), NaturalJoinKeywordsGrammar=Nothing(), UnconditionalCrossJoinKeywordsGrammar=Ref.keyword("CROSS"), PreTableFunctionKeywordsGrammar=OneOf("LATERAL"), PostFunctionGrammar=OneOf( Ref("OverClauseSegment"), Ref("WithinGroupClauseSegment"), ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[Ref.keyword("OFFSET")], ), WhereClauseTerminatorGrammar=ansi_dialect.get_grammar( "WhereClauseTerminatorGrammar" ).copy( insert=[Ref.keyword("OFFSET")], ), GroupByClauseTerminatorGrammar=ansi_dialect.get_grammar( "GroupByClauseTerminatorGrammar" ).copy( insert=[Ref.keyword("OFFSET")], ), HavingClauseTerminatorGrammar=ansi_dialect.get_grammar( "HavingClauseTerminatorGrammar" ).copy( insert=[Ref.keyword("OFFSET")], ), OrderByClauseTerminators=ansi_dialect.get_grammar("OrderByClauseTerminators").copy( insert=[Ref.keyword("OFFSET")], ), Expression_C_Grammar=OneOf( Sequence("EXISTS", Bracketed(Ref("SelectableGrammar"))), # should be first priority, otherwise EXISTS() would be matched as a function Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf(Ref("TimeZoneGrammar")), ), Ref("ShorthandCastSegment"), Ref("LabeledDurationGrammar"), ), BracketedSetExpressionGrammar=Bracketed(Ref("SetExpressionSegment")), AlterTableDropColumnGrammar=Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("SingleIdentifierGrammar"), Ref("DropBehaviorGrammar", optional=True), ), ) db2_dialect.insert_lexer_matchers( [ StringLexer("right_arrow", "=>", CodeSegment), ], before="equals", ) db2_dialect.patch_lexer_matchers( [ # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # In Db2, the only escape character is ' for single quote strings RegexLexer( "single_quote", r"'((?:[^']|'')*)'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^']|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), # In Db2, the escape character is "" for double quote strings RegexLexer( "double_quote", r'"((?:[^"]|"")*)"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": 
[(r'""', '"')], }, ), # In Db2, a field could have a # pound/hash sign RegexLexer("word", r"[0-9a-zA-Z_#]+", WordSegment), ] ) db2_dialect.add( RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), # https://www.ibm.com/docs/en/db2/11.5?topic=expressions-datetime-operations-durations LabeledDurationGrammar=Sequence( OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("Expression_D_Grammar"), ), OneOf( "DAY", "DAYS", "HOUR", "HOURS", "MICROSECOND", "MICROSECONDS", "MINUTE", "MINUTES", "MONTH", "MONTHS", "SECOND", "SECONDS", "YEAR", "YEARS", ), ), # https://www.ibm.com/docs/en/db2/11.5?topic=elements-special-registers SpecialRegisterGrammar=OneOf( "CURRENT_DATE", "CURRENT_PATH", "CURRENT_SCHEMA", "CURRENT_SERVER", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TIMEZONE", "CURRENT_USER", "SESSION_USER", "SYSTEM_USER", "USER", Sequence( "CURRENT", OneOf( "CLIENT_ACCTNG", "CLIENT_APPLNAME", "CLIENT_USERID", "CLIENT_WRKSTNNAME", "DATE", "DBPARTITIONNUM", Sequence("DECFLOAT", "ROUNDING", "MODE"), Sequence("DEFAULT", "TRANSFORM", "GROUP"), "DEGREE", Sequence("EXPLAIN", OneOf("MODE", "SNAPSHOT")), Sequence("FEDERATED", "ASYNCHRONY"), Sequence("IMPLICIT", "XMLPARSE", "OPTION"), "ISOLATION", Sequence("LOCALE", OneOf("LC_MESSAGES", "LC_TIME")), Sequence("LOCK", "TIMEOUT"), Sequence("MAINTAINED", "TABLE", "TYPES", "FOR", "OPTIMIZATION"), Sequence("MDC", "ROLLOUT", "MODE"), "MEMBER", Sequence("OPTIMIZATION", "PROFILE"), Sequence("PACKAGE", "PATH"), "PATH", Sequence("QUERY", "OPTIMIZATION"), Sequence("REFRESH", "AGE"), "SCHEMA", "SERVER", "SQL_CCFLAGS", Sequence("TEMPORAL", OneOf("BUSINESS_TIME", "SYSTEM_TIME")), "TIME", "TIMESTAMP", "TIMEZONE", "USER", ), ), ), XmlIndexSpecificationGrammar=Sequence( "GENERATE", OneOf("KEY", "KEYS"), "USING", "XMLPATTERN", Ref("QuotedLiteralSegment"), # XmlPatternClause Ref("XmlTypeClauseGrammar"), ), XmlTypeClauseGrammar=Sequence( "AS", "SQL", Ref("DatatypeSegment"), Sequence( OneOf("IGNORE", "REJECT"), "INVALID", "VALUES", optional=True, ), ), ) class BareFunctionSegment(BaseSegment): """A function that can be called without parenthesis per ANSI specification. DB2 extends this to include `special registers`. """ type = "bare_function" match_grammar = Ref("SpecialRegisterGrammar") class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://www.ibm.com/docs/en/db2/11.5?topic=statements-call """ type = "call_segment" match_grammar = Sequence( "CALL", OneOf( Ref("FunctionSegment"), # Call without parenthesis Ref("FunctionNameSegment", reset_terminators=True), ), ) class CopyOptionsSegment(BaseSegment): """Copy-options when using like or as for creating a table. https://www.ibm.com/docs/en/db2/11.5?topic=statements-create-table#sdx-synid_frag-copy-options """ type = "copy_options" match_grammar = AnySetOf( Sequence( OneOf("INCLUDING", "EXCLUDING"), Ref.keyword("COLUMN", optional=True), "DEFAULTS", ), Sequence( OneOf("INCLUDING", "EXCLUDING"), "IDENTITY", Sequence( "COLUMN", "ATTRIBUTES", optional=True, ), ), ) class DeclareGlobalTempTableSegment(BaseSegment): """DECLARE GLOBAL TEMPORARY TABLE statement. 
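    For example (an illustrative sketch; the table and column names are
    hypothetical):

    .. code-block:: sql

        DECLARE GLOBAL TEMPORARY TABLE session.temp_orders
            (order_id INTEGER, qty INTEGER)
            ON COMMIT PRESERVE ROWS NOT LOGGED WITH REPLACE;
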
https://www.ibm.com/docs/en/db2/11.5?topic=statements-declare-global-temporary-table """ type = "declare_temp_table" match_grammar = Sequence( "DECLARE", "GLOBAL", "TEMPORARY", "TABLE", Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( Ref("ColumnDefinitionSegment"), ), ) ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithDataClauseSegment"), Ref("CopyOptionsSegment", optional=True), ), # Create like syntax Sequence( "LIKE", Ref("TableReferenceSegment"), Ref("CopyOptionsSegment", optional=True), ), ), AnySetOf( Sequence( "ORGANIZE", "BY", OneOf("ROW", "COLUMN"), ), OneOf( Sequence( "ON", "COMMIT", OneOf("DELETE", "PRESERVE"), "ROWS", ), ), OneOf( "LOGGED", Sequence( "NOT", "LOGGED", Sequence( "ON", "ROLLBACK", OneOf("DELETE", "PRESERVE"), "ROWS", optional=True, ), ), ), Sequence( "WITH", "REPLACE", ), Sequence( "IN", Ref("TablespaceReferenceSegment"), ), Ref("DeclareDistributionClauseSegment"), ), ) class DeclareDistributionClauseSegment(BaseSegment): """Distribution clause in declaring table creation. https://www.ibm.com/docs/en/db2/11.5?topic=statements-declare-global-temporary-table#sdx-synid_frag-distribution-clause """ type = "distribution_clause" match_grammar = Sequence( "DISTRIBUTE", OneOf("BY", "ON"), OneOf( Sequence( Ref.keyword("HASH", optional=True), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), "RANDOM", ), ) class IndexColumnDefinitionSegment(ansi.IndexColumnDefinitionSegment): """A column definition for CREATE INDEX.""" type = "index_column_definition" match_grammar = Sequence( OneOf( Ref("SingleIdentifierGrammar"), # Column name Ref("ExpressionSegment"), # key expression ), OneOf("ASC", "DESC", "RANDOM", optional=True), ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. https://www.ibm.com/docs/en/db2/11.5?topic=statements-create-index """ type = "create_index_statement" match_grammar = Sequence( "CREATE", Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), Sequence("BUSINESS_TIME", "WITHOUT", "OVERLAPS"), ), ) ), AnySetOf( Sequence(Ref.keyword("NOT", optional=True), "PARTITIONED"), Sequence("IN", Ref("TablespaceReferenceSegment")), Sequence("SPECIFICATION", "ONLY"), Sequence( "INCLUDE", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), # Column name Ref("ExpressionSegment"), # key expression ) ), ), OneOf( Ref("XmlIndexSpecificationGrammar"), "CLUSTER", Sequence( "EXTEND", "USING", OptionallyBracketed( Ref("IndexReferenceSegment"), Bracketed(Delimited(Ref("BaseExpressionElementGrammar"))), ), ), ), Sequence("PCTFREE", Ref("NumericLiteralSegment")), Sequence("LEVEL2", "PCTFREE", Ref("NumericLiteralSegment")), Sequence("MINPCTUSED", Ref("NumericLiteralSegment")), Sequence(OneOf("ALLOW", "DISALLOW"), "REVERSE", "SCANS"), Sequence("PAGE", "SPLIT", OneOf("SYMMETRIC", "HIGH", "LOW")), Sequence( "COLLECT", Sequence( OneOf("SAMPLED", "UNSAMPLED", optional=True), "DETAILED", optional=True, ), "STATISTICS", ), Sequence("COMPRESS", OneOf("YES", "NO")), Sequence(OneOf("INCLUDE", "EXCLUDE"), "NULL", "KEYS"), ), ) class NamedArgumentSegment(BaseSegment): """Named argument to a function. 
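    For example (an illustrative sketch; the procedure and parameter names
    are hypothetical):

    .. code-block:: sql

        CALL update_stock(item_id => 101, qty => 5);
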
https://www.ibm.com/docs/en/db2/11.5?topic=statements-call """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class OffsetClauseSegment(BaseSegment): """OFFSET clause in a SELECT statement.""" type = "offset_clause" match_grammar = Sequence( "OFFSET", OneOf( Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), OneOf("ROW", "ROWS"), ) class LimitClauseSegment(BaseSegment): """A `LIMIT` clause like in `SELECT`.""" type = "limit_clause" match_grammar = OneOf( Sequence( "LIMIT", Indent, OptionallyBracketed( OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), "ALL", ) ), OneOf( Sequence( "OFFSET", OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), ), ), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), optional=True, ), Dedent, ), Sequence( Ref("OffsetClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), ), ) class WithinGroupClauseSegment(BaseSegment): """A WITHIN GROUP clause for window functions.""" type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed( Ref("OrderByClauseSegment", optional=True), parse_mode=ParseMode.GREEDY ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CallStoredProcedureSegment"), Ref("DeclareGlobalTempTableSegment"), ] ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause like in `INSERT` or as a standalone statement. https://www.ibm.com/docs/en/db2/11.5?topic=queries-fullselect#sdx-synid_frag-values-clause """ type = "values_clause" match_grammar = Sequence( "VALUES", Delimited( Bracketed( Delimited( "DEFAULT", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), "DEFAULT", Ref("ExpressionSegment"), ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_db2_keywords.py000066400000000000000000000153741503426445100242140ustar00rootroot00000000000000"""A list of db2 keywords.""" UNRESERVED_KEYWORDS = [ # https://www.ibm.com/docs/en/db2/11.5?topic=sql-reserved-schema-names-reserved-words "ACTIVATE", "ADD", "AFTER", "AGE", "ALIAS", "ALL", "ALLOCATE", "ALLOW", "ALTER", "AND", "ANY", "AS", "ASENSITIVE", "ASSOCIATE", "ASUTIME", "ASYNCHRONY", "AT", "ATTRIBUTES", "AUDIT", "AUTHORIZATION", "AUX", "AUXILIARY", "BEFORE", "BEGIN", "BETWEEN", "BINARY", "BUFFERPOOL", "BUSINESS_TIME", "BY", "CACHE", "CALL", "CALLED", "CAPTURE", "CARDINALITY", "CASCADED", "CASE", "CAST", "CCSID", "CHAR", "CHARACTER", "CHECK", "CLIENT_ACCTNG", "CLIENT_APPLNAME", "CLIENT_USERID", "CLIENT_WRKSTNNAME", "CLONE", "CLOSE", "CLUSTER", "COLLECTION", "COLLID", "COLUMN", "COMMENT", "COMMIT", "CONCAT", "CONDITION", "CONNECT", "CONNECTION", "CONSTRAINT", "CONTAINS", "CONTINUE", "COUNT", "COUNT_BIG", "CREATE", "CROSS", "CURRENT", "CURRENT_DATE", "CURRENT_LC_CTYPE", "CURRENT_PATH", "CURRENT_SCHEMA", "CURRENT_SERVER", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TIMEZONE", "CURRENT_USER", "CURSOR", "CYCLE", "DATA", "DATABASE", "DATAPARTITIONNAME", "DATAPARTITIONNUM", "DATE", "DAY", "DAYS", "DB2GENERAL", "DB2GENRL", "DB2SQL", "DBINFO", "DBPARTITIONNAME", "DBPARTITIONNUM", "DEALLOCATE", "DECFLOAT", "DECLARE", "DEFAULT", "DEFAULTS", "DEFINITION", "DELETE", "DENSERANK", "DENSE_RANK", "DESCRIBE", 
"DESCRIPTOR", "DETAILED", "DETERMINISTIC", "DIAGNOSTICS", "DISABLE", "DISALLOW", "DISCONNECT", "DISTINCT", "DISTRIBUTE", "DO", "DOCUMENT", "DOUBLE", "DROP", "DSSIZE", "DYNAMIC", "EACH", "EDITPROC", "ELSE", "ELSEIF", "ENABLE", "ENCODING", "ENCRYPTION", "END", "END-EXEC", "ENDING", "ERASE", "ESCAPE", "EVERY", "EXCEPT", "EXCEPTION", "EXCLUDING", "EXCLUSIVE", "EXECUTE", "EXISTS", "EXIT", "EXPLAIN", "EXTEND", "EXTENDED", "EXTERNAL", "EXTRACT", "FEDERATED", "FENCED", "FETCH", "FIELDPROC", "FILE", "FINAL", "FIRST", "FOR", "FOREIGN", "FREE", "FROM", "FULL", "FUNCTION", "GENERAL", "GENERATE", "GENERATED", "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GRAPHIC", "GROUP", "HANDLER", "HASH", "HASHED_VALUE", "HAVING", "HIGH", "HINT", "HOLD", "HOUR", "HOURS", "IDENTITY", "IF", "IMMEDIATE", "IMPORT", "IN", "INCLUDING", "INCLUSIVE", "INCREMENT", "INDEX", "INDICATOR", "INDICATORS", "INF", "INFINITY", "INHERIT", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INTEGRITY", "INTERSECT", "INTO", "IS", "ISNULL", "ISOBID", "ISOLATION", "ITERATE", "JAR", "JAVA", "JOIN", "KEEP", "KEY", "LABEL", "LANGUAGE", "LAST", "LATERAL", "LC_CTYPE", "LC_MESSAGES", "LC_TIME", "LEAVE", "LEFT", "LEVEL2", "LIKE", "LIMIT", "LINKTYPE", "LOCAL", "LOCALDATE", "LOCALE", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "LOCATORS", "LOCK", "LOCKMAX", "LOCKSIZE", "LOGGED", "LONG", "LOOP", "LOW", "MAINTAINED", "MATERIALIZED", "MAXVALUE", "MDC", "MICROSECOND", "MICROSECONDS", "MINPCTUSED", "MINUS", "MINUTE", "MINUTES", "MINVALUE", "MODE", "MODIFIES", "MONTH", "MONTHS", "NAN", "NEW", "NEW_TABLE", "NEXTVAL", "NO", "NOCACHE", "NOCYCLE", "NODENAME", "NODENUMBER", "NOMAXVALUE", "NOMINVALUE", "NONE", "NOORDER", "NORMALIZED", "NOT", "NOTNULL", "NULL", "NULLS", "NUMPARTS", "OBID", "OF", "OFF", "OFFSET", "OLD", "OLD_TABLE", "ON", "OPEN", "OPTIMIZATION", "OPTIMIZE", "OPTION", "OR", "ORDER", "ORGANIZE", "OUT", "OUTER", "OVER", "OVERRIDING", "PACKAGE", "PADDED", "PAGE", "PAGESIZE", "PARAMETER", "PART", "PARTITION", "PARTITIONED", "PARTITIONING", "PARTITIONS", "PASSWORD", "PATH", "PERCENT", "PIECESIZE", "PLAN", "POSITION", "PRECISION", "PREPARE", "PREVVAL", "PRIMARY", "PRIQTY", "PRIVILEGES", "PROCEDURE", "PROFILE", "PROGRAM", "PSID", "PUBLIC", "QUERY", "QUERYNO", "RANDOM", "RANGE", "RANK", "READ", "READS", "RECOVERY", "REFERENCES", "REFERENCING", "REFRESH", "REJECT", "RELEASE", "RENAME", "REPEAT", "RESET", "RESIGNAL", "RESTART", "RESTRICT", "RESULT", "RESULT_SET_LOCATOR", "RETURN", "RETURNS", "REVERSE", "REVOKE", "RIGHT", "ROLE", "ROLLBACK", "ROLLOUT", "ROUND_CEILING", "ROUND_DOWN", "ROUND_FLOOR", "ROUND_HALF_DOWN", "ROUND_HALF_EVEN", "ROUND_HALF_UP", "ROUND_UP", "ROUNDING", "ROUTINE", "ROW", "ROWNUMBER", "ROWS", "ROWSET", "ROW_NUMBER", "RRN", "RUN", "SAVEPOINT", "SAMPLED", "SCANS", "SCHEMA", "SCRATCHPAD", "SCROLL", "SEARCH", "SECOND", "SECONDS", "SECQTY", "SECURITY", "SELECT", "SENSITIVE", "SEQUENCE", "SESSION", "SESSION_USER", "SET", "SIGNAL", "SIMPLE", "SNAN", "SNAPSHOT", "SOME", "SOURCE", "SPECIFIC", "SPECIFICATION", "SPLIT", "SQL", "SQL_CCFLAGS", "SQLID", "STACKED", "STANDARD", "START", "STARTING", "STATEMENT", "STATIC", "STATMENT", "STAY", "STOGROUP", "STORES", "STYLE", "SUBSTRING", "SUMMARY", "SYNONYM", "SYSFUN", "SYSIBM", "SYSPROC", "SYSTEM", "SYSTEM_TIME", "SYSTEM_USER", "TABLE", "TABLESPACE", "TEMPORAL", "THEN", "TIME", "TIMEOUT", "TIMESTAMP", "TIMEZONE", "TO", "TRANSACTION", "TRIGGER", "TRIM", "TRUNCATE", "TYPE", "TYPES", "UNDO", "UNION", "UNIQUE", "UNTIL", "UNSAMPLED", "UPDATE", "USAGE", "USER", "USING", "VALIDPROC", "VALUE", "VALUES", "VARIABLE", "VARIANT", 
"VCAT", "VERSION", "VIEW", "VOLATILE", "VOLUMES", "WHEN", "WHENEVER", "WHERE", "WHILE", "WITH", "WITHOUT", "WLM", "WRITE", "XMLELEMENT", "XMLEXISTS", "XMLNAMESPACES", "XMLPARSE", "XMLPATTERN", "YEAR", "YEARS", "YES", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_doris.py000066400000000000000000000335311503426445100227330ustar00rootroot00000000000000"""The Apache Doris dialect. This dialect extends MySQL grammar with specific Apache Doris syntax features. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, Bracketed, CodeSegment, Delimited, Matchable, MultiStringParser, OneOf, OptionallyBracketed, Ref, SegmentGenerator, Sequence, ) from sqlfluff.dialects import dialect_mysql as mysql from sqlfluff.dialects.dialect_doris_keywords import ( doris_reserved_keywords, doris_unreserved_keywords, ) mysql_dialect = load_raw_dialect("mysql") doris_dialect = mysql_dialect.copy_as( "doris", formatted_name="Apache Doris", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, ``"``, Identifiers: |back_quotes|. The dialect for `Apache Doris `_.""", ) doris_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", doris_unreserved_keywords ) doris_dialect.sets("reserved_keywords").clear() doris_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", doris_reserved_keywords ) # Add the engine types set for Doris doris_dialect.sets("engine_types").update( ["olap", "mysql", "elasticsearch", "hive", "hudi", "iceberg", "jdbc", "broker"] ) doris_dialect.add( EngineTypeSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("engine_types"), CodeSegment, type="engine_type", ) ), ) class ColumnDefinitionSegment(mysql.ColumnDefinitionSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE. Doris-specific version that supports aggregation functions like MAX, MIN, REPLACE, SUM. """ match_grammar = mysql.ColumnDefinitionSegment.match_grammar.copy( insert=[ OneOf( "MAX", "MIN", "REPLACE", "SUM", "BITMAP_UNION", "HLL_UNION", "QUANTILE_UNION", optional=True, ), ] ) class CreateTableStatementSegment(mysql.CreateTableStatementSegment): """A `CREATE TABLE` statement. Doris-specific version that handles: - Different ENGINE types - Key types (DUPLICATE, AGGREGATE, UNIQUE) - Cluster by clause - Specific partition syntax - Distribution syntax - Rollup definitions - Specific table properties - CREATE TABLE ... AS SELECT (CTAS) - CREATE TABLE ... 
LIKE """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("TEMPORARY", optional=True), Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Standard column definitions Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("IndexDefinitionSegment"), ), ) ), # Doris specific Sequence( "ENGINE", Ref("EqualsSegment"), Ref("EngineTypeSegment"), optional=True, ), # Key type Sequence( OneOf( Sequence("DUPLICATE", "KEY"), Sequence("AGGREGATE", "KEY"), Sequence("UNIQUE", "KEY"), ), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), # Cluster by clause Sequence( "CLUSTER", "BY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), optional=True, ), Ref("CommentClauseSegment", optional=True), # Partitioning Ref("PartitionSegment", optional=True), # Distribution Ref("DistributionSegment", optional=True), # Rollup definitions Ref("RollupSegment", optional=True), # Properties Sequence( "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ) ), optional=True, ), ), # Create table like Sequence("LIKE", Ref("TableReferenceSegment")), # Create table as select (CTAS) Sequence( # Optional ENGINE clause Sequence( "ENGINE", Ref("EqualsSegment"), Ref("EngineTypeSegment"), optional=True, ), # Optional COMMENT clause Ref("CommentClauseSegment", optional=True), # Optional properties before AS SELECT Sequence( "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ) ), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), ), ) class ColumnConstraintSegment(mysql.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar: Matchable = OneOf( mysql.ColumnConstraintSegment.match_grammar, Sequence("AS", Ref("ExpressionSegment")), Sequence("GENERATED", "ALWAYS", "AS", Bracketed(Ref("ExpressionSegment"))), ) class PartitionSegment(BaseSegment): """A partition segment supporting Doris specific syntax. Supports: 1. Auto partitioning (AUTO PARTITION BY RANGE) 2. Manual range partitioning (PARTITION BY RANGE) 3. 
List partitioning (PARTITION BY LIST) """ type = "partition_segment" match_grammar = OneOf( # Auto partitioning Sequence( "AUTO", "PARTITION", "BY", "RANGE", Bracketed(Ref("FunctionSegment")), Bracketed(), # Empty partition list for auto partitioning ), # Manual partitioning Sequence( "PARTITION", "BY", OneOf( # Manual range partitioning Sequence( "RANGE", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Bracketed( Delimited( OneOf( Ref("RangePartitionDefinitionSegment"), Ref("RangePartitionIntervalSegment"), ) ) ), ), # List partitioning Sequence( "LIST", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Bracketed(Delimited(Ref("ListPartitionDefinitionSegment"))), ), ), ), ) class RangePartitionDefinitionSegment(BaseSegment): """Range partition definition with VALUES LESS THAN or VALUES range.""" type = "range_partition_definition" match_grammar = Sequence( "PARTITION", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "VALUES", OneOf( Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed(Delimited(Ref("LiteralGrammar"))), ), ), Sequence( Bracketed( Bracketed(Delimited(Ref("LiteralGrammar"))), ",", Bracketed(Delimited(Ref("LiteralGrammar"))), ) ), ), # Partition properties Sequence( Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ) ), optional=True, ), ) class RangePartitionIntervalSegment(BaseSegment): """Range partition definition with FROM TO INTERVAL syntax.""" type = "range_partition_interval" match_grammar = Sequence( "FROM", Bracketed(Ref("QuotedLiteralSegment")), "TO", Bracketed(Ref("QuotedLiteralSegment")), "INTERVAL", Ref("NumericLiteralSegment"), OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), ) class ListPartitionDefinitionSegment(BaseSegment): """List partition definition with VALUES IN syntax.""" type = "list_partition_definition" match_grammar = Sequence( "PARTITION", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "VALUES", "IN", Bracketed( Delimited( OneOf( Bracketed(Delimited(Ref("LiteralGrammar"))), Ref("LiteralGrammar"), ) ) ), ) class DistributionSegment(BaseSegment): """A distribution segment supporting both hash and random distribution.""" type = "distribution_segment" match_grammar = Sequence( "DISTRIBUTED", "BY", OneOf( Sequence( "HASH", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence( "BUCKETS", OneOf( Ref("NumericLiteralSegment"), "AUTO", ), optional=True, ), ), Sequence( "RANDOM", Sequence( "BUCKETS", OneOf( Ref("NumericLiteralSegment"), "AUTO", ), optional=True, ), ), ), ) class RollupSegment(BaseSegment): """Rollup definition for Doris tables.""" type = "rollup_segment" match_grammar = Sequence( "ROLLUP", Bracketed( Delimited( Sequence( Ref("ObjectReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence( "DUPLICATE", "KEY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), ) ) ), ) class IndexDefinitionSegment(BaseSegment): """Index definition specific to Doris.""" type = "index_definition" match_grammar = Sequence( "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence("USING", OneOf("INVERTED", "BITMAP", "BLOOM_FILTER"), optional=True), Sequence( "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ) ), optional=True, ), Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ) class 
DropTableStatementSegment(BaseSegment): """A `DROP TABLE` statement. Doris-specific version that supports: - IF EXISTS clause - Database-qualified table names - FORCE option """ type = "drop_table_statement" match_grammar = Sequence( "DROP", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Single table reference, not delimited Sequence("FORCE", optional=True), # Optional FORCE keyword ) class InsertStatementSegment(BaseSegment): """A `INSERT` statement. Doris-specific version that supports: - PARTITION clause - WITH LABEL clause - DEFAULT values """ type = "insert_statement" match_grammar = Sequence( "INSERT", Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), # Optional PARTITION clause Sequence( "PARTITION", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), optional=True, ), # Optional WITH LABEL clause Sequence( "WITH", "LABEL", Ref("SingleIdentifierGrammar"), optional=True, ), # Optional column list Ref("BracketedColumnReferenceListGrammar", optional=True), # VALUES or SELECT OneOf( Ref("ValuesClauseSegment"), Ref("SelectableGrammar"), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_doris_keywords.py000066400000000000000000000106271503426445100246630ustar00rootroot00000000000000"""A List of Apache Doris SQL keywords. See: https://github.com/apache/doris/blob/master/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 https://github.com/apache/doris/blob/master/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 """ doris_reserved_keywords = """ ACCOUNT_LOCK ACCOUNT_UNLOCK ADD ADMIN ALL ALTER ANALYZE ANALYZER AND ANTI APPEND AS ASC AUTO BACKEND BETWEEN BIGINT BINARY BINLOG BY CANCEL CASE CAST CLEAN COLLATE COLUMN CONSTRAINT CREATE CROSS CUBE CURRENT DATABASE DATABASES DECOMMISSION DEFAULT DELETE DESC DESCRIBE DISK DISTINCT DISTRIBUTED DISTRIBUTION DIV DOUBLE DROP DROPP DUMP DUPLICATE ELSE ENTER EXCEPT EXECUTE EXISTS EXPLAIN EXPORT EXTENDED EXTRACT FALSE FLOAT FOLLOWER FOLLOWING FOR FORCE FOREIGN FROM FRONTEND FULL FUNCTIONS GRANT GRANTS GROUP HAVING HLL IF IN INDEX INFILE INNER INSERT INSTALL INT INTEGER INTERMEDIATE INTERSECT INTERVAL INTO IS JOIN KEY KEYS KILL LARGEINT LATERAL LAYOUT LEFT LIKE LIMIT LIST LOAD LOW_PRIORITY MATCH MAXVALUE MINUS NATURAL NOT NO_USE_MV NULL OBSERVER ON OR ORDER OUTER OUTFILE OVER OVERWRITE PARTITION PLACEHOLDER PLAY PRECEDING PREPARE PRIMARY PROCEDURE RANGE READ REAL REBALANCE REFERENCES REGEXP RELEASE RENAME REPAIR REPLICA REVOKE RIGHT ROLE ROLES ROW ROWS SCHEMAS SELECT SEMI SET SETS SHOW SIGNED SMALLINT SQL_BLOCK_RULE SUPERUSER SWITCH SYNC SYSTEM TABLE TABLESAMPLE TABLET TABLETS TERMINATED THEN TINYINT TO TOKENIZER TOKEN_FILTER TRASH TRIM TRUE TYPECAST UNBOUNDED UNINSTALL UNION UNIQUE UNSIGNED UPDATE USE USE_MV USING VALUES WHEN WHERE WHITELIST WITH WORKLOAD WRITE XOR """ doris_unreserved_keywords = """ ACTIONS AFTER AGGREGATE AGG_STATE ALIAS ALWAYS ANALYZED ARRAY AT AUTHORS AUTO_INCREMENT BACKENDS BACKUP BEGIN BELONG BIN BITAND BITMAP BITMAP_EMPTY BITMAP_UNION BITOR BITXOR BLOB BOOLEAN BRANCH BRIEF BROKER BUCKETS BUILD BUILTIN BULK CACHE CACHED CALL CATALOG CATALOGS CHAIN CHAR CHARSET CHECK CLUSTER CLUSTERS COLLATION COLLECT COLOCATE COLUMNS COMMENT COMMENT_START COMMIT COMMITTED COMPACT COMPLETE COMPRESS_TYPE COMPUTE CONDITIONS CONFIG CONNECTION CONNECTION_ID CONSISTENT CONSTRAINTS CONVERT CONVERT_LSC COPY COUNT CREATION CRON CURRENT_CATALOG CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DATA DATE DATETIME DATETIMEV1 DATETIMEV2 DATEV1 DATEV2 DAY DAYS DECIMAL 
DECIMALV2 DECIMALV3 DEFERRED DEMAND DIAGNOSE DIAGNOSIS DICTIONARIES DICTIONARY DISTINCTPC DISTINCTPCSA DO DORIS_INTERNAL_TABLE_ID DUAL DYNAMIC E ENABLE ENCRYPTKEY ENCRYPTKEYS END ENDS ENGINE ENGINES ERRORS EVENTS EVERY EXCLUDE EXPIRED EXTERNAL FAILED_LOGIN_ATTEMPTS FAST FEATURE FIELDS FILE FILTER FIRST FORMAT FREE FRONTENDS FUNCTION GENERATED GENERIC GLOBAL GRAPH GROUPING GROUPS HASH HASH_MAP HDFS HELP HINT_END HINT_START HISTOGRAM HLL_UNION HOSTNAME HOTSPOT HOUR HOURS HUB IDENTIFIED IGNORE IMMEDIATE INCREMENTAL INDEXES INVERTED IPV4 IPV6 IP_TRIE ISNULL ISOLATION IS_NOT_NULL_PRED IS_NULL_PRED JOB JOBS JSON JSONB LABEL LAST LDAP LDAP_ADMIN_PASSWORD LEFT_BRACE LESS LEVEL LINES LINK LOCAL LOCALTIME LOCALTIMESTAMP LOCATION LOCK LOGICAL MANUAL MAP MATCH_ALL MATCH_ANY MATCH_PHRASE MATCH_PHRASE_EDGE MATCH_PHRASE_PREFIX MATCH_REGEXP MATERIALIZED MAX MEMO MERGE MIGRATE MIGRATIONS MIN MINUTE MINUTES MODIFY MONTH MTMV NAME NAMES NEGATIVE NEVER NEXT NGRAM_BF NO NON_NULLABLE NULLS OF OFFSET ONLY OPEN OPTIMIZED PARAMETER PARSED PARTITIONS PASSWORD PASSWORD_EXPIRE PASSWORD_HISTORY PASSWORD_LOCK_TIME PASSWORD_REUSE PATH PAUSE PERCENT PERIOD PERMISSIVE PHYSICAL PI PLAN PLUGIN PLUGINS POLICY PRIVILEGES PROC PROCESS PROCESSLIST PROFILE PROPERTIES PROPERTY QUALIFY QUANTILE_STATE QUANTILE_UNION QUARTER QUERY QUEUED QUOTA RANDOM RECENT RECOVER RECYCLE REFRESH REPEATABLE REPLACE REPLACE_IF_NOT_NULL REPLAYER REPOSITORIES REPOSITORY RESOURCE RESOURCES RESTORE RESTRICTIVE RESUME RETAIN RETENTION RETURNS REWRITTEN RIGHT_BRACE RLIKE ROLLBACK ROLLUP ROUTINE S3 SAMPLE SCHEDULE SCHEDULER SCHEMA SECOND SERIALIZABLE SESSION SESSION_USER SET_SESSION_VARIABLE SHAPE SKEW SNAPSHOT SNAPSHOTS SONAME SPLIT SQL STAGE STAGES START STARTS STATS STATUS STOP STORAGE STREAM STREAMING STRING STRUCT SUM TABLES TAG TASK TASKS TEMPORARY TEXT THAN TIME TIMESTAMP TRANSACTION TREE TRIGGERS TRUNCATE TYPE TYPES UNCOMMITTED UNLOCK UNSET UP USER VALUE VARCHAR VARIABLE VARIABLES VARIANT VAULT VAULTS VERBOSE VERSION VIEW VIEWS WARM WARNINGS WEEK WORK YEAR """ # These are not Doris keywords, but MySQL keywords. # Because Doris dialect is inherit from MySQL doris_unreserved_keywords += """ BLOOM_FILTER STORED VIRTUAL """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_duckdb.py000066400000000000000000000654641503426445100230610ustar00rootroot00000000000000"""The DuckDB dialect. https://duckdb.org/docs/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, BinaryOperatorSegment, Bracketed, CodeSegment, ComparisonOperatorSegment, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexLexer, RegexParser, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_postgres as postgres ansi_dialect = load_raw_dialect("ansi") postgres_dialect = load_raw_dialect("postgres") duckdb_dialect = postgres_dialect.copy_as( "duckdb", formatted_name="DuckDB", docstring="""**Default Casing**: DuckDB stores all identifiers in the case they were defined, however all identifier resolution is case-insensitive (when unquoted, and more unusually, *also when quoted*). See the `DuckDB Identifiers Documentation`_ for more details. **Quotes**: String Literals: ``''``, Identifiers: ``""`` or ``''`` The dialect for `DuckDB `_. .. 
_`DuckDB Identifiers Documentation`: https://duckdb.org/docs/sql/dialect/keywords_and_identifiers """, # noqa: E501 ) duckdb_dialect.sets("reserved_keywords").update( [ "PIVOT", "PIVOT_LONGER", "PIVOT_WIDER", "UNPIVOT", ] ) duckdb_dialect.sets("unreserved_keywords").update( [ "ANTI", "ASOF", "GLOB", "MACRO", "MAP", "POSITIONAL", "SEMI", "STRUCT", "VIRTUAL", ] ) duckdb_dialect.add( LambdaArrowSegment=StringParser("->", SymbolSegment, type="lambda_arrow"), OrIgnoreGrammar=Sequence("OR", "IGNORE"), EqualsSegment_a=StringParser("==", ComparisonOperatorSegment), UnpackingOperatorSegment=TypedParser("star", SymbolSegment, "unpacking_operator"), ) duckdb_dialect.replace( FunctionNameIdentifierSegment=RegexParser( r"[A-Z_][A-Z0-9_$]*", CodeSegment, type="function_name_identifier", anti_template=r"^(STRUCT|UNION|ENUM)$", ), DivideSegment=OneOf( StringParser("//", BinaryOperatorSegment), StringParser("/", BinaryOperatorSegment), ), CreateTableAsStatementSegment=Nothing(), UnionGrammar=ansi_dialect.get_grammar("UnionGrammar").copy( insert=[ Sequence("BY", "NAME", optional=True), ] ), JoinLikeClauseGrammar=Sequence( AnyNumberOf( Ref("FromPivotExpressionSegment"), Ref("FromUnpivotExpressionSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), NonSetSelectableGrammar=postgres_dialect.get_grammar( "NonSetSelectableGrammar" ).copy( insert=[ Ref("SimplifiedPivotExpressionSegment"), Ref("SimplifiedUnpivotExpressionSegment"), ], ), NonStandardJoinTypeKeywordsGrammar=OneOf( "ANTI", "SEMI", Sequence( "ASOF", OneOf( Ref("JoinTypeKeywordsGrammar"), "ANTI", "SEMI", optional=True, ), ), ), HorizontalJoinKeywordsGrammar=Ref.keyword("POSITIONAL"), FunctionContentsExpressionGrammar=OneOf( Ref("LambdaExpressionSegment"), Ref("NamedArgumentSegment"), Ref("ExpressionSegment"), ), ColumnsExpressionNameGrammar=Sequence( Ref("UnpackingOperatorSegment", optional=True), "COLUMNS" ), # Uses grammar for LT06 support ColumnsExpressionGrammar=Sequence( Ref("ColumnsExpressionFunctionNameSegment"), Ref("ColumnsExpressionFunctionContentsSegment"), ), # Matching postgres lower casefold, as it is case-insensitive QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.lower ), SingleQuotedIdentifierSegment=TypedParser( "single_quote", IdentifierSegment, type="quoted_identifier", casefold=str.lower ), ListComprehensionGrammar=Ref("ListComprehensionExpressionSegment"), ComparisonOperatorGrammar=ansi_dialect.get_grammar( "ComparisonOperatorGrammar" ).copy( insert=[ Ref("EqualsSegment_a"), Ref("GlobOperatorSegment"), ] ), LikeGrammar=postgres_dialect.get_grammar("LikeGrammar").copy( insert=[Ref.keyword("GLOB")], ), ) duckdb_dialect.insert_lexer_matchers( [ StringLexer("double_divide", "//", CodeSegment), ], before="divide", ) duckdb_dialect.patch_lexer_matchers( [ # In DuckDB, a double single/double quote resolves as a single/double quote in # the string. 
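        # For example (illustrative): the string literal 'it''s' lexes to
        # the value it's, and the identifier "say ""hi""" lexes to the
        # value say "hi".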
RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^']|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), RegexLexer("equals", r"==?", CodeSegment), ] ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( # Comma-separated list of field names/types Sequence( OneOf( # ParameterNames can look like Datatypes so can't use # Optional=True here and instead do a OneOf in order # with DataType only first, followed by both. Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment"), ), ), ), ), ) class MapTypeSegment(ansi.MapTypeSegment): """Expression to construct a MAP datatype.""" match_grammar = Sequence( "MAP", Ref("MapTypeSchemaSegment", optional=True), ) class MapTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a MAP datatype.""" type = "map_type_schema" match_grammar = Bracketed( Delimited( Ref("DatatypeSegment"), ), ) class InsertStatementSegment(ansi.InsertStatementSegment): """An `INSERT` Statement. https://duckdb.org/docs/stable/sql/statements/insert.html """ type = "insert_statement" match_grammar: Matchable = Sequence( "INSERT", OneOf(Ref("OrReplaceGrammar"), Ref("OrIgnoreGrammar"), optional=True), "INTO", Ref("TableReferenceSegment"), Ref("AsAliasExpressionSegment", optional=True), OneOf( Ref("BracketedColumnReferenceListGrammar"), Sequence("BY", "POSITION"), Sequence("BY", "NAME"), optional=True, ), OneOf( Sequence("DEFAULT", "VALUES"), Ref("SelectStatementSegment"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectStatementSegment")), ), ), ), Sequence( "ON", "CONFLICT", Ref("ConflictTargetSegment", optional=True), Ref("ConflictActionSegment"), optional=True, ), Sequence( "RETURNING", OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AsAliasExpressionSegment", optional=True), ), ), ), optional=True, ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. https://duckdb.org/docs/sql/statements/create_table https://duckdb.org/docs/sql/statements/alter_table """ # Column constraint from # https://duckdb.org/docs/sql/statements/create_table match_grammar = Sequence( OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), ), Sequence( # DEFAULT "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("ExpressionSegment"), ), ), "UNIQUE", Sequence( "PRIMARY", "KEY", ), Ref("ReferenceDefinitionGrammar"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), ), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. 
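    For example (an illustrative sketch; the table and column names are
    hypothetical):

    .. code-block:: sql

        CREATE OR REPLACE TABLE t1 (
            id INTEGER PRIMARY KEY,
            x INTEGER,
            x_doubled INTEGER AS (x * 2) VIRTUAL
        );
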
As specified in https://duckdb.org/docs/sql/statements/create_table.html """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Columns and comment syntax: Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf( Sequence( Ref("DatatypeSegment"), AnyNumberOf( OneOf( Ref("ColumnConstraintSegment"), ), ), ), Sequence( Ref( "DatatypeSegment", optional=True, exclude=Ref.keyword("AS"), ), Sequence("GENERATED", "ALWAYS", optional=True), "AS", Bracketed(Ref("ExpressionSegment")), OneOf("STORED", "VIRTUAL", optional=True), ), ), ), Ref("TableConstraintSegment"), ) ), ), ) class WildcardExcludeExpressionSegment(BaseSegment): """An `EXCLUDE` clause within a wildcard expression.""" type = "wildcard_exclude" match_grammar = Sequence( "EXCLUDE", OneOf( Ref("ColumnReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ) class WildcardReplaceExpressionSegment(BaseSegment): """A `REPLACE` clause within a wildcard expression.""" type = "wildcard_replace" match_grammar = Sequence( "REPLACE", OneOf( Bracketed( Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) ), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), ) class WildcardRenameExpressionSegment(BaseSegment): """A `RENAME` clause within a wildcard expression.""" type = "wildcard_rename" match_grammar = Sequence( "RENAME", OneOf( Bracketed( Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) ), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), ) class WildcardPatternMatchingSegment(BaseSegment): """A pattern matching operator clause within a wildcard expression.""" type = "wildcard_pattern_matching" match_grammar = OneOf( Ref("LikeExpressionGrammar"), Sequence( OneOf( Ref("LikeOperatorSegment"), Ref("GlobOperatorSegment"), ), Ref("QuotedLiteralSegment"), ), ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for DuckDB.""" match_grammar = Sequence( # *, blah.*, blah.blah.*, etc. Ref("WildcardIdentifierSegment"), # Optional EXCLUDE or REPLACE clause Ref("WildcardExcludeExpressionSegment", optional=True), OneOf( Sequence( Ref("WildcardReplaceExpressionSegment", optional=True), Ref("WildcardRenameExpressionSegment", optional=True), ), Ref("WildcardPatternMatchingSegment"), optional=True, ), ) class SelectClauseElementSegment(ansi.SelectClauseElementSegment): """An element in the targets of a select statement.""" type = "select_clause_element" match_grammar = OneOf( Sequence( Ref("WildcardExpressionSegment"), ), Sequence( Ref( "BaseExpressionElementGrammar", ), Ref("AliasExpressionSegment", optional=True), ), ) class ColumnsExpressionFunctionContentsSegment( ansi.ColumnsExpressionFunctionContentsSegment ): """Columns expression in a select statement. https://duckdb.org/docs/sql/expressions/star#columns-expression """ type = "function_contents" match_grammar = Sequence( Bracketed( OneOf( Ref("WildcardExpressionSegment"), Ref("LambdaExpressionSegment"), Ref("BaseExpressionElementGrammar"), ), ), ) class LambdaExpressionSegment(BaseSegment): """Lambda function used in a function or columns expression. 
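    For example (an illustrative sketch):

    .. code-block:: sql

        SELECT list_transform([1, 2, 3], x -> x + 1);

        SELECT list_filter([4, 5, 6], (x) -> x > 4);
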
https://duckdb.org/docs/sql/functions/lambda https://duckdb.org/docs/sql/expressions/star#columns-lambda-function """ type = "lambda_function" match_grammar = Sequence( OneOf( Ref("ParameterNameSegment"), Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Ref("LambdaArrowSegment"), Ref("ExpressionSegment"), ) class ListComprehensionExpressionSegment(BaseSegment): """A list comprehension expression in duckdb. https://duckdb.org/docs/sql/functions/list#list-comprehension """ type = "list_comprehension" match_grammar = Bracketed( Ref("ExpressionSegment"), "FOR", Ref("ParameterNameSegment"), "IN", Ref("ExpressionSegment"), Sequence("IF", Ref("ExpressionSegment"), optional=True), bracket_type="square", ) class SelectStatementSegment(ansi.SelectStatementSegment): """A duckdb `SELECT` statement including optional Qualify. https://duckdb.org/docs/sql/query_syntax/qualify """ type = "select_statement" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), replace_terminators=True, terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), Sequence("ON", "CONFLICT"), "RETURNING", Ref("WithCheckOptionSegment"), Ref("MetaCommandQueryBufferSegment"), ], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. """ type = "select_statement" match_grammar: Matchable = Sequence( OneOf( Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), ), Sequence( # From-First Syntax: # https://duckdb.org/docs/sql/query_syntax/from Ref("FromClauseSegment"), Ref("SelectClauseSegment", optional=True), ), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), Ref("QualifyClauseSegment", optional=True), terminators=[ Ref("SetOperatorSegment"), Ref("OrderByClauseSegment"), Ref("LimitClauseSegment"), Sequence("ON", "CONFLICT"), "RETURNING", ], ) class TableReferenceSegment(ansi.TableReferenceSegment): """A reference to an table, CTE, subquery or alias. Overload for DuckDB as only tables can be single quoted identifiers when used by the httpfs extension. """ match_grammar = ansi.TableReferenceSegment.match_grammar.copy( insert=[Ref("SingleQuotedIdentifierSegment")], ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. Inherit from ANSI instead of Postgres. 
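For illustration, both `SELECT col AS c FROM t` and `SELECT col c FROM t` should parse, since the `AS` keyword is optional.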
""" class OrderByClauseSegment(ansi.OrderByClauseSegment): """A `ORDER BY` clause like in `SELECT`.""" match_grammar: Matchable = Sequence( "ORDER", "BY", Indent, Delimited( Sequence( OneOf( "ALL", Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), allow_trailing=True, terminators=[Ref("OrderByClauseTerminators")], ), Dedent, ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """A `GROUP BY` clause like in `SELECT`.""" match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( "ALL", Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), allow_trailing=True, terminators=[Ref("GroupByClauseTerminatorGrammar")], ), Dedent, ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`. https://duckdb.org/docs/sql/query_syntax/qualify.html """ type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class ObjectLiteralElementSegment(ansi.ObjectLiteralElementSegment): """An object literal element segment.""" match_grammar: Matchable = Sequence( OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), Ref("ColonSegment"), Ref("BaseExpressionElementGrammar"), ) class StatementSegment(postgres.StatementSegment): """An element in the targets of a select statement.""" match_grammar = postgres.StatementSegment.match_grammar.copy( insert=[ Ref("SimplifiedPivotExpressionSegment"), Ref("SimplifiedUnpivotExpressionSegment"), ] ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression.""" type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Delimited( Sequence( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True), ) ), "FOR", AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), "IN", Bracketed(Delimited(Ref("LiteralGrammar"))), ), ), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), reset_terminators=True, ) class SimplifiedPivotExpressionSegment(BaseSegment): """The DuckDB simplified PIVOT syntax. 
https://duckdb.org/docs/sql/statements/pivot#simplified-pivot-full-syntax-diagram """ type = "simplified_pivot" match_grammar = Sequence( OneOf("PIVOT", "PIVOT_WIDER"), Ref("TableExpressionSegment"), Sequence( "ON", Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), Sequence( "IN", Bracketed(Delimited(Ref("LiteralGrammar"))), optional=True, ), ), optional=True, ), Sequence( "USING", Delimited( Sequence( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), optional=True, ), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class FromUnpivotExpressionSegment(BaseSegment): """An UNPIVOT expression.""" type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Sequence("INCLUDE", "NULLS", optional=True), Bracketed( OneOf( Ref("SingleIdentifierGrammar"), Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ), "FOR", AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), "IN", Bracketed( Delimited( Sequence( OptionallyBracketed( Delimited(Ref("SingleIdentifierGrammar")) ), Ref("AliasExpressionSegment", optional=True), ), Ref("ColumnsExpressionGrammar"), ), ), ), min_times=1, ), ), ) class SimplifiedUnpivotExpressionSegment(BaseSegment): """The DuckDB simplified UNPIVOT syntax. https://duckdb.org/docs/sql/statements/unpivot#simplified-unpivot-full-syntax-diagram """ type = "simplified_unpivot" match_grammar = Sequence( OneOf("UNPIVOT", "PIVOT_LONGER"), Ref("TableExpressionSegment"), "ON", Delimited( Sequence( OneOf( Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), Ref("ColumnReferenceSegment"), ), Ref("AliasExpressionSegment", optional=True), ), Ref("ColumnsExpressionGrammar"), ), "INTO", "NAME", Ref("SingleIdentifierGrammar"), "VALUE", Delimited( Ref("SingleIdentifierGrammar"), ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class CreateViewStatementSegment(postgres.CreateViewStatementSegment): """An `Create VIEW` statement. https://duckdb.org/docs/sql/statements/create_view.html """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Ref("ValuesClauseSegment"), ), ) class CreateFunctionStatementSegment(postgres.CreateFunctionStatementSegment): """A `CREATE MACRO` or `CREATE FUNCTION` statement. https://duckdb.org/docs/sql/statements/create_macro """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), OneOf("MACRO", "FUNCTION"), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), "AS", OneOf( Sequence("TABLE", Indent, Ref("SelectableGrammar"), Dedent), Ref("ExpressionSegment"), ), ) class DropFunctionStatementSegment(postgres.DropFunctionStatementSegment): """A `DROP MACRO` or `DROP FUNCTION` statement. https://duckdb.org/docs/sql/statements/drop.html """ match_grammar = Sequence( "DROP", OneOf("MACRO", "FUNCTION"), Ref.keyword("TABLE", optional=True), Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("DropBehaviorGrammar", optional=True), ) class CreateTypeStatementSegment(postgres.CreateTypeStatementSegment): """A `CREATE TYPE` statement. 
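For illustration (examples assumed from the DuckDB docs): `CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');` or `CREATE TYPE point_t AS STRUCT(x INTEGER, y INTEGER);`.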
https://duckdb.org/docs/sql/statements/create_type.html """ match_grammar = Sequence( "CREATE", "TYPE", Ref("DatatypeSegment"), "AS", OneOf( Ref("DatatypeSegment"), Sequence("ENUM", Bracketed(Delimited(Ref("QuotedLiteralSegment")))), Ref("StructTypeSegment"), Sequence("UNION", Ref("StructTypeSchemaSegment")), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_exasol.py000066400000000000000000002661241503426445100231140ustar00rootroot00000000000000"""The EXASOL dialect. https://docs.exasol.com https://docs.exasol.com/sql_references/sqlstandardcompliance.htm """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseFileSegment, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, ImplicitIndent, Indent, LiteralKeywordSegment, LiteralSegment, MultiStringParser, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_exasol_keywords import ( BARE_FUNCTIONS, RESERVED_KEYWORDS, SESSION_PARAMETERS, SYSTEM_PARAMETERS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") exasol_dialect = ansi_dialect.copy_as( "exasol", formatted_name="Exasol", docstring="The dialect for `Exasol `_.", ) # Clear ANSI Keywords and add all EXASOL keywords exasol_dialect.sets("unreserved_keywords").clear() exasol_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) exasol_dialect.sets("reserved_keywords").clear() exasol_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) exasol_dialect.sets("bare_functions").clear() exasol_dialect.sets("bare_functions").update(BARE_FUNCTIONS) exasol_dialect.sets("session_parameters").clear() exasol_dialect.sets("session_parameters").update(SESSION_PARAMETERS) exasol_dialect.sets("system_parameters").clear() exasol_dialect.sets("system_parameters").update(SYSTEM_PARAMETERS) exasol_dialect.sets("date_part_function_name").clear() exasol_dialect.sets("date_part_function_name").update( [ "ADD_DAYS", "ADD_HOURS", "ADD_MINUTES", "ADD_MONTHS", "ADD_SECONDS", "ADD_WEEKS", "ADD_YEARS", ] ) exasol_dialect.insert_lexer_matchers( [ RegexLexer("lua_nested_quotes", r"\[={1,3}\[.*\]={1,3}\]", CodeSegment), RegexLexer("lua_multiline_quotes", r"\[{2}([^[\\]|\\.)*\]{2}", CodeSegment), # This matches escaped identifier e.g. [day]. There can be reserved keywords # within the square brackets. RegexLexer( "escaped_identifier", r"\[\w+\]", CodeSegment, segment_kwargs={ "quoted_value": (r"\[(\w+)\]", 1), }, ), RegexLexer( "udf_param_dot_syntax", r"\.{3}", CodeSegment, ), RegexLexer( "range_operator", r"\.{2}", SymbolSegment, ), StringLexer("hash", "#", CodeSegment), StringLexer("walrus_operator", ":=", CodeSegment), RegexLexer( "function_script_terminator", r"\n/\n|\n/$", SymbolSegment, subdivider=RegexLexer( "newline", r"(\n|\r\n)+", NewlineSegment, ), ), RegexLexer("at_sign_literal", r"@[a-zA-Z_][\w]*", CodeSegment), RegexLexer("dollar_literal", r"[$][a-zA-Z0-9_.]*", CodeSegment), ], before="like_operator", ) exasol_dialect.patch_lexer_matchers( [ # In EXASOL, a double single/double quote resolves as a single/double quote in # the string. 
It's also used for escaping single quotes inside of STATEMENT # strings like in the IMPORT function # https://docs.exasol.com/sql_references/basiclanguageelements.htm#Delimited_Identifiers # https://docs.exasol.com/sql_references/literals.htm RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^']|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), RegexLexer( "inline_comment", r"--[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), ] ) exasol_dialect.add( PasswordLiteralSegment=TypedParser( "double_quote", CodeSegment, type="password_literal" ), UDFParameterDotSyntaxSegment=TypedParser( "udf_param_dot_syntax", SymbolSegment, type="identifier" ), RangeOperator=TypedParser("range_operator", SymbolSegment, type="range_operator"), UnknownSegment=StringParser( "unknown", LiteralKeywordSegment, type="boolean_literal" ), ForeignKeyReferencesClauseGrammar=Sequence( "REFERENCES", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), ), ColumnReferenceListGrammar=Delimited( Ref("ColumnReferenceSegment"), ), TableDistributeByGrammar=Sequence( "DISTRIBUTE", "BY", Delimited( Ref("ColumnReferenceSegment"), terminators=[ Ref("TablePartitionByGrammar"), Ref("DelimiterGrammar"), ], ), ), TablePartitionByGrammar=Sequence( "PARTITION", "BY", Delimited( Ref("ColumnReferenceSegment"), terminators=[ Ref("TableDistributeByGrammar"), Ref("DelimiterGrammar"), ], ), ), TableConstraintEnableDisableGrammar=OneOf("ENABLE", "DISABLE"), EscapedIdentifierSegment=TypedParser( "escaped_identifier", SymbolSegment, type="identifier" ), SessionParameterSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("session_parameters"), CodeSegment, type="session_parameter", ) ), SystemParameterSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("system_parameters"), CodeSegment, type="system_parameter", ) ), UDFParameterGrammar=OneOf( # (A NUMBER, B VARCHAR) or (...) 
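# The "..." alternative (UDFParameterDotSyntaxSegment) is assumed to cover EXASOL's dynamic UDF parameter syntax, e.g. the (...) parameter list in CREATE LUA SCALAR SCRIPT my_script (...).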
Delimited(Ref("ColumnDatatypeSegment")), Ref("UDFParameterDotSyntaxSegment"), ), FunctionScriptTerminatorSegment=TypedParser( "function_script_terminator", SymbolSegment, type="function_script_terminator", ), WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), VariableNameSegment=RegexParser( r"[A-Z][A-Z0-9_]*", CodeSegment, type="variable", ), ) exasol_dialect.replace( SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("EscapedIdentifierSegment"), ), ParameterNameSegment=RegexParser( r"\"?[A-Z][A-Z0-9_]*\"?", CodeSegment, type="parameter", ), LikeGrammar=OneOf("LIKE", "REGEXP_LIKE"), NanLiteralSegment=Nothing(), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), Ref("WithDataClauseSegment"), Ref("CommentClauseSegment"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "CONNECT", "START", "PREFERRING", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", Ref("SetOperatorSegment"), Ref("WithDataClauseSegment"), Ref("CommentClauseSegment"), ), WhereClauseTerminatorGrammar=OneOf( "CONNECT", "START", "PREFERRING", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", Ref("SetOperatorSegment"), Ref("WithDataClauseSegment"), Ref("CommentClauseSegment"), ), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIMESTAMP"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), CharCharacterSetGrammar=OneOf( "UTF8", "ASCII", ), PreTableFunctionKeywordsGrammar=Ref.keyword("TABLE"), BooleanLiteralGrammar=OneOf( Ref("TrueSegment"), Ref("FalseSegment"), Ref("UnknownSegment") ), PostFunctionGrammar=OneOf( Ref("EmitsSegment"), # e.g. JSON_EXTRACT() Sequence( Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), Ref("OverClauseSegment"), ), ), ) ############################ # SELECT ############################ class UnorderedSelectStatementSegment(BaseSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. """ type = "select_statement" match_grammar = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("ReferencingClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("ConnectByClauseSegment", optional=True), Ref("PreferringClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("QualifyClauseSegment", optional=True), terminators=[ Ref("SetOperatorSegment"), Ref("WithDataClauseSegment"), Ref("CommentClauseSegment"), # within CREATE TABLE / VIEW statements Ref("OrderByClauseSegment"), Ref("LimitClauseSegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement. https://docs.exasol.com/sql/select.htm """ type = "select_statement" # Inherit most of the match grammar from the original. match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ], terminators=[ Ref("SetOperatorSegment"), Ref("WithDataClauseSegment"), Ref("CommentClauseSegment"), # within CREATE TABLE / VIEW statements ], # Replace terminators because we're removing some. 
replace_terminators=True, ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement.""" type = "select_clause" match_grammar = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref( "SelectClauseElementSegment", exclude=OneOf( Sequence( Ref.keyword("WITH", optional=True), "INVALID", OneOf("FOREIGN", "PRIMARY"), ), Sequence("INTO", "TABLE"), ), ), allow_trailing=True, optional=True, # optional in favour of SELECT INVALID.... ), Ref("WithInvalidForeignKeySegment", optional=True), Ref("WithInvalidUniquePKSegment", optional=True), Ref("IntoTableSegment", optional=True), Dedent, terminators=[Ref("SelectClauseTerminatorGrammar")], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class WithInvalidUniquePKSegment(BaseSegment): """`WITH INVALID UNIQUE` or `WITH INVALID PRIMARY KEY` clause within `SELECT`.""" type = "with_invalid_unique_pk_clause" match_grammar = Sequence( Ref.keyword("WITH", optional=True), "INVALID", OneOf("UNIQUE", Ref("PrimaryKeyGrammar")), Ref("BracketedColumnReferenceListGrammar"), ) class WithInvalidForeignKeySegment(BaseSegment): """`WITH INVALID FOREIGN KEY` clause within `SELECT`.""" type = "with_invalid_foreign_key_clause" match_grammar = Sequence( Ref.keyword("WITH", optional=True), "INVALID", Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), ) class ReferencingClauseSegment(BaseSegment): """Part of `WITH INVALID FOREIGN KEY` clause within `SELECT`.""" type = "referencing_clause" match_grammar = Sequence( "REFERENCING", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), ) class IntoTableSegment(BaseSegment): """`INTO TABLE` clause within `SELECT`.""" type = "into_table_clause" match_grammar = Sequence("INTO", "TABLE", Ref("TableReferenceSegment")) class TableExpressionSegment(BaseSegment): """The main table expression e.g. within a FROM clause.""" type = "table_expression" match_grammar = OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("TableReferenceSegment"), Bracketed(Ref("SelectableGrammar")), Ref("ValuesRangeClauseSegment"), Ref("ValuesClauseSegment"), Ref("ImportStatementSegment"), # subimport Ref("ExplainVirtualSegment"), ) class ValuesClauseSegment(BaseSegment): """A `VALUES` clause within in `WITH` or `SELECT`.""" type = "values_clause" match_grammar = Sequence( "VALUES", Delimited( OneOf( Bracketed( Delimited( "DEFAULT", Ref("LiteralGrammar"), Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), Delimited( "DEFAULT", Ref("ExpressionSegment"), ), ), ), Ref("AliasExpressionSegment", optional=True), ) class ValuesRangeClauseSegment(BaseSegment): """A `VALUES BETWEEN` clause within a `SELECT` statement. Supported since Exasol 7.1! 
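For illustration (assumed usage): `SELECT * FROM (VALUES BETWEEN 1 AND 10 WITH STEP 2);` would yield 1, 3, 5, 7 and 9.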
""" type = "values_range_clause" match_grammar = Sequence( "VALUES", "BETWEEN", OneOf( Ref("NumericLiteralSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ), "AND", OneOf( Ref("NumericLiteralSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ), Sequence("WITH", "STEP", Ref("NumericLiteralSegment"), optional=True), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar = OneOf( Sequence("UNION", Ref.keyword("ALL", optional=True)), "INTERSECT", OneOf("MINUS", "EXCEPT"), ) class ConnectByClauseSegment(BaseSegment): """`CONNECT BY` clause within a select statement.""" type = "connect_by_clause" match_grammar = OneOf( Sequence( "CONNECT", "BY", Ref.keyword("NOCYCLE", optional=True), Delimited( Ref("ExpressionSegment"), delimiter="AND", terminators=["START"], ), Sequence("START", "WITH", Ref("ExpressionSegment"), optional=True), ), Sequence( "START", "WITH", Ref("ExpressionSegment"), "CONNECT", "BY", Ref.keyword("NOCYCLE", optional=True), Delimited(Ref("ExpressionSegment"), delimiter="AND"), ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ExpressionSegment"), Bracketed(), # Allows empty parentheses ), terminators=[ Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", Ref("SetOperatorSegment"), ], ), Dedent, ) class QualifyClauseSegment(BaseSegment): """`QUALIFY` clause within `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, Ref("ExpressionSegment"), Dedent, ) class LimitClauseSegment(BaseSegment): """A `LIMIT` clause like in `SELECT`.""" type = "limit_clause" match_grammar = Sequence( "LIMIT", OneOf( Sequence( # offset, count Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), Sequence( # count [OFFSET offset] Ref("NumericLiteralSegment"), Sequence("OFFSET", Ref("NumericLiteralSegment"), optional=True), ), ), ) class LocalAliasSegment(BaseSegment): """The `LOCAL.ALIAS` syntax allows to use a alias name of a column within clauses. E.g. `SELECT ABS(x) AS x FROM t WHERE local.x>10` This is supported by: `SELECT`, `WHERE`, `GROUP BY`, `ORDER BY`, `HAVING`, `QUALIFY` Note: it's not necessary to use `LOCAL` within `ÒRDER BY` and `QUALIFY` because the alias could be accessed directly (...but we can). """ type = "local_alias_segment" match_grammar = Sequence("LOCAL", Ref("DotSegment"), Ref("SingleIdentifierGrammar")) ############################ # SCHEMA ############################ class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement. https://docs.exasol.com/sql/create_schema.htm """ type = "create_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class CreateVirtualSchemaStatementSegment(BaseSegment): """A `CREATE VIRTUAL SCHEMA` statement. 
https://docs.exasol.com/sql/create_schema.htm """ type = "create_virtual_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", "VIRTUAL", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), "USING", Ref("ObjectReferenceSegment"), Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ) ), ) class AlterSchemaStatementSegment(BaseSegment): """A `ALTER VIRTUAL SCHEMA` statement. https://docs.exasol.com/sql/alter_schema.htm """ type = "alter_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "SET", "RAW_SIZE_LIMIT", Ref("EqualsSegment"), AnyNumberOf(Ref("NumericLiteralSegment"), Ref("StarSegment")), ), Sequence("CHANGE", "OWNER", Ref("SingleIdentifierGrammar")), ), ) class AlterVirtualSchemaStatementSegment(BaseSegment): """A `ALTER VIRTUAL SCHEMA` statement. https://docs.exasol.com/sql/alter_schema.htm """ type = "alter_virtual_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "VIRTUAL", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "SET", AnyNumberOf( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ) ), ), Sequence( "REFRESH", Sequence( "TABLES", Delimited(Ref("TableReferenceSegment")), optional=True, ), ), Sequence("CHANGE", "OWNER", Ref("SingleIdentifierGrammar")), ), ) class DropSchemaStatementSegment(BaseSegment): """A `DROP SCHEMA` statement for EXASOL schema. https://docs.exasol.com/sql/drop_schema.htm """ type = "drop_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", Ref.keyword("FORCE", optional=True), Ref.keyword("VIRTUAL", optional=True), "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # VIEW ############################ class ViewReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an schema.""" type = "view_reference" class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. https://docs.exasol.com/sql/create_view.htm """ type = "create_view_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("FORCE", optional=True), "VIEW", Ref("ViewReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CommentClauseSegment", optional=True), ), ), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("CommentClauseSegment", optional=True), ) class DropViewStatementSegment(BaseSegment): """A `DROP VIEW` statement with CASCADE and RESTRICT option. https://docs.exasol.com/sql/drop_view.htm """ type = "drop_view_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("ViewReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # TABLE ############################ class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. 
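For illustration (examples assumed): `CREATE TABLE t (id DECIMAL(18,0), name VARCHAR(100), DISTRIBUTE BY id);` or `CREATE OR REPLACE TABLE t2 AS SELECT * FROM t WITH DATA;`.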
https://docs.exasol.com/sql/create_table.htm """ type = "create_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Bracketed( Sequence( Delimited( Ref("TableContentDefinitionSegment"), ), Sequence( Ref("CommaSegment"), Ref("TableDistributionPartitionClause"), optional=True, ), ), ), # Create AS syntax: Sequence( "AS", Ref("SelectableGrammar"), Ref("WithDataClauseSegment", optional=True), ), # Create like syntax Ref("CreateTableLikeClauseSegment"), ), Ref("CommentClauseSegment", optional=True), ) class TableContentDefinitionSegment(BaseSegment): """The table content definition.""" type = "table_content_definition" match_grammar = OneOf( Ref("ColumnDefinitionSegment"), Ref("TableOutOfLineConstraintSegment"), Ref("CreateTableLikeClauseSegment"), ) class ColumnDatatypeSegment(BaseSegment): """sequence of column and datatype definition.""" type = "column_datatype_definition" match_grammar = Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( # The brackets might be empty for some cases... Delimited(Ref("NumericLiteralSegment"), optional=True), # In exasol, some types offer on optional MAX # qualifier of BIT, BYTE or CHAR OneOf("BIT", "BYTE", "CHAR", optional=True), ) class DatatypeSegment(BaseSegment): """A data type segment. Supports all Exasol datatypes and their aliases https://docs.exasol.com/sql_references/data_types/datatypedetails.htm https://docs.exasol.com/sql_references/data_types/datatypealiases.htm . """ type = "data_type" match_grammar = OneOf( # Numeric Data Types Sequence( OneOf("DECIMAL", "DEC", "NUMBER", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "BIGINT", Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)), "FLOAT", "INT", "INTEGER", "REAL", "SHORTINT", "TINYINT", "SMALLINT", OneOf("BOOLEAN", "BOOL"), OneOf( "DATE", Sequence( "TIMESTAMP", Sequence("WITH", "LOCAL", "TIME", "ZONE", optional=True) ), ), Sequence( "INTERVAL", "YEAR", Ref("BracketedArguments", optional=True), "TO", "MONTH", ), Sequence( "INTERVAL", "DAY", Ref("BracketedArguments", optional=True), "TO", "SECOND", Ref("BracketedArguments", optional=True), ), Sequence( "GEOMETRY", Ref("BracketedArguments", optional=True), ), Sequence( "HASHTYPE", Ref("BracketedArguments", optional=True), ), Sequence( OneOf( Sequence( OneOf( Sequence("CHAR", Ref.keyword("VARYING", optional=True)), "VARCHAR", "VARCHAR2", "NCHAR", "NVARCHAR", "NVARCHAR2", ), Ref("BracketedArguments", optional=True), ), Sequence("LONG", "VARCHAR"), Sequence( "CHARACTER", Sequence( OneOf(Sequence("LARGE", "OBJECT"), "VARYING", optional=True), Ref("BracketedArguments", optional=True), ), ), Sequence( "CLOB", Ref("BracketedArguments", optional=True), ), ), Ref("CharCharacterSetGrammar", optional=True), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. 
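Concrete literal shapes are listed in the inline comments below, e.g. `INTERVAL '100-1' YEAR(3) TO MONTH`.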
https://docs.exasol.com/db/latest/sql_references/literals.htm """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", Ref("QuotedLiteralSegment"), OneOf( # INTERVAL '5' MONTH # INTERVAL '130' MONTH (3) Sequence( "MONTH", Bracketed(Ref("NumericLiteralSegment"), optional=True), ), # INTERVAL '27' YEAR # INTERVAL '100-1' YEAR(3) TO MONTH Sequence( "YEAR", Bracketed(Ref("NumericLiteralSegment"), optional=True), Sequence("TO", "MONTH", optional=True), ), # INTERVAL '5' DAY # INTERVAL '100' HOUR(3) # INTERVAL '1.99999' SECOND(2,2) # INTERVAL '23:10:59.123' HOUR(2) TO SECOND(3) Sequence( OneOf( Sequence( OneOf("DAY", "HOUR", "MINUTE"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), Sequence( "SECOND", Bracketed( Delimited(Ref("NumericLiteralSegment")), optional=True, ), ), ), Sequence( "TO", OneOf( "HOUR", "MINUTE", Sequence( "SECOND", Bracketed(Ref("NumericLiteralSegment"), optional=True), ), ), optional=True, ), ), ), ) class ColumnDefinitionSegment(BaseSegment): """Column definition within a `CREATE / ALTER TABLE` statement.""" type = "column_definition" match_grammar = Sequence( Ref("ColumnDatatypeSegment"), Ref("ColumnConstraintSegment", optional=True), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar = Sequence( OneOf( Sequence( "DEFAULT", OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment")) ), Sequence( # IDENTITY(1000) or IDENTITY 1000 or IDENTITY "IDENTITY", OptionallyBracketed(Ref("NumericLiteralSegment"), optional=True), ), optional=True, ), Ref("TableInlineConstraintSegment", optional=True), Ref("CommentClauseSegment", optional=True), ) class TableInlineConstraintSegment(BaseSegment): """Inline table constraint for CREATE / ALTER TABLE.""" type = "table_constraint_definition" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref( "SingleIdentifierGrammar", # exclude UNRESERVED_KEYWORDS which could used as NakedIdentifier # to make e.g. `id NUMBER CONSTRAINT PRIMARY KEY` work (which is equal # to just `id NUMBER PRIMARY KEY`) exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"), optional=True, ), optional=True, ), OneOf( # (NOT) NULL Sequence(Ref.keyword("NOT", optional=True), "NULL"), # PRIMARY KEY Ref("PrimaryKeyGrammar"), # FOREIGN KEY Ref("ForeignKeyReferencesClauseGrammar"), ), Ref("TableConstraintEnableDisableGrammar", optional=True), ) class TableOutOfLineConstraintSegment(BaseSegment): """Out of line table constraint for CREATE / ALTER TABLE.""" type = "table_constraint_definition" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref( "SingleIdentifierGrammar", # exclude UNRESERVED_KEYWORDS which could used as NakedIdentifier # to make e.g. 
`id NUMBER, CONSTRAINT PRIMARY KEY(id)` work (which is # equal to just `id NUMBER, PRIMARY KEY(id)`) exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"), optional=True, ), optional=True, ), OneOf( # PRIMARY KEY Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), ), # FOREIGN KEY Sequence( Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Ref("ForeignKeyReferencesClauseGrammar"), ), ), Ref("TableConstraintEnableDisableGrammar", optional=True), ) class CreateTableLikeClauseSegment(BaseSegment): """`CREATE TABLE` LIKE clause.""" type = "table_like_clause" match_grammar = Sequence( "LIKE", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), optional=True, ), Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS", optional=True), Sequence(OneOf("INCLUDING", "EXCLUDING"), "IDENTITY", optional=True), Sequence(OneOf("INCLUDING", "EXCLUDING"), "COMMENTS", optional=True), ) class TableDistributionPartitionClause(BaseSegment): """`CREATE / ALTER TABLE` distribution / partition clause. The DISTRIBUTE/PARTITION clause doesn't accept the identifiers in brackets """ type = "table_distribution_partition_clause" match_grammar = OneOf( Sequence( Ref("TableDistributeByGrammar"), Ref("CommaSegment", optional=True), Ref("TablePartitionByGrammar", optional=True), ), Sequence( Ref("TablePartitionByGrammar"), Ref("CommaSegment", optional=True), Ref("TableDistributeByGrammar", optional=True), ), ) class AlterTableStatementSegment(BaseSegment): """`ALTER TABLE` statement.""" type = "alter_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = OneOf( Ref("AlterTableColumnSegment"), Ref("AlterTableConstraintSegment"), Ref("AlterTableDistributePartitionSegment"), ) class AlterTableColumnSegment(BaseSegment): """An `ALTER TABLE` statement to add, modify, drop or rename columns. 
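For illustration (examples assumed): `ALTER TABLE t ADD COLUMN IF NOT EXISTS c VARCHAR(10);` or `ALTER TABLE t RENAME COLUMN c TO c2;`.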
https://docs.exasol.com/sql/alter_table(column).htm """ type = "alter_table_column_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Ref("AlterTableAddColumnSegment"), Ref("AlterTableDropColumnSegment"), Ref("AlterTableModifyColumnSegment"), Ref("AlterTableRenameColumnSegment"), Ref("AlterTableAlterColumnSegment"), ), ) class AlterTableAddColumnSegment(BaseSegment): """ALTER TABLE ADD..""" type = "alter_table_add_column" match_grammar = Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), OptionallyBracketed(Ref("ColumnDefinitionSegment")), ) class AlterTableDropColumnSegment(BaseSegment): """ALTER TABLE DROP..""" type = "alter_table_drop_column" match_grammar = Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Sequence("CASCADE", "CONSTRAINTS", optional=True), ) class AlterTableModifyColumnSegment(BaseSegment): """ALTER TABLE MODIFY..""" type = "alter_table_modify_column" match_grammar = Sequence( "MODIFY", Ref.keyword("COLUMN", optional=True), OptionallyBracketed( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment", optional=True), Ref("ColumnConstraintSegment", optional=True), ), ) class AlterTableRenameColumnSegment(BaseSegment): """ALTER TABLE RENAME..""" type = "alter_table_rename_column" match_grammar = Sequence( "RENAME", "COLUMN", Ref("SingleIdentifierGrammar"), "TO", Ref("SingleIdentifierGrammar"), ) class AlterTableAlterColumnSegment(BaseSegment): """ALTER TABLE ALTER..""" type = "alter_table_alter_column" match_grammar = Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("SingleIdentifierGrammar"), OneOf( Sequence( "SET", OneOf( Sequence( # IDENTITY(1000) or IDENTITY 1000 "IDENTITY", OptionallyBracketed(Ref("NumericLiteralSegment")), ), Sequence( "DEFAULT", OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment")), ), ), ), Sequence("DROP", OneOf("IDENTITY", "DEFAULT")), ), ) class AlterTableConstraintSegment(BaseSegment): """A `ALTER TABLE` statement to add, modify, drop or rename constraints. https://docs.exasol.com/sql/alter_table(constraints).htm """ type = "alter_table_constraint_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Sequence("ADD", Ref("TableOutOfLineConstraintSegment")), Sequence( "MODIFY", OneOf( Sequence("CONSTRAINT", Ref("SingleIdentifierGrammar")), Ref("PrimaryKeyGrammar"), ), Ref("TableConstraintEnableDisableGrammar"), ), Sequence( "DROP", OneOf( Sequence( "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Ref("PrimaryKeyGrammar"), ), ), Sequence( "RENAME", "CONSTRAINT", Ref("SingleIdentifierGrammar"), "TO", Ref("SingleIdentifierGrammar"), ), ), ) class AlterTableDistributePartitionSegment(BaseSegment): """A `ALTER TABLE` statement to add or drop distribution / partition keys. 
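For illustration (examples assumed): `ALTER TABLE t DISTRIBUTE BY shop_id, PARTITION BY order_date;` or `ALTER TABLE t DROP DISTRIBUTION AND PARTITION KEYS;`.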
https://docs.exasol.com/sql/alter_table(distribution_partitioning).htm """ type = "alter_table_distribute_partition_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Ref("TableDistributionPartitionClause"), Sequence( "DROP", OneOf( Sequence( Ref.keyword("DISTRIBUTION"), Ref.keyword("AND", optional=True), Ref.keyword("PARTITION", optional=True), ), Sequence( Ref.keyword("PARTITION"), Ref.keyword("AND", optional=True), Ref.keyword("DISTRIBUTION", optional=True), ), ), "KEYS", ), ), ) class DropTableStatementSegment(BaseSegment): """A `DROP` table statement. https://docs.exasol.com/sql/drop_table.htm """ type = "drop_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), Sequence("CASCADE", "CONSTRAINTS", optional=True), ) class CommentClauseSegment(BaseSegment): """A comment clause within `CREATE TABLE` / `CREATE VIEW` statements. e.g. COMMENT IS 'view/table/column description' """ type = "comment_clause" match_grammar = Sequence("COMMENT", "IS", Ref("QuotedLiteralSegment")) ############################ # RENAME ############################ class RenameStatementSegment(BaseSegment): """`RENAME` statement. https://docs.exasol.com/sql/rename.htm """ type = "rename_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "RENAME", OneOf( "SCHEMA", "TABLE", "VIEW", "FUNCTION", "SCRIPT", "USER", "ROLE", "CONNECTION", Sequence("CONSUMER", "GROUP"), optional=True, ), Ref("ObjectReferenceSegment"), "TO", Ref("ObjectReferenceSegment"), ) ############################ # COMMENT ############################ class CommentStatementSegment(BaseSegment): """`COMMENT` statement. https://docs.exasol.com/sql/comment.htm """ type = "comment_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "COMMENT", "ON", OneOf( Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Sequence("IS", Ref("QuotedLiteralSegment"), optional=True), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), "IS", Ref("QuotedLiteralSegment"), ), ), optional=True, ), ), Sequence( OneOf( "COLUMN", "SCHEMA", "FUNCTION", "SCRIPT", "USER", "ROLE", "CONNECTION", Sequence("CONSUMER", "GROUP"), ), Ref("ObjectReferenceSegment"), "IS", Ref("QuotedLiteralSegment"), ), ), ) ############################ # INSERT ############################ class InsertStatementSegment(BaseSegment): """An `INSERT` statement.""" type = "insert_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "INSERT", Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Ref("ValuesRangeClauseSegment"), Sequence("DEFAULT", "VALUES"), Ref("SelectableGrammar"), Ref("BracketedColumnReferenceListGrammar", optional=True), ), ) ############################ # UPDATE ############################ class UpdateStatementSegment(BaseSegment): """An `UPDATE` statement. UPDATE <table> SET <set clause list> [ WHERE <search condition> ]
https://docs.exasol.com/sql/update.htm """ type = "update_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "UPDATE", Indent, OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")), Dedent, Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("PreferringClauseSegment", optional=True), ) class SetClauseListSegment(BaseSegment): """Overwritten from ANSI.""" type = "set_clause_list" match_grammar = Sequence( "SET", Indent, Delimited( Ref("SetClauseSegment"), terminators=["FROM"], ), Dedent, ) class SetClauseSegment(BaseSegment): """Overwritten from ANSI.""" type = "set_clause" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), OneOf( Ref("ExpressionSegment"), # Maybe add this to ANSI to match math x=x+1 Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), "NULL", "DEFAULT", ), ) ############################ # MERGE ############################ class MergeMatchSegment(BaseSegment): """Contains dialect specific merge operations.""" type = "merge_match" match_grammar = OneOf( Sequence( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment", optional=True), ), Sequence( Ref("MergeNotMatchedClauseSegment"), Ref("MergeMatchedClauseSegment", optional=True), ), ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar = Sequence( "WHEN", "MATCHED", "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar = Sequence( "WHEN", "NOT", "MATCHED", "THEN", Ref("MergeInsertClauseSegment"), ) class MergeUpdateClauseSegment(BaseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar = Sequence( "UPDATE", Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class MergeDeleteClauseSegment(BaseSegment): """`DELETE` clause within the `MERGE` statement.""" type = "merge_delete_clause" match_grammar = Sequence( "DELETE", Ref("WhereClauseSegment", optional=True), ) class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), ) ############################ # DELETE ############################ class DeleteStatementSegment(BaseSegment): """`DELETE` statement. https://docs.exasol.com/sql/delete.htm """ type = "delete_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "DELETE", Ref("StarSegment", optional=True), "FROM", OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")), Ref("WhereClauseSegment", optional=True), Ref("PreferringClauseSegment", optional=True), ) ############################ # TRUNCATE ############################ class TruncateStatementSegment(BaseSegment): """`TRUNCATE TABLE` statement. 
https://docs.exasol.com/sql/truncate.htm """ type = "truncate_table" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "TRUNCATE", "TABLE", Ref("TableReferenceSegment"), ) ############################ # IMPORT ############################ class ImportStatementSegment(BaseSegment): """`IMPORT` statement. https://docs.exasol.com/sql/import.htm """ type = "import_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "IMPORT", Sequence( "INTO", OneOf( Sequence( Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ), Bracketed( Delimited(Ref("ImportColumnsSegment")), ), ), optional=True, ), Ref("ImportFromClauseSegment"), ) class ExportStatementSegment(BaseSegment): """`EXPORT` statement. https://docs.exasol.com/sql/export.htm """ type = "export_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "EXPORT", OneOf( Sequence( Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ), Bracketed( Ref("SelectableGrammar"), ), ), Ref("ExportIntoClauseSegment"), ) class ExportIntoClauseSegment(BaseSegment): """EXPORT INTO CLAUSE.""" type = "export_into_clause" match_grammar = Sequence( "INTO", OneOf( Sequence( OneOf( Ref("ImportFromExportIntoDbSrcSegment"), Ref("ImportFromExportIntoFileSegment"), ), Ref("RejectClauseSegment", optional=True), ), Ref("ImportFromExportIntoScriptSegment"), ), ) class ImportColumnsSegment(BaseSegment): """IMPORT COLUMNS.""" type = "import_columns" match_grammar = Sequence( OneOf( Ref("ColumnDatatypeSegment"), Ref("CreateTableLikeClauseSegment"), ) ) class ImportFromClauseSegment(BaseSegment): """IMPORT FROM CLAUSE.""" type = "import_from_clause" match_grammar = Sequence( "FROM", OneOf( Sequence( OneOf( Ref("ImportFromExportIntoDbSrcSegment"), Ref("ImportFromExportIntoFileSegment"), ), Ref("ImportErrorsClauseSegment", optional=True), ), Ref("ImportFromExportIntoScriptSegment"), ), ) class ImportFromExportIntoDbSrcSegment(BaseSegment): """`IMPORT` from or `EXPORT` to a external database source (EXA,ORA,JDBC).""" type = "import_export_dbsrc" match_grammar = Sequence( OneOf( "EXA", "ORA", Sequence( "JDBC", Sequence( "DRIVER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence("AT", Ref("ConnectionDefinition")), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), Sequence( # EXPORT only AnyNumberOf( OneOf("REPLACE", "TRUNCATE"), Sequence( "CREATED", "BY", Ref("QuotedLiteralSegment"), ), max_times=2, ), optional=True, ), ), AnyNumberOf( Sequence( "STATEMENT", Ref("QuotedLiteralSegment"), ), min_times=1, ), ), ) class ImportFromExportIntoFileSegment(BaseSegment): """`IMPORT` from or `EXPORT` to a file source (FBV,CSV).""" type = "import_file" match_grammar = Sequence( OneOf( Sequence( OneOf( "CSV", "FBV", ), AnyNumberOf( Sequence( "AT", Ref("ConnectionDefinition"), ), AnyNumberOf( "FILE", Ref("QuotedLiteralSegment"), min_times=1, ), min_times=1, ), ), Sequence( "LOCAL", Ref.keyword("SECURE", optional=True), OneOf( "CSV", "FBV", ), AnyNumberOf( "FILE", Ref("QuotedLiteralSegment"), min_times=1, ), ), ), OneOf( Ref("CSVColumnDefinitionSegment"), Ref("FBVColumnDefinitionSegment"), optional=True, ), Ref("FileOptionSegment", optional=True), ) class ImportFromExportIntoScriptSegment(BaseSegment): """`IMPORT` from / `EXPORT` to a executed database script.""" type = "import_script" match_grammar = 
Sequence( "SCRIPT", Ref("ObjectReferenceSegment"), Sequence("AT", Ref("ConnectionDefinition"), optional=True), Sequence( "WITH", AnyNumberOf( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), min_times=1, ), optional=True, ), ) class ImportErrorsClauseSegment(BaseSegment): """`ERRORS` clause.""" type = "import_errors_clause" match_grammar = Sequence( "ERRORS", "INTO", Ref("ImportErrorDestinationSegment"), Bracketed( Ref("ExpressionSegment"), # maybe wrong implementation? optional=True, ), OneOf( "REPLACE", "TRUNCATE", optional=True, ), Ref("RejectClauseSegment", optional=True), ) class ImportErrorDestinationSegment(BaseSegment): """Error destination (csv file or table).""" type = "import_error_destination" match_grammar = OneOf( Sequence( "CSV", Sequence("AT", Ref("ConnectionDefinition")), "FILE", Ref("QuotedLiteralSegment"), ), Sequence( "LOCAL", Ref.keyword("SECURE", optional=True), "CSV", "FILE", Ref("QuotedLiteralSegment"), ), Sequence( Ref("TableReferenceSegment"), ), ) class RejectClauseSegment(BaseSegment): """`REJECT` clause within an import / export statement.""" type = "reject_clause" match_grammar = Sequence( "REJECT", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), Ref.keyword("ERRORS", optional=True), ) class CSVColumnDefinitionSegment(BaseSegment): """Definition of csv columns within an `IMPORT` / `EXPORT` statement.""" type = "csv_cols" match_grammar = Bracketed( Delimited( Sequence( OneOf( Ref("NumericLiteralSegment"), Sequence( # Expression 1..3, for col 1, 2 and 3 Ref("NumericLiteralSegment"), Ref("RangeOperator"), Ref("NumericLiteralSegment"), ), ), Sequence( "FORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( # EXPORT only "DELIMIT", Ref("EqualsSegment"), OneOf("ALWAYS", "NEVER", "AUTO"), optional=True, ), ), ) ) class FBVColumnDefinitionSegment(BaseSegment): """Definition of fbv columns within an `IMPORT` / `EXPORT` statement.""" type = "fbv_cols" match_grammar = Bracketed( Delimited( AnyNumberOf( # IMPORT valid: SIZE ,START, FORMAT, PADDING, ALIGN # EXPORT valid: SIZE, FORMAT, ALIGN, PADDING Sequence( OneOf("SIZE", "START"), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( OneOf("FORMAT", "PADDING"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ALIGN", Ref("EqualsSegment"), OneOf("LEFT", "RIGHT"), ), ), ) ) class FileOptionSegment(BaseSegment): """File options.""" type = "file_opts" match_grammar = AnyNumberOf( OneOf( # IMPORT valid: ENCODING, NULL, ROW SEPARATOR, COLUMN SEPARATOR / DELIMITER # TRIM, LTRIM, RTRIM, SKIP, ROW SIZE # EXPORT valid: REPLACE, TRUNCATE, ENCODING, NULL, BOOLEAN, ROW SEPARATOR # COLUMN SEPARATOR / DELIMITER, DELIMIT, WITH COLUMN NAMES "ENCODING", "NULL", "BOOLEAN", Sequence("ROW", "SEPARATOR"), Sequence( "COLUMN", OneOf("SEPARATOR", "DELIMITER"), ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), OneOf("TRIM", "LTRIM", "RTRIM"), Sequence( OneOf( "SKIP", Sequence("ROW", "SIZE"), ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), "REPLACE", "TRUNCATE", Sequence( "WITH", "COLUMN", "NAMES", ), Sequence( # EXPORT only "DELIMIT", Ref("EqualsSegment"), OneOf("ALWAYS", "NEVER", "AUTO"), ), ) ############################ # USER ############################ class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. 
https://docs.exasol.com/sql/create_user.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), "IDENTIFIED", OneOf( Ref("UserPasswordAuthSegment"), Ref("UserKerberosAuthSegment"), Ref("UserLDAPAuthSegment"), Ref("UserOpenIDAuthSegment"), ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.exasol.com/sql/alter_user.htm """ type = "alter_user_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "USER", Ref("RoleReferenceSegment"), OneOf( Sequence( "IDENTIFIED", OneOf( Sequence( Ref("UserPasswordAuthSegment"), Sequence( "REPLACE", Ref("PasswordLiteralSegment"), optional=True, ), ), Ref("UserLDAPAuthSegment"), Ref("UserKerberosAuthSegment"), Ref("UserOpenIDAuthSegment"), ), ), Sequence( "PASSWORD_EXPIRY_POLICY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("PASSWORD", "EXPIRE"), Sequence("RESET", "FAILED", "LOGIN", "ATTEMPTS"), Sequence( "SET", "CONSUMER_GROUP", Ref("EqualsSegment"), OneOf(Ref("SingleIdentifierGrammar"), "NULL"), ), ), ) class UserPasswordAuthSegment(BaseSegment): """user password authentication.""" type = "password_auth" match_grammar = Sequence( # password "BY", Ref("PasswordLiteralSegment"), ) class UserKerberosAuthSegment(BaseSegment): """user kerberos authentication.""" type = "kerberos_auth" match_grammar = Sequence( "BY", "KERBEROS", "PRINCIPAL", Ref("QuotedLiteralSegment"), ) class UserLDAPAuthSegment(BaseSegment): """user ldap authentication.""" type = "ldap_auth" match_grammar = Sequence( "AT", "LDAP", "AS", Ref("QuotedLiteralSegment"), Ref.keyword("FORCE", optional=True), ) class UserOpenIDAuthSegment(BaseSegment): """User OpenID authentication.""" type = "openid_auth" match_grammar = Sequence( "BY", "OPENID", "SUBJECT", Ref("QuotedLiteralSegment"), ) class DropUserStatementSegment(ansi.DropUserStatementSegment): """A `DROP USER` statement with CASCADE option. https://docs.exasol.com/sql/drop_user.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "USER", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), Ref.keyword("CASCADE", optional=True), ) ############################ # CONSUMER GROUP ############################ class CreateConsumerGroupSegment(BaseSegment): """`CREATE CONSUMER GROUP` statement.""" type = "create_consumer_group_statement" match_grammar = Sequence( "CREATE", "CONSUMER", "GROUP", Ref("SingleIdentifierGrammar"), "WITH", Delimited(Ref("ConsumerGroupParameterSegment")), ) class AlterConsumerGroupSegment(BaseSegment): """`ALTER CONSUMER GROUP` statement.""" type = "alter_consumer_group_statement" match_grammar = Sequence( "ALTER", "CONSUMER", "GROUP", Ref("SingleIdentifierGrammar"), "SET", Delimited(Ref("ConsumerGroupParameterSegment")), ) class ConsumerGroupParameterSegment(BaseSegment): """Consumer Group Parameters.""" type = "consumer_group_parameter" match_grammar = Sequence( OneOf( "CPU_WEIGHT", "PRECEDENCE", "GROUP_TEMP_DB_RAM_LIMIT", "USER_TEMP_DB_RAM_LIMIT", "SESSION_TEMP_DB_RAM_LIMIT", "QUERY_TIMEOUT", "IDLE_TIMEOUT", ), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class DropConsumerGroupSegment(BaseSegment): """A `DROP CONSUMER GROUP` statement. 
https://docs.exasol.com/sql/consumer_group.htm """ type = "drop_consumer_group_statement" match_grammar = Sequence( "DROP", Sequence("CONSUMER", "GROUP"), Ref("SingleIdentifierGrammar") ) ############################ # ROLE ############################ class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """`CREATE ROLE` statement. https://docs.exasol.com/sql/create_role.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", "ROLE", Ref("RoleReferenceSegment"), ) class AlterRoleStatementSegment(BaseSegment): """`ALTER ROLE` statement. Only allowed to alter CONSUMER GROUPs """ type = "alter_role_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "ROLE", Ref("RoleReferenceSegment"), "SET", Sequence( "CONSUMER_GROUP", Ref("EqualsSegment"), OneOf(Ref("SingleIdentifierGrammar"), "NULL"), ), ) class DropRoleStatementSegment(ansi.DropRoleStatementSegment): """A `DROP ROLE` statement with CASCADE option. https://docs.exasol.com/sql/drop_role.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), Ref.keyword("CASCADE", optional=True), ) ############################ # CONNECTION ############################ class CreateConnectionSegment(BaseSegment): """`CREATE CONNECTION` statement. https://docs.exasol.com/sql/create_connection.htm """ type = "create_connection" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "CONNECTION", Ref("NakedIdentifierSegment"), "TO", Ref("ConnectionDefinition"), ) class AlterConnectionSegment(BaseSegment): """`ALTER CONNECTION` statement. https://docs.exasol.com/sql/alter_connection.htm """ type = "alter_connection" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "CONNECTION", Ref("NakedIdentifierSegment"), "TO", Ref("ConnectionDefinition"), ) class ConnectionDefinition(BaseSegment): """Definition of a connection.""" type = "connection_definition" match_grammar = Sequence( OneOf( # string or identifier Ref("SingleIdentifierGrammar"), Ref("QuotedLiteralSegment"), ), Sequence( "USER", Ref("QuotedLiteralSegment"), "IDENTIFIED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), ) class DropConnectionStatementSegment(BaseSegment): """A `DROP CONNECTION` statement. https://docs.exasol.com/sql/drop_connection.htm """ type = "drop_connection_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "CONNECTION", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ) ############################ # GRANT / REVOKE ############################ class AccessStatementSegment(BaseSegment): """`GRANT` / `REVOKE` statement. 
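For illustration (examples assumed): `GRANT SELECT ON SCHEMA s1 TO r1;` or `REVOKE CREATE TABLE FROM u1;`.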
https://docs.exasol.com/sql/grant.htm https://docs.exasol.com/sql/revoke.htm """ type = "access_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( OneOf("GRANT", "REVOKE"), OneOf( Ref("GrantRevokeSystemPrivilegesSegment"), Ref("GrantRevokeObjectPrivilegesSegment"), Ref("GrantRevokeRolesSegment"), Ref("GrantRevokeImpersonationSegment"), Ref("GrantRevokeConnectionSegment"), Ref("GrantRevokeConnectionRestrictedSegment"), ), ) class GrantRevokeSystemPrivilegesSegment(BaseSegment): """`GRANT` / `REVOKE` system privileges.""" type = "grant_revoke_system_privileges" match_grammar = Sequence( OneOf( Sequence( "ALL", Ref.keyword( "PRIVILEGES", optional=True, ), ), Delimited( Ref("SystemPrivilegesSegment"), terminators=["TO", "FROM"], ), ), OneOf("TO", "FROM"), Delimited( Ref("NakedIdentifierSegment"), ), Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only ) class GrantRevokeObjectPrivilegesSegment(BaseSegment): """`GRANT` / `REVOKE` object privileges.""" type = "grant_revoke_object_privileges" match_grammar = Sequence( OneOf( Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), Delimited(Ref("ObjectPrivilegesSegment"), terminators=["ON"]), ), "ON", OneOf( OneOf("SCHEMA", "TABLE", "VIEW", "FUNCTION", "SCRIPT"), Sequence("ALL", Ref.keyword("OBJECTS", optional=True)), # Revoke only optional=True, ), Ref("ObjectReferenceSegment"), OneOf( Sequence( # Grant only "TO", Delimited(Ref("SingleIdentifierGrammar")), ), Sequence( # Revoke only "FROM", Delimited(Ref("SingleIdentifierGrammar")), Sequence("CASCADE", "CONSTRAINTS", optional=True), ), ), ) class GrantRevokeRolesSegment(BaseSegment): """`GRANT` / `REVOKE` roles.""" type = "grant_revoke_roles" match_grammar = Sequence( OneOf( Sequence("ALL", "ROLES"), # Revoke only Delimited(Ref("RoleReferenceSegment"), terminators=["TO", "FROM"]), ), OneOf("TO", "FROM"), Delimited(Ref("RoleReferenceSegment")), Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only ) class GrantRevokeImpersonationSegment(BaseSegment): """`GRANT` / `REVOKE` impersonation.""" type = "grant_revoke_impersonation" match_grammar = Sequence( "IMPERSONATION", "ON", Delimited( Ref("SingleIdentifierGrammar"), terminators=["TO", "FROM"], ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), ) class GrantRevokeConnectionSegment(BaseSegment): """`GRANT` / `REVOKE` connection.""" type = "grant_revoke_connection" match_grammar = Sequence( "CONNECTION", Delimited( Ref("SingleIdentifierGrammar"), terminators=["TO", "FROM"], ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), Sequence("WITH", "ADMIN", "OPTION", optional=True), ) class GrantRevokeConnectionRestrictedSegment(BaseSegment): """`GRANT` / `REVOKE` connection restricted.""" type = "grant_revoke_connection_restricted" match_grammar = Sequence( "ACCESS", "ON", "CONNECTION", Ref("SingleIdentifierGrammar"), Sequence( "FOR", OneOf("SCRIPT", "SCHEMA", optional=True), Ref("SingleIdentifierGrammar"), ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), ) class SystemPrivilegesSegment(BaseSegment): """System privileges. 
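Illustrative examples of privileges matched here: ``CREATE SESSION``, ``KILL ANY SESSION``, ``SELECT ANY TABLE`` and ``IMPORT``.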
https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges """ type = "system_privilege" match_grammar = OneOf( Sequence("GRANT", "ANY", "OBJECT", "PRIVILEGE"), Sequence("GRANT", "ANY", "PRIVILEGE"), Sequence("SET", "ANY", "CONSUMER", "GROUP"), Sequence("MANAGE", "CONSUMER", "GROUPS"), Sequence("KILL", "ANY", "SESSION"), Sequence("ALTER", "SYSTEM"), Sequence(OneOf("CREATE", "ALTER", "DROP"), "USER"), Sequence("IMPERSONATE", "ANY", "USER"), Sequence(OneOf("DROP", "GRANT"), "ANY", "ROLE"), Sequence(OneOf("ALTER", "DROP", "GRANT", "USE", "ACCESS"), "ANY", "CONNECTION"), Sequence("CREATE", Ref.keyword("VIRTUAL", optional=True), "SCHEMA"), Sequence( OneOf("ALTER", "DROP", "USE"), "ANY", Ref.keyword("VIRTUAL", optional=True), "SCHEMA", Ref.keyword("REFRESH", optional=True), ), Sequence( "CREATE", OneOf( "TABLE", "VIEW", "CONNECTION", "ROLE", "SESSION", "FUNCTION", "SCRIPT" ), ), Sequence( OneOf("CREATE", "ALTER", "DELETE", "DROP", "INSERT", "SELECT", "UPDATE"), "ANY", "TABLE", ), Sequence("SELECT", "ANY", "DICTIONARY"), Sequence(OneOf("CREATE", "DROP"), "ANY", "VIEW"), Sequence( OneOf("CREATE", "DROP", "EXECUTE"), "ANY", OneOf("SCRIPT", "FUNCTION") ), "IMPORT", "EXPORT", ) class ObjectPrivilegesSegment(BaseSegment): """Object privileges. https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges """ type = "object_privilege" match_grammar = OneOf( "ALTER", "SELECT", "INSERT", "UPDATE", "DELETE", "REFERENCES", "EXECUTE", # Revoke only "IMPORT", "EXPORT", ) ############################ # SKYLINE ############################ class PreferringClauseSegment(BaseSegment): """`PREFERRING` clause of the Exasol Skyline extension. https://docs.exasol.com/advanced_analytics/skyline.htm#preferring_clause """ type = "preferring_clause" match_grammar = Sequence( "PREFERRING", OptionallyBracketed(Ref("PreferringPreferenceTermSegment")), Ref("PartitionClauseSegment", optional=True), ) class PreferringPreferenceTermSegment(BaseSegment): """The preference term of a `PREFERRING` clause.""" type = "preference_term" match_grammar = Sequence( OneOf( Sequence( OneOf("HIGH", "LOW"), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("LocalAliasSegment"), ), ), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("LocalAliasSegment"), ), ), Ref("PreferringPlusPriorTermSegment", optional=True), ) class PreferringPlusPriorTermSegment(BaseSegment): """The preferring preference term expression.""" type = "plus_prior_inverse" match_grammar = OneOf( Sequence( Sequence( OneOf( "PLUS", Sequence("PRIOR", "TO"), ), Ref("PreferringPreferenceTermSegment"), optional=True, ), ), Sequence( "INVERSE", Ref("PreferringPreferenceTermSegment"), ), ) class MLTableExpressionSegment(ansi.MLTableExpressionSegment): """Not supported.""" match_grammar = Nothing() ############################ # SYSTEM ############################ class AlterSessionSegment(BaseSegment): """`ALTER SESSION` statement.""" type = "alter_session_statement" match_grammar = Sequence( "ALTER", "SESSION", "SET", Ref("SessionParameterSegment"), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class AlterSystemSegment(BaseSegment): """`ALTER SYSTEM` statement.""" type = "alter_system_statement" match_grammar = Sequence( "ALTER", "SYSTEM", "SET", Ref("SystemParameterSegment"), Ref("EqualsSegment"), 
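# The assigned value may be a quoted or a numeric literal, e.g. # ALTER SYSTEM SET QUERY_TIMEOUT = 120 (an illustrative example).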
OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class OpenSchemaSegment(BaseSegment): """`OPEN SCHEMA` statement.""" type = "open_schema_statement" match_grammar = Sequence("OPEN", "SCHEMA", Ref("SchemaReferenceSegment")) class CloseSchemaSegment(BaseSegment): """`CLOSE SCHEMA` statement.""" type = "close_schema_statement" match_grammar = Sequence("CLOSE", "SCHEMA") class FlushStatisticsSegment(BaseSegment): """`FLUSH STATISTICS` statement.""" type = "flush_statistics_statement" match_grammar = Sequence("FLUSH", "STATISTICS") class RecompressReorganizeSegment(BaseSegment): """`RECOMPRESS` and `REOGRANIZE` statement.""" type = "recompress_reorganize_statement" match_grammar = Sequence( OneOf("RECOMPRESS", "REORGANIZE"), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar"), ), Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))), Sequence("SCHEMA", Ref("SchemaReferenceSegment")), Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))), "DATABASE", ), Ref.keyword("ENFORCE", optional=True), ) class PreloadSegment(BaseSegment): """`PRELOAD` statement.""" type = "preload_statement" match_grammar = Sequence( "PRELOAD", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar"), ), Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))), Sequence("SCHEMA", Ref("SchemaReferenceSegment")), Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))), "DATABASE", ), ) class ImpersonateSegment(BaseSegment): """`IMPERSONATE` statement.""" type = "impersonate_statement" match_grammar = Sequence("IMPERSONATE", Ref("SingleIdentifierGrammar")) class KillSegment(BaseSegment): """`KILL` statement.""" type = "kill_statement" match_grammar = Sequence( "KILL", OneOf( Sequence("SESSION", OneOf("CURRENT_SESSION", Ref("NumericLiteralSegment"))), Sequence( "STATEMENT", Ref("NumericLiteralSegment", optional=True), "IN", "SESSION", Ref("NumericLiteralSegment"), Sequence("WITH", "MESSAGE", Ref("QuotedLiteralSegment"), optional=True), ), ), ) class TruncateAuditLogsSegment(BaseSegment): """`TRUNCATE AUDIT LOGS` statement.""" type = "truncate_audit_logs_statement" match_grammar = Sequence( "TRUNCATE", "AUDIT", "LOGS", Sequence( "KEEP", OneOf( Sequence("LAST", OneOf("DAY", "MONTH", "YEAR")), Sequence("FROM", Ref("QuotedLiteralSegment")), ), optional=True, ), ) ############################ # OTHERS ############################ class TransactionStatementSegment(BaseSegment): """A `COMMIT` or `ROLLBACK` statement.""" type = "transaction_statement" match_grammar = Sequence( OneOf("COMMIT", "ROLLBACK"), Ref.keyword("WORK", optional=True) ) class ExecuteScriptSegment(BaseSegment): """`EXECUTE SCRIPT` statement.""" type = "execute_script_statement" match_grammar = Sequence( "EXECUTE", "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Delimited(Ref.keyword("ARRAY", optional=True), Ref("ExpressionSegment")), optional=True, ), Sequence("WITH", "OUTPUT", optional=True), ) class ExplainVirtualSegment(BaseSegment): """`EXPLAIN VIRTUAL` statement.""" type = "explain_virtual_statement" match_grammar = Sequence("EXPLAIN", "VIRTUAL", Ref("SelectableGrammar")) ############################ # FUNCTION ############################ class FunctionReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a function.""" type = "function_reference" class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement.""" type = "create_function_statement" is_ddl = True is_dml = False is_dql = False 
is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "FUNCTION", Ref("FunctionReferenceSegment"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref.keyword("IN", optional=True), Ref("DatatypeSegment"), # Column type ), optional=True, ), ), "RETURN", Ref("DatatypeSegment"), OneOf("IS", "AS", optional=True), Indent, AnyNumberOf( Sequence( Ref("VariableNameSegment"), Ref("DatatypeSegment"), Ref("DelimiterGrammar"), ), optional=True, ), Dedent, "BEGIN", Indent, AnyNumberOf(Ref("FunctionBodySegment")), "RETURN", Ref("FunctionContentsExpressionGrammar"), Ref("DelimiterGrammar"), Dedent, "END", Ref("FunctionReferenceSegment", optional=True), Ref("SemicolonSegment", optional=True), ) class FunctionBodySegment(BaseSegment): """The definition of the function body.""" type = "function_body" match_grammar = Sequence( OneOf( Ref("FunctionAssignmentSegment"), Ref("FunctionIfBranchSegment"), Ref("FunctionForLoopSegment"), Ref("FunctionWhileLoopSegment"), ), ) class FunctionAssignmentSegment(BaseSegment): """The definition of an assignment within a function body.""" type = "function_assignment" match_grammar = Sequence( # assignment Ref("VariableNameSegment"), Ref("WalrusOperatorSegment"), OneOf( Ref("FunctionSegment"), Ref("VariableNameSegment"), Ref("LiteralGrammar"), Ref("ExpressionSegment"), ), Ref("SemicolonSegment"), ) class FunctionIfBranchSegment(BaseSegment): """The definition of an if branch within a function body.""" type = "function_if_branch" match_grammar = Sequence( "IF", AnyNumberOf(Ref("ExpressionSegment")), "THEN", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, AnyNumberOf( Sequence( OneOf("ELSIF", "ELSEIF"), Ref("ExpressionSegment"), "THEN", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, ), optional=True, ), Sequence( "ELSE", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, optional=True, ), "END", "IF", Ref("SemicolonSegment"), ) class FunctionForLoopSegment(BaseSegment): """The definition of a for loop within a function body.""" type = "function_for_loop" match_grammar = Sequence( "FOR", Ref("NakedIdentifierSegment"), OneOf( # for x := 1 to 10 do... Sequence( Ref("WalrusOperatorSegment"), Ref("ExpressionSegment"), # could be a variable "TO", Ref("ExpressionSegment"), # could be a variable "DO", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "FOR", ), # for x IN 1..10... Sequence( "IN", Ref("ExpressionSegment"), # could be a variable Ref("RangeOperator"), Ref("ExpressionSegment"), # could be a variable "LOOP", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "LOOP", ), ), Ref("SemicolonSegment"), ) class FunctionWhileLoopSegment(BaseSegment): """The definition of a while loop within a function body.""" type = "function_while_loop" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), "DO", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "WHILE", Ref("SemicolonSegment"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement with CASCADE and RESTRICT options.
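e.g. ``DROP FUNCTION IF EXISTS my_schema.my_function CASCADE`` (an illustrative example; the function name is a placeholder).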
https://docs.exasol.com/sql/drop_function.htm """ type = "drop_function_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # SCRIPT ############################ class ScriptReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a script.""" type = "script_reference" class ScriptContentSegment(BaseSegment): """This represents the script content. Because the script content could be written in LUA, PYTHON, JAVA or R, there is no further verification. """ type = "script_content" match_grammar = Anything( terminators=[Ref("FunctionScriptTerminatorSegment")], # Within the script we should _only_ look for the script # terminator segment. reset_terminators=True, ) class CreateScriptingLuaScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create a Lua scripting script. https://docs.exasol.com/sql/create_script.htm """ type = "create_scripting_lua_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("LUA", optional=True), "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Delimited( Sequence( Ref.keyword("ARRAY", optional=True), Ref("SingleIdentifierGrammar") ), optional=True, ), optional=True, ), Sequence(Ref.keyword("RETURNS"), OneOf("TABLE", "ROWCOUNT"), optional=True), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class CreateUDFScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create a UDF script. https://docs.exasol.com/sql/create_script.htm """ type = "create_udf_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf( "JAVA", "PYTHON", "LUA", "R", Ref("SingleIdentifierGrammar"), optional=True ), OneOf("SCALAR", "SET"), "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Sequence( Ref("UDFParameterGrammar"), Ref("OrderByClauseSegment", optional=True), optional=True, ), ), OneOf(Sequence("RETURNS", Ref("DatatypeSegment")), Ref("EmitsSegment")), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class CreateAdapterScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create an adapter script. https://docs.exasol.com/sql/create_script.htm """ type = "create_adapter_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf("JAVA", "PYTHON", "LUA", Ref("SingleIdentifierGrammar")), "ADAPTER", "SCRIPT", Ref("ScriptReferenceSegment"), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class DropScriptStatementSegment(BaseSegment): """A `DROP SCRIPT` statement.
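e.g. ``DROP ADAPTER SCRIPT IF EXISTS my_schema.my_script`` (an illustrative example; ADAPTER is optional and the script name is a placeholder).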
https://docs.exasol.com/sql/drop_script.htm """ type = "drop_script_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", Sequence( Ref.keyword("ADAPTER", optional=True), "SCRIPT", ), Ref("IfExistsGrammar", optional=True), Ref("ScriptReferenceSegment"), ) ############################ # DIALECT ############################ class FunctionScriptStatementSegment(BaseSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = OneOf( Ref("CreateFunctionStatementSegment"), Ref("CreateScriptingLuaScriptStatementSegment"), Ref("CreateUDFScriptStatementSegment"), Ref("CreateAdapterScriptStatementSegment"), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = OneOf( # Data Query Language (DQL) Ref("SelectableGrammar"), # Data Modifying Language (DML) Ref("DeleteStatementSegment"), Ref("ExportStatementSegment"), Ref("ImportStatementSegment"), Ref("InsertStatementSegment"), Ref("MergeStatementSegment"), Ref("TruncateStatementSegment"), Ref("UpdateStatementSegment"), # Data Definition Language (DDL) Ref("AlterTableStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("AlterVirtualSchemaStatementSegment"), Ref("CommentStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateViewStatementSegment"), Ref("CreateVirtualSchemaStatementSegment"), Ref("DropViewStatementSegment"), Ref("DropFunctionStatementSegment"), Ref("DropScriptStatementSegment"), Ref("DropSchemaStatementSegment"), Ref("DropTableStatementSegment"), Ref("RenameStatementSegment"), # Access Control Language (DCL) Ref("AccessStatementSegment"), Ref("AlterConnectionSegment"), Ref("AlterUserStatementSegment"), Ref("CreateConnectionSegment"), Ref("CreateRoleStatementSegment"), Ref("CreateUserStatementSegment"), Ref("DropRoleStatementSegment"), Ref("DropUserStatementSegment"), Ref("DropConnectionStatementSegment"), # System Ref("CreateConsumerGroupSegment"), Ref("AlterConsumerGroupSegment"), Ref("DropConsumerGroupSegment"), Ref("AlterRoleStatementSegment"), Ref("AlterSessionSegment"), Ref("AlterSystemSegment"), Ref("OpenSchemaSegment"), Ref("CloseSchemaSegment"), Ref("FlushStatisticsSegment"), Ref("ImpersonateSegment"), Ref("RecompressReorganizeSegment"), Ref("KillSegment"), Ref("PreloadSegment"), Ref("TruncateAuditLogsSegment"), Ref("ExplainVirtualSegment"), # Others Ref("TransactionStatementSegment"), Ref("ExecuteScriptSegment"), terminators=[Ref("DelimiterGrammar")], ) class FileSegment(BaseFileSegment): """This overrides the FileSegment from ANSI. This is because SCRIPT and FUNCTION statements are terminated by a trailing / character, while a semicolon terminates the statements within the function / script. """ match_grammar = Delimited( Ref("FunctionScriptStatementSegment"), Ref("StatementSegment"), delimiter=OneOf( Ref("DelimiterGrammar"), Ref("FunctionScriptTerminatorSegment"), ), allow_gaps=True, allow_trailing=True, ) class EmitsSegment(BaseSegment): """EMITS Segment for JSON_EXTRACT for example. In its own segment to give it a type to allow AL03 to find it easily.
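e.g. ``EMITS (id DECIMAL(18,0), name VARCHAR(100))`` in a ``CREATE ... SCRIPT`` statement (an illustrative example; the column names and types are placeholders, and the exact form is governed by UDFParameterGrammar).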
""" type = "emits_segment" match_grammar = Sequence( "EMITS", Bracketed(Ref("UDFParameterGrammar")), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_exasol_keywords.py000066400000000000000000000364471503426445100250460ustar00rootroot00000000000000"""A list of all SQL key words.""" RESERVED_KEYWORDS = [ "ABSOLUTE", "ACTION", "ADD", "AFTER", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "APPEND", "ARE", "ARRAY", "AS", "ASC", "ASENSITIVE", "ASSERTION", "AT", "ATTRIBUTE", "AUTHID", "AUTHORIZATION", "BEFORE", "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BIT", "BLOB", "BLOCKED", "BOOL", "BOOLEAN", "BOTH", "BY", "BYTE", "CALL", "CALLED", "CARDINALITY", "CASCADE", "CASCADED", "CASE", "CASESPECIFIC", "CAST", "CATALOG", "CHAIN", "CHAR", "CHARACTER", "CHARACTERISTICS", "CHARACTER_SET_CATALOG", "CHARACTER_SET_NAME", "CHARACTER_SET_SCHEMA", "CHECK", "CHECKED", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLATION", "COLLATION_CATALOG", "COLLATION_NAME", "COLLATION_SCHEMA", "COLUMN", "COMMIT", "CONDITION", "CONNECTION", "CONNECT_BY_ISCYCLE", "CONNECT_BY_ISLEAF", "CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS", "CONSTRAINT_STATE_DEFAULT", "CONSTRUCTOR", "CONTAINS", "CONTINUE", "CONTROL", "CONVERT", "CORRESPONDING", "CREATE", "CROSS", # a unreserved keyword but needed to be reserved to make join clause work "CS", "CSV", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "CYCLE", "DATA", "DATALINK", "DATE", "DATETIME_INTERVAL_CODE", "DATETIME_INTERVAL_PRECISION", "DAY", "DBTIMEZONE", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFAULT_LIKE_ESCAPE_CHARACTER", "DEFERRABLE", "DEFERRED", "DEFINED", "DEFINER", "DELETE", "DEREF", "DERIVED", "DESC", "DESCRIBE", "DESCRIPTOR", "DETERMINISTIC", "DISABLE", "DISABLED", "DISCONNECT", "DISPATCH", "DISTINCT", "DLURLCOMPLETE", "DLURLPATH", "DLURLPATHONLY", "DLURLSCHEME", "DLURLSERVER", "DLVALUE", "DO", "DOMAIN", "DOUBLE", "DROP", "DYNAMIC", "DYNAMIC_FUNCTION", "DYNAMIC_FUNCTION_CODE", "EACH", "ELSE", "ELSEIF", "ELSIF", "EMITS", "ENABLE", "ENABLED", "END", "END-EXEC", "ENDIF", "ENFORCE", "EQUALS", "ERRORS", "ESCAPE", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", "EXISTS", "EXIT", "EXPORT", "EXTERNAL", "EXTRACT", "FALSE", "FBV", "FETCH", "FILE", "FINAL", "FIRST", "FLOAT", "FOLLOWING", "FOR", "FORALL", "FORCE", "FORMAT", "FOUND", "FREE", "FROM", "FS", "FULL", "FUNCTION", "GENERAL", "GENERATED", "GEOMETRY", "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GRANTED", "GROUP", "GROUPING", "GROUPS", "GROUP_CONCAT", "HASHTYPE", "HASHTYPE_FORMAT", "HAVING", "HIGH", "HOLD", "HOUR", "IDENTITY", "IF", "IFNULL", "IMMEDIATE", "IMPERSONATE", "IMPLEMENTATION", "IMPORT", "IN", "INDEX", "INDICATOR", "INNER", "INOUT", "INPUT", "INSENSITIVE", "INSERT", "INSTANCE", "INSTANTIABLE", "INT", "INTEGER", "INTEGRITY", "INTERSECT", "INTERVAL", "INTO", "INVERSE", "INVOKER", "IS", "ITERATE", "JOIN", "KEY_MEMBER", "KEY_TYPE", "LARGE", "LAST", "LATERAL", "LDAP", "LEADING", "LEAVE", "LEFT", "LEVEL", "LIKE", "LIMIT", "LISTAGG", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "LOG", "LONGVARCHAR", "LOOP", "LOW", "MAP", "MATCH", "MATCHED", "MERGE", "METHOD", "MINUS", "MINUTE", "MOD", "MODIFIES", "MODIFY", "MODULE", "MONTH", "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NEXT", "NLS_DATE_FORMAT", "NLS_DATE_LANGUAGE", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "NO", "NOCYCLE", "NOLOGGING", "NONE", "NOT", "NULL", "NULLIF", "NUMBER", 
"NUMERIC", "NVARCHAR", "NVARCHAR2", "OBJECT", "OF", "OFF", "OLD", "ON", "ONLY", "OPEN", "OPTION", "OPTIONS", "OR", "ORDER", "ORDERING", "ORDINALITY", "OTHERS", "OUT", "OUTER", "OUTPUT", "OVER", "OVERLAPS", "OVERLAY", "OVERRIDING", "PAD", "PARALLEL_ENABLE", "PARAMETER", "PARAMETER_SPECIFIC_CATALOG", "PARAMETER_SPECIFIC_NAME", "PARAMETER_SPECIFIC_SCHEMA", "PARTIAL", "PARTITION", # Should really be an unreserved keyword but need for Window clauses "PATH", "PERMISSION", "PLACING", "PLUS", "POSITION", "PRECEDING", "PREFERRING", "PREPARE", "PRESERVE", "PRIOR", "PRIVILEGES", "PROCEDURE", "PROFILE", "QUALIFY", "RANDOM", "RANGE", "READ", "READS", "REAL", "RECOVERY", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REFRESH", "REGEXP_LIKE", "RELATIVE", "RELEASE", "RENAME", "REPEAT", "REPLACE", "RESTORE", "RESTRICT", "RESULT", "RETURN", "RETURNED_LENGTH", "RETURNED_OCTET_LENGTH", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROUTINE", "ROW", "ROWS", "ROWTYPE", "SAVEPOINT", "SCHEMA", "SCOPE", "SCOPE_USER", "SCRIPT", "SCROLL", "SEARCH", "SECOND", "SECTION", "SECURITY", "SELECT", "SELECTIVE", "SELF", "SENSITIVE", "SEPARATOR", "SEQUENCE", "SESSION", "SESSIONTIMEZONE", "SESSION_USER", "SET", "SETS", "SHORTINT", "SIMILAR", "SMALLINT", "SOME", "SOURCE", "SPACE", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SQL_BIGINT", "SQL_BIT", "SQL_CHAR", "SQL_DATE", "SQL_DECIMAL", "SQL_DOUBLE", "SQL_FLOAT", "SQL_INTEGER", "SQL_LONGVARCHAR", "SQL_NUMERIC", "SQL_PREPROCESSOR_SCRIPT", "SQL_REAL", "SQL_SMALLINT", "SQL_TIMESTAMP", "SQL_TINYINT", "SQL_TYPE_DATE", "SQL_TYPE_TIMESTAMP", "SQL_VARCHAR", "START", "STATE", "STATEMENT", "STATIC", "STRUCTURE", "STYLE", "SUBSTRING", "SUBTYPE", "SYSDATE", "SYSTEM", "SYSTEM_USER", "SYSTIMESTAMP", "TABLE", "TEMPORARY", "TEXT", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TINYINT", "TO", "TRAILING", "TRANSACTION", "TRANSFORM", "TRANSFORMS", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", "TRUNCATE", "UNDER", "UNION", "UNIQUE", "UNKNOWN", "UNLINK", "UNNEST", "UNTIL", "UPDATE", "USAGE", "USER", "USING", "VALUE", "VALUES", "VARCHAR", "VARCHAR2", "VARRAY", "VERIFY", "VIEW", "WHEN", "WHENEVER", "WHERE", "WHILE", "WINDOW", "WITH", "WITHIN", "WITHOUT", "WORK", "YEAR", "YES", "ZONE", ] UNRESERVED_KEYWORDS = [ "ABS", "ACCESS", "ACOS", "ADAPTER", "ADD_DAYS", "ADD_HOURS", "ADD_MINUTES", "ADD_MONTHS", "ADD_SECONDS", "ADD_WEEKS", "ADD_YEARS", "ADMIN", "ALIGN", "ALWAYS", "ANALYZE", "ANSI", "APPROXIMATE_COUNT_DISTINCT", "ASCII", "ASIN", "ASSIGNMENT", "ASYMMETRIC", "ATAN", "ATAN2", "ATOMIC", "ATTEMPTS", "AUDIT", "AUTHENTICATED", "AUTO", "AVG", "BACKUP", "BERNOULLI", "BIT_AND", "BIT_CHECK", "BIT_LENGTH", "BIT_LROTATE", "BIT_LSHIFT", "BIT_NOT", "BIT_OR", "BIT_RROTATE", "BIT_RSHIFT", "BIT_SET", "BIT_TO_NUM", "BIT_XOR", "BREADTH", "CEIL", "CEILING", "CHANGE", "CHARACTERS", "CHARACTER_LENGTH", "CHR", "CLEAR", "COBOL", "COLOGNE_PHONETIC", "COMMENT", "COMMENTS", "COMMITTED", "CONCAT", "CONNECT", "CONVERT_TZ", "CORR", "COS", "COSH", "COT", "COUNT", "COVAR_POP", "COVAR_SAMP", "CREATED", "CURDATE", "DATABASE", "DATE_TRUNC", "DAYS_BETWEEN", "DEBUG", "DECODE", "DEFAULTS", "DEFAULT_CONSUMER_GROUP", "DEGREES", "DELIMIT", "DELIMITER", "DENSE_RANK", "DEPTH", "DIAGNOSTICS", "DICTIONARY", "DISTRIBUTE", "DISTRIBUTION", "DIV", "DOWN", "DUMP", "EDIT_DISTANCE", "EMPTY", "ENCODING", "ERROR", "ESTIMATE", "EVALUATE", "EVERY", "EXA", "EXCLUDE", "EXCLUDING", "EXP", "EXPERIMENTAL", "EXPIRE", "EXPLAIN", "EXPRESSION", "FAILED", "FILES", "FIRST_VALUE", "FLOOR", "FLUSH", 
"FOREIGN", "FORTRAN", "FROM_POSIX_TIME", "GRAPH", "GREATEST", "GROUPING_ID", "HANDLER", "HAS", "HASH", "HASHTYPE_MD5", "HASHTYPE_SHA", "HASHTYPE_SHA1", "HASHTYPE_SHA256", "HASHTYPE_SHA512", "HASHTYPE_TIGER", "HASH_MD5", "HASH_SHA", "HASH_SHA1", "HASH_SHA256", "HASH_SHA512", "HASH_TIGER", "HIERARCHY", "HOURS_BETWEEN", "IDENTIFIED", "IDLE_TIMEOUT", "IGNORE", "IMPERSONATION", "INCLUDING", "INITCAP", "INITIALLY", "INSTR", "INVALID", "IPROC", "ISOLATION", "IS_BOOLEAN", "IS_DATE", "IS_DSINTERVAL", "IS_NUMBER", "IS_TIMESTAMP", "IS_YMINTERVAL", "JAVA", "JAVASCRIPT", "JSON", "JSON_EXTRACT", "JSON_VALUE", "KEEP", "KERBEROS", "KEY", "KEYS", "KILL", "LAG", "LANGUAGE", "LAST_VALUE", "LCASE", "LEAD", "LEAST", "LENGTH", "LINK", "LN", "LOCATE", "LOCK", "LOG10", "LOG2", "LOGIN", "LOGS", "LONG", "LOWER", "LPAD", "LTRIM", "LUA", "MANAGE", "MAX", "MAXIMAL", "MEDIAN", "MESSAGE", "MID", "MIN", "MINUTES_BETWEEN", "MIN_SCALE", "MONTHS_BETWEEN", "MUL", "MULTIPLE", "MUMPS", "NEVER", "NICE", "NORMALIZED", "NOTICE", "NOW", "NPROC", "NULLIFZERO", "NULLS", "NUMTODSINTERVAL", "NUMTOYMINTERVAL", "NVL", "NVL2", "OBJECTS", "OCTETS", "OCTET_LENGTH", "OFFSET", "OPENID", "OPTIMIZE", "OPTIMIZER", "ORA", "OVERFLOW", "OWNER", "PADDING", "PASCAL", "PASSWORD", "PASSWORD_EXPIRY_POLICY", "PASSWORD_SECURITY_POLICY", "PERCENTILE_CONT", "PERCENTILE_DISC", "PI", "PLI", "POSIX_TIME", "POWER", "PRECISION", "PRELOAD", "PRIMARY", "PRINCIPAL", "PRIVILEGE", "PYTHON", "QUERY", "QUERY_CACHE", "QUERY_TIMEOUT", "QUIET", "R", "RADIANS", "RAND", "RANK", "RATIO_TO_REPORT", "RAW_SIZE_LIMIT", "RECOMPRESS", "RECORD", "REGEXP_INSTR", "REGEXP_REPLACE", "REGEXP_SUBSTR", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "REJECT", "REORGANIZE", "REPEATABLE", "RESET", "RESPECT", "RETURNING", "REVERSE", "ROLE", "ROLES", "ROUND", "ROWID", "ROW_NUMBER", "RPAD", "RTRIM", "SCALAR", "SCHEMAS", "SCHEME", "SCRIPT_LANGUAGES", "SCRIPT_OUTPUT_ADDRESS", "SECONDS_BETWEEN", "SECURE", "SERIALIZABLE", "SESSION_PARAMETER", "SESSION_TEMP_DB_RAM_LIMIT", "SHUT", "SIGN", "SIMPLE", "SIN", "SINH", "SIZE", "SKIP", "SNAPSHOT_MODE", "SOUNDEX", "SQRT", "STATISTICS", "STDDEV", "STDDEV_POP", "STDDEV_SAMP", "STEP", "ST_AREA", "ST_BOUNDARY", "ST_BUFFER", "ST_CENTROID", "ST_CONTAINS", "ST_CONVEXHULL", "ST_CROSSES", "ST_DIFFERENCE", "ST_DIMENSION", "ST_DISJOINT", "ST_DISTANCE", "ST_ENDPOINT", "ST_ENVELOPE", "ST_EQUALS", "ST_EXTERIORRING", "ST_FORCE2D", "ST_GEOMETRYN", "ST_GEOMETRYTYPE", "ST_INTERIORRINGN", "ST_INTERSECTION", "ST_INTERSECTS", "ST_ISCLOSED", "ST_ISEMPTY", "ST_ISRING", "ST_ISSIMPLE", "ST_LENGTH", "ST_MAX_DECIMAL_DIGITS", "ST_NUMGEOMETRIES", "ST_NUMINTERIORRINGS", "ST_NUMPOINTS", "ST_OVERLAPS", "ST_POINTN", "ST_SETSRID", "ST_STARTPOINT", "ST_SYMDIFFERENCE", "ST_TOUCHES", "ST_TRANSFORM", "ST_UNION", "ST_WITHIN", "ST_X", "ST_Y", "SUBSTR", "SUM", "SYMMETRIC", "SYS_CONNECT_BY_PATH", "SYS_GUID", "TABLES", "TABLESAMPLE", "TAN", "TANH", "TASKS", "TEMP_DB_RAM_LIMIT", "TIES", "TIMESTAMP_ARITHMETIC_BEHAVIOR", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TO_CHAR", "TO_DATE", "TO_DSINTERVAL", "TO_NUMBER", "TO_TIMESTAMP", "TO_YMINTERVAL", "TRACE", "TRANSLATE", "TRUNC", "TYPE", "TYPEOF", "UCASE", "UNBOUNDED", "UNCOMMITTED", "UNDO", "UNICODE", "UNICODECHR", "UNLIMITED", "UPPER", "USE", "USER_TEMP_DB_RAM_LIMIT", "UTF8", "VALUE2PROC", "VARIANCE", "VARYING", "VAR_POP", "VAR_SAMP", "VIRTUAL", "WEEK", "WRITE", "YEARS_BETWEEN", "ZEROIFNULL", # Additional unreserved keywords not defined in EXA_SQL_KEYWORDS "CONSUMER", "CONSUMER_GROUP", 
"CPU_WEIGHT", "DRIVER", "GROUP_TEMP_DB_RAM_LIMIT", "JDBC", "PRECEDENCE", "ROWCOUNT", "SUBJECT", ] BARE_FUNCTIONS = [ "CONNECT_BY_ISCYCLE", "CONNECT_BY_ISLEAF", "CONNECT_BY_ROOT", "CURDATE", "CURRENT_DATE", "CURRENT_SCHEMA", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIMESTAMP", "CURRENT_USER", "DBTIMEZONE", "LEVEL", "LOCALTIMESTAMP", "NOW", "ROWID", "ROWNUM", "SESSIONTIMEZONE", "SYSDATE", "SYSTIMESTAMP", "USER", ] SYSTEM_PARAMETERS = [ "CONSTRAINT_STATE_DEFAULT", "DEFAULT_CONSUMER_GROUP", "DEFAULT_LIKE_ESCAPE_CHARACTER", "HASHTYPE_FORMAT", "IDLE_TIMEOUT", "NLS_DATE_FORMAT", "NLS_DATE_LANGUAGE", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "PASSWORD_SECURITY_POLICY", "PASSWORD_EXPIRY_POLICY", "PROFILE", "QUERY_CACHE", "QUERY_TIMEOUT", "SCRIPT_OUTPUT_ADDRESS", "SCRIPT_LANGUAGES", "SESSION_TEMP_DB_RAM_LIMIT", "SNAPSHOT_MODE", "SQL_PREPROCESSOR_SCRIPT", "ST_MAX_DECIMAL_DIGITS", "TEMP_DB_RAM_LIMIT", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TIMESTAMP_ARITHMETIC_BEHAVIOR", "USER_TEMP_DB_RAM_LIMIT", ] SESSION_PARAMETERS = [ "CONSTRAINT_STATE_DEFAULT", "DEFAULT_LIKE_ESCAPE_CHARACTER", "HASHTYPE_FORMAT", "IDLE_TIMEOUT", "NICE", "NLS_DATE_LANGUAGE", "NLS_DATE_FORMAT", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "PROFILE", "QUERY_CACHE", "QUERY_TIMEOUT", "SCRIPT_LANGUAGES", "SCRIPT_OUTPUT_ADDRESS", "SESSION_TEMP_DB_RAM_LIMIT", "SNAPSHOT_MODE", "SQL_PREPROCESSOR_SCRIPT", "ST_MAX_DECIMAL_DIGITS", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TIMESTAMP_ARITHMETIC_BEHAVIOR", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_flink.py000066400000000000000000000365241503426445100227230ustar00rootroot00000000000000"""The FlinkSQL dialect. Inherits from ANSI SQL. FlinkSQL is based on ANSI SQL standard but includes additional features for stream processing and table operations. Based on: https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/table/sql/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, Bracketed, CodeSegment, CommentSegment, Delimited, IdentifierSegment, OneOf, OptionallyBracketed, Ref, RegexLexer, Sequence, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_flink_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") flink_dialect = ansi_dialect.copy_as( "flink", formatted_name="Apache Flink SQL", docstring="""**Default Casing**: FlinkSQL is case insensitive with both quoted and unquoted identifiers. **Quotes**: String Literals: ``'`` (single quotes), Identifiers: |back_quotes|. The dialect for Apache `Flink SQL`_. This dialect includes FlinkSQL-specific syntax for stream processing, table operations, and connector configurations. FlinkSQL supports advanced features like: - ROW data types for complex nested structures - Table connectors (Kafka, BigQuery, etc.) - Watermark definitions for event time processing - Computed columns and metadata columns - Temporal table functions .. 
_`Flink SQL`: https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/table/sql/ # noqa: E501 """, ) # Update keywords - extend ANSI keywords instead of replacing them flink_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) flink_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) # Add FlinkSQL-specific lexer patterns flink_dialect.patch_lexer_matchers( [ # FlinkSQL uses -- for single-line comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": "--"}, ), # Support for backtick-quoted identifiers RegexLexer( "back_quote", r"`([^`]|``)*`", CodeSegment, segment_kwargs={ "quoted_value": (r"`((?:[^`]|``)*)`", 1), "escape_replacements": [(r"``", "`")], }, ), # Support for numeric literals with precision/scale RegexLexer( "numeric_literal", ( r"(?>(?>\d+\.\d+|\d+\.|\.\d+)([eE][+-]?\d+)?([dDfF]|BD|bd)?" r"|\d+[eE][+-]?\d+([dDfF]|BD|bd)?" r"|\d+([dDfFlLsSyY]|BD|bd)?)" r"((?<=\.)|(?=\b))" ), CodeSegment, ), # Support for string literals with single quotes RegexLexer( "single_quote", r"'([^'\\]|\\.)*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^'\\]|\\.)*)'", 1), "escape_replacements": [(r"''", "'"), (r"\\'", "'")], }, ), # Support for == equality operator RegexLexer("equals", r"==|=", CodeSegment), ] ) # Add FlinkSQL-specific datetime units flink_dialect.sets("datetime_units").clear() flink_dialect.sets("datetime_units").update( [ "YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND", "MICROSECOND", "MILLISECOND", "NANOSECOND", "EPOCH", "DECADE", "CENTURY", "MILLENNIUM", "QUARTER", "WEEK", "DOW", "ISODOW", "DOY", "ISOYEAR", ] ) # Add FlinkSQL-specific bare functions flink_dialect.sets("bare_functions").clear() flink_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "LOCALTIME", "LOCALTIMESTAMP", "NOW", "CURRENT_WATERMARK", "PROCTIME", ] ) # Add angle brackets for generic types flink_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Add FlinkSQL-specific segments flink_dialect.add( # Angle bracket segments StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket", ), EndAngleBracketSegment=StringParser( ">", SymbolSegment, type="end_angle_bracket", ), # Backtick identifier support BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), casefold=str.upper, ), # Double equals operator for connector options DoubleEqualsSegment=StringParser("==", SymbolSegment, type="double_equals"), # Connector options for CREATE TABLE CreateTableConnectorOptionsSegment=Sequence( "WITH", Bracketed( Delimited( Sequence( OneOf( Ref("QuotedLiteralSegment"), # 'key' format Ref("NakedIdentifierSegment"), # key format ), OneOf( Ref("EqualsSegment"), # single = Ref("DoubleEqualsSegment"), # double == format ), OneOf( Ref("QuotedLiteralSegment"), # 'value' format Ref("NakedIdentifierSegment"), # value format ), ), ), ), ), # Watermark definition WatermarkDefinitionSegment=Sequence( "WATERMARK", "FOR", Ref("ColumnReferenceSegment"), "AS", Ref("ExpressionSegment"), ), # Computed column definition ComputedColumnDefinitionSegment=Sequence( Ref("NakedIdentifierSegment"), # column name "AS", Ref("ExpressionSegment"), # computed expression Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ), # Metadata column definition MetadataColumnDefinitionSegment=Sequence( Ref("NakedIdentifierSegment"), # column 
name Ref("DatatypeSegment"), # column type "METADATA", Sequence("FROM", Ref("QuotedLiteralSegment"), optional=True), Sequence("VIRTUAL", optional=True), Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ), # Table constraint FlinkTableConstraintSegment=Sequence( Sequence("CONSTRAINT", Ref("NakedIdentifierSegment"), optional=True), "PRIMARY", "KEY", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), "NOT", "ENFORCED", ), # Partition definition PartitionDefinitionSegment=Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), # Distribution definition DistributionDefinitionSegment=OneOf( # DISTRIBUTED BY [HASH|RANGE] (columns) [INTO n BUCKETS] Sequence( "DISTRIBUTED", "BY", OneOf("HASH", "RANGE", optional=True), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), Sequence("INTO", Ref("NumericLiteralSegment"), "BUCKETS", optional=True), ), # DISTRIBUTED INTO n BUCKETS Sequence( "DISTRIBUTED", "INTO", Ref("NumericLiteralSegment"), "BUCKETS", ), ), # LIKE options LikeOptionsSegment=Delimited( OneOf( # INCLUDING/EXCLUDING with ALL/CONSTRAINTS/DISTRIBUTION/PARTITIONS Sequence( OneOf("INCLUDING", "EXCLUDING"), OneOf("ALL", "CONSTRAINTS", "DISTRIBUTION", "PARTITIONS"), ), # INCLUDING/EXCLUDING/OVERWRITING with GENERATED/OPTIONS/WATERMARKS Sequence( OneOf("INCLUDING", "EXCLUDING", "OVERWRITING"), OneOf("GENERATED", "OPTIONS", "WATERMARKS"), ), ), ), # LIKE clause LikeClauseSegment=Sequence( "LIKE", Ref("TableReferenceSegment"), Bracketed( Ref("LikeOptionsSegment"), optional=True, ), ), ) flink_dialect.replace( SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement for FlinkSQL.""" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns definition with FlinkSQL-specific elements Sequence( Bracketed( Delimited( OneOf( # Physical column definition Ref("ColumnDefinitionSegment"), # Metadata column definition Ref("MetadataColumnDefinitionSegment"), # Computed column definition Ref("ComputedColumnDefinitionSegment"), # Watermark definition Ref("WatermarkDefinitionSegment"), # Table constraint Ref("FlinkTableConstraintSegment"), ), ), ), # Table comment Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), # Partition definition Ref("PartitionDefinitionSegment", optional=True), # Distribution definition Ref("DistributionDefinitionSegment", optional=True), # Connector options Ref("CreateTableConnectorOptionsSegment", optional=True), # LIKE clause Ref("LikeClauseSegment", optional=True), ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Ref("LikeClauseSegment"), ), ) class CreateCatalogStatementSegment(BaseSegment): """A `CREATE CATALOG` statement.""" type = "create_catalog_statement" match_grammar = Sequence( "CREATE", "CATALOG", Ref("NakedIdentifierSegment"), Ref("CreateTableConnectorOptionsSegment"), ) class CreateDatabaseStatementSegment(BaseSegment): """A `CREATE DATABASE` statement for FlinkSQL.""" type = "create_database_statement" match_grammar = Sequence( "CREATE", "DATABASE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), Ref("CreateTableConnectorOptionsSegment", optional=True), ) 
class DescribeStatementSegment(BaseSegment): """A `DESCRIBE` statement for FlinkSQL.""" type = "describe_statement" match_grammar = Sequence( "DESCRIBE", Ref("TableReferenceSegment"), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `EXPLAIN` statement for FlinkSQL.""" type = "explain_statement" match_grammar = Sequence( "EXPLAIN", Sequence("PLAN", "FOR", optional=True), Ref("SelectableGrammar"), ) class ShowStatementsSegment(BaseSegment): """A `SHOW` statement for FlinkSQL.""" type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( "CATALOGS", "DATABASES", "TABLES", "VIEWS", "FUNCTIONS", "MODULES", "JARS", "JOBS", ), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ # FlinkSQL-specific statements Ref("CreateCatalogStatementSegment"), Ref("CreateDatabaseStatementSegment"), Ref("DescribeStatementSegment"), Ref("ShowStatementsSegment"), ], ) class UseStatementSegment(ansi.UseStatementSegment): """A FlinkSQL USE statement.""" type = "use_statement" match_grammar = OneOf( # USE CATALOG catalog_name (needs to be first to prevent CATALOG being # parsed as database name) Sequence( "USE", "CATALOG", Ref("ObjectReferenceSegment"), ), # USE [catalog_name.]database_name Sequence( "USE", Ref("ObjectReferenceSegment"), ), ) class RowDataTypeSegment(BaseSegment): """A ROW data type for FlinkSQL.""" type = "row_data_type" match_grammar = Sequence( "ROW", OneOf( # ROW<...> syntax Bracketed( Delimited( Sequence( Ref("NakedIdentifierSegment"), # field name Ref("DatatypeSegment"), # field type Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ), ), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), # ROW(...) syntax Bracketed( Delimited( Sequence( Ref("NakedIdentifierSegment"), # field name Ref("DatatypeSegment"), # field type Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ), ), bracket_type="round", ), ), ) class DatatypeSegment(ansi.DatatypeSegment): """Enhanced data type segment for FlinkSQL.""" type = "data_type" match_grammar = OneOf( # FlinkSQL ROW data type Ref("RowDataTypeSegment"), # Array data types Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), ), # Map data types Sequence( "MAP", Bracketed( Sequence( Ref("DatatypeSegment"), # key type Ref("CommaSegment"), Ref("DatatypeSegment"), # value type ), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), ), # Multiset data types Sequence( "MULTISET", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), ), # Include standard ANSI data types by inheriting parent match_grammar ansi.DatatypeSegment.match_grammar, ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_flink_keywords.py000066400000000000000000000066251503426445100246510ustar00rootroot00000000000000"""A List of FlinkSQL keywords. 
Based on Apache Flink SQL reference: https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/table/sql/queries/ """ # FlinkSQL Reserved Keywords RESERVED_KEYWORDS = [ "ALL", "AND", "ANY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH", "BY", "CASE", "CAST", "CHAR", "CHARACTER", "CHECK", "COLLATE", "COLUMN", "CONSTRAINT", "CREATE", "CROSS", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "DATE", "DAY", "DECIMAL", "DECLARE", "DELETE", "DESC", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "ESCAPE", "EXCEPT", "EXISTS", "EXTRACT", "FALSE", "FETCH", "FILTER", "FLOAT", "FOR", "FOREIGN", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP", "HAVING", "HOUR", "IF", "IN", "INNER", "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LEADING", "LEFT", "LIKE", "LIMIT", "LOCAL", "MINUTE", "MONTH", "NATURAL", "NOT", "NULL", "NUMERIC", "OF", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERLAPS", "OVERLAY", "PARTITION", "POSITION", "PRIMARY", "REAL", "REFERENCES", "RIGHT", "ROW", "SECOND", "SELECT", "SESSION_USER", "SET", "SMALLINT", "SOME", "SUBSTRING", "TABLE", "THEN", "TIME", "TIMESTAMP", "TINYINT", "TO", "TRAILING", "TRUE", "UNION", "UNIQUE", "UNKNOWN", "UPDATE", "USER", "USING", "VALUES", "VARCHAR", "WHEN", "WHERE", "WITH", "YEAR", ] # FlinkSQL Unreserved Keywords UNRESERVED_KEYWORDS = [ "ADD", "AFTER", "ANALYZE", "APPEND", "ARRAY", "ASC", "AVRO_CONFLUENT", "AVRO", "BEGIN", "BIGQUERY", "BYTES", "CALL", "CANAL", "CANCEL", "CASSANDRA", "CATALOG", "CATALOGS", "CHANGELOG", "CHECKPOINT", "COMMENT", "COMPACTION", "COMPUTED", "CONNECTOR", "CSV", "CURRENT", "DATABASE", "DATABASES", "DEBEZIUM", "DESCRIBE", "DISTRIBUTED", "DISTRIBUTION", "ELASTIC", "ELASTICSEARCH", "ENFORCED", "EXCLUDING", "EXIT", "EXPLAIN", "EXTENDED", "FILESYSTEM", "FIRST", "FORMAT", "FUNCTIONS", "GENERATED", "HASH", "HBASE", "HELP", "HISTORY", "INCLUDING", "JARS", "JDBC", "JOBS", "JSON", "KAFKA", "KINESIS", "LAST", "LOAD", "MAP", "MAXWELL", "MERGE", "METADATA", "MODULES", "MULTISET", "OGG", "OPTION", "OPTIONS", "ORC", "OVERWRITING", "PARQUET", "PARTITIONED", "PARTITIONS", "PHYSICAL", "PLAN", "PROCEDURE", "PROCEDURES", "PROTOBUF_CONFLUENT", "PROTOBUF", "PULSAR", "QUIT", "RANGE", "RAW", "REDIS", "REPLACE", "RESET", "RESTART", "RESUME", "RETRACT", "SAVEPOINT", "SET", "SHOW", "SINK", "SOURCE", "START", "STOP", "SUSPEND", "SYSTEM", "TABLES", "TEMPORARY", "TIMESTAMP_LTZ", "TRUNCATE", "UNLOAD", "UPSERT", "USE", "VALUE", "VIRTUAL", "WATERMARK", "WATERMARKS", "YEAR", "ZONE", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_greenplum.py000066400000000000000000000461031503426445100236100ustar00rootroot00000000000000"""The Greenplum dialect. Greenplum (http://www.greenplum.org/) is a Massively Parallel Postgres, so we base this dialect on Postgres. 
""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, Dedent, Delimited, Indent, OneOf, OptionallyBracketed, ParseMode, Ref, Sequence, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_postgres as postgres from sqlfluff.dialects.dialect_greenplum_keywords import greenplum_keywords from sqlfluff.dialects.dialect_postgres_keywords import get_keywords postgres_dialect = load_raw_dialect("postgres") greenplum_dialect = postgres_dialect.copy_as( "greenplum", formatted_name="Greenplum", docstring="The dialect for `Greenplum `_.", ) greenplum_dialect.sets("reserved_keywords").update( get_keywords(greenplum_keywords, "reserved") ) greenplum_dialect.sets("unreserved_keywords").update( get_keywords(greenplum_keywords, "non-reserved") ) class StatementSegment(postgres.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = postgres.StatementSegment.match_grammar.copy( insert=[ Ref("FetchClauseSegment"), Ref("DeclareStatement"), Ref("CloseStatementSegment"), Ref("AnalizeSegment"), ], ) class SelectClauseSegment(postgres.SelectClauseSegment): """Overrides Postgres to allow DISTRIBUTED as a terminator.""" match_grammar = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref("SelectClauseElementSegment"), # In Postgres you don't need an element so make it optional optional=True, allow_trailing=True, ), Dedent, terminators=[ "INTO", "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), Sequence("WITH", Ref.keyword("NO", optional=True), "DATA"), Ref("WithCheckOptionSegment"), Ref("DistributedBySegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class DistributedBySegment(BaseSegment): """A DISTRIBUTED BY clause.""" type = "distributed_by" match_grammar = Sequence( "DISTRIBUTED", OneOf( "RANDOMLY", "REPLICATED", Sequence("BY", Bracketed(Delimited(Ref("ColumnReferenceSegment")))), ), ) class CreateTableStatementSegment(postgres.CreateTableStatementSegment): """A `CREATE TABLE` statement. As specified in https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-sql_commands-CREATE_TABLE.html This is overridden from Postgres to add the `DISTRIBUTED` clause. 
""" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( # A single COLLATE segment can come before or after # constraint segments OneOf( Ref("ColumnConstraintSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), ), ), ), ), Ref("TableConstraintSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), ), ) ), Sequence( "INHERITS", Bracketed(Delimited(Ref("TableReferenceSegment"))), optional=True, ), ), # Create OF syntax: Sequence( "OF", Ref("ParameterNameSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), ), # Create PARTITION OF syntax Sequence( "PARTITION", "OF", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), ), AnyNumberOf( Sequence( "PARTITION", "BY", OneOf("RANGE", "LIST"), Bracketed( Ref("ColumnReferenceSegment"), ), AnyNumberOf( Sequence( "SUBPARTITION", "BY", OneOf("RANGE", "LIST"), Bracketed( Ref("ColumnReferenceSegment"), ), Sequence( "SUBPARTITION", "TEMPLATE", Bracketed( # TODO: Is this too permissive? Anything(), ), optional=True, ), ), ), Bracketed( # TODO: Is this too permissive? Anything(), ), ), Sequence("USING", Ref("ParameterNameSegment")), Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), ), optional=True, ), ), ) ), ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment")), Ref("DistributedBySegment"), ), ) class CreateTableAsStatementSegment(postgres.CreateTableAsStatementSegment): """A `CREATE TABLE AS` statement. As specified in https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-sql_commands-CREATE_TABLE_AS.html This is overridden from Postgres to add the `DISTRIBUTED` clause. 
""" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar"), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Bracketed( Delimited(Ref("ColumnReferenceSegment")), optional=True, ), Sequence("USING", Ref("ParameterNameSegment"), optional=True), OneOf( Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), ), optional=True, ), ) ) ), ), Sequence("WITHOUT", "OIDS"), optional=True, ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), optional=True, ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), ), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), Ref("DistributedBySegment", optional=True), ) class UnorderedSelectStatementSegment(postgres.UnorderedSelectStatementSegment): """Overrides Postgres Statement, adding DISTRIBUTED BY as a terminator.""" match_grammar = postgres.UnorderedSelectStatementSegment.match_grammar.copy( terminators=[ Ref("DistributedBySegment"), ], ) class SelectStatementSegment(postgres.SelectStatementSegment): """Overrides Postgres Statement, adding DISTRIBUTED BY as a terminator.""" match_grammar = postgres.SelectStatementSegment.match_grammar.copy( terminators=[ Ref("DistributedBySegment"), ], ) class AnalizeSegment(BaseSegment): """ANALYZE statement. https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-ANALYZE.html """ type = "analize_statement" match_grammar = Sequence( OneOf("ANALYZE", "ANALYSE"), Ref.keyword("VERBOSE", optional=True), Ref.keyword("ROOTPARTITION", optional=True), OneOf( Sequence( Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), allow_trailing=True, ), optional=True, ), ), "ALL", optional=True, ), ) class FetchClauseSegment(ansi.FetchClauseSegment): """FETCH statement. https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-FETCH.html """ type = "fetch_clause" match_grammar = Sequence( "FETCH", Sequence( OneOf( "FIRST", "NEXT", Sequence("ABSOLUTE", Ref("NumericLiteralSegment")), Sequence("RELATIVE", Ref("NumericLiteralSegment")), Ref("NumericLiteralSegment"), "ALL", "FORWARD", Sequence("FORWARD", Ref("NumericLiteralSegment")), Sequence("FORWARD", "ALL"), ), OneOf("FROM", "IN"), optional=True, ), Ref("TableReferenceSegment"), ) class DeclareStatement(BaseSegment): """DECLARE statement. https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-DECLARE.html """ type = "declare_statement" match_grammar = Sequence( "DECLARE", Ref("TableReferenceSegment"), AnySetOf( Ref.keyword("BINARY", optional=True), Ref.keyword("INSENSITIVE", optional=True), Sequence( "NO", "SCROLL", optional=True, ), Sequence( "PARALLEL", "RETRIEVE", optional=True, ), optional=True, ), "CURSOR", Sequence( OneOf("WITH", "WITHOUT"), "HOLD", optional=True, ), "FOR", Ref("StatementSegment"), Sequence( "FOR", "READ", "ONLY", optional=True, ), ) class CloseStatementSegment(BaseSegment): """CLOSE statement. 
https://docs.vmware.com/en/VMware-Greenplum/7/greenplum-database/ref_guide-sql_commands-CLOSE.html """ type = "close_statement" match_grammar = Sequence( "CLOSE", OneOf(Ref("TableReferenceSegment"), "ALL"), ) class CopyStatementSegment(postgres.CopyStatementSegment): """COPY statement. https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-COPY.html """ type = "copy_statement" _target_subset = OneOf( Ref("QuotedLiteralSegment"), Sequence("PROGRAM", Ref("QuotedLiteralSegment")), ) _table_definition = Sequence( Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), allow_trailing=True, ), optional=True, ), ) _option = Sequence( AnySetOf( Sequence("FORMAT", Ref("SingleIdentifierGrammar")), Sequence("ON", "SEGMENT"), "BINARY", Sequence("OIDS", Ref("BooleanLiteralGrammar", optional=True)), Sequence("FREEZE", Ref("BooleanLiteralGrammar", optional=True)), Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "NULL", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence("HEADER", Ref("BooleanLiteralGrammar", optional=True)), Sequence("QUOTE", Ref("QuotedLiteralSegment")), Sequence( "ESCAPE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "NEWLINE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FORCE_QUOTE", OneOf( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("StarSegment"), ), ), Sequence( "FORCE_NOT_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "FORCE_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence("ENCODING", Ref("QuotedLiteralSegment")), Sequence("FILL", "MISSING", "FIELDS"), Sequence( "LOG", "ERRORS", Sequence( "SEGMENT", "REJECT", "LIMIT", Ref("NumericLiteralSegment"), OneOf( "ROWS", "PERCENT", optional=True, ), optional=True, ), ), Sequence( "CSV", Sequence( "QUOTE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), OneOf( Sequence( "FORCE", "NOT", "NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "FORCE", "QUOTE", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), optional=True, ), ), Sequence("IGNORE", "EXTERNAL", "PARTITIONS"), ), optional=True, ) _bracketed_option = Sequence( Bracketed( Delimited( _option, ) ) ) match_grammar = Sequence( "COPY", OneOf( Sequence( _table_definition, "FROM", OneOf( _target_subset, Sequence("STDIN"), ), Ref.keyword("WITH", optional=True), OneOf(_option, _bracketed_option, optional=True), Sequence("ON", "SEGMENT", optional=True), ), Sequence( OneOf( _table_definition, Bracketed(Ref("UnorderedSelectStatementSegment")), ), "TO", OneOf( _target_subset, Sequence("STDOUT"), ), OneOf( Sequence( Ref.keyword("WITH", optional=True), OneOf(_option, _bracketed_option, optional=True), ), Ref("StarSegment"), optional=True, ), Sequence("ON", "SEGMENT", optional=True), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_greenplum_keywords.py000066400000000000000000000051471503426445100255420ustar00rootroot00000000000000"""Keywords in the Greenplum Dialect. 
https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql-keywords.html """ greenplum_keywords = [ ("ACTIVE", "non-reserved"), ("CONCURRENCY", "non-reserved"), ("CONTAINS", "non-reserved"), ("CPU_RATE_LIMIT", "non-reserved"), ("CPUSET", "non-reserved"), ("CREATEEXTTABLE", "non-reserved"), ("CUBE", "non-reserved-(cannot-be-function-or-type)"), ("DECODE", "reserved"), ("DENY", "non-reserved"), ("DISTRIBUTED", "reserved"), ("DXL", "non-reserved"), ("ERRORS", "non-reserved"), ("EVERY", "non-reserved"), ("EXCHANGE", "non-reserved"), # ("EXCLUDE", "reserved"), ("EXPAND", "non-reserved"), ("FIELDS", "non-reserved"), ("FILL", "non-reserved"), # ("FOLLOWING", "reserved"), ("FORMAT", "non-reserved"), ("FULLSCAN", "non-reserved"), ("GROUP_ID", "non-reserved-(cannot-be-function-or-type)"), ("GROUPING", "non-reserved-(cannot-be-function-or-type)"), ("HASH", "non-reserved"), ("HOST", "non-reserved"), ("IGNORE", "non-reserved"), ("INCLUSIVE", "non-reserved"), ("LIST", "non-reserved"), ("LOG", "reserved-(can-be-function-or-type)"), ("MASTER", "non-reserved"), ("MEDIAN", "non-reserved-(cannot-be-function-or-type)"), ("MEMORY_LIMIT", "non-reserved"), ("MEMORY_SHARED_QUOTA", "non-reserved"), ("MEMORY_SPILL_RATIO", "non-reserved"), ("MISSING", "non-reserved"), ("MODIFIES", "non-reserved"), ("NEWLINE", "non-reserved"), ("NOCREATEEXTTABLE", "non-reserved"), ("NOOVERCOMMIT", "non-reserved"), ("ORDERED", "non-reserved"), ("OTHERS", "non-reserved"), ("OVERCOMMIT", "non-reserved"), # ("PARTITION", "reserved"), ("PARTITIONS", "non-reserved"), ("PERCENT", "non-reserved"), # ("PRECEDING", "reserved"), ("PROTOCOL", "non-reserved"), ("QUEUE", "non-reserved"), ("RANDOMLY", "non-reserved"), ("READABLE", "non-reserved"), ("READS", "non-reserved"), ("REJECT", "non-reserved"), ("REPLICATED", "non-reserved"), ("RESOURCE", "non-reserved"), ("ROLLUP", "non-reserved-(cannot-be-function-or-type)"), ("ROOTPARTITION", "non-reserved"), ("SCATTER", "reserved"), ("SEGMENT", "non-reserved"), ("SEGMENTS", "non-reserved"), ("SETS", "non-reserved-(cannot-be-function-or-type)"), ("SPLIT", "non-reserved"), ("SQL", "non-reserved"), ("SUBPARTITION", "non-reserved"), ("THRESHOLD", "non-reserved"), ("TIES", "non-reserved"), # ("UNBOUNDED", "reserved"), ("VALIDATION", "non-reserved"), ("WEB", "non-reserved"), ("WRITABLE", "non-reserved"), ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_hive.py000066400000000000000000001023471503426445100225500ustar00rootroot00000000000000"""The Hive dialect.""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexParser, SegmentGenerator, Sequence, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_hive_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") hive_dialect = ansi_dialect.copy_as( "hive", formatted_name="Apache Hive", docstring="The dialect for Apache `Hive `_.", ) # Clear ANSI Keywords and add all Hive keywords # Commented clearing for now as some are needed for some statements imported # from ANSI to work # hive_dialect.sets("unreserved_keywords").clear() hive_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) # hive_dialect.sets("reserved_keywords").clear() hive_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) 
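
# A minimal usage sketch (illustrative only, not part of the dialect
# definition): once this module is registered, the dialect can be exercised
# through the public API. ``Linter`` and ``parse_string`` are the standard
# sqlfluff entry points; the SQL string is a hypothetical example.
#
#     from sqlfluff.core import Linter
#
#     linter = Linter(dialect="hive")
#     parsed = linter.parse_string("SELECT a FROM t CLUSTER BY a")
#     print(parsed.tree.stringify())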
hive_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Hive adds these timeunit aliases for intervals "to aid portability / readability" # https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals hive_dialect.sets("datetime_units").update( [ "NANO", "NANOS", "SECONDS", "MINUTES", "HOURS", "DAYS", "WEEKS", "MONTHS", "YEARS", ] ) hive_dialect.add( StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), JsonfileKeywordSegment=StringParser("JSONFILE", KeywordSegment, type="file_format"), RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), LocationGrammar=Sequence("LOCATION", Ref("QuotedLiteralSegment")), PropertyGrammar=Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), SerdePropertiesGrammar=Sequence( "WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar") ), TerminatedByGrammar=Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment")), FileFormatGrammar=OneOf( "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), StoredAsGrammar=Sequence("STORED", "AS", Ref("FileFormatGrammar")), StoredByGrammar=Sequence( "STORED", "BY", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), StorageFormatGrammar=OneOf( Sequence( Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), ), Ref("StoredByGrammar"), ), CommentGrammar=Sequence("COMMENT", Ref("QuotedLiteralSegment")), PartitionSpecGrammar=Sequence( "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", casefold=str.lower, ), ) # https://cwiki.apache.org/confluence/display/hive/languagemanual+joins hive_dialect.replace( JoinKeywordsGrammar=Sequence(Sequence("SEMI", optional=True), "JOIN"), QuotedLiteralSegment=OneOf( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", casefold=str.lower ), TypedParser( "double_quote", LiteralSegment, type="quoted_literal", casefold=str.lower ), TypedParser( "back_quote", LiteralSegment, type="quoted_literal", casefold=str.lower ), ), TrimParametersGrammar=Nothing(), # ANSI with lower casefold NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.lower, ) ), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[ Ref("BackQuotedIdentifierSegment"), ] ), SelectClauseTerminatorGrammar=ansi_dialect.get_grammar( "SelectClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", 
"BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), WhereClauseTerminatorGrammar=ansi_dialect.get_grammar( "WhereClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), GroupByClauseTerminatorGrammar=OneOf( Sequence( OneOf("ORDER", "CLUSTER", "DISTRIBUTE", "SORT"), "BY", ), "LIMIT", "HAVING", "QUALIFY", "WINDOW", ), HavingClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "CLUSTER", "DISTRIBUTE", "SORT", ), "BY", ), "LIMIT", "QUALIFY", "WINDOW", ), # Full Apache Hive `CREATE ALTER` reference here: # https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-AlterTable AlterTableOptionsGrammar=ansi_dialect.get_grammar("AlterTableOptionsGrammar").copy( insert=[ # Exchange Sequence( "EXCHANGE", Ref("PartitionSpecGrammar"), "WITH", "TABLE", Ref("TableReferenceSegment"), ), ] ), LikeGrammar=OneOf( "LIKE", "RLIKE", "ILIKE", "REGEXP", "IREGEXP" ), # Impala dialect uses REGEXP and IREGEXP ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", optional=True, ), ) class EqualsSegment(ansi.EqualsSegment): """Equals operator. Hive allows double equals: https://cwiki.apache.org/confluence/display/Hive/Hive+Operators """ match_grammar: Matchable = Sequence( Ref("RawEqualsSegment"), Ref("RawEqualsSegment", optional=True), ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), bracket_pairs_set="angle_bracket_pairs", ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class CreateDatabaseStatementSegment(BaseSegment): """A `CREATE DATABASE` statement.""" type = "create_database_statement" match_grammar = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("CommentGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence("MANAGEDLOCATION", Ref("QuotedLiteralSegment"), optional=True), Sequence( "WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True ), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. 
Full Apache Hive `CREATE TABLE` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+ddl#LanguageManualDDL-CreateTable """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("TEMPORARY", optional=True), Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( # TODO: support all constraints Ref("TableConstraintSegment", optional=True), Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Ref("CommentGrammar", optional=True), # `STORED AS` can be called before or after the additional table # properties below Ref("StoredAsGrammar", optional=True), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), Sequence( "SORTED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ) ) ), optional=True, ), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), # Second call of `STORED AS` to match when appears after Ref("StoredAsGrammar", optional=True), Ref("SkewedByClauseSegment", optional=True), Ref("StorageFormatGrammar", optional=True), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Ref("CommentGrammar", optional=True), Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ), # Create like syntax Sequence( "LIKE", Ref("TableReferenceSegment"), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), ), ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE.""" type = "table_constraint" match_grammar: Matchable = Sequence( Sequence("CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True), OneOf( Sequence( "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), ), Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Sequence( "DISABLE", "NOVALIDATE", OneOf("RELY", "NORELY", optional=True), optional=True, ), ), Sequence( Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] Sequence("DISABLE", "NOVALIDATE", optional=True), ), ), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """Modified from ANSI to allow for `LATERAL VIEW` clause.""" match_grammar = ( ansi.FromExpressionElementSegment._base_from_expression_element.copy( insert=[ AnyNumberOf(Ref("LateralViewClauseSegment")), ], before=Ref("PostTableExpressionGrammar", optional=True), ) ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """Modified to allow UDTF in SELECT clause to return multiple columns aliases. 
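
    For example (an illustrative sketch in the style of the Hive docs; names
    are placeholders):

        SELECT explode(my_map) AS (my_key, my_value) FROM my_table;
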
Full Apache Hive `Built-in Table-Generating Functions (UDTF)` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+udf#LanguageManualUDF-Built-inTable-GeneratingFunctions(UDTF) """ match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed(Ref("SingleIdentifierListSegment")), ), Ref("SingleIdentifierGrammar"), ), Dedent, ) class LateralViewClauseSegment(BaseSegment): """A `LATERAL VIEW` in a `FROM` clause. https://cwiki.apache.org/confluence/display/hive/languagemanual+lateralview """ type = "lateral_view_clause" match_grammar = Sequence( Indent, "LATERAL", "VIEW", Ref.keyword("OUTER", optional=True), Ref("FunctionSegment"), # NB: AliasExpressionSegment is not used here for table # or column alias because `AS` is optional within it # (and in most scenarios). Here it's explicitly defined # for when it is required and not allowed. Ref("SingleIdentifierGrammar", optional=True), Sequence( "AS", Delimited( Ref("SingleIdentifierGrammar"), ), ), Dedent, ) class PrimitiveTypeSegment(BaseSegment): """Primitive data types.""" type = "primitive_type" match_grammar = OneOf( "TINYINT", "SMALLINT", "INT", "INTEGER", "BIGINT", "BOOLEAN", "FLOAT", Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)), "STRING", "BINARY", "TIMESTAMP", Sequence( OneOf("DECIMAL", "DEC", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "DATE", "VARCHAR", "CHAR", "JSON", ) class DatatypeSegment(BaseSegment): """Data types.""" type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("ArrayTypeSegment"), Ref("SizedArrayTypeSegment"), Sequence( "MAP", Bracketed( Sequence( Ref("PrimitiveTypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Ref("StructTypeSegment"), Sequence( "UNIONTYPE", Bracketed( Delimited( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs" ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), ) class SkewedByClauseSegment(BaseSegment): """`SKEWED BY` clause in a CREATE / ALTER statement.""" type = "skewed_by_clause" match_grammar = Sequence( "SKEWED", "BY", Ref("BracketedColumnReferenceListGrammar"), "ON", Bracketed( Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Delimited(Ref("LiteralGrammar"))) ) ) ), Sequence("STORED", "AS", "DIRECTORIES", optional=True), ) class RowFormatClauseSegment(BaseSegment): """`ROW FORMAT` clause in a CREATE statement.""" type = "row_format_clause" match_grammar = Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Sequence( "FIELDS", Ref("TerminatedByGrammar"), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "COLLECTION", "ITEMS", Ref("TerminatedByGrammar"), optional=True ), Sequence("MAP", "KEYS", Ref("TerminatedByGrammar"), optional=True), Sequence("LINES", Ref("TerminatedByGrammar"), optional=True), Sequence( "NULL", "DEFINED", "AS", Ref("QuotedLiteralSegment"), optional=True ), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE/SCHEMA` statement.""" type = "alter_database_statement" match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), "SET", OneOf( Sequence("DBPROPERTIES", Ref("BracketedPropertyListGrammar")), Sequence( "OWNER", OneOf("USER", "ROLE"), Ref("QuotedLiteralSegment"), ), 
Ref("LocationGrammar"), Sequence("MANAGEDLOCATION", Ref("QuotedLiteralSegment")), ), ) class DropTableStatementSegment(BaseSegment): """A `DROP TABLE` statement.""" type = "drop_table_statement" match_grammar = Sequence( "DROP", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref.keyword("PURGE", optional=True), ) class TruncateStatementSegment(BaseSegment): """`TRUNCATE TABLE` statement.""" type = "truncate_table" match_grammar = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) class SetStatementSegment(BaseSegment): """A `SET` statement. https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Commands """ type = "set_statement" match_grammar = Sequence( "SET", OneOf( # set -v Sequence( StringParser("-", SymbolSegment, type="option_indicator"), StringParser("v", CodeSegment, type="option"), ), # set key = value Sequence( Delimited( Ref("ParameterNameSegment"), delimiter=OneOf(Ref("DotSegment"), Ref("ColonDelimiterSegment")), allow_gaps=False, ), Ref("RawEqualsSegment"), Ref("LiteralGrammar"), ), optional=True, ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterDatabaseStatementSegment"), Ref("MsckRepairTableStatementSegment"), Ref("MsckTableStatementSegment"), Ref("SetStatementSegment"), Ref("AlterViewStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. Full Apache Hive `INSERT` reference here: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( Sequence( "OVERWRITE", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("SelectableGrammar"), ), Sequence( Sequence("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment"), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), Ref("SelectableGrammar"), ), ), ), Sequence( "INTO", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), OneOf( Ref("SelectableGrammar"), Ref("ValuesClauseSegment"), ), ), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. Full Apache Hive `INTERVAL` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals """ type = "interval_expression" match_grammar = Sequence( Ref.keyword("INTERVAL", optional=True), OneOf( Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("DatetimeUnitSegment"), Sequence("TO", Ref("DatetimeUnitSegment"), optional=True), ), ), ) class MsckRepairTableStatementSegment(BaseSegment): """An `MSCK REPAIR TABLE`statement. Updates the Hive metastore to be aware of any changes to partitions on the underlying file store. The `MSCK TABLE` command, and corresponding class in Hive dialect MsckTableStatementSegment, is used to determine mismatches between the Hive metastore and file system. Essentially, it is a dry run of the `MSCK REPAIR TABLE` command. 
    https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RecoverPartitions(MSCKREPAIRTABLE)
    """

    type = "msck_repair_table_statement"

    match_grammar = Sequence(
        "MSCK",
        "REPAIR",
        "TABLE",
        Ref("TableReferenceSegment"),
        Sequence(
            OneOf(
                "ADD",
                "DROP",
                "SYNC",
            ),
            "PARTITIONS",
            optional=True,
        ),
    )


class MsckTableStatementSegment(BaseSegment):
    """An `MSCK TABLE` statement.

    Checks for differences between partition metadata in the Hive metastore
    and the underlying file system. Commonly used prior to the
    `MSCK REPAIR TABLE` command, corresponding with the class
    `MsckRepairTableStatementSegment` in the Hive dialect, to assess the size
    of updates for one-time or irregularly sized file system updates.

    https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RecoverPartitions(MSCKREPAIRTABLE)
    """

    type = "msck_table_statement"

    match_grammar = Sequence(
        "MSCK",
        "TABLE",
        Ref("TableReferenceSegment"),
        Sequence(
            OneOf(
                "ADD",
                "DROP",
                "SYNC",
            ),
            "PARTITIONS",
            optional=True,
        ),
    )


class RowFunctionContentsSegment(BaseSegment):
    """Row Function Contents."""

    type = "function_contents"

    match_grammar = Sequence(
        Bracketed(
            Delimited(
                Sequence(
                    Ref("BaseExpressionElementGrammar"),
                ),
            ),
        ),
    )


class FunctionSegment(BaseSegment):
    """A scalar or aggregate function.

    Extended version of `ansi` to add support of row typecasting
    https://prestodb.io/docs/current/language/types.html#row

    ```
    cast(row(val1, val2) as row(a integer, b integer))
    ```
    """

    type = "function"

    match_grammar = OneOf(
        Sequence(
            # Treat functions which take date parts separately,
            # so those functions parse date parts as DatetimeUnitSegment
            # rather than identifiers.
            Sequence(
                Ref("DatePartFunctionNameSegment"),
                Ref("DateTimeFunctionContentsSegment"),
            ),
        ),
        Sequence(
            # This unusual syntax is used to cast the keyword ROW
            # to the function_name to avoid rule linting exceptions
            StringParser("ROW", KeywordSegment, type="function_name"),
            Ref("RowFunctionContentsSegment"),
            "AS",
            "ROW",
            Bracketed(
                Delimited(
                    Sequence(
                        Ref("SingleIdentifierGrammar"),
                        Ref("DatatypeSegment", optional=True),
                    ),
                ),
            ),
        ),
        Sequence(
            Sequence(
                Ref(
                    "FunctionNameSegment",
                    exclude=OneOf(
                        Ref("DatePartFunctionNameSegment"),
                        Ref("ValuesClauseSegment"),
                    ),
                ),
                Ref("FunctionContentsSegment"),
            ),
            Ref("PostFunctionGrammar", optional=True),
        ),
    )


class SamplingExpressionSegment(BaseSegment):
    """A sampling expression."""

    type = "sample_expression"

    match_grammar = Sequence(
        "TABLESAMPLE",
        Bracketed(
            OneOf(
                Sequence(
                    "BUCKET",
                    Ref("NumericLiteralSegment"),
                    "OUT",
                    "OF",
                    Ref("NumericLiteralSegment"),
                    Sequence(
                        "ON",
                        OneOf(
                            Ref("SingleIdentifierGrammar"),
                            Ref("FunctionSegment"),
                        ),
                        optional=True,
                    ),
                ),
                Sequence(
                    Ref("NumericLiteralSegment"),
                    OneOf("PERCENT", "ROWS", optional=True),
                ),
                RegexParser(
                    r"\d+[bBkKmMgG]",
                    CodeSegment,
                    type="byte_length_literal",
                ),
            ),
        ),
        Ref(
            "AliasExpressionSegment",
            optional=True,
        ),
    )


class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment):
    """Enhance unordered SELECT statement to include CLUSTER, DISTRIBUTE, SORT BY."""

    match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy(
        terminators=[
            Ref("ClusterByClauseSegment"),
            Ref("DistributeByClauseSegment"),
            Ref("SortByClauseSegment"),
        ],
    )


class SelectStatementSegment(ansi.SelectStatementSegment):
    """Overriding SelectStatementSegment to allow for additional segment parsing."""

    match_grammar = ansi.SelectStatementSegment.match_grammar.copy(
        insert=[
            Ref("ClusterByClauseSegment", optional=True),
            Ref("DistributeByClauseSegment", optional=True),
Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ) class SelectClauseSegment(ansi.SelectClauseSegment): """Overriding SelectClauseSegment to allow for additional segment parsing.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( # Add additional terminators terminators=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], ) class SetExpressionSegment(ansi.SetExpressionSegment): """Overriding SetExpressionSegment to allow for additional segment parsing.""" match_grammar = ansi.SetExpressionSegment.match_grammar.copy( insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ) class ClusterByClauseSegment(ansi.OrderByClauseSegment): """A `CLUSTER BY` clause like in `SELECT`.""" type = "clusterby_clause" match_grammar: Matchable = Sequence( "CLUSTER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), ), terminators=["LIMIT", Ref("FrameClauseUnitGrammar")], ), Dedent, ) class DistributeByClauseSegment(ansi.OrderByClauseSegment): """A `DISTRIBUTE BY` clause like in `SELECT`.""" type = "distributeby_clause" match_grammar: Matchable = Sequence( "DISTRIBUTE", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), ), terminators=[ "SORT", "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class SortByClauseSegment(ansi.OrderByClauseSegment): """A `SORT BY` clause like in `SELECT`.""" type = "sortby_clause" match_grammar: Matchable = Sequence( "SORT", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=["LIMIT", Ref("FrameClauseUnitGrammar")], ), Dedent, ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement to change the view schema or properties. 
https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Create/Drop/AlterView """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence("SET", Ref("TablePropertiesGrammar")), Sequence( "AS", OptionallyBracketed(Ref("SelectStatementSegment")), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_hive_keywords.py000066400000000000000000000116121503426445100244710ustar00rootroot00000000000000"""A list of HiveQL keywords.""" RESERVED_KEYWORDS = [ "ALL", "ALTER", "AND", "ARRAY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH", "BY", "CASE", "CAST", "CHAR", "COLUMN", "CONF", "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURSOR", "DATABASE", "DATE", "DEC", "DECIMAL", "DELETE", "DESCRIBE", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "EXCHANGE", "EXISTS", "EXTENDED", "EXTERNAL", "FALSE", "FETCH", "FLOAT", "FOLLOWING", "FOR", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP", "GROUPING", "HAVING", "IF", "IMPORT", "IN", "INNER", "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LATERAL", "LEFT", "LESS", "LIKE", "LOCAL", "MACRO", "MAP", "MORE", "NONE", "NOT", "NULL", "NUMERIC", "OF", "ON", "OR", "ORDER", "OUT", "OUTER", "OVER", "PARTIALSCAN", "PARTITION", "PERCENT", "PRECEDING", "PRESERVE", "PROCEDURE", "RANGE", "READS", "REDUCE", "REVOKE", "RIGHT", "ROLLUP", "ROW", "ROWS", "SELECT", "SET", "SMALLINT", "TABLE", "TABLESAMPLE", "THEN", "TIMESTAMP", "TO", "TRANSFORM", "TRIGGER", "TRUE", "TRUNCATE", "UNBOUNDED", "UNION", "UNIQUEJOIN", "UPDATE", "USER", "USING", "UTC_TMESTAMP", "VALUES", "VARCHAR", "WHEN", "WHERE", "WINDOW", "WITH", "COMMIT", "ONLY", "REGEXP", "RLIKE", "ROLLBACK", "START", "CACHE", "CONSTRAINT", "FOREIGN", "PRIMARY", "REFERENCES", "DAYOFWEEK", "EXTRACT", "FLOOR", "INTEGER", "PRECISION", "VIEWS", "TIME", "NUMERIC", "SYNC", ] UNRESERVED_KEYWORDS = [ "ADD", "ADMIN", "AFTER", "ANALYZE", "ARCHIVE", "ASC", "BEFORE", "BERNOULLI", "BUCKET", "BUCKETS", "CASCADE", "CHANGE", "CLUSTER", "CLUSTERED", "CLUSTERSTATUS", "COLLECTION", "COLUMNS", "COMMENT", "COMPACT", "COMPACTIONS", "COMPUTE", "CONCATENATE", "CONTINUE", "DATA", "DATABASES", "DATETIME", "DAY", "DBPROPERTIES", "DEFERRED", "DEFINED", "DELIMITED", "DEPENDENCY", "DESC", "DIRECTORIES", "DIRECTORY", "DISABLE", "DISTRIBUTE", "ELEM_TYPE", "ENABLE", "ESCAPED", "EXCLUSIVE", "EXPLAIN", "EXPORT", "FIELDS", "FILE", "FILEFORMAT", "FIRST", "FORMAT", "FORMATTED", "FUNCTIONS", "HOLD_DDLTIME", "HOUR", "IDXPROPERTIES", "IGNORE", "INDEX", "INDEXES", "INPATH", "INPUTDRIVER", "INPUTFORMAT", "IREGEXP", # Impala dialect "ITEMS", "JAR", "KEYS", "KEY_TYPE", "LIMIT", "LINES", "LOAD", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "LONG", "MAPJOIN", "MATERIALIZED", "METADATA", "MINUS", "MINUTE", "MONTH", "MSCK", "NOSCAN", "NO_DROP", "OFFLINE", "OPTION", "OUTPUTDRIVER", "OUTPUTFORMAT", "OVERWRITE", "OWNER", "PARTITIONED", "PARTITIONS", "PLUS", "PRETTY", "PRINCIPALS", "PROTECTION", "PURGE", "READ", "READONLY", "REBUILD", "RECORDREADER", "RECORDWRITER", "REGEXP", "RELOAD", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "REPLICATION", "RESTRICT", "REWRITE", "RLIKE", "ROLE", "ROLES", "SCHEMA", "SCHEMAS", "SECOND", "SEMI", "SERDE", "SERDEPROPERTIES", "SERVER", "SETS", "SHARED", "SHOW", "SHOW_DATABASE", "SKEWED", "SORT", "SORTED", "SSL", "STATISTICS", "STORED", "STREAMTABLE", "STRING", "STRUCT", "SYSTEM", "TABLES", "TBLPROPERTIES", "TEMPORARY", "TERMINATED", "TINYINT", "TOUCH", "TRANSACTIONS", "UNARCHIVE", "UNDO", 
"UNIONTYPE", "UNLOCK", "UNSET", "UNSIGNED", "URI", "USE", "UTC", "UTCTIMESTAMP", "VALUE_TYPE", "VIEW", "WHILE", "YEAR", "AUTOCOMMIT", "ISOLATION", "LEVEL", "OFFSET", "SNAPSHOT", "TRANSACTION", "WORK", "WRITE", "ABORT", "KEY", "LAST", "NORELY", "NOVALIDATE", "NULLS", "RELY", "VALIDATE", "DETAIL", "DOW", "EXPRESSION", "OPERATOR", "QUARTER", "SUMMARY", "VECTORIZATION", "WEEK", "YEARS", "MONTHS", "WEEKS", "DAYS", "HOURS", "MINUTES", "SECONDS", "TIMESTAMPTZ", "ZONE", # File format "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", # Other "MANAGEDLOCATION", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_impala.py000066400000000000000000000173411503426445100230570ustar00rootroot00000000000000"""The Impala dialect.""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, BinaryOperatorSegment, Bracketed, Delimited, OneOf, Ref, Sequence, StringParser, ) from sqlfluff.dialects import dialect_hive as hive from sqlfluff.dialects.dialect_impala_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) hive_dialect = load_raw_dialect("hive") impala_dialect = hive_dialect.copy_as( "impala", formatted_name="Apache Impala", docstring="The dialect for Apache `Impala `_.", ) impala_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) impala_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) impala_dialect.replace( DivideSegment=OneOf( StringParser("DIV", BinaryOperatorSegment), StringParser("/", BinaryOperatorSegment), ) ) class StatementSegment(hive.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = hive.StatementSegment.match_grammar.copy( insert=[ Ref("CreateTableAsSelectStatementSegment"), Ref("ComputeStatsStatementSegment"), Ref("InsertStatementSegment"), ] ) class ComputeStatsStatementSegment(BaseSegment): """A `COMPUTE STATS statement. Full Apache Impala `COMPUTE STATS` reference here: https://impala.apache.org/docs/build/html/topics/impala_compute_stats.html """ type = "compute_stats_statement" match_grammar = Sequence( "COMPUTE", OneOf( Sequence("STATS", Ref("TableReferenceSegment")), Sequence( "INCREMENTAL", "STATS", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ), ), ) class CreateTableStatementSegment(hive.CreateTableStatementSegment): """A `CREATE_TABLE` statement. 
Full Apache Impala `CREATE TABLE` reference here: https://impala.apache.org/docs/build/html/topics/impala_create_table.html """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Bracketed( Delimited( OneOf( Ref("TableConstraintSegment", optional=True), Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( OneOf( Ref("ColumnDefinitionSegment"), Ref("SingleIdentifierGrammar"), ), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "SORT", "BY", Bracketed(Delimited(Sequence(Ref("ColumnReferenceSegment")))), optional=True, ), Ref("CommentGrammar", optional=True), Ref("RowFormatClauseSegment", optional=True), Ref("SerdePropertiesGrammar", optional=True), Ref("StoredAsGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence( OneOf( Sequence( "CACHED", "IN", Delimited(Ref("PoolNameReferenceSegment")), Sequence( "WITH", "REPLICATION", "=", Ref("NumericLiteralSegment"), optional=True, ), ), Ref.keyword("UNCACHED"), ), optional=True, ), Ref("TablePropertiesGrammar", optional=True), ) class CreateTableAsSelectStatementSegment(BaseSegment): """A `CREATE TABLE ... AS SELECT ...` statement. Full Apache Impala reference here: https://impala.apache.org/docs/build/html/topics/impala_create_table.html Unlike Hive, `AS SELECT ...` cannot be appended to any other SELECT statement, so this is implemented as a separate segment. """ type = "create_table_as_select_statement" match_grammar = Sequence( "CREATE", Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( OneOf( Ref("ColumnDefinitionSegment"), Ref("SingleIdentifierGrammar"), ), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "SORT", "BY", Bracketed(Delimited(Sequence(Ref("ColumnReferenceSegment")))), optional=True, ), Ref("CommentGrammar", optional=True), Ref("RowFormatClauseSegment", optional=True), Ref("SerdePropertiesGrammar", optional=True), Ref("StoredAsGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence( OneOf( Sequence( "CACHED", "IN", Delimited(Ref("PoolNameReferenceSegment")), Sequence( "WITH", "REPLICATION", "=", Ref("NumericLiteralSegment"), optional=True, ), ), Ref.keyword("UNCACHED"), ), optional=True, ), Ref("TablePropertiesGrammar", optional=True), "AS", Ref("SelectableGrammar"), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. 
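
    For example (an illustrative sketch; names are placeholders, and
    ``[SHUFFLE]`` is an optional plan hint):

        INSERT OVERWRITE TABLE t PARTITION (year=2024) [SHUFFLE]
        SELECT col1, col2 FROM staging;
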
Full Apache Impala `INSERT` reference here: https://impala.apache.org/docs/build/html/topics/impala_insert.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( Sequence( "OVERWRITE", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Bracketed( OneOf("SHUFFLE", "NOSHUFFLE"), bracket_type="square", optional=True ), Ref("IfNotExistsGrammar", optional=True), Ref("SelectableGrammar"), ), Sequence( "INTO", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Sequence( Bracketed(Delimited(Sequence(Ref("ColumnReferenceSegment")))), optional=True, ), Ref("PartitionSpecGrammar", optional=True), Bracketed( OneOf("SHUFFLE", "NOSHUFFLE"), bracket_type="square", optional=True ), OneOf( Ref("SelectableGrammar"), Ref("ValuesClauseSegment"), ), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_impala_keywords.py000066400000000000000000000210551503426445100250030ustar00rootroot00000000000000"""A list of Impala keywords. https://impala.apache.org/docs/build/html/topics/impala_reserved_words.html Impala docs recommend respecting Hive keywords, so both lists include Hive's as well. """ RESERVED_KEYWORDS = [ "ADD", "AGGREGATE", "ALL", "ALLOCATE", "ALTER", "ANALYTIC", "AND", "ANTI", "ANY", "API_VERSION", "ARE", "ARRAY", "ARRAY_AGG", "ARRAY_MAX_CARDINALITY", "AS", "ASC", "ASENSITIVE", "ASYMMETRIC", "AT", "ATOMIC", "AUTHORIZATION", "AVRO", "BEGIN_FRAME", "BEGIN_PARTITION", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BLOCK_SIZE", "BOOLEAN", "BOTH", "BUCKETS", "BY", "CACHED", "CALLED", "CARDINALITY", "CASCADE", "CASCADED", "CASE", "CAST", "CHANGE", "CHAR", "CHARACTER", "CLASS", "CLOB", "CLOSE_FN", "COLLATE", "COLLECT", "COLUMN", "COLUMNS", "COMMENT", "COMMIT", "COMPRESSION", "COMPUTE", "CONDITION", "CONNECT", "CONSTRAINT", "CONTAINS", "CONVERT", "COPY", "CORR", "CORRESPONDING", "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_ROW", "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURSOR", "CYCLE", "DATA", "DATABASE", "DATABASES", "DATE", "DATETIME", "DEALLOCATE", "DEC", "DECFLOAT", "DECIMAL", "DECLARE", "DEFAULT", "DEFINE", "DELETE", "DELIMITED", "DEREF", "DESC", "DESCRIBE", "DETERMINISTIC", "DISABLE", "DISCONNECT", "DISTINCT", "DIV", "DOUBLE", "DROP", "DYNAMIC", "EACH", "ELEMENT", "ELSE", "EMPTY", "ENABLE", "ENCODING", "END", "END-EXEC", "END_FRAME", "END_PARTITION", "EQUALS", "ESCAPE", "ESCAPED", "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXPLAIN", "EXTENDED", "EXTERNAL", "FALSE", "FETCH", "FIELDS", "FILEFORMAT", "FILES", "FILTER", "FINALIZE_FN", "FIRST", "FLOAT", "FOLLOWING", "FOR", "FOREIGN", "FORMAT", "FORMATTED", "FRAME_ROW", "FREE", "FROM", "FULL", "FUNCTION", "FUNCTIONS", "FUSION", "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", "GROUPS", "HASH", "HAVING", "HOLD", "HUDIPARQUET", "ICEBERG", "IF", "IGNORE", "ILIKE", "IN", "INCREMENTAL", "INDICATOR", "INIT_FN", "INITIAL", "INNER", "INOUT", "INPATH", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERMEDIATE", "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "INVALIDATE", "IREGEXP", "IS", "JOIN", "JSONFILE", "JSON_ARRAY", "JSON_ARRAYAGG", "JSON_EXISTS", "JSON_OBJECT", "JSON_OBJECTAGG", "JSON_QUERY", "JSON_TABLE", "JSON_TABLE_PRIMITIVE", "JSON_VALUE", "KUDU", "LARGE", "LAST", "LATERAL", "LEADING", "LEFT", "LEXICAL", "LIKE", "LIKE_REGEX", "LIMIT", "LINES", "LISTAGG", "LOAD", "LOCAL", "LOCALTIMESTAMP", "LOCATION", "LOG10", "MAP", 
"MANAGEDLOCATION", "MATCH", "MATCH_NUMBER", "MATCH_RECOGNIZE", "MATCHES", "MERGE", "MERGE_FN", "METADATA", "METHOD", "MINUS", "MODIFIES", "MULTISET", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NO", "NON", "NONE", "NORELY", "NORMALIZE", "NOT", "NOVALIDATE", "NTH_VALUE", "NULL", "NULLS", "NUMERIC", "OCCURRENCES_REGEX", "OCTET_LENGTH", "OF", "OFFSET", "OMIT", "ON", "ONE", "ONLY", "OPTIMIZE", "OR", "ORC", "ORDER", "OUT", "OUTER", "OVER", "OVERLAPS", "OVERLAY", "OVERWRITE", "PARQUET", "PARQUETFILE", "PARTITION", "PARTITIONED", "PARTITIONS", "PATTERN", "PER", "PERCENT", "PERCENTILE_CONT", "PERCENTILE_DISC", "PORTION", "POSITION", "POSITION_REGEX", "PRECEDES", "PRECEDING", "PREPARE", "PREPARE_FN", "PRIMARY", "PROCEDURE", "PRODUCED", "PTF", "PURGE", "RANGE", "RCFILE", "READS", "REAL", "RECOVER", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REFRESH", "REGEXP", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "RELEASE", "RELY", "RENAME", "REPEATABLE", "REPLACE", "REPLICATION", "RESTRICT", "RETURNS", "REVOKE", "RIGHT", "RLIKE", "ROLE", "ROLES", "ROLLBACK", "ROLLUP", "ROW", "ROWS", "RUNNING", "RWSTORAGE", "SAVEPOINT", "SCHEMA", "SCHEMAS", "SCOPE", "SCROLL", "SEARCH", "SEEK", "SELECT", "SELECTIVITY", "SEMI", "SENSITIVE", "SEQUENCEFILE", "SERDEPROPERTIES", "SERIALIZE_FN", "SET", "SETS", "SHOW", "SIMILAR", "SKIP", "SMALLINT", "SOME", "SORT", "SPEC", "SPECIFIC", "SPECIFICTYPE", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "STATIC", "STATS", "STORAGEHANDLER_URI", "STORED", "STRAIGHT_JOIN", "STRING", "STRUCT", "SUBMULTISET", "SUBSET", "SUBSTRING_REGEX", "SUCCEEDS", "SYMBOL", "SYMMETRIC", "SYSTEM_TIME", "SYSTEM_USER", "SYSTEM_VERSION", "TABLE", "TABLES", "TABLESAMPLE", "TBLPROPERTIES", "TERMINATED", "TEXTFILE", "THEN", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TINYINT", "TO", "TRAILING", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM_ARRAY", "TRUE", "TRUNCATE", "UESCAPE", "UNBOUNDED", "UNCACHED", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UNSET", "UPDATE", "UPDATE_FN", "UPSERT", "USE", "USER_DEFINED_FN", "USING", "VALIDATE", "VALUE_OF", "VALUES", "VARBINARY", "VARCHAR", "VARYING", "VERSIONING", "VIEW", "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", "ZORDER", "CONF", "CURRENT_TIMESTAMP", "EXCHANGE", "IMPORT", "LESS", "MACRO", "MORE", "PARTIALSCAN", "PRESERVE", "REDUCE", "TRANSFORM", "UNIQUEJOIN", "USER", "UTC_TMESTAMP", "START", "CACHE", "DAYOFWEEK", "EXTRACT", "FLOOR", "PRECISION", "VIEWS", "TIME", "SYNC", ] UNRESERVED_KEYWORDS = [ # Impala-specific "SHUFFLE", "NOSHUFFLE", # Hive unreserved keywords "ADMIN", "AFTER", "ANALYZE", "ARCHIVE", "BEFORE", "BERNOULLI", "BUCKET", "CLUSTER", "CLUSTERED", "CLUSTERSTATUS", "COLLECTION", "COMPACT", "COMPACTIONS", "CONCATENATE", "CONTINUE", "DAY", "DBPROPERTIES", "DEFERRED", "DEFINED", "DEPENDENCY", "DIRECTORIES", "DIRECTORY", "DISTRIBUTE", "ELEM_TYPE", "EXCLUSIVE", "EXPORT", "FILE", "HOLD_DDLTIME", "HOUR", "IDXPROPERTIES", "INDEX", "INDEXES", "INPUTDRIVER", "INPUTFORMAT", "ITEMS", "JAR", "KEYS", "KEY_TYPE", "LOCK", "LOCKS", "LOGICAL", "LONG", "MAPJOIN", "MATERIALIZED", "MINUTE", "MONTH", "MSCK", "NOSCAN", "NO_DROP", "OFFLINE", "OPTION", "OUTPUTDRIVER", "OUTPUTFORMAT", "OWNER", "PLUS", "PRETTY", "PRINCIPALS", "PROTECTION", "READ", "READONLY", "REBUILD", "RECORDREADER", "RECORDWRITER", "RELOAD", "REPAIR", "REWRITE", "SECOND", "SERDE", "SERVER", "SHARED", "SHOW_DATABASE", "SKEWED", "SORTED", "SSL", "STATISTICS", "STREAMTABLE", "SYSTEM", "TEMPORARY", 
"TOUCH", "TRANSACTIONS", "UNARCHIVE", "UNDO", "UNIONTYPE", "UNLOCK", "UNSIGNED", "URI", "UTC", "UTCTIMESTAMP", "VALUE_TYPE", "WHILE", "YEAR", "AUTOCOMMIT", "ISOLATION", "LEVEL", "SNAPSHOT", "TRANSACTION", "WORK", "WRITE", "ABORT", "KEY", "DETAIL", "DOW", "EXPRESSION", "OPERATOR", "QUARTER", "SUMMARY", "VECTORIZATION", "WEEK", "YEARS", "MONTHS", "WEEKS", "DAYS", "HOURS", "MINUTES", "SECONDS", "TIMESTAMPTZ", "ZONE", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_mariadb.py000066400000000000000000000520351503426445100232120ustar00rootroot00000000000000"""MariaDB Dialect. https://mariadb.com/kb/en/sql-statements-structure/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, Dedent, Delimited, Indent, Matchable, OneOf, OptionallyBracketed, ParseMode, Ref, Sequence, ) from sqlfluff.dialects import dialect_mysql as mysql from sqlfluff.dialects.dialect_mariadb_keywords import ( mariadb_reserved_keywords, mariadb_unreserved_keywords, ) # ansi_dialect = load_raw_dialect("ansi") mysql_dialect = load_raw_dialect("mysql") mariadb_dialect = mysql_dialect.copy_as( "mariadb", formatted_name="MariaDB", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, ``""`` or ``@``, Identifiers: |back_quotes|. The dialect for `MariaDB `_.""", ) mariadb_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", mariadb_unreserved_keywords ) mariadb_dialect.sets("reserved_keywords").clear() mariadb_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", mariadb_reserved_keywords ) class ColumnConstraintSegment(mysql.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar: Matchable = OneOf( mysql.ColumnConstraintSegment.match_grammar, Sequence( Sequence("GENERATED", "ALWAYS", optional=True), "AS", Bracketed(Ref("ExpressionSegment")), OneOf("PERSISTENT", "STORED", "VIRTUAL", optional=True), ), ) class CreateUserStatementSegment(mysql.CreateUserStatementSegment): """`CREATE USER` statement. https://mariadb.com/kb/en/create-user/ """ match_grammar = mysql.CreateUserStatementSegment.match_grammar.copy( insert=[Ref("OrReplaceGrammar", optional=True)], before=Ref.keyword("USER"), ) class CreateTableStatementSegment(mysql.CreateTableStatementSegment): """`CREATE TABLE` segment. 
https://mariadb.com/kb/en/create-table/ """ ES = Ref("ExpressionSegment") CRS = Ref("ColumnReferenceSegment") NLS = Ref("NumericLiteralSegment") ORS = Ref("ObjectReferenceSegment") TRS = Ref("TableReferenceSegment") SQIS = Ref("SingleQuotedIdentifierSegment") match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ), # Create AS syntax: Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), AnyNumberOf( Sequence( Ref.keyword("DEFAULT", optional=True), OneOf( Ref("ParameterNameSegment"), Sequence("CHARACTER", "SET"), Sequence(OneOf("DATA", "INDEX"), "DIRECTORY"), Sequence("WITH", "SYSTEM"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(Ref("TableReferenceSegment")), ), ), ), # Partition Options # https://dev.mysql.com/doc/refman/8.0/en/create-table.html#create-table-partitioning Sequence( "PARTITION", "BY", OneOf( Sequence( Ref.keyword("LINEAR", optional=True), OneOf( Sequence("HASH", Ref("ExpressionSegment")), Sequence( "KEY", Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Delimited(Ref("ColumnReferenceSegment")), ), ), ), Sequence( OneOf("RANGE", "LIST"), OneOf( Ref("ExpressionSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ), ), Sequence("PARTITIONS", Ref("NumericLiteralSegment"), optional=True), Sequence( "SUBPARTITION", "BY", Sequence( Ref.keyword("LINEAR", optional=True), OneOf( Sequence("HASH", Ref("ExpressionSegment")), Sequence( "KEY", Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Bracketed(Ref("ColumnReferenceSegment")), ), ), ), Sequence( "SUBPARTITIONS", Ref("NumericLiteralSegment"), optional=True, ), optional=True, ), # optional partition_definition(s) AnyNumberOf( Bracketed( Delimited( Sequence( "PARTITION", Ref("ColumnReferenceSegment"), AnyNumberOf( Sequence( "VALUES", OneOf( Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed( OneOf( ES, CRS, NLS, Ref("LiteralGrammar"), ), ), ), ), Sequence( "IN", Bracketed( Ref("ObjectReferenceSegment") ), ), ), ), Sequence( OneOf( Ref("ParameterNameSegment"), Sequence("CHARACTER", "SET"), Sequence( OneOf("DATA", "INDEX"), "DIRECTORY", ), Sequence("WITH", "SYSTEM"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(Ref("TableReferenceSegment")), ), ), ), # optional subpartition_definition(s) Sequence( Ref.keyword("SUBPARTITION", optional=True), Ref("LiteralGrammar"), AnyNumberOf( Sequence( "VALUES", OneOf( Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed(ES), Bracketed(CRS), ), ), Sequence( "IN", Bracketed(ORS), ), ), ), Sequence( OneOf( Ref("ParameterNameSegment"), 
Sequence("CHARACTER", "SET"), Sequence( OneOf("DATA", "INDEX"), "DIRECTORY", ), Sequence("WITH", "SYSTEM"), ), Ref( "EqualsSegment", optional=True, ), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), SQIS, Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(TRS), ), ), ), ), ), ), ), ), ), ), ), ), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://mariadb.com/kb/en/delete/ """ type = "delete_statement" match_grammar = Sequence( "DELETE", Ref.keyword("LOW_PRIORITY", optional=True), Ref.keyword("QUICK", optional=True), Ref.keyword("IGNORE", optional=True), OneOf( Sequence( "FROM", Delimited( Ref("DeleteTargetTableSegment"), terminators=["USING"], ), Ref("DeleteUsingClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Delimited( Ref("DeleteTargetTableSegment"), terminators=["FROM"], ), Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Ref("FromClauseSegment"), Ref("SelectPartitionClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("ReturningClauseSegment", optional=True), ), ), ) class FlushStatementSegment(mysql.FlushStatementSegment): """A `Flush` statement. https://mariadb.com/kb/en/flush/ """ match_grammar: Matchable = Sequence( "FLUSH", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), OneOf( Delimited( Sequence("BINARY", "LOGS"), Sequence("ENGINE", "LOGS"), Sequence("ERROR", "LOGS"), Sequence("GENERAL", "LOGS"), Sequence("QUERY", "CACHE"), Sequence("SLOW", "LOGS"), Sequence(Ref.keyword("RESET", optional=True), "MASTER"), Sequence(OneOf("GLOBAL", "SESSION", optional=True), "STATUS"), Sequence( "RELAY", "LOGS", Sequence("FOR", "CHANNEL", optional=True), Ref("ObjectReferenceSegment"), ), "HOSTS", "LOGS", "PRIVILEGES", "CHANGED_PAGE_BITMAPS", "CLIENT_STATISTICS", "DES_KEY_FILE", "INDEX_STATISTICS", "QUERY_RESPONSE_TIME", "SLAVE", "SSL", "TABLE_STATISTICS", "USER_STATISTICS", "USER_VARIABLES", "USER_RESOURCES", ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["WITH"]), optional=True, ), Sequence( "WITH", "READ", "LOCK", Sequence("AND", "DISABLE", "CHECKPOINT", optional=True), optional=True, ), ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["FOR"]), optional=False, ), Sequence("FOR", "EXPORT", optional=True), ), ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, Sequence( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), ), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), ), Ref("WithRollupClauseSegment", optional=True), Dedent, ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. 
https://mariadb.com/kb/en/insert/ """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( "LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", optional=True, ), Ref.keyword("IGNORE", optional=True), Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), Sequence( "PARTITION", Bracketed( Ref("SingleIdentifierListSegment"), ), optional=True, ), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), Ref("SetClauseListSegment"), Ref( "SelectableGrammar", terminators=[Ref("ReturningClauseSegment")], ), optional=True, ), Ref("InsertRowAliasSegment", optional=True), Ref("UpsertClauseListSegment", optional=True), Ref("ReturningClauseSegment", optional=True), ) class ReplaceSegment(BaseSegment): """A `REPLACE` statement. As per https://mariadb.com/kb/en/replace/ """ type = "replace_statement" match_grammar = Sequence( "REPLACE", OneOf("LOW_PRIORITY", "DELAYED", optional=True), Sequence("INTO", optional=True), Ref("TableReferenceSegment"), Ref("SelectPartitionClauseSegment", optional=True), OneOf( Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment"), ), Ref("SetClauseListSegment"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("SelectStatementSegment"), ), ), Ref("ReturningClauseSegment", optional=True), ) class ReturningClauseSegment(BaseSegment): """This is a `RETURNING` clause. A RETURNING clause returns values modified by a INSERT, DELETE or REPLACE query. https://mariadb.com/kb/en/insert/ https://mariadb.com/kb/en/delete/ https://mariadb.com/kb/en/replace/ """ type = "returning_clause" match_grammar: Matchable = Sequence( "RETURNING", Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), Dedent, terminators=[Ref("SelectClauseTerminatorGrammar")], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectStatementSegment(mysql.SelectStatementSegment): """A `SELECT` statement. https://mariadb.com/kb/en/select/ """ # Inherit most of the parse grammar from the original. match_grammar = mysql.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), Ref("IntoClauseSegment", optional=True), ], terminators=[ Ref("SetOperatorSegment"), Ref("UpsertClauseListSegment"), Ref("WithCheckOptionSegment"), Ref("ReturningClauseSegment"), ], # Overwrite the terminators, because we want to remove some from the # expression above. replace_terminators=True, ) class WithRollupClauseSegment(BaseSegment): """A `WITH ROLLUP` clause after the `GROUP BY` clause.""" type = "with_rollup_clause" match_grammar = Sequence( "WITH", "ROLLUP", ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_mariadb_keywords.py000066400000000000000000000146401503426445100251410ustar00rootroot00000000000000"""A List of MariaDB SQL keywords. 
The full list can be queried from MariaDB directly https://mariadb.com/kb/en/information-schema-keywords-table/ The reserved keywords are listed on https://mariadb.com/kb/en/reserved-words/ """ mariadb_reserved_keywords = """ ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE BEFORE BETWEEN BIGINT BINARY BLOB BOTH BY CALL CASCADE CASE CHANGE CHANGED_PAGE_BITMAPS CHAR CHARACTER CHECK CLIENT_STATISTICS COLLATE COLUMN CONDITION CONSTRAINT CONTINUE CONVERT CREATE CROSS CURRENT_DATE CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR DATABASE DATABASES DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC DECIMAL DECLARE DEFAULT DELAYED DELETE DELETE_DOMAIN_ID DESC DESCRIBE DETERMINISTIC DISTINCT DISTINCTROW DIV DO_DOMAIN_IDS DOUBLE DROP DUAL EACH ELSE ELSEIF ENCLOSED ESCAPED EXCEPT EXISTS EXIT EXPLAIN FALSE FETCH FLOAT FLOAT4 FLOAT8 FOR FORCE FOREIGN FROM FULLTEXT GENERAL GRANT GROUP HAVING HIGH_PRIORITY HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IF IGNORE IGNORE_DOMAIN_IDS IGNORE_SERVER_IDS IN INDEX INDEX_STATISTICS INFILE INNER INOUT INSENSITIVE INSERT INT INT1 INT2 INT3 INT4 INT8 INTEGER INTERSECT INTERVAL INTO IS ITERATE JOIN KEY KEYS KILL LEADING LEAVE LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME LOCALTIMESTAMP LOCK LONG LONGBLOB LONGTEXT LOOP LOW_PRIORITY MASTER_HEARTBEAT_PERIOD MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND MINUTE_SECOND MOD MODIFIES NATURAL NOT NO_WRITE_TO_BINLOG NULL NUMERIC OFFSET ON OPTIMIZE OPTION OPTIONALLY OR ORDER OUT OUTER OUTFILE OVER PAGE_CHECKSUM PARSE_VCOL_EXPR PARTITION PRECISION PRIMARY PROCEDURE PURGE RANGE READ READS READ_WRITE REAL RECURSIVE REF_SYSTEM_ID REFERENCES REGEXP RELEASE RENAME REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN RETURNING REVOKE RIGHT RLIKE ROW_NUMBER ROWS SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET SHOW SIGNAL SLOW SMALLINT SPATIAL SPECIFIC SQL SQLEXCEPTION SQLSTATE SQLWARNING SQL_BIG_RESULT SQL_CALC_FOUND_ROWS SQL_SMALL_RESULT SSL STARTING STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STRAIGHT_JOIN TABLE TERMINATED THEN TINYBLOB TINYINT TINYTEXT TO TRAILING TRIGGER TRUE UNDO UNION UNIQUE UNLOCK UNSIGNED UPDATE USAGE USE USING UTC_DATE UTC_TIME UTC_TIMESTAMP VALUES VARBINARY VARCHAR VARCHARACTER VARYING WHEN WHERE WHILE WINDOW WITH WRITE XOR YEAR_MONTH ZEROFILL """ mariadb_unreserved_keywords = """ ACCOUNT ACTION ADMIN AFTER AGAINST AGGREGATE ALGORITHM ALWAYS ANY ASCII AT ATOMIC AUTHORS AUTO AUTO_INCREMENT AUTOEXTEND_SIZE AVG AVG_ROW_LENGTH BACKUP BEGIN BINLOG BIT BLOCK BODY BOOL BOOLEAN BTREE BYTE CACHE CASCADED CATALOG_NAME CHAIN CHANGED CHANNEL CHARSET CHECKPOINT CHECKSUM CIPHER CLASS_ORIGIN CLIENT CLOB CLOSE COALESCE CODE COLLATION COLUMN_NAME COLUMNS COLUMN_ADD COLUMN_CHECK COLUMN_CREATE COLUMN_DELETE COLUMN_GET COMMENT COMMIT COMMITTED COMPACT COMPLETION COMPRESSED CONCURRENT CONNECTION CONSISTENT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONTAINS CONTEXT CONTRIBUTORS CPU CUBE CURRENT CURRENT_POS CURSOR_NAME CYCLE DATA DATAFILE DATE DATETIME DAY DEALLOCATE DEFINER DELAY_KEY_WRITE DES_KEY_FILE DIAGNOSTICS DIRECTORY DISABLE DISCARD DISK DO DUMPFILE DUPLICATE DYNAMIC ELSIF EMPTY ENABLE END ENDS ENGINE ENGINES ENUM ERROR ERRORS ESCAPE EVENT EVENTS EVERY EXAMINED EXCHANGE EXCLUDE EXECUTE EXCEPTION EXPANSION EXPIRE EXPORT EXTENDED EXTENT_SIZE FAST FAULTS FEDERATED FIELDS FILE FIRST FIXED FLUSH FOLLOWING FOLLOWS FORMAT FOUND FULL FUNCTION GENERATED GET_FORMAT GET GLOBAL GOTO GRANTS HANDLER HARD HASH HELP HISTORY HOST HOSTS 
HOUR ID IDENTIFIED IGNORED IMMEDIATE IMPORT INCREMENT INDEXES INITIAL_SIZE INSERT_METHOD INSTALL INVISIBLE IO IO_THREAD IPC ISOLATION ISOPEN ISSUER INVOKER JSON JSON_TABLE KEY_BLOCK_SIZE LANGUAGE LAST LAST_VALUE LASTVAL LEAVES LESS LEVEL LIST LOCAL LOCKED LOCKS LOGFILE LOGS MASTER MASTER_CONNECT_RETRY MASTER_DELAY MASTER_GTID_POS MASTER_HOST MASTER_LOG_FILE MASTER_LOG_POS MASTER_PASSWORD MASTER_PORT MASTER_SERVER_ID MASTER_SSL MASTER_SSL_CA MASTER_SSL_CAPATH MASTER_SSL_CERT MASTER_SSL_CIPHER MASTER_SSL_CRL MASTER_SSL_CRLPATH MASTER_SSL_KEY MASTER_USER MASTER_USE_GTID MASTER_DEMOTE_TO_SLAVE MAX_CONNECTIONS_PER_HOUR MAX_QUERIES_PER_HOUR MAX_ROWS MAX_SIZE MAX_STATEMENT_TIME MAX_UPDATES_PER_HOUR MAX_USER_CONNECTIONS MEDIUM MEMORY MERGE MESSAGE_TEXT MICROSECOND MIGRATE MINUS MINUTE MINVALUE MIN_ROWS MODE MODIFY MONITOR MONTH MUTEX MYSQL MYSQL_ERRNO NAME NAMES NATIONAL NCHAR NESTED NEVER NEXT NEXTVAL NO NOMAXVALUE NOMINVALUE NOCACHE NOCYCLE NO_WAIT NOWAIT NODEGROUP NONE NOTFOUND NUMBER NVARCHAR OF OLD_PASSWORD ONE ONLINE ONLY OPEN OPTIONS ORDINALITY OTHERS OVERLAPS OWNER PACKAGE PACK_KEYS PAGE PARSER PATH PERIOD PARTIAL PARTITIONING PARTITIONS PASSWORD PERSISTENT PHASE PLUGIN PLUGINS PORT PORTION PRECEDES PRECEDING PREPARE PRESERVE PREV PREVIOUS PRIVILEGES PROCESS PROCESSLIST PROFILE PROFILES PROXY QUARTER QUERY QUERY_RESPONSE_TIME QUICK RAISE RAW READ_ONLY REBUILD RECOVER REDO_BUFFER_SIZE REDOFILE REDUNDANT RELAY RELAYLOG RELAY_LOG_FILE RELAY_LOG_POS RELAY_THREAD RELOAD REMOVE REORGANIZE REPAIR REPEATABLE REPLAY REPLICA REPLICAS REPLICA_POS REPLICATION RESET RESTART RESTORE RESUME RETURNED_SQLSTATE RETURNS REUSE REVERSE ROLE ROLLBACK ROLLUP ROUTINE ROW ROWCOUNT ROWNUM ROWTYPE ROW_COUNT ROW_FORMAT RTREE SAVEPOINT SCHEDULE SCHEMA_NAME SECOND SECURITY SEQUENCE SERIAL SERIALIZABLE SESSION SERVER SETVAL SHARE SHUTDOWN SIGNED SIMPLE SKIP SLAVE SLAVES SLAVE_POS SNAPSHOT SOCKET SOFT SOME SONAME SOUNDS SOURCE STAGE STORED SQL_AFTER_GTIDS SQL_BEFORE_GTIDS SQL_BUFFER_RESULT SQL_CACHE SQL_NO_CACHE SQL_THREAD SQL_TSI_SECOND SQL_TSI_MINUTE SQL_TSI_HOUR SQL_TSI_DAY SQL_TSI_WEEK SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_YEAR START STARTS STATEMENT STATUS STOP STORAGE STRING SUBCLASS_ORIGIN SUBJECT SUBPARTITION SUBPARTITIONS SUPER SUSPEND SWAPS SWITCHES SYSDATE SYSTEM SYSTEM_TIME TABLE_NAME TABLES TABLESPACE TABLE_CHECKSUM TABLE_STATISTICS TEMPORARY TEMPTABLE TEXT THAN TIES TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TRANSACTION TRANSACTIONAL THREADS TRIGGERS TRUNCATE TYPE UNBOUNDED UNCOMMITTED UNDEFINED UNDO_BUFFER_SIZE UNDOFILE UNICODE UNKNOWN UNINSTALL UNTIL UPGRADE USER USER_RESOURCES USER_STATISTICS USER_VARIABLES USE_FRM VALIDATION VALUE VARCHAR2 VARIABLES VIA VIEW VIRTUAL VISIBLE VERSIONING WAIT WARNINGS WEEK WEIGHT_STRING WITHIN WITHOUT WORK WRAPPER X509 XA XML YEAR """ # These are not MariaDB keywords, but needed to parse well. # Taken from the mysql dialect mariadb_unreserved_keywords += """ NOW SHARED INPLACE NOCOPY INSTANT """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_materialize.py000066400000000000000000000724711503426445100241270ustar00rootroot00000000000000"""The Materialize dialect. This is based on postgres dialect, since it was initially based off of Postgres. 
We should monitor in the future and see if it should be rebased off of ANSI. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( Anything, BaseSegment, Bracketed, Delimited, KeywordSegment, MultiStringParser, OneOf, Ref, Sequence, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_materialize_keywords import ( materialize_reserved_keywords, materialize_unreserved_keywords, ) postgres_dialect = load_raw_dialect("postgres") materialize_dialect = postgres_dialect.copy_as( "materialize", formatted_name="Materialize", docstring="The dialect for `Materialize `_.", ) materialize_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", materialize_unreserved_keywords ) materialize_dialect.sets("reserved_keywords").clear() materialize_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", materialize_reserved_keywords ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterOwnerStatementSegment"), Ref("AlterConnectionRotateKeys"), Ref("AlterDefaultPrivilegesStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("AlterRenameStatementSegment"), Ref("AlterSecretStatementSegment"), Ref("AlterSetClusterStatementSegment"), Ref("AlterSourceSinkSizeStatementSegment"), Ref("CloseStatementSegment"), Ref("CopyToStatementSegment"), Ref("CopyFromStatementSegment"), Ref("CreateClusterStatementSegment"), Ref("CreateClusterReplicaStatementSegment"), Ref("CreateConnectionStatementSegment"), Ref("CreateIndexStatementSegment"), Ref("CreateMaterializedViewStatementSegment"), Ref("CreateSecretStatementSegment"), Ref("CreateSinkKafkaStatementSegment"), Ref("CreateSourceKafkaStatementSegment"), Ref("CreateSourceLoadGeneratorStatementSegment"), Ref("CreateSourcePostgresStatementSegment"), Ref("CreateSourceWebhookStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DropStatementSegment"), Ref("FetchStatementSegment"), Ref("GrantStatementSegment"), Ref("MaterializeExplainStatementSegment"), Ref("ShowStatementSegment"), Ref("ShowCreateStatementSegment"), Ref("ShowIndexesStatementSegment"), Ref("ShowMaterializedViewsStatementSegment"), Ref("DeclareStatementSegment"), ], remove=[ Ref("CreateIndexStatementSegment"), Ref("DropIndexStatementSegment"), ], ) materialize_dialect.sets("materialize_sizes").clear() materialize_dialect.sets("materialize_sizes").update( [ "3xsmall", "2xsmall", "xsmall", "small", "medium", "large", "xlarge", "2xlarge", "3xlarge", "4xlarge", "5xlarge", "6xlarge", ], ) materialize_dialect.add( InstanceSizes=OneOf( MultiStringParser( materialize_dialect.sets("materialize_sizes"), KeywordSegment, type="materialize_size", ), MultiStringParser( [ f"'{compression}'" for compression in materialize_dialect.sets("materialize_sizes") ], KeywordSegment, type="compression_type", ), ), InCluster=Sequence( "IN", "CLUSTER", Ref("ObjectReferenceSegment"), ), Privileges=OneOf( "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "USAGE", "CREATEROLE", "CREATEDB", "CREATECLUSTER", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), ) class AlterOwnerStatementSegment(BaseSegment): """An `ALTER OWNER` statement.""" type = "alter_owner_statement" match_grammar = Sequence( "ALTER", OneOf( "CONNECTION", "CLUSTER", Sequence("CLUSTER", "REPLICA"), "INDEX", "SOURCE", "SINK", "VIEW", Sequence("MATERIALIZED", "VIEW"), "TABLE", "SECRET", ),
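# The object being reassigned, then the new owner. A hedged example of
# the SQL shape this targets (illustrative, not taken verbatim from the
# Materialize docs):
#   ALTER TABLE t1 OWNER TO new_owner;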
Ref("ObjectReferenceSegment"), Sequence("OWNER", "TO"), Ref("ObjectReferenceSegment"), ) class AlterConnectionRotateKeys(BaseSegment): """`ALTER CONNECTION` statement.""" type = "alter_connection_rotate_keys" match_grammar = Sequence( "ALTER", "CONNECTION", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "ROTATE", "KEYS", ) class AlterDefaultPrivilegesStatementSegment(BaseSegment): """A `ALTER DEFAULT PRIVILEGES` statement.""" type = "alter_default_privileges_statement" match_grammar = Sequence( Sequence("ALTER", "DEFAULT", "PRIVILEGES", "FOR"), OneOf( Sequence( OneOf("ROLE", "USER"), Ref("ObjectReferenceSegment"), ), Sequence("ALL", "ROLES"), ), Sequence( "IN", OneOf("SCHEMA", "DATABASE"), Ref("ObjectReferenceSegment"), optional=True, ), "GRANT", Ref("Privileges"), "ON", OneOf( "TABLES", "TYPES", "SECRETS", "CONNECTIONS", "DATABASES", "SCHEMAS", "CLUSTERS", ), "TO", Ref("ObjectReferenceSegment"), ) class AlterRenameStatementSegment(BaseSegment): """A `ALTER RENAME` statement.""" type = "alter_rename_statement" match_grammar = Sequence( "ALTER", OneOf( "CONNECTION", Sequence("CLUSTER", Ref.keyword("REPLICA", optional=True)), "INDEX", "SOURCE", "SINK", "VIEW", Sequence("MATERIALIZED", "VIEW"), "TABLE", "SECRET", ), Ref("ObjectReferenceSegment"), Sequence("RENAME", "TO"), Ref("ObjectReferenceSegment"), ) class AlterIndexStatementSegment(BaseSegment): """A `ALTER INDEX` statement.""" type = "alter_index_statement" match_grammar = Sequence( "ALTER", "INDEX", Ref("ObjectReferenceSegment"), Sequence("SET", "ENABLED"), ) class AlterSecretStatementSegment(BaseSegment): """A `ALTER SECRET` statement.""" type = "alter_secret_statement" match_grammar = Sequence( "ALTER", "SECRET", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "AS", Anything(), ) class AlterSetClusterStatementSegment(BaseSegment): """A `ALTER SET CLUSTER` statement.""" type = "alter_set_cluster_statement" match_grammar = Sequence( Sequence("ALTER", "MATERIALIZED", "VIEW"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence("IN", "CLUSTER"), Ref("ObjectReferenceSegment"), ) class AlterSourceSinkSizeStatementSegment(BaseSegment): """A `ALTER SOURCE/SINK SET SIZE` statement.""" type = "alter_source_sink_size_statement" match_grammar = Sequence( "ALTER", OneOf("SOURCE", "SINK"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "SET", Bracketed( "SIZE", Ref("InstanceSizes"), ), ) class CloseStatementSegment(BaseSegment): """A `CLOSE` statement.""" type = "close_statement" match_grammar = Sequence( "CLOSE", Ref("ObjectReferenceSegment"), ) class CopyToStatementSegment(BaseSegment): """A `COPY TO` statement.""" type = "copy_to_statement" match_grammar = Sequence( "COPY", Bracketed( # SELECT statement or SUBSCRIBE statement OneOf( Ref("SelectStatementSegment"), Sequence( "SUBSCRIBE", Ref("ObjectReferenceSegment"), ), Sequence( "VALUES", Delimited( Anything(), ), ), ), ), "TO", "STDOUT", Sequence( "WITH", Bracketed( Anything(), ), optional=True, ), ) class CopyFromStatementSegment(BaseSegment): """A `COPY FROM` statement.""" type = "copy_from_statement" match_grammar = Sequence( "COPY", Ref("ObjectReferenceSegment"), Bracketed( Anything(), optional=True, ), "FROM", "STDIN", Sequence( Sequence( "WITH", optional=True, ), Bracketed( Anything(), ), optional=True, ), ) class CreateClusterStatementSegment(BaseSegment): """A `CREATE CLUSTER` statement.""" type = "create_cluster_statement" match_grammar = Sequence( "CREATE", "CLUSTER", 
Ref("ObjectReferenceSegment"), OneOf( Sequence( "REPLICAS", Bracketed( Delimited( Anything(), ) ), optional=True, ), Sequence( Anything(), optional=True, ), ), ) class CreateClusterReplicaStatementSegment(BaseSegment): """A `CREATE CLUSTER REPLICA` statement.""" type = "create_cluster_replica_statement" match_grammar = Sequence( "CREATE", "CLUSTER", "REPLICA", Ref("ObjectReferenceSegment"), Sequence( Anything(), optional=True, ), ) class CreateConnectionStatementSegment(BaseSegment): """A `CREATE CONNECTION` statement.""" type = "create_connection_statement" match_grammar = Sequence( "CREATE", "CONNECTION", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "TO", OneOf( Sequence( "AWS", "PRIVATELINK", ), Sequence( "CONFLUENT", "SCHEMA", "REGISTRY", ), "KAFKA", "POSTGRES", Sequence( "SSH", "TUNNEL", ), ), Bracketed(Anything()), ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` statement.""" type = "create_index_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( "INDEX", Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), "ON", Ref("ObjectReferenceSegment"), Sequence( "USING", Anything(), optional=True, ), Bracketed( Delimited( Anything(), ) ), ), Sequence( "DEFAULT", "INDEX", Ref("InCluster", optional=True), "ON", Ref("ObjectReferenceSegment"), Sequence( "USING", Anything(), optional=True, ), ), ), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement.""" type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Ref("InCluster", optional=True), "AS", Anything(), ), Sequence( Ref("OrReplaceGrammar"), "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Ref("InCluster", optional=True), "AS", Anything(), ), ), ) class CreateSecretStatementSegment(BaseSegment): """A `CREATE SECRET` statement.""" type = "create_secret_statement" match_grammar = Sequence( "CREATE", "SECRET", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "AS", Anything(), ) class CreateSinkKafkaStatementSegment(BaseSegment): """A `CREATE SINK KAFKA` statement.""" type = "create_sink_kafka_statement" match_grammar = Sequence( "CREATE", "SINK", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), "FROM", Ref("ObjectReferenceSegment"), "INTO", Anything(), Sequence( "KEY", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ) ), optional=True, ), Sequence( "FORMAT", Anything(), optional=True, ), Sequence( "ENVELOPE", OneOf( "DEBEZIUM", "UPSERT", ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceKafkaStatementSegment(BaseSegment): """A `CREATE SOURCE KAFKA` statement.""" type = "create_source_kafka_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), "FROM", "KAFKA", "CONNECTION", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Anything(), ) ), Sequence( "KEY", "FORMAT", Anything(), "VALUE", "FORMAT", Anything(), optional=True, ), Sequence( "FORMAT", Anything(), optional=True, ), Sequence( "INCLUDE", Delimited( 
Anything(), ), optional=True, ), Sequence( "ENVELOPE", OneOf( "NONE", "DEBEZIUM", "UPSERT", ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceLoadGeneratorStatementSegment(BaseSegment): """A `CREATE SOURCE LOAD GENERATOR` statement.""" type = "create_source_load_generator_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "LOAD", "GENERATOR", ), OneOf( "AUCTION", "COUNTER", "MARKETING", "TPCH", ), Bracketed( Delimited( Anything(), ), optional=True, ), Sequence( "FOR", "ALL", "TABLES", optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourcePostgresStatementSegment(BaseSegment): """A `CREATE SOURCE POSTGRES` statement.""" type = "create_source_postgres_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "POSTGRES", "CONNECTION", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Anything(), ) ), optional=True, ), OneOf( Sequence( "FOR", "ALL", "TABLES", ), Sequence( "FOR", "TABLES", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceWebhookStatementSegment(BaseSegment): """A `CREATE SOURCE WEBHOOK` statement.""" type = "create_source_load_generator_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "WEBHOOK", "BODY", "FORMAT", ), OneOf( "TEXT", "JSON", "BYTES", ), OneOf( Sequence( "INCLUDE", "HEADER", Sequence( Anything(), optional=True, ), ), Sequence( "INCLUDE", "HEADERS", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( "CHECK", Bracketed( "WITH", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( Anything(), optional=True, ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement.""" type = "create_type_statement" match_grammar = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence( "AS", Bracketed( Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("DatatypeSegment"), ), ), ), ), Sequence( "AS", OneOf( "LIST", "MAP", ), Bracketed( Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("EqualsSegment"), Anything(), ) ) ), ), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" match_grammar = Sequence( "CREATE", OneOf( "TEMP", "TEMPORARY", optional=True, ), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), "AS", Ref("SelectableGrammar"), ) class DropStatementSegment(BaseSegment): """A `DROP` statement.""" type = "drop_statement" match_grammar = Sequence( "DROP", OneOf( "CONNECTION", "CLUSTER", Sequence( "CLUSTER", "REPLICA", ), "DATABASE", "INDEX", Sequence( "MATERIALIZED", "VIEW", ), "ROLE", "SECRET", "SCHEMA", "SINK", "SOURCE", "TABLE", "TYPE", "VIEW", "USER", ), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "CASCADE", ), Sequence( "RESTRICT", ), optional=True, ), ) class ShowStatementSegment(BaseSegment): """A Materialize `SHOW` statement.""" type = "show_statement" match_grammar = 
Sequence( "SHOW", OneOf( "COLUMNS", "CONNECTIONS", "CLUSTERS", Sequence("CLUSTER", "REPLICAS"), "DATABASES", "INDEXES", Sequence("MATERIALIZED", "VIEWS"), "SECRETS", "SCHEMAS", "SINKS", "SOURCES", "TABLES", "TYPES", "VIEWS", "OBJECTS", ), Ref("ObjectReferenceSegment", optional=True), # FROM is optional for some object types Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), # Like or where is optional for some object types OneOf( Sequence( "LIKE", Ref("QuotedLiteralSegment"), ), Sequence( "WHERE", Ref("ExpressionSegment"), ), optional=True, ), ) class ShowCreateStatementSegment(BaseSegment): """A Materialize `SHOW CREATE` statement.""" type = "show_create_statement" match_grammar = Sequence( "SHOW", "CREATE", OneOf( Sequence("CONNECTION", optional=True), Sequence("INDEX", optional=True), Sequence("MATERIALIZED", "VIEW", optional=True), Sequence("SINK", optional=True), Sequence("SOURCE", optional=True), Sequence("TABLE", optional=True), Sequence("VIEW", optional=True), ), Ref("ObjectReferenceSegment"), ) class ShowIndexesStatementSegment(BaseSegment): """A Materialize `SHOW INDEXES` statement.""" type = "show_indexes_statement" match_grammar = Sequence( "SHOW", "INDEXES", Sequence( "ON", Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), Ref("InCluster", optional=True), OneOf( Sequence( "LIKE", Ref("QuotedLiteralSegment"), ), Sequence( "WHERE", Ref("ExpressionSegment"), ), optional=True, ), ) class ShowMaterializedViewsStatementSegment(BaseSegment): """A Materialize `SHOW MATERIALIZED VIEWS` statement.""" type = "show_materialized_views_statement" match_grammar = Sequence( "SHOW", "MATERIALIZED", "VIEWS", Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), Ref("InCluster", optional=True), ) class MaterializeExplainStatementSegment(BaseSegment): """A `EXPLAIN` statement.""" type = "explain_statement" match_grammar = Sequence( "EXPLAIN", OneOf( Sequence( OneOf( "RAW", "DECORRELATED", "OPTIMIZED", "PHYSICAL", optional=True, ), "PLAN", optional=True, ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), Sequence( "AS", OneOf( "TEXT", "JSON", ), optional=True, ), Sequence( "FOR", optional=True, ), OneOf( Ref("SelectableGrammar"), Sequence( "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( Anything(), ), ), ) class FetchStatementSegment(BaseSegment): """A `FETCH` statement.""" type = "fetch_statement" match_grammar = Sequence( "FETCH", Sequence( "FORWARD", optional=True, ), OneOf( "ALL", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "FROM", optional=True, ), Ref("ObjectReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class GrantStatementSegment(BaseSegment): """A `GRANT` statement.""" type = "grant_statement" match_grammar = Sequence( "GRANT", Ref("Privileges"), "ON", OneOf( Sequence( OneOf( "TABLE", "TYPE", "SECRET", "CONNECTION", "DATABASE", "SCHEMA", "CLUSTER", optional=True, ), Delimited( Ref("ObjectReferenceSegment"), ), ), "SYSTEM", Sequence( "ALL", OneOf( Sequence( OneOf( "TABLES", "TYPES", "SECRETS", "CONNECTIONS", ), "IN", "SCHEMA", Delimited( Ref("ObjectReferenceSegment"), ), ), Sequence( OneOf("TABLES", "TYPES", "SECRETS", "CONNECTIONS", "SCHEMAS"), "IN", "DATABASE", Delimited( Ref("ObjectReferenceSegment"), ), ), "DATABASES", "SCHEMAS", "CLUSTERS", ), ), ), "TO", Sequence("GROUP", optional=True), Delimited( 
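# One or more grantee roles, e.g. (illustrative):
#   GRANT SELECT ON TABLE t1 TO role_a, role_b;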
Ref("ObjectReferenceSegment"), ), ) class DeclareStatementSegment(BaseSegment): """A `DECLARE` statement.""" type = "declare_statement" match_grammar = Sequence( "DECLARE", Ref("ObjectReferenceSegment"), "CURSOR", Sequence( "WITHOUT", "HOLD", optional=True, ), "FOR", OneOf( Ref("SelectableGrammar"), Sequence( "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( Anything(), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_materialize_keywords.py000066400000000000000000000053771503426445100260570ustar00rootroot00000000000000"""A list of all Materialize SQL key words. https://materialize.com/docs/sql/identifiers """ materialize_reserved_keywords = """ALL ALTER AND ANY AS BY CAST CHECK CLUSTER CLUSTERS CONNECTION CONNECTIONS CONSTRAINT CREATE CROSS CURRENT DELETE DISTINCT DROP ELSE EXISTS FOLLOWING FOR FROM FULL GROUP ILIKE IN INNER INSERT INTERSECT INTO IS JOIN LATERAL LEFT LIKE NATURAL NOT NULL NULLIF OF ON OR ORDER RETURNING ROW ROWS SELECT SET SINK SINKS TO UNION UNIQUE UPDATE USING VALUES WHEN WHERE WITH """ materialize_unreserved_keywords = """ACCESS ACKS ARN ARRANGEMENT ARRAY ASC AT AUCTION AUTHORITY AVAILABILITY AVRO AWS BEGIN BETWEEN BIGINT BODY BOOLEAN BOTH BPCHAR BROKER BROKERS BUCKET BYTES CASCADE CASE CERTIFICATE CHAIN CHAR CHARACTER CHARACTERISTICS CLIENT CLOSE COALESCE COLLATE COLUMNS COMMIT COMMITTED COMPACTION COMPRESSION COMPUTE CONFLUENT COPY COUNT COUNTER CREATEROLE CREATEDB CREATECLUSTER CSV CURSOR DATABASE DATABASES DATUMS DAY DAYS DEALLOCATE DEBEZIUM DEBUG DEBUGGING DEC DECIMAL DECLARE DECORRELATED DEFAULT DELIMITED DELIMITER DESC DETAILS DISCARD DISCOVER DOT DOUBLE EFFORT ELEMENT ENABLE ENABLED END ENDPOINT ENFORCED ENVELOPE ESCAPE EXCEPT EXECUTE EXPECTED EXPLAIN EXTRACT FACTOR FALSE FETCH FIELDS FILTER FIRST FLOAT FOREIGN FORMAT FORWARD FULLNAME GENERATOR GRAPH GREATEST GROUPS GZIP HAVING HEADER HEADERS HOLD HOST HOUR HOURS ID IDEMPOTENCE IDLE IF IGNORE INCLUDE INDEX INDEXES INFO INHERIT INLINE INT INTEGER INTERSECT INTERVAL INTROSPECTION ISNULL ISOLATION JSON KAFKA KEY KEYS KINESIS LAST LATEST LEADING LEAST LEVEL LIMIT LIST LOAD LOCAL LOG LOGICAL LOGIN MANAGED MAP MARKETING MATCHING MATERIALIZE MATERIALIZED MAX MECHANISMS MESSAGE METADATA MINUTE MINUTES MODE MONTH MONTHS MS NAME NAMES NEXT NO NOLOGIN NONE NOSUPERUSER NOTICE NOTIFICATIONS NULLS OBJECTS OFFSET ONLY OPERATOR OPTIMIZED OPTIMIZER OPTIONS ORDINALITY OUTER OVER OWNER PARTITION PASSWORD PHYSICAL PLAN PLANS PORT POSITION POSTGRES PRECEDING PRECISION PREFIX PREPARE PRIMARY PRIVATELINK PROGRESS PROTOBUF PUBLICATION QUERY QUOTE RAISE RANGE RAW READ REAL REFERENCES REFRESH REGEX REGION REGISTRY REMOTE RENAME REPEATABLE REPLACE REPLICA REPLICAS REPLICATION RESET RESTRICT RETENTION RIGHT ROLE ROLES ROLLBACK ROTATE S3 SASL SCALE SCAN SCHEMA SCHEMAS SCRIPT SECOND SECONDS SECRET SECRETS SEED SEQUENCES SERIALIZABLE SERVICE SESSION SHOW SIZE SMALLINT SNAPSHOT SOME SOURCE SOURCES SQS SSH SSL START STDIN STDOUT STRATEGY STRING SUBSCRIBE SUBSOURCE SUBSTRING SUPERUSER SYSTEM TABLE TABLES TAIL TEMP TEMPORARY TEST TEXT THEN TICK TIES TIME TIMELINE TIMEOUT TIMESTAMP TOKEN TOPIC TPCH TRACE TRAILING TRANSACTION TRIM TRUE TUNNEL TYPE TYPES UNBOUNDED UNCOMMITTED UNKNOWN UPSERT URL USER USERNAME USERS VALUE VARCHAR VARIADIC VARYING VIEW VIEWS WARNING WEBHOOK WINDOW WIRE WITHOUT WORK WORKERS WRITE YEAR YEARS ZONE ZONES """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_mysql.py000066400000000000000000003106261503426445100227630ustar00rootroot00000000000000"""The 
MySQL dialect. For now the only change is the parsing of comments. https://dev.mysql.com/doc/refman/8.0/en/differences-from-ansi.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, BinaryOperatorSegment, Bracketed, CodeSegment, CommentSegment, CompositeComparisonOperatorSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_mysql_keywords import ( mysql_reserved_keywords, mysql_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") mysql_dialect = ansi_dialect.copy_as( "mysql", formatted_name="MySQL", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, ``""`` or ``@``, Identifiers: |back_quotes|. The dialect for `MySQL `_.""", ) mysql_dialect.patch_lexer_matchers( [ RegexLexer( "inline_comment", r"(^--|-- |#)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--", "#")}, ), # Pattern breakdown: # (?s) DOTALL (dot matches newline) # ( group1 start # ' single quote (start) # (?: non-capturing group: begin # \\' MySQL escaped single-quote # |'' or ANSI escaped single-quotes # |\\\\ or consecutive [escaped] backslashes # |[^'] or anything besides a single-quote # )* non-capturing group: end (zero or more times) # ' single quote (end of the single-quoted string) # (?!') negative lookahead: not single quote # ) group1 end RegexLexer( "single_quote", r"(?s)('(?:\\'|''|\\\\|[^'])*'(?!'))", CodeSegment, segment_kwargs={ "quoted_value": (r"(?s)('((?:\\'|''|\\\\|[^'])*)'(?!'))", 2), "escape_replacements": [(r"\\'|''", "'")], }, ), RegexLexer( "double_quote", r'(?s)("(?:\\"|""|\\\\|[^"])*"(?!"))', CodeSegment, segment_kwargs={ "quoted_value": (r'(?s)("((?:\\"|""|\\\\|[^"])*)"(?!"))', 2), "escape_replacements": [(r'\\"|""', '"')], }, ), ] ) mysql_dialect.insert_lexer_matchers( [ RegexLexer( "hexadecimal_literal", r"([xX]'([\da-fA-F][\da-fA-F])+'|0x[\da-fA-F]+)", LiteralSegment, segment_kwargs={"type": "numeric_literal"}, ), RegexLexer( "bit_value_literal", r"([bB]'[01]+'|0b[01]+)", LiteralSegment, segment_kwargs={"type": "numeric_literal"}, ), ], before="numeric_literal", ) # Set Keywords # Do not clear inherited unreserved ansi keywords. Too many are needed to parse well. # Just add MySQL unreserved keywords. 
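# As elsewhere in this module, keyword sets behave like plain Python
# sets and can be inspected via the dialect's sets() accessor -- a
# minimal sketch, assuming the internal API used throughout this file:
#   mysql_dialect.sets("unreserved_keywords")  # -> set of keyword strings
# Unreserved keywords may still parse as identifiers; reserved keywords
# are excluded via the anti_template on NakedIdentifierSegment below.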
mysql_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", mysql_unreserved_keywords ) mysql_dialect.sets("reserved_keywords").clear() mysql_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", mysql_reserved_keywords ) # Set the datetime units mysql_dialect.sets("datetime_units").clear() mysql_dialect.sets("datetime_units").update( [ # https://github.com/mysql/mysql-server/blob/1bfe02bdad6604d54913c62614bde57a055c8332/sql/sql_yacc.yy#L12321-L12345 # interval: "DAY_HOUR", "DAY_MICROSECOND", "DAY_MINUTE", "DAY_SECOND", "HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "MINUTE_MICROSECOND", "MINUTE_SECOND", "SECOND_MICROSECOND", "YEAR_MONTH", # interval_time_stamp "DAY", "WEEK", "HOUR", "MINUTE", "MONTH", "QUARTER", "SECOND", "MICROSECOND", "YEAR", ] ) mysql_dialect.sets("date_part_function_name").clear() mysql_dialect.sets("date_part_function_name").update( [ "EXTRACT", "TIMESTAMPADD", "TIMESTAMPDIFF", ] ) mysql_dialect.replace( QuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("DoubleQuotedLiteralSegment"), Ref("SystemVariableSegment"), ] ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[ Ref("IndexHintClauseSegment"), Ref("SelectPartitionClauseSegment"), Ref("ForClauseSegment"), Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithCheckOptionSegment"), Ref("IntoClauseSegment"), ] ), WhereClauseTerminatorGrammar=ansi_dialect.get_grammar( "WhereClauseTerminatorGrammar" ).copy( insert=[ Ref("IntoClauseSegment"), ], ), BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( insert=[ Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("VariableAssignmentSegment"), ] ), Expression_D_Potential_Select_Statement_Without_Brackets=ansi_dialect.get_grammar( "Expression_D_Potential_Select_Statement_Without_Brackets" ).copy( insert=[ Ref("SessionVariableNameSegment"), ], at=0, ), BinaryOperatorGrammar=ansi_dialect.get_grammar("BinaryOperatorGrammar").copy( insert=[ Ref("ColumnPathOperatorSegment"), Ref("InlinePathOperatorSegment"), ] ), ArithmeticBinaryOperatorGrammar=ansi_dialect.get_grammar( "ArithmeticBinaryOperatorGrammar" ).copy( insert=[ Ref("DivOperatorSegment"), Ref("ModOperatorSegment"), ], ), DateTimeLiteralGrammar=Sequence( # MySQL does not require the keyword to be specified: # https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html OneOf( "DATE", "TIME", "TIMESTAMP", optional=True, ), OneOf( TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal", ), Ref("NumericLiteralSegment"), ), ), QuotedLiteralSegment=AnyNumberOf( # MySQL allows whitespace-concatenated string literals (#1488). # Since these string literals can have comments between them, # we use grammar to handle this. 
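# e.g. SELECT 'foo' 'bar' AS s; -- MySQL reads the two adjacent
# literals as the single literal 'foobar', hence one-or-more here.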
TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), Ref("DoubleQuotedLiteralSegment"), min_times=1, ), UniqueKeyGrammar=Sequence( "UNIQUE", Ref.keyword("KEY", optional=True), ), # Odd syntax, but pr CharCharacterSetGrammar=Ref.keyword("BINARY"), DelimiterGrammar=OneOf(Ref("SemicolonSegment"), Ref("TildeSegment")), TildeSegment=StringParser("~", SymbolSegment, type="statement_terminator"), ParameterNameSegment=RegexParser( r"`?[A-Za-z0-9_]*`?", CodeSegment, type="parameter" ), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[Ref("SessionVariableNameSegment")] ), AndOperatorGrammar=OneOf( StringParser("AND", BinaryOperatorSegment), StringParser("&&", BinaryOperatorSegment), ), OrOperatorGrammar=OneOf( StringParser("OR", BinaryOperatorSegment), StringParser("||", BinaryOperatorSegment), StringParser("XOR", BinaryOperatorSegment), ), NotOperatorGrammar=OneOf( StringParser("NOT", KeywordSegment, type="keyword"), StringParser("!", CodeSegment, type="not_operator"), ), Expression_C_Grammar=Sequence( Sequence( Ref("SessionVariableNameSegment"), Ref("WalrusOperatorSegment"), optional=True, ), ansi_dialect.get_grammar("Expression_C_Grammar"), ), ColumnConstraintDefaultGrammar=OneOf( Bracketed(ansi_dialect.get_grammar("ColumnConstraintDefaultGrammar")), ansi_dialect.get_grammar("ColumnConstraintDefaultGrammar"), ), NakedIdentifierSegment=SegmentGenerator( lambda dialect: RegexParser( r"([A-Z0-9_]*[A-Z][A-Z0-9_]*)|_", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), LikeGrammar=OneOf("LIKE", "RLIKE", "REGEXP"), CollateGrammar=Sequence("COLLATE", Ref("CollationReferenceSegment")), ComparisonOperatorGrammar=ansi_dialect.get_grammar( "ComparisonOperatorGrammar" ).copy(insert=[Ref("NullSafeEqualsSegment")]), ) mysql_dialect.add( DoubleQuotedLiteralSegment=TypedParser( "double_quote", LiteralSegment, type="quoted_literal", trim_chars=('"',), ), # MySQL allows the usage of a double quoted identifier for an alias. DoubleQuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier" ), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", ), SystemVariableSegment=RegexParser( r"@@((session|global|local|persist|persist_only)\.)?[A-Za-z0-9_]+", CodeSegment, type="system_variable", ), DivOperatorSegment=StringParser("DIV", KeywordSegment, type="binary_operator"), ModOperatorSegment=StringParser("MOD", KeywordSegment, type="binary_operator"), DoubleQuotedJSONPath=TypedParser( "double_quote", CodeSegment, type="json_path", trim_chars=('"',), ), SingleQuotedJSONPath=TypedParser( "single_quote", CodeSegment, type="json_path", trim_chars=("'",), ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. """ type = "alias_expression" match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedIdentifierSegment"), ), Dedent, ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. 
for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf( # Column type # DATETIME and TIMESTAMP take special logic Ref( "DatatypeSegment", exclude=OneOf("DATETIME", "TIMESTAMP"), ), Sequence( OneOf("DATETIME", "TIMESTAMP"), Ref("BracketedArguments", optional=True), # Precision AnyNumberOf( # Allow NULL/NOT NULL, DEFAULT, and ON UPDATE in any order Sequence(Sequence("NOT", optional=True), "NULL", optional=True), Sequence( "DEFAULT", OneOf( Sequence( OneOf("CURRENT_TIMESTAMP", "NOW"), Bracketed( Ref("NumericLiteralSegment", optional=True), optional=True, ), ), Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), "NULL", ), optional=True, ), Sequence( "ON", "UPDATE", OneOf( "CURRENT_TIMESTAMP", "NOW", Bracketed( Ref("NumericLiteralSegment", optional=True), optional=True, ), ), optional=True, ), optional=True, ), ), ), Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """Create table segment. https://dev.mysql.com/doc/refman/8.0/en/create-table.html """ ES = Ref("ExpressionSegment") CRS = Ref("ColumnReferenceSegment") NLS = Ref("NumericLiteralSegment") ORS = Ref("ObjectReferenceSegment") TRS = Ref("TableReferenceSegment") SQIS = Ref("SingleQuotedIdentifierSegment") match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), Ref("CommentClauseSegment", optional=True), ), # Create AS syntax: Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), AnyNumberOf( Sequence( Ref.keyword("DEFAULT", optional=True), OneOf( Ref("ParameterNameSegment"), Sequence("CHARACTER", "SET"), Sequence(OneOf("DATA", "INDEX"), "DIRECTORY"), Sequence("WITH", "SYSTEM"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(Ref("TableReferenceSegment")), ), ), ), # Partition Options # https://dev.mysql.com/doc/refman/8.0/en/create-table.html#create-table-partitioning Sequence( "PARTITION", "BY", OneOf( Sequence( Ref.keyword("LINEAR", optional=True), OneOf( Sequence("HASH", Ref("ExpressionSegment")), Sequence( "KEY", Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Delimited(Ref("ColumnReferenceSegment")), ), ), ), Sequence( OneOf("RANGE", "LIST"), OneOf( Ref("ExpressionSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ), ), Sequence("PARTITIONS", Ref("NumericLiteralSegment"), optional=True), Sequence( "SUBPARTITION", "BY", Sequence( Ref.keyword("LINEAR", optional=True), OneOf( Sequence("HASH", Ref("ExpressionSegment")), Sequence( "KEY", Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Bracketed(Ref("ColumnReferenceSegment")), ), ), ), Sequence( 
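# Optional subpartition count, e.g. (illustrative):
#   ... SUBPARTITION BY HASH (col1) SUBPARTITIONS 2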
"SUBPARTITIONS", Ref("NumericLiteralSegment"), optional=True, ), optional=True, ), # optional partition_definition(s) AnyNumberOf( Bracketed( Delimited( Sequence( "PARTITION", Ref("ColumnReferenceSegment"), AnyNumberOf( Sequence( "VALUES", OneOf( Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed( OneOf( ES, CRS, NLS, Ref("LiteralGrammar"), ), ), ), ), Sequence( "IN", Bracketed( Ref("ObjectReferenceSegment") ), ), ), ), Sequence( OneOf( Ref("ParameterNameSegment"), Sequence("CHARACTER", "SET"), Sequence( OneOf("DATA", "INDEX"), "DIRECTORY", ), Sequence("WITH", "SYSTEM"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(Ref("TableReferenceSegment")), ), ), ), # optional subpartition_definition(s) Sequence( Ref.keyword("SUBPARTITION", optional=True), Ref("LiteralGrammar"), AnyNumberOf( Sequence( "VALUES", OneOf( Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed(ES), Bracketed(CRS), ), ), Sequence( "IN", Bracketed(ORS), ), ), ), Sequence( OneOf( Ref("ParameterNameSegment"), Sequence("CHARACTER", "SET"), Sequence( OneOf("DATA", "INDEX"), "DIRECTORY", ), Sequence("WITH", "SYSTEM"), ), Ref( "EqualsSegment", optional=True, ), OneOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), SQIS, Ref("NumericLiteralSegment"), # Union option Bracketed( Delimited(TRS), ), ), ), ), ), ), ), ), ), ), ), ), ) class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. https://dev.mysql.com/doc/refman/8.0/en/create-user.html """ match_grammar = Sequence( "CREATE", "USER", Ref("IfNotExistsGrammar", optional=True), Delimited( Sequence( Ref("RoleReferenceSegment"), Sequence( Delimited( Sequence( "IDENTIFIED", OneOf( Sequence( "BY", OneOf( Sequence("RANDOM", "PASSWORD"), Ref("QuotedLiteralSegment"), ), ), Sequence( "WITH", Ref("ObjectReferenceSegment"), Sequence( OneOf( Sequence( "BY", OneOf( Sequence("RANDOM", "PASSWORD"), Ref("QuotedLiteralSegment"), ), ), Sequence("AS", Ref("QuotedLiteralSegment")), Sequence( "INITIAL", "AUTHENTICATION", "IDENTIFIED", OneOf( Sequence( "BY", OneOf( Sequence( "RANDOM", "PASSWORD" ), Ref("QuotedLiteralSegment"), ), ), Sequence( "WITH", Ref("ObjectReferenceSegment"), "AS", Ref("QuotedLiteralSegment"), ), ), ), ), optional=True, ), ), ), ), delimiter="AND", ), optional=True, ), ), ), Sequence( "DEFAULT", "ROLE", Delimited(Ref("RoleReferenceSegment")), optional=True, ), Sequence( "REQUIRE", OneOf( "NONE", Delimited( OneOf( "SSL", "X509", Sequence("CIPHER", Ref("QuotedLiteralSegment")), Sequence("ISSUER", Ref("QuotedLiteralSegment")), Sequence("SUBJECT", Ref("QuotedLiteralSegment")), ), delimiter="AND", ), ), optional=True, ), Sequence( "WITH", AnyNumberOf( Sequence( OneOf( "MAX_QUERIES_PER_HOUR", "MAX_UPDATES_PER_HOUR", "MAX_CONNECTIONS_PER_HOUR", "MAX_USER_CONNECTIONS", ), Ref("NumericLiteralSegment"), ) ), optional=True, ), Sequence( AnyNumberOf( Sequence( "PASSWORD", "EXPIRE", Sequence( OneOf( "DEFAULT", "NEVER", Sequence("INTERVAL", Ref("NumericLiteralSegment"), "DAY"), ), optional=True, ), ), Sequence( "PASSWORD", "HISTORY", OneOf("DEFAULT", Ref("NumericLiteralSegment")), ), Sequence( "PASSWORD", "REUSE", "INTERVAL", OneOf("DEFAULT", Sequence(Ref("NumericLiteralSegment"), "DAY")), ), Sequence( "PASSWORD", "REQUIRE", "CURRENT", Sequence(OneOf("DEFAULT", "OPTIONAL"), optional=True), ), Sequence("FAILED_LOGIN_ATTEMPTS", 
Ref("NumericLiteralSegment")), Sequence( "PASSWORD_LOCK_TIME", OneOf(Ref("NumericLiteralSegment"), "UNBOUNDED"), ), ), optional=True, ), Sequence("ACCOUNT", OneOf("UNLOCK", "LOCK"), optional=True), Sequence( OneOf("COMMENT", "ATTRIBUTE"), Ref("QuotedLiteralSegment"), optional=True, ), ) class UpsertClauseListSegment(BaseSegment): """An `ON DUPLICATE KEY UPDATE` statement. https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html """ type = "upsert_clause_list" match_grammar = Sequence( "ON", "DUPLICATE", "KEY", "UPDATE", Delimited(Ref("SetClauseSegment")), ) class InsertRowAliasSegment(BaseSegment): """A row alias segment (used in `INSERT` statements). https://dev.mysql.com/doc/refman/8.0/en/insert.html """ type = "insert_row_alias" match_grammar = Sequence( "AS", Ref("SingleIdentifierGrammar"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. https://dev.mysql.com/doc/refman/8.0/en/insert.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( "LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", optional=True, ), Ref.keyword("IGNORE", optional=True), Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), Sequence( "PARTITION", Bracketed( Ref("SingleIdentifierListSegment"), ), optional=True, ), Ref("BracketedColumnReferenceListGrammar", optional=True), AnySetOf( OneOf( Ref("ValuesClauseSegment"), Ref("SetClauseListSegment"), Sequence( OneOf( Ref("SelectableGrammar"), Sequence( "TABLE", Ref("TableReferenceSegment"), ), ), ), optional=False, ), Ref("InsertRowAliasSegment", optional=True), Ref("UpsertClauseListSegment", optional=True), ), ) class DeleteTargetTableSegment(BaseSegment): """A target table used in `DELETE` statement. https://dev.mysql.com/doc/refman/8.0/en/delete.html """ type = "delete_target_table" match_grammar = Sequence( Ref("TableReferenceSegment"), Sequence(Ref("DotSegment"), Ref("StarSegment"), optional=True), ) class DeleteUsingClauseSegment(BaseSegment): """A `USING` clause froma `DELETE` Statement`.""" type = "using_clause" match_grammar = Sequence( "USING", Delimited( Ref("FromExpressionSegment"), ), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. 
https://dev.mysql.com/doc/refman/8.0/en/delete.html """ type = "delete_statement" match_grammar = Sequence( "DELETE", Ref.keyword("LOW_PRIORITY", optional=True), Ref.keyword("QUICK", optional=True), Ref.keyword("IGNORE", optional=True), OneOf( Sequence( "FROM", Delimited( Ref("DeleteTargetTableSegment"), terminators=["USING"], ), Ref("DeleteUsingClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Delimited( Ref("DeleteTargetTableSegment"), terminators=["FROM"], ), Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Ref("FromClauseSegment"), Ref("SelectPartitionClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar: Matchable = OneOf( ansi.ColumnConstraintSegment.match_grammar, Sequence( "CHARACTER", "SET", OneOf( Ref("SingleIdentifierGrammar"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedIdentifierSegment"), ), ), Ref("CollateGrammar"), Sequence( Sequence("GENERATED", "ALWAYS", optional=True), "AS", Bracketed(Ref("ExpressionSegment")), OneOf("STORED", "VIRTUAL", optional=True), ), Sequence("SRID", Ref("NumericLiteralSegment")), ) class IndexTypeGrammar(BaseSegment): """index_type in table_constraint.""" type = "index_type" match_grammar = Sequence( "USING", OneOf("BTREE", "HASH"), ) class IndexOptionsSegment(BaseSegment): """index_option in `CREATE TABLE` and `ALTER TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/create-table.html https://dev.mysql.com/doc/refman/8.0/en/alter-table.html """ type = "index_option" match_grammar = AnySetOf( Sequence( "KEY_BLOCK_SIZE", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), ), Ref("IndexTypeGrammar"), Sequence("WITH", "PARSER", Ref("ObjectReferenceSegment")), Ref("CommentClauseSegment"), OneOf("VISIBLE", "INVISIBLE"), # (SECONDARY_)ENGINE_ATTRIBUTE supported in `CREATE TABLE` Sequence( "ENGINE_ATTRIBUTE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "SECONDARY_ENGINE_ATTRIBUTE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE, ALTER TABLE. https://dev.mysql.com/doc/refman/8.0/en/create-table.html https://dev.mysql.com/doc/refman/8.0/en/alter-table.html """ type = "table_constraint" # e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1) match_grammar = OneOf( Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment", optional=True), optional=True, ), OneOf( # UNIQUE [INDEX | KEY] [index_name] [index_type] (key_part,...) # [index_option] ... Sequence( "UNIQUE", OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment", optional=True), Ref("IndexTypeGrammar", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # PRIMARY KEY [index_type] (key_part,...) [index_option] ... Sequence( Ref("PrimaryKeyGrammar"), Ref("IndexTypeGrammar", optional=True), # Columns making up PRIMARY KEY constraint Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # FOREIGN KEY [index_name] (col_name,...) reference_definition Sequence( # REFERENCES reftable [ ( refcolumn [, ... 
] ) ] Ref("ForeignKeyGrammar"), Ref("IndexReferenceSegment", optional=True), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), "REFERENCES", Ref("ColumnReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for [MATCH FULL/PARTIAL/SIMPLE] ? # Later add support for [ ON DELETE/UPDATE action ] ? AnyNumberOf( Sequence( "ON", OneOf("DELETE", "UPDATE"), OneOf( "RESTRICT", "CASCADE", Sequence("SET", "NULL"), Sequence("NO", "ACTION"), Sequence("SET", "DEFAULT"), ), optional=True, ), ), ), # CHECK (expr) [[NOT] ENFORCED] Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), OneOf( "ENFORCED", Sequence("NOT", "ENFORCED"), optional=True, ), ), ), ), # {INDEX | KEY} [index_name] [index_type] (key_part,...) [index_option] ... Sequence( OneOf("INDEX", "KEY"), Ref("IndexReferenceSegment", optional=True), Ref("IndexTypeGrammar", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # {FULLTEXT | SPATIAL} [INDEX | KEY] [index_name] (key_part,...) # [index_option] ... Sequence( OneOf("FULLTEXT", "SPATIAL"), OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. https://dev.mysql.com/doc/refman/8.0/en/create-index.html https://mariadb.com/kb/en/create-index/ """ match_grammar = Sequence( "CREATE", OneOf("UNIQUE", "FULLTEXT", "SPATIAL", optional=True), "INDEX", Ref("IndexReferenceSegment"), Ref("IndexTypeGrammar", optional=True), "ON", Ref("TableReferenceSegment"), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), AnySetOf( Sequence( "ALGORITHM", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "INPLACE", "COPY", "NOCOPY", "INSTANT"), ), Sequence( "LOCK", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "NONE", "SHARED", "EXCLUSIVE"), ), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. 
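For example (illustrative): `INTERVAL 1 DAY`, as in `DATE_ADD(order_date, INTERVAL 1 DAY)`.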
https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_adddate """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", OneOf( Ref("DatetimeUnitSegment"), Sequence(Ref("ExpressionSegment"), Ref("DatetimeUnitSegment")), ), ) mysql_dialect.add( OutputParameterSegment=StringParser( "OUT", SymbolSegment, type="parameter_direction" ), InputParameterSegment=StringParser("IN", SymbolSegment, type="parameter_direction"), InputOutputParameterSegment=StringParser( "INOUT", SymbolSegment, type="parameter_direction" ), ProcedureParameterGrammar=OneOf( Sequence( OneOf( Ref("OutputParameterSegment"), Ref("InputParameterSegment"), Ref("InputOutputParameterSegment"), optional=True, ), Ref("ParameterNameSegment", optional=True), Ref("DatatypeSegment"), ), Ref("DatatypeSegment"), ), LocalVariableNameSegment=RegexParser( r"`?[a-zA-Z0-9_$]*`?", CodeSegment, type="variable", ), SessionVariableNameSegment=RegexParser( r"[@][a-zA-Z0-9_$]*", CodeSegment, type="variable", ), WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), VariableAssignmentSegment=Sequence( Ref("SessionVariableNameSegment"), Ref("WalrusOperatorSegment"), Ref("BaseExpressionElementGrammar"), ), ColumnPathOperatorSegment=StringParser( "->", SymbolSegment, type="column_path_operator" ), InlinePathOperatorSegment=StringParser( "->>", SymbolSegment, type="column_path_operator" ), BooleanDynamicSystemVariablesGrammar=OneOf( # Boolean dynamic system variables can be set to ON/OFF, TRUE/FALSE, or 0/1: # https://dev.mysql.com/doc/refman/8.0/en/dynamic-system-variables.html # This allows us to match ON/OFF & TRUE/FALSE as keywords and therefore apply # the correct capitalisation policy. OneOf("ON", "OFF"), OneOf("TRUE", "FALSE"), ), # (key_part, ...) # key_part: {col_name [(length)] | (expr)} [ASC | DESC] # https://dev.mysql.com/doc/refman/8.0/en/create-table.html # https://dev.mysql.com/doc/refman/8.0/en/alter-table.html # https://dev.mysql.com/doc/refman/8.0/en/create-index.html BracketedKeyPartListGrammar=Bracketed( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Sequence( Ref("ColumnReferenceSegment"), Bracketed(Ref("NumericLiteralSegment")), ), Bracketed(Ref("ExpressionSegment")), ), OneOf("ASC", "DESC", optional=True), ), ), ), ) mysql_dialect.insert_lexer_matchers( [ RegexLexer( "at_sign", r"@@?[a-zA-Z0-9_$]*(\.[a-zA-Z0-9_$]+)?", CodeSegment, segment_kwargs={"type": "at_sign_literal", "trim_chars": ("@",)}, ), ], before="word", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("double_ampersand", "&&", CodeSegment), ], before="ampersand", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("double_vertical_bar", "||", CodeSegment), ], before="vertical_bar", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("walrus_operator", ":=", CodeSegment), ], before="equals", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("inline_path_operator", "->>", SymbolSegment), StringLexer("column_path_operator", "->", SymbolSegment), ], before="greater_than", ) class RoleReferenceSegment(ansi.RoleReferenceSegment): """A reference to an account, role, or user. 
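For example (illustrative): `'jeffrey'@'localhost'`, `admin`, or `CURRENT_USER`.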
https://dev.mysql.com/doc/refman/8.0/en/account-names.html https://dev.mysql.com/doc/refman/8.0/en/role-names.html """ match_grammar: Matchable = OneOf( Sequence( OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedLiteralSegment"), ), Sequence( Ref("AtSignLiteralSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedLiteralSegment"), ), optional=True, allow_gaps=False, ), allow_gaps=True, ), "CURRENT_USER", ) class DeclareStatement(BaseSegment): """DECLARE statement. https://dev.mysql.com/doc/refman/8.0/en/declare-local-variable.html https://dev.mysql.com/doc/refman/8.0/en/declare-handler.html https://dev.mysql.com/doc/refman/8.0/en/declare-condition.html https://dev.mysql.com/doc/refman/8.0/en/declare-cursor.html """ type = "declare_statement" match_grammar = OneOf( Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CURSOR", "FOR", Ref("StatementSegment"), ), Sequence( "DECLARE", OneOf("CONTINUE", "EXIT", "UNDO"), "HANDLER", "FOR", OneOf( "SQLEXCEPTION", "SQLWARNING", Sequence("NOT", "FOUND"), Sequence( "SQLSTATE", Ref.keyword("VALUE", optional=True), Ref("QuotedLiteralSegment"), ), OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("NakedIdentifierSegment"), ), ), Sequence(Ref("StatementSegment")), ), Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CONDITION", "FOR", OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ), Sequence( "DECLARE", Ref("LocalVariableNameSegment"), Ref("DatatypeSegment"), Sequence( Ref.keyword("DEFAULT"), OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("FunctionSegment"), ), optional=True, ), ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("DelimiterStatement"), Ref("CreateProcedureStatementSegment"), Ref("DeclareStatement"), Ref("SetTransactionStatementSegment"), Ref("SetAssignmentStatementSegment"), Ref("IfExpressionStatement"), Ref("WhileStatementSegment"), Ref("IterateStatementSegment"), Ref("RepeatStatementSegment"), Ref("LoopStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("PrepareSegment"), Ref("ExecuteSegment"), Ref("DeallocateSegment"), Ref("GetDiagnosticsSegment"), Ref("ResignalSegment"), Ref("CursorOpenCloseSegment"), Ref("CursorFetchSegment"), Ref("DropProcedureStatementSegment"), Ref("AlterTableStatementSegment"), Ref("AlterViewStatementSegment"), Ref("CreateViewStatementSegment"), Ref("RenameTableStatementSegment"), Ref("ResetMasterStatementSegment"), Ref("PurgeBinaryLogsStatementSegment"), Ref("HelpStatementSegment"), Ref("CheckTableStatementSegment"), Ref("ChecksumTableStatementSegment"), Ref("AnalyzeTableStatementSegment"), Ref("RepairTableStatementSegment"), Ref("OptimizeTableStatementSegment"), Ref("UpsertClauseListSegment"), Ref("InsertRowAliasSegment"), Ref("FlushStatementSegment"), Ref("LoadDataSegment"), Ref("ReplaceSegment"), Ref("AlterDatabaseStatementSegment"), Ref("ReturnStatementSegment"), Ref("SetNamesStatementSegment"), Ref("CreateEventStatementSegment"), Ref("AlterEventStatementSegment"), Ref("DropEventStatementSegment"), ], remove=[ # handle CREATE SCHEMA in CreateDatabaseStatementSegment Ref("CreateSchemaStatementSegment"), ], ) class DelimiterStatement(BaseSegment): """DELIMITER statement.""" type = "delimiter_statement" match_grammar = 
Ref.keyword("DELIMITER") class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://dev.mysql.com/doc/refman/8.0/en/create-procedure.html """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListGrammar", optional=True), Ref("CommentClauseSegment", optional=True), Ref("CharacteristicStatement", optional=True), Ref("FunctionDefinitionGrammar"), ) class FunctionDefinitionGrammar(BaseSegment): """This is the body of a `CREATE FUNCTION` statement.""" type = "function_definition" match_grammar = Ref("TransactionStatementSegment") class CharacteristicStatement(BaseSegment): """A Characteristics statement for functions/procedures.""" type = "characteristic_statement" match_grammar = Sequence( OneOf("DETERMINISTIC", Sequence("NOT", "DETERMINISTIC")), Sequence("LANGUAGE", "SQL", optional=True), OneOf( Sequence("CONTAINS", "SQL", optional=True), Sequence("NO", "SQL", optional=True), Sequence("READS", "SQL", "DATA", optional=True), Sequence("MODIFIES", "SQL", "DATA", optional=True), optional=True, ), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. https://dev.mysql.com/doc/refman/8.0/en/create-procedure.html """ type = "create_function_statement" match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), Sequence( "RETURNS", Ref("DatatypeSegment"), ), Ref("CommentClauseSegment", optional=True), Ref("CharacteristicStatement"), Ref("FunctionDefinitionGrammar"), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE .. ALTER COLUMN` statement. Overriding ANSI to add `CHANGE COLUMN` and `DROP COLUMN` support. 
https://dev.mysql.com/doc/refman/8.0/en/alter-table.html https://mariadb.com/kb/en/alter-table/ """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), # Add column Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("ColumnDefinitionSegment"), OneOf( "FIRST", Sequence("AFTER", Ref("ColumnReferenceSegment")), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), Sequence( "MODIFY", Ref.keyword("COLUMN", optional=True), Ref("ColumnDefinitionSegment"), OneOf( "FIRST", Sequence("AFTER", Ref("ColumnReferenceSegment")), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), # Add constraint Sequence( "ADD", Ref("TableConstraintSegment"), ), # Change column Sequence( "CHANGE", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), Ref("ColumnDefinitionSegment"), OneOf( Sequence( OneOf( "FIRST", Sequence("AFTER", Ref("ColumnReferenceSegment")), ), ), optional=True, ), ), # Drop column Sequence( "DROP", OneOf( Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), ), Sequence( OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment"), ), Ref("PrimaryKeyGrammar"), Sequence( Ref("ForeignKeyGrammar"), Ref("ObjectReferenceSegment"), ), Sequence( OneOf("CHECK", "CONSTRAINT"), Ref("ObjectReferenceSegment"), ), ), ), # Alter constraint Sequence( "ALTER", OneOf("CHECK", "CONSTRAINT"), Ref("ObjectReferenceSegment"), OneOf( "ENFORCED", Sequence("NOT", "ENFORCED"), ), ), # Alter index Sequence( "ALTER", "INDEX", Ref("IndexReferenceSegment"), OneOf("VISIBLE", "INVISIBLE"), ), # Rename Sequence( "RENAME", OneOf( # Rename table Sequence( OneOf("AS", "TO", optional=True), Ref("TableReferenceSegment"), ), # Rename index Sequence( OneOf("INDEX", "KEY"), Ref("IndexReferenceSegment"), "TO", Ref("IndexReferenceSegment"), ), # Rename column Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), ), ), # Enable/Disable updating nonunique indexes Sequence( OneOf("DISABLE", "ENABLE"), "KEYS", ), # CONVERT TO CHARACTER SET charset_name [COLLATE collation_name] Sequence("CONVERT", "TO", AnyNumberOf(Ref("AlterOptionSegment"))), ), optional=True, ), Sequence( OneOf( "ADD", "DROP", "DISCARD", "IMPORT", "TRUNCATE", "COALESCE", "REORGANIZE", "EXCHANGE", "ANALYZE", "CHECK", "OPTIMIZE", "REBUILD", "REPAIR", "REMOVE", ), OneOf("PARTITION", "PARTITIONING"), OneOf( Ref("SingleIdentifierGrammar"), Ref("NumericLiteralSegment"), "ALL", Bracketed(Delimited(Ref("ObjectReferenceSegment"))), ), Ref.keyword("TABLESPACE", optional=True), Sequence( "WITH", "TABLE", Ref("TableReference"), OneOf("WITH", "WITHOUT"), "VALIDATION", optional=True, ), Sequence( "INTO", Bracketed(Delimited(Ref("ObjectReferenceSegment"))), optional=True, ), optional=True, ), ) class WithCheckOptionSegment(BaseSegment): """WITH [CASCADED | LOCAL] CHECK OPTION for CREATE/ALTER View Syntax. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-view.html """ type = "with_check_options" match_grammar: Matchable = Sequence( "WITH", OneOf("CASCADED", "LOCAL", optional=True), "CHECK", "OPTION", ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW .. AS ..` statement. 
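For example (illustrative): `ALTER VIEW v1 AS SELECT a FROM t1`.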
As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", Sequence( "ALGORITHM", Ref("EqualsSegment"), OneOf("UNDEFINED", "MERGE", "TEMPTABLE"), optional=True, ), Ref("DefinerSegment", optional=True), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithCheckOptionSegment", optional=True), ) class CreateViewStatementSegment(BaseSegment): """An `CREATE VIEW .. AS ..` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/create-view.html """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence( "ALGORITHM", Ref("EqualsSegment"), OneOf("UNDEFINED", "MERGE", "TEMPTABLE"), optional=True, ), Ref("DefinerSegment", optional=True), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithCheckOptionSegment", optional=True), ) class ProcedureParameterListGrammar(BaseSegment): """The parameters for a procedure ie. `(in/out/inout name datatype)`.""" type = "procedure_parameter_list" match_grammar = Bracketed( Delimited( Ref("ProcedureParameterGrammar"), optional=True, ), ) class SetAssignmentStatementSegment(BaseSegment): """A `SET` statement. https://dev.mysql.com/doc/refman/9.3/en/set-variable.html """ type = "set_statement" match_grammar = Sequence( "SET", Delimited( Sequence( Sequence(OneOf("NEW", "OLD"), Ref("DotSegment"), optional=True), OneOf( "GLOBAL", "PERSIST", "PERSIST_ONLY", "SESSION", "LOCAL", optional=True, ), OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("SystemVariableSegment"), ), OneOf( Ref("EqualsSegment"), Ref("WalrusOperatorSegment"), ), AnyNumberOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment"), Ref("SessionVariableNameSegment"), Ref("SystemVariableSegment"), # Match boolean keywords before local variables. Ref("BooleanDynamicSystemVariablesGrammar"), Ref("LocalVariableNameSegment"), Ref("FunctionSegment"), Ref("ArithmeticBinaryOperatorGrammar"), Ref("ExpressionSegment"), ), ), ), ) class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement. https://dev.mysql.com/doc/refman/8.0/en/commit.html https://dev.mysql.com/doc/refman/8.0/en/begin-end.html """ type = "transaction_statement" match_grammar = OneOf( Sequence("START", "TRANSACTION"), Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), Sequence( "BEGIN", Ref.keyword("WORK", optional=True), Ref("StatementSegment"), ), ), Sequence( "LEAVE", Ref("SingleIdentifierGrammar", optional=True), ), Sequence( "COMMIT", Ref.keyword("WORK", optional=True), Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True), ), Sequence( "ROLLBACK", Ref.keyword("WORK", optional=True), ), Sequence( "END", Ref("SingleIdentifierGrammar", optional=True), ), ) class IfExpressionStatement(BaseSegment): """IF-THEN-ELSE-ELSEIF-END IF statement. 
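
    For example (illustrative; each clause of the compound statement is
    matched separately by the grammar below):

        IF x > 0 THEN SELECT 1; ELSEIF x < 0 THEN SELECT 2;
        ELSE SELECT 3; END IF;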
https://dev.mysql.com/doc/refman/8.0/en/if.html """ type = "if_then_statement" match_grammar = AnyNumberOf( Sequence( "IF", Ref("ExpressionSegment"), "THEN", Ref("StatementSegment"), ), Sequence( "ELSEIF", Ref("ExpressionSegment"), "THEN", Ref("StatementSegment"), ), Sequence("ELSE", Ref("StatementSegment"), optional=True), Sequence("END", "IF"), ) class DefinerSegment(BaseSegment): """This is the body of a `CREATE FUNCTION` and `CREATE TRIGGER` statements.""" type = "definer_segment" match_grammar = Sequence( "DEFINER", Ref("EqualsSegment"), Ref("RoleReferenceSegment"), ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar = Sequence( OneOf("DISTINCT", "ALL", "DISTINCTROW", optional=True), Ref.keyword("HIGH_PRIORITY", optional=True), Ref.keyword("STRAIGHT_JOIN", optional=True), Ref.keyword("SQL_SMALL_RESULT", optional=True), Ref.keyword("SQL_BIG_RESULT", optional=True), Ref.keyword("SQL_BUFFER_RESULT", optional=True), Ref.keyword("SQL_CACHE", optional=True), Ref.keyword("SQL_NO_CACHE", optional=True), Ref.keyword("SQL_CALC_FOUND_ROWS", optional=True), optional=True, ) class IntoClauseSegment(BaseSegment): """This is an `INTO` clause for assigning variables in a select statement. https://dev.mysql.com/doc/refman/5.7/en/load-data.html https://dev.mysql.com/doc/refman/5.7/en/select-into.html """ type = "into_clause" match_grammar = Sequence( "INTO", OneOf( Delimited( AnyNumberOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), Sequence("DUMPFILE", Ref("QuotedLiteralSegment")), Sequence( "OUTFILE", Ref("QuotedLiteralSegment"), Sequence( "CHARACTER", "SET", Ref("NakedIdentifierSegment"), optional=True ), Sequence( OneOf("FIELDS", "COLUMNS"), Sequence( "TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( Ref.keyword("OPTIONALLY", optional=True), "ENCLOSED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "LINES", Sequence( "STARTING", "BY", Ref("QuotedLiteralSegment"), optional=True ), Sequence( "TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), ), ), ), parse_mode=ParseMode.GREEDY_ONCE_STARTED, terminators=[Ref("SelectClauseTerminatorGrammar")], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. 
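
    The MySQL version additionally allows an `INTO` clause, index hints,
    a `PARTITION` selection and locking (`FOR UPDATE`/`FOR SHARE`) clauses
    within the unordered portion of the statement.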
""" type = "select_statement" match_grammar = ( ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("IntoClauseSegment", optional=True)], before=Ref("FromClauseSegment", optional=True), ) .copy(insert=[Ref("ForClauseSegment", optional=True)]) .copy( insert=[Ref("IndexHintClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), ) .copy( insert=[Ref("SelectPartitionClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), terminators=[ Ref("IntoClauseSegment"), Ref("ForClauseSegment"), Ref("IndexHintClauseSegment"), Ref("WithCheckOptionSegment"), Ref("SelectPartitionClauseSegment"), Ref("UpsertClauseListSegment"), ], ) ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( terminators=[Ref("IntoKeywordSegment")], ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ # Inherit most of the parse grammar from the original. match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), Ref("IntoClauseSegment", optional=True), ], terminators=[ Ref("SetOperatorSegment"), Ref("UpsertClauseListSegment"), Ref("WithCheckOptionSegment"), ], # Overwrite the terminators, because we want to remove some from the # expression above. replace_terminators=True, ) class ForClauseSegment(BaseSegment): """This is the body of a `FOR` clause.""" type = "for_clause" match_grammar = OneOf( Sequence( Sequence( "FOR", OneOf("UPDATE", "SHARE"), ), Sequence("OF", Delimited(Ref("NakedIdentifierSegment")), optional=True), OneOf("NOWAIT", Sequence("SKIP", "LOCKED"), optional=True), ), Sequence("LOCK", "IN", "SHARE", "MODE"), optional=True, ) class IndexHintClauseSegment(BaseSegment): """This is the body of an index hint clause.""" type = "index_hint_clause" match_grammar = Sequence( OneOf("USE", "IGNORE", "FORCE"), OneOf("INDEX", "KEY"), Sequence( "FOR", OneOf( "JOIN", Sequence("ORDER", "BY"), Sequence("GROUP", "BY"), optional=True ), optional=True, ), Bracketed(Ref("ObjectReferenceSegment")), Ref("JoinOnConditionSegment", optional=True), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://dev.mysql.com/doc/refman/8.0/en/call.html """ type = "call_statement" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class SelectPartitionClauseSegment(BaseSegment): """This is the body of a partition clause.""" type = "partition_clause" match_grammar = Sequence( "PARTITION", Bracketed(Delimited(Ref("ObjectReferenceSegment"))), ) class WhileStatementSegment(BaseSegment): """A `WHILE-DO-END WHILE` statement. https://dev.mysql.com/doc/refman/8.0/en/while.html """ type = "while_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), Sequence( "WHILE", Ref("ExpressionSegment"), "DO", AnyNumberOf( Ref("StatementSegment"), ), ), ), Sequence( "END", "WHILE", Ref("SingleIdentifierGrammar", optional=True), ), ) class PrepareSegment(BaseSegment): """This is the body of a `PREPARE` statement. 
https://dev.mysql.com/doc/refman/8.0/en/prepare.html """ type = "prepare_segment" match_grammar = Sequence( "PREPARE", Ref("NakedIdentifierSegment"), "FROM", OneOf( Ref("QuotedLiteralSegment"), Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), ) class GetDiagnosticsSegment(BaseSegment): """This is the body of a `GET DIAGNOSTICS` statement. https://dev.mysql.com/doc/refman/8.0/en/get-diagnostics.html """ type = "get_diagnostics_segment" match_grammar = Sequence( "GET", Sequence("CURRENT", "STACKED", optional=True), "DIAGNOSTICS", Delimited( Sequence( OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment") ), Ref("EqualsSegment"), OneOf("NUMBER", "ROW_COUNT"), ), optional=True, ), "CONDITION", OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("NumericLiteralSegment"), ), Delimited( Sequence( OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment") ), Ref("EqualsSegment"), OneOf( "CLASS_ORIGIN", "SUBCLASS_ORIGIN", "RETURNED_SQLSTATE", "MESSAGE_TEXT", "MYSQL_ERRNO", "CONSTRAINT_CATALOG", "CONSTRAINT_SCHEMA", "CONSTRAINT_NAME", "CATALOG_NAME", "SCHEMA_NAME", "TABLE_NAME", "COLUMN_NAME", "CURSOR_NAME", ), ), optional=True, ), ) class LoopStatementSegment(BaseSegment): """A `LOOP` statement. https://dev.mysql.com/doc/refman/8.0/en/loop.html """ type = "loop_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), "LOOP", Delimited( Ref("StatementSegment"), ), ), Sequence( "END", "LOOP", Ref("SingleIdentifierGrammar", optional=True), ), ) class CursorOpenCloseSegment(BaseSegment): """This is a CLOSE or Open statement. https://dev.mysql.com/doc/refman/8.0/en/close.html https://dev.mysql.com/doc/refman/8.0/en/open.html """ type = "cursor_open_close_segment" match_grammar = Sequence( OneOf("CLOSE", "OPEN"), OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), ) class IterateStatementSegment(BaseSegment): """A `ITERATE` statement. https://dev.mysql.com/doc/refman/8.0/en/iterate.html """ type = "iterate_statement" match_grammar = Sequence( "ITERATE", Ref("SingleIdentifierGrammar"), ) class ExecuteSegment(BaseSegment): """This is the body of a `EXECUTE` statement. https://dev.mysql.com/doc/refman/8.0/en/execute.html """ type = "execute_segment" match_grammar = Sequence( "EXECUTE", Ref("NakedIdentifierSegment"), Sequence("USING", Delimited(Ref("SessionVariableNameSegment")), optional=True), ) class RepeatStatementSegment(BaseSegment): """A `REPEAT-UNTIL` statement. https://dev.mysql.com/doc/refman/8.0/en/repeat.html """ type = "repeat_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), "REPEAT", AnyNumberOf( Ref("StatementSegment"), ), ), Sequence( "UNTIL", Ref("ExpressionSegment"), Sequence( "END", "REPEAT", Ref("SingleIdentifierGrammar", optional=True), ), ), ) class DeallocateSegment(BaseSegment): """This is the body of a `DEALLOCATE/DROP` statement. https://dev.mysql.com/doc/refman/8.0/en/deallocate-prepare.html """ type = "deallocate_segment" match_grammar = Sequence( Sequence(OneOf("DEALLOCATE", "DROP"), "PREPARE"), Ref("NakedIdentifierSegment"), ) class ResignalSegment(BaseSegment): """This is the body of a `RESIGNAL` statement. 
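
    Note that this grammar matches `SIGNAL` as well as `RESIGNAL`, e.g.
    (illustrative):

        SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'Custom error';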
https://dev.mysql.com/doc/refman/8.0/en/resignal.html """ type = "resignal_segment" match_grammar = Sequence( OneOf("SIGNAL", "RESIGNAL"), OneOf( Sequence( "SQLSTATE", Ref.keyword("VALUE", optional=True), Ref("QuotedLiteralSegment"), ), Ref("NakedIdentifierSegment"), optional=True, ), Sequence( "SET", Delimited( Sequence( OneOf( "CLASS_ORIGIN", "SUBCLASS_ORIGIN", "RETURNED_SQLSTATE", "MESSAGE_TEXT", "MYSQL_ERRNO", "CONSTRAINT_CATALOG", "CONSTRAINT_SCHEMA", "CONSTRAINT_NAME", "CATALOG_NAME", "SCHEMA_NAME", "TABLE_NAME", "COLUMN_NAME", "CURSOR_NAME", ), Ref("EqualsSegment"), OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ) class CursorFetchSegment(BaseSegment): """This is a FETCH statement. https://dev.mysql.com/doc/refman/8.0/en/fetch.html """ type = "cursor_fetch_segment" match_grammar = Sequence( "FETCH", Sequence(Ref.keyword("NEXT", optional=True), "FROM", optional=True), Ref("NakedIdentifierSegment"), "INTO", Delimited( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. https://dev.mysql.com/doc/refman/8.0/en/drop-index.html """ # DROP INDEX ON # [ALGORITHM [=] {DEFAULT | INPLACE | COPY} | LOCK [=] {DEFAULT | NONE | SHARED | # EXCLUSIVE}] match_grammar = Sequence( "DROP", "INDEX", Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence( "ALGORITHM", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "INPLACE", "COPY"), ), Sequence( "LOCK", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "NONE", "SHARED", "EXCLUSIVE"), ), optional=True, ), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP` statement that addresses stored procedures and functions. https://dev.mysql.com/doc/refman/8.0/en/drop-procedure.html """ type = "drop_procedure_statement" # DROP {PROCEDURE | FUNCTION} [IF EXISTS] sp_name match_grammar = Sequence( "DROP", OneOf("PROCEDURE", "FUNCTION"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP` statement that addresses loadable functions. https://dev.mysql.com/doc/refman/8.0/en/drop-function-loadable.html """ type = "drop_function_statement" # DROP FUNCTION [IF EXISTS] function_name match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class RenameTableStatementSegment(BaseSegment): """A `RENAME TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/rename-table.html """ type = "rename_table_statement" match_grammar = Sequence( "RENAME", "TABLE", Delimited( Sequence( Ref("TableReferenceSegment"), "TO", Ref("TableReferenceSegment"), ), ), ) class ResetMasterStatementSegment(BaseSegment): """A `RESET MASTER` statement. https://dev.mysql.com/doc/refman/8.0/en/reset-master.html """ type = "reset_master_statement" match_grammar = Sequence( "RESET", "MASTER", Sequence("TO", Ref("NumericLiteralSegment"), optional=True), ) class PurgeBinaryLogsStatementSegment(BaseSegment): """A `PURGE BINARY LOGS` statement. https://dev.mysql.com/doc/refman/8.0/en/purge-binary-logs.html """ type = "purge_binary_logs_statement" match_grammar = Sequence( "PURGE", OneOf( "BINARY", "MASTER", ), "LOGS", OneOf( Sequence( "TO", Ref("QuotedLiteralSegment"), ), Sequence( "BEFORE", OneOf( Ref("ExpressionSegment"), ), ), ), ) class HelpStatementSegment(BaseSegment): """A `HELP` statement. 
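
    For example (illustrative):

        HELP 'contents';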
https://dev.mysql.com/doc/refman/8.0/en/help.html """ type = "help_statement" match_grammar = Sequence( "HELP", Ref("QuotedLiteralSegment"), ) class CheckTableStatementSegment(BaseSegment): """A `CHECK TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/check-table.html """ type = "check_table_statement" match_grammar = Sequence( "CHECK", "TABLE", Delimited( Ref("TableReferenceSegment"), ), AnyNumberOf( Sequence("FOR", "UPGRADE"), "QUICK", "FAST", "MEDIUM", "EXTENDED", "CHANGED", min_times=1, ), ) class ChecksumTableStatementSegment(BaseSegment): """A `CHECKSUM TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/checksum-table.html """ type = "checksum_table_statement" match_grammar = Sequence( "CHECKSUM", "TABLE", Delimited( Ref("TableReferenceSegment"), ), OneOf( "QUICK", "EXTENDED", ), ) class AnalyzeTableStatementSegment(BaseSegment): """An `ANALYZE TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/analyze-table.html """ type = "analyze_table_statement" match_grammar = Sequence( "ANALYZE", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", OneOf( Sequence( Delimited( Ref("TableReferenceSegment"), ), ), Sequence( Ref("TableReferenceSegment"), "UPDATE", "HISTOGRAM", "ON", Delimited( Ref("ColumnReferenceSegment"), ), Sequence( "WITH", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), ), Sequence( Ref("TableReferenceSegment"), "DROP", "HISTOGRAM", "ON", Delimited( Ref("ColumnReferenceSegment"), ), ), ), ) class RepairTableStatementSegment(BaseSegment): """A `REPAIR TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/repair-table.html """ type = "repair_table_statement" match_grammar = Sequence( "REPAIR", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", Delimited( Ref("TableReferenceSegment"), ), AnyNumberOf( "QUICK", "EXTENDED", "USE_FRM", ), ) class OptimizeTableStatementSegment(BaseSegment): """An `OPTIMIZE TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/optimize-table.html """ type = "optimize_table_statement" match_grammar = Sequence( "OPTIMIZE", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", Delimited( Ref("TableReferenceSegment"), ), ) class UpdateStatementSegment(BaseSegment): """An `Update` statement. As per https://dev.mysql.com/doc/refman/8.0/en/update.html """ type = "update_statement" match_grammar: Matchable = Sequence( "UPDATE", Ref.keyword("LOW_PRIORITY", optional=True), Ref.keyword("IGNORE", optional=True), Indent, Delimited(Ref("TableReferenceSegment"), Ref("FromExpressionSegment")), Dedent, Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class FlushStatementSegment(BaseSegment): """A `Flush` statement. 
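
    For example (illustrative):

        FLUSH LOCAL BINARY LOGS, ERROR LOGS;
        FLUSH TABLES t1, t2 WITH READ LOCK;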
As per https://dev.mysql.com/doc/refman/8.0/en/flush.html """ type = "flush_statement" match_grammar: Matchable = Sequence( "FLUSH", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), OneOf( Delimited( Sequence("BINARY", "LOGS"), Sequence("ENGINE", "LOGS"), Sequence("ERROR", "LOGS"), Sequence("GENERAL", "LOGS"), "HOSTS", "LOGS", "PRIVILEGES", "OPTIMIZER_COSTS", Sequence( "RELAY", "LOGS", Sequence( "FOR", "CHANNEL", Ref("ObjectReferenceSegment"), optional=True ), ), Sequence("SLOW", "LOGS"), "STATUS", "USER_RESOURCES", ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["WITH"]), optional=True, ), Sequence("WITH", "READ", "LOCK", optional=True), ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["FOR"]), optional=False, ), Sequence("FOR", "EXPORT", optional=True), ), ), ) class LoadDataSegment(BaseSegment): """A `LOAD DATA` statement. As per https://dev.mysql.com/doc/refman/8.0/en/load-data.html """ type = "load_data_statement" match_grammar = Sequence( "LOAD", "DATA", OneOf("LOW_PRIORITY", "CONCURRENT", optional=True), Sequence("LOCAL", optional=True), "INFILE", Ref("QuotedLiteralSegment"), OneOf("REPLACE", "IGNORE", optional=True), "INTO", "TABLE", Ref("TableReferenceSegment"), Ref("SelectPartitionClauseSegment", optional=True), Sequence("CHARACTER", "SET", Ref("NakedIdentifierSegment"), optional=True), Sequence( OneOf("FIELDS", "COLUMNS"), Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True), Sequence( Sequence("OPTIONALLY", optional=True), "ENCLOSED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence("ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True), optional=True, ), Sequence( "LINES", Sequence("STARTING", "BY", Ref("QuotedLiteralSegment"), optional=True), Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True), optional=True, ), Sequence( "IGNORE", Ref("NumericLiteralSegment"), OneOf("LINES", "ROWS"), optional=True, ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Sequence( "SET", Ref("Expression_B_Grammar"), optional=True, ), ) class ReplaceSegment(BaseSegment): """A `REPLACE` statement. As per https://dev.mysql.com/doc/refman/8.0/en/replace.html """ type = "replace_statement" match_grammar = Sequence( "REPLACE", OneOf("LOW_PRIORITY", "DELAYED", optional=True), Sequence("INTO", optional=True), Ref("TableReferenceSegment"), Ref("SelectPartitionClauseSegment", optional=True), OneOf( Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment"), ), Ref("SetClauseListSegment"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("SelectableGrammar"), Sequence( "TABLE", Ref("TableReferenceSegment"), ), ), ), ), ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. 
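
    For example (illustrative):

        CREATE TRIGGER trg BEFORE INSERT ON t1
        FOR EACH ROW SET @cnt = @cnt + 1;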
As Specified in https://dev.mysql.com/doc/refman/8.0/en/create-trigger.html """ # "DEFINED = user", optional match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "TRIGGER", Ref("IfNotExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER"), OneOf("INSERT", "UPDATE", "DELETE"), "ON", Ref("TableReferenceSegment"), Sequence("FOR", "EACH", "ROW"), Sequence( OneOf("FOLLOWS", "PRECEDES"), Ref("SingleIdentifierGrammar"), optional=True ), OneOf( Ref("StatementSegment"), Sequence("BEGIN", Ref("StatementSegment"), "END"), ), ) class DropTriggerStatementSegment(ansi.DropTriggerStatementSegment): """A `DROP TRIGGER` Statement. As per https://dev.mysql.com/doc/refman/8.0/en/drop-trigger.html """ match_grammar = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html """ match_grammar: Matchable = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), AnyNumberOf(Ref("CreateOptionSegment")), ) class CreateOptionSegment(BaseSegment): """A database characteristic. As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html """ type = "create_option_segment" match_grammar = Sequence( Ref.keyword("DEFAULT", optional=True), OneOf( Sequence( "CHARACTER", "SET", Ref("EqualsSegment", optional=True), OneOf(Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment")), ), Sequence( "COLLATE", Ref("EqualsSegment", optional=True), Ref("CollationReferenceSegment"), ), Sequence( "ENCRYPTION", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """A `ALTER DATABASE` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html """ type = "alter_database_statement" match_grammar: Matchable = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment", optional=True), AnyNumberOf(Ref("AlterOptionSegment")), ) class AlterOptionSegment(BaseSegment): """A database characteristic. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html """ type = "alter_option_segment" match_grammar = Sequence( OneOf( Sequence( Ref.keyword("DEFAULT", optional=True), "CHARACTER", "SET", Ref("EqualsSegment", optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedIdentifierSegment"), ), ), Sequence( Ref.keyword("DEFAULT", optional=True), "COLLATE", Ref("EqualsSegment", optional=True), Ref("CollationReferenceSegment"), ), Sequence( Ref.keyword("DEFAULT", optional=True), "ENCRYPTION", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "READ", "ONLY", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", Ref("NumericLiteralSegment")), ), ), ) class ReturnStatementSegment(BaseSegment): """A RETURN statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/return.html """ type = "return_statement" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment"), ) class SetTransactionStatementSegment(BaseSegment): """A `SET TRANSACTION` statement. 
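
    For example (illustrative):

        SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;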
As specified in https://dev.mysql.com/doc/refman/8.0/en/set-transaction.html """ type = "set_transaction_statement" match_grammar: Matchable = Sequence( "SET", OneOf("GLOBAL", "SESSION", optional=True), "TRANSACTION", Delimited( Sequence( "ISOLATION", "LEVEL", OneOf( Sequence( "READ", OneOf("COMMITTED", "UNCOMMITTED"), ), Sequence("REPEATABLE", "READ"), "SERIALIZABLE", ), ), Sequence("READ", OneOf("WRITE", "ONLY")), ), ) class SetNamesStatementSegment(BaseSegment): """A `SET NAMES` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/set-names.html """ type = "set_names_statement" match_grammar: Matchable = Sequence( "SET", "NAMES", OneOf("DEFAULT", Ref("QuotedLiteralSegment"), Ref("NakedIdentifierSegment")), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), ) class CreateEventStatementSegment(BaseSegment): """A `CREATE EVENT` statement. As specified in https://dev.mysql.com/doc/refman/9.2/en/create-event.html """ type = "create_event_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "EVENT", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "ON", "SCHEDULE", OneOf("AT", "EVERY"), Ref("ExpressionSegment"), OneOf(Ref("DatetimeUnitSegment"), optional=True), AnyNumberOf( Sequence( OneOf("STARTS", "ENDS"), Ref("ExpressionSegment"), ), optional=True, ), Sequence( "ON", "COMPLETION", Ref.keyword("NOT", optional=True), "PRESERVE", optional=True, ), OneOf( "ENABLE", "DISABLE", Sequence("DISABLE", "ON", OneOf("REPLICA", "SLAVE")), optional=True, ), Ref("CommentClauseSegment", optional=True), "DO", Ref("StatementSegment"), ) class AlterEventStatementSegment(BaseSegment): """An `ALTER EVENT` statement. As specified in https://dev.mysql.com/doc/refman/9.2/en/alter-event.html """ type = "alter_event_statement" match_grammar: Matchable = Sequence( "ALTER", Ref("DefinerSegment", optional=True), "EVENT", Ref("ObjectReferenceSegment"), Sequence( "ON", "SCHEDULE", OneOf("AT", "EVERY"), Ref("ExpressionSegment"), OneOf(Ref("DatetimeUnitSegment"), optional=True), AnyNumberOf( Sequence( OneOf("STARTS", "ENDS"), Ref("ExpressionSegment"), ), optional=True, ), optional=True, ), Sequence( "ON", "COMPLETION", Ref.keyword("NOT", optional=True), "PRESERVE", optional=True, ), Sequence("RENAME", "TO", Ref("ObjectReferenceSegment"), optional=True), OneOf( "ENABLE", "DISABLE", Sequence("DISABLE", "ON", OneOf("REPLICA", "SLAVE")), optional=True, ), Ref("CommentClauseSegment", optional=True), Sequence("DO", Ref("StatementSegment"), optional=True), ) class DropEventStatementSegment(BaseSegment): """A `DROP EVENT` statement. As specified in https://dev.mysql.com/doc/refman/9.2/en/drop-event.html """ type = "drop_event_statement" match_grammar: Matchable = Sequence( "DROP", "EVENT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ) class DatatypeSegment(BaseSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. 
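
    MySQL additionally allows the numeric qualifiers `SIGNED`, `UNSIGNED`
    and `ZEROFILL`, and an optional character set on string types, e.g.
    (illustrative) `BIGINT UNSIGNED ZEROFILL`.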
""" type = "data_type" match_grammar: Matchable = OneOf( Ref("TimeWithTZGrammar"), Sequence( "DOUBLE", "PRECISION", ), Sequence( OneOf( Sequence( OneOf("CHARACTER", "BINARY"), OneOf("VARYING", Sequence("LARGE", "OBJECT")), ), Sequence( # Some dialects allow optional qualification of data types with # schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), Ref("DatatypeIdentifierSegment"), allow_gaps=False, ), ), # There may be no brackets for some data types Ref("BracketedArguments", optional=True), OneOf( Ref("CharCharacterSetGrammar"), "SIGNED", "UNSIGNED", "ZEROFILL", Sequence("ZEROFILL", "UNSIGNED"), Sequence("UNSIGNED", "ZEROFILL"), optional=True, ), ), Ref("ArrayTypeSegment"), ) class NullSafeEqualsSegment(CompositeComparisonOperatorSegment): """NULL-safe equals operator. https://dev.mysql.com/doc/refman/9.3/en/comparison-operators.html#operator_equal-to """ match_grammar: Matchable = Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False, ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_mysql_keywords.py000066400000000000000000000166401503426445100247110ustar00rootroot00000000000000"""A List of MySQL SQL keywords. https://dev.mysql.com/doc/refman/8.0/en/keywords.html """ mysql_reserved_keywords = """ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE BEFORE BETWEEN BIGINT BINARY BLOB BOTH BY CALL CASCADE CASE CHANGE CHAR CHARACTER CHECK COLLATE COLUMN CONDITION CONSTRAINT CONTINUE CONVERT CREATE CROSS CUME_DIST CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR DATABASE DATABASES DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC DECIMAL DECLARE DEFAULT DELAYED DELETE DENSE_RANK DESC DESCRIBE DETERMINISTIC DISTINCT DISTINCTROW DIV DOUBLE DROP DUAL EACH ELSE ELSEIF EMPTY ENCLOSED ESCAPED EXCEPT EXISTS EXIT EXPLAIN FALSE FETCH FIRST_VALUE FLOAT FLOAT4 FLOAT8 FOR FORCE FOREIGN FROM FULLTEXT GENERATED GET GRANT GROUP GROUPING GROUPS HAVING HIGH_PRIORITY HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IF IGNORE IN INDEX INFILE INNER INOUT INSENSITIVE INSERT INT INT1 INT2 INT3 INT4 INT8 INTEGER INTERSECT INTERVAL INTO IO_AFTER_GTIDS IO_BEFORE_GTIDS IS ITERATE JOIN JSON_TABLE KEY KEYS KILL LAG LAST_VALUE LATERAL LEAD LEADING LEAVE LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME LOCALTIMESTAMP LOCK LONG LONGBLOB LONGTEXT LOOP LOW_PRIORITY MASTER_BIND MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND MINUTE_SECOND MOD MODIFIES NATURAL NOT NO_WRITE_TO_BINLOG NTH_VALUE NTILE NULL NUMERIC OF ON OPTIMIZE OPTIMIZER_COSTS OPTION OPTIONALLY OR ORDER OUT OUTER OUTFILE OVER PARTITION PERCENT_RANK PRECISION PRIMARY PROCEDURE PURGE RANGE RANK READ READS READ_WRITE REAL RECURSIVE REFERENCES REGEXP RELEASE RENAME REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN REVOKE RIGHT RLIKE ROW_NUMBER SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET SHOW SIGNAL SMALLINT SPATIAL SPECIFIC SQL SQLEXCEPTION SQLSTATE SQLWARNING SQL_BIG_RESULT SQL_CALC_FOUND_ROWS SQL_SMALL_RESULT SSL STARTING STORED STRAIGHT_JOIN SYSTEM TABLE TERMINATED THEN TINYBLOB TINYINT TINYTEXT TO TRAILING TRIGGER TRUE UNDO UNION UNIQUE UNLOCK UNSIGNED UPDATE USAGE USE USING UTC_DATE UTC_TIME UTC_TIMESTAMP VALUES VARBINARY VARCHAR VARCHARACTER VARYING VIRTUAL WHEN WHERE WHILE WINDOW WITH WRITE XOR YEAR_MONTH ZEROFILL """ mysql_unreserved_keywords = """ACCOUNT ACTION ACTIVE ADMIN AFTER AGAINST AGGREGATE ALGORITHM ALWAYS ANALYSE ANY ARRAY ASCII AT ATTRIBUTE AUTHENTICATION 
AUTOEXTEND_SIZE AUTO_INCREMENT AVG AVG_ROW_LENGTH BACKUP BEGIN BINLOG BIT BLOCK BOOL BOOLEAN BTREE BUCKETS BYTE CACHE CASCADED CATALOG_NAME CHAIN CHALLENGE_RESPONSE CHANGED CHANNEL CHARSET CHECKSUM CIPHER CLASS_ORIGIN CLIENT CLONE CLOSE COALESCE CODE COLLATION COLUMNS COLUMN_FORMAT COLUMN_NAME COMMENT COMMIT COMMITTED COMPACT COMPLETION COMPONENT COMPRESSED COMPRESSION CONCURRENT CONNECTION CONSISTENT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONTAINS CONTEXT CPU CUBE CUME_DIST CURRENT CURSOR_NAME DATA DATAFILE DATE DATETIME DAY DEALLOCATE DEFAULT_AUTH DEFINER DEFINITION DELAY_KEY_WRITE DENSE_RANK DESCRIPTION DES_KEY_FILE DIAGNOSTICS DIRECTORY DISABLE DISCARD DISK DO DUMPFILE DUPLICATE DYNAMIC EMPTY ENABLE ENCRYPTION END ENDS ENFORCED ENGINE ENGINES ENGINE_ATTRIBUTE ENUM ERROR ERRORS ESCAPE EVENT EVENTS EVERY EXCHANGE EXCLUDE EXECUTE EXPANSION EXPIRE EXPORT EXTENDED EXTENT_SIZE FACTOR FAILED_LOGIN_ATTEMPTS FAST FAULTS FIELDS FILE FILE_BLOCK_SIZE FILTER FINISH FIRST FIRST_VALUE FIXED FLUSH FOLLOWING FOLLOWS FORMAT FOUND FULL FUNCTION GENERAL GEOMCOLLECTION GEOMETRY GEOMETRYCOLLECTION GET_FORMAT GET_MASTER_PUBLIC_KEY GET_SOURCE_PUBLIC_KEY GLOBAL GRANTS GROUPING GROUPS GROUP_REPLICATION GTID_ONLY HANDLER HASH HELP HISTOGRAM HISTORY HOST HOSTS HOUR IDENTIFIED IGNORE_SERVER_IDS IMPORT INACTIVE INDEXES INITIAL INITIAL_SIZE INITIATE INSERT_METHOD INSTALL INSTANCE INTERSECT INVISIBLE INVOKER IO IO_THREAD IPC ISOLATION ISSUER JSON JSON_TABLE JSON_VALUE KEYRING KEY_BLOCK_SIZE LAG LANGUAGE LAST LAST_VALUE LATERAL LEAD LEAVES LESS LEVEL LINESTRING LIST LOCAL LOCKED LOCKS LOGFILE LOGS MASTER MASTER_AUTO_POSITION MASTER_COMPRESSION_ALGORITHMS MASTER_CONNECT_RETRY MASTER_DELAY MASTER_HEARTBEAT_PERIOD MASTER_HOST MASTER_LOG_FILE MASTER_LOG_POS MASTER_PASSWORD MASTER_PORT MASTER_PUBLIC_KEY_PATH MASTER_RETRY_COUNT MASTER_SERVER_ID MASTER_SSL MASTER_SSL_CA MASTER_SSL_CAPATH MASTER_SSL_CERT MASTER_SSL_CIPHER MASTER_SSL_CRL MASTER_SSL_CRLPATH MASTER_SSL_KEY MASTER_TLS_CIPHERSUITES MASTER_TLS_VERSION MASTER_USER MASTER_ZSTD_COMPRESSION_LEVEL MAX_CONNECTIONS_PER_HOUR MAX_QUERIES_PER_HOUR MAX_ROWS MAX_SIZE MAX_UPDATES_PER_HOUR MAX_USER_CONNECTIONS MEDIUM MEMBER MEMORY MERGE MESSAGE_TEXT MICROSECOND MIGRATE MINUTE MIN_ROWS MODE MODIFY MONTH MULTILINESTRING MULTIPOINT MULTIPOLYGON MUTEX MYSQL_ERRNO NAME NAMES NATIONAL NCHAR NDB NDBCLUSTER NESTED NETWORK_NAMESPACE NEVER NEW NEXT NO NODEGROUP NONE NOWAIT NO_WAIT NTH_VALUE NTILE NULLS NUMBER NVARCHAR OF OFF OFFSET OJ OLD ONE ONLY OPEN OPTIONAL OPTIONS ORDINALITY ORGANIZATION OTHERS OVER OWNER PACK_KEYS PAGE PARSER PARSE_GCOL_EXPR PARTIAL PARTITIONING PARTITIONS PASSWORD PASSWORD_LOCK_TIME PATH PERCENT_RANK PERSIST PERSIST_ONLY PHASE PLUGIN PLUGINS PLUGIN_DIR POINT POLYGON PORT PRECEDES PRECEDING PREPARE PRESERVE PREV PRIVILEGES PRIVILEGE_CHECKS_USER PROCESS PROCESSLIST PROFILE PROFILES PROXY QUARTER QUERY QUICK RANDOM RANK READ_ONLY REBUILD RECOVER RECURSIVE REDOFILE REDO_BUFFER_SIZE REDUNDANT REFERENCE REGISTRATION RELAY RELAYLOG RELAY_LOG_FILE RELAY_LOG_POS RELAY_THREAD RELOAD REMOTE REMOVE REORGANIZE REPAIR REPEATABLE REPLICA REPLICAS REPLICATE_DO_DB REPLICATE_DO_TABLE REPLICATE_IGNORE_DB REPLICATE_IGNORE_TABLE REPLICATE_REWRITE_DB REPLICATE_WILD_DO_TABLE REPLICATE_WILD_IGNORE_TABLE REPLICATION REQUIRE_ROW_FORMAT RESET RESOURCE RESPECT RESTART RESTORE RESUME RETAIN RETURNED_SQLSTATE RETURNING RETURNS REUSE REVERSE ROLE ROLLBACK ROLLUP ROTATE ROUTINE ROW ROWS ROW_COUNT ROW_FORMAT ROW_NUMBER RTREE SAVEPOINT SCHEDULE SCHEMA_NAME SECOND SECONDARY 
SECONDARY_ENGINE SECONDARY_ENGINE_ATTRIBUTE SECONDARY_LOAD SECONDARY_UNLOAD SECURITY SERIAL SERIALIZABLE SERVER SESSION SHARE SHUTDOWN SIGNED SIMPLE SKIP SLAVE SLOW SNAPSHOT SOCKET SOME SONAME SOUNDS SOURCE SOURCE_AUTO_POSITION SOURCE_BIND SOURCE_COMPRESSION_ALGORITHMS SOURCE_CONNECT_RETRY SOURCE_DELAY SOURCE_HEARTBEAT_PERIOD SOURCE_HOST SOURCE_LOG_FILE SOURCE_LOG_POS SOURCE_PASSWORD SOURCE_PORT SOURCE_PUBLIC_KEY_PATH SOURCE_RETRY_COUNT SOURCE_SSL SOURCE_SSL_CA SOURCE_SSL_CAPATH SOURCE_SSL_CERT SOURCE_SSL_CIPHER SOURCE_SSL_CRL SOURCE_SSL_CRLPATH SOURCE_SSL_KEY SOURCE_SSL_VERIFY_SERVER_CERT SOURCE_TLS_CIPHERSUITES SOURCE_TLS_VERSION SOURCE_USER SOURCE_ZSTD_COMPRESSION_LEVEL SQL_AFTER_GTIDS SQL_AFTER_MTS_GAPS SQL_BEFORE_GTIDS SQL_BUFFER_RESULT SQL_CACHE SQL_NO_CACHE SQL_THREAD SQL_TSI_DAY SQL_TSI_HOUR SQL_TSI_MINUTE SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_SECOND SQL_TSI_WEEK SQL_TSI_YEAR SRID STACKED START STARTS STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STATUS STOP STORAGE STREAM STRING SUBCLASS_ORIGIN SUBJECT SUBPARTITION SUBPARTITIONS SUPER SUSPEND SWAPS SWITCHES SYSTEM TABLES TABLESPACE TABLE_CHECKSUM TABLE_NAME TEMPORARY TEMPTABLE TEXT THAN THREAD_PRIORITY TIES TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TLS TRANSACTION TRIGGERS TRUNCATE TYPE TYPES UNBOUNDED UNCOMMITTED UNDEFINED UNDOFILE UNDO_BUFFER_SIZE UNICODE UNINSTALL UNKNOWN UNREGISTER UNTIL UPGRADE USER USER_RESOURCES USE_FRM VALIDATION VALUE VARIABLES VCPU VIEW VISIBLE WAIT WARNINGS WEEK WEIGHT_STRING WINDOW WITHOUT WORK WRAPPER X509 XA XID XML YEAR ZONE """ # These are not MySQL keywords, but SQLFluff needs them to parse well. mysql_unreserved_keywords += """NOW SHARED INPLACE NOCOPY INSTANT """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_oracle.py000066400000000000000000002467251503426445100230730ustar00rootroot00000000000000"""The Oracle dialect. This inherits from the ansi dialect. """ from typing import cast from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseFileSegment, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, CompositeComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") oracle_dialect = ansi_dialect.copy_as( "oracle", formatted_name="Oracle", docstring="""The dialect for `Oracle`_ SQL. Note: this does include PL/SQL. .. 
_`Oracle`: https://www.oracle.com/database/technologies/appdev/sql.html""", ) oracle_dialect.sets("reserved_keywords").update( [ "ACCESS", "ADD", "ALL", "ALTER", "AND", "ANY", "AS", "ASC", "AUDIT", "BETWEEN", "BY", "CHAR", "CHECK", "CLUSTER", "COLUMN", "COLUMN_VALUE", "COMMENT", "COMPRESS", "CONNECT", "CONNECT_BY_ROOT", "CONSTRAINT", "CREATE", "CURRENT", "DATE", "DECIMAL", "DEFAULT", "DEFINITION", "DELETE", "DELETING", "DESC", "DISTINCT", "DROP", "ELSE", "EXCLUSIVE", "EXISTS", "FILE", "FLOAT", "FOR", "FORCE", "FROM", "GRANT", "GROUP", "HAVING", "IDENTIFIED", "IMMEDIATE", "IN", "INCREMENT", "INDEX", "INDEXTYPE", "INITIAL", "INSERT", "INSERTING", "INTEGER", "INTERSECT", "INTO", "IS", "LEVEL", "LIKE", "LOCK", "LONG", "LOOP", "MAXEXTENTS", "MINUS", "MLSLABEL", "MODE", "MODIFY", "NESTED_TABLE_ID", "NOAUDIT", "NOCOMPRESS", "NOT", "NOWAIT", "NULL", "NUMBER", "OF", "OFFLINE", "ON", "ONLINE", "OPTION", "OR", "ORDER", "OVERFLOW", "PCTFREE", "PIVOT", "PRIOR", "PRIVATE", "PROMPT", "PUBLIC", "RAW", "RENAME", "RESOURCE", "REVOKE", "ROW", "ROWID", "ROWNUM", "ROWS", "SELECT", "SESSION", "SET", "SHARE", "SIBLINGS", "SIZE", "SMALLINT", "START", "SUCCESSFUL", "SYNONYM", "SYSDATE", "TABLE", "THEN", "TO", "TRIGGER", "UID", "UNION", "UNIQUE", "UNPIVOT", "UPDATE", "UPDATING", "USER", "VALIDATE", "VALUES", "VARCHAR", "VARCHAR2", "VIEW", "WHEN", "WHENEVER", "WHERE", "WITH", ] ) oracle_dialect.sets("unreserved_keywords").update( [ "ABSENT", "ACCESSIBLE", "AUTHID", "BODY", "BULK_EXCEPTIONS", "BULK_ROWCOUNT", "BYTE", "COMPILE", "COMPOUND", "CONSTANT", "CONTAINER", "CROSSEDITION", "CURSOR", "DEBUG", "DIGEST", "EDITIONABLE", "EDITIONING", "ELSIF", "ERROR", "EXPIRE", "EXTERNALLY", "FOLLOWS", "FORALL", "GLOBALLY", "HTTP", "INDICES", "ISOPEN", "KEEP", "LOOP", "MUTABLE", "NESTED", "NOCOPY", "NOMAXVALUE", "NOMINVALUE", "NONEDITIONABLE", "NOTFOUND", "OID", "PACKAGE", "PAIRS", "PARALLEL_ENABLE", "PARENT", "PERSISTABLE", "PIPELINED", "PRAGMA", "PRECEDES", "PROFILE", "QUOTA", "RAISE", "RECORD", "RESULT_CACHE", "RETURNING", "REUSE", "REVERSE", "ROWTYPE", "SHARD_ENABLE", "SHARING", "SPECIFICATION", "SQL_MACRO", "UNLIMITED", "VARRAY", ] ) oracle_dialect.sets("bare_functions").clear() oracle_dialect.sets("bare_functions").update( [ "current_date", "current_timestamp", "dbtimezone", "localtimestamp", "sessiontimestamp", "sysdate", "systimestamp", ] ) oracle_dialect.patch_lexer_matchers( [ RegexLexer("word", r"[a-zA-Z][0-9a-zA-Z_$#]*", WordSegment), RegexLexer( "single_quote", r"'([^'\\]|\\|\\.|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^'\\]|\\|\\.|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), RegexLexer( "numeric_literal", r"(?>\d+\.\d+|\d+\.(?![\.\w])|\d+)(\.?[eE][+-]?\d+)?((?", CodeSegment), ], before="equals", ) oracle_dialect.add( AtSignSegment=StringParser("@", SymbolSegment, type="at_sign"), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), OnCommitGrammar=Sequence( "ON", "COMMIT", OneOf( Sequence(OneOf("DROP", "PRESERVE"), Ref.keyword("DEFINITION")), Sequence(OneOf("DELETE", "PRESERVE"), Ref.keyword("ROWS")), ), ), ConnectByRootGrammar=Sequence("CONNECT_BY_ROOT", Ref("NakedIdentifierSegment")), PlusJoinSegment=Bracketed( StringParser("+", SymbolSegment, type="plus_join_symbol") ), PlusJoinGrammar=OneOf( Sequence( OneOf(Ref("ColumnReferenceSegment"), Ref("FunctionSegment")), Ref("EqualsSegment"), 
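            # e.g. `WHERE t1.col = t2.col (+)` -- Oracle's legacy
            # outer-join marker on the right-hand column (illustrative).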
Ref("ColumnReferenceSegment"), Ref("PlusJoinSegment"), ), Sequence( Ref("ColumnReferenceSegment"), Ref("PlusJoinSegment"), Ref("EqualsSegment"), OneOf(Ref("ColumnReferenceSegment"), Ref("FunctionSegment")), ), ), IntervalUnitsGrammar=OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), PivotForInGrammar=Sequence( "FOR", OptionallyBracketed(Delimited(Ref("ColumnReferenceSegment"))), "IN", Bracketed( Delimited( Sequence( Ref("Expression_D_Grammar"), Ref("AliasExpressionSegment", optional=True), ) ) ), ), UnpivotNullsGrammar=Sequence(OneOf("INCLUDE", "EXCLUDE"), "NULLS"), StatementAndDelimiterGrammar=Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar", optional=True), ), OneOrMoreStatementsGrammar=AnyNumberOf( Ref("StatementAndDelimiterGrammar"), min_times=1, ), TimingPointGrammar=Sequence( OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF")), OneOf("STATEMENT", Sequence("EACH", "ROW")), ), SharingClauseGrammar=Sequence("SHARING", OneOf("METADATA", "NONE"), optional=True), DefaultCollationClauseGrammar=Sequence( "DEFAULT", "COLLATION", Ref("NakedIdentifierSegment"), optional=True ), InvokerRightsClauseGrammar=Sequence("AUTHID", OneOf("CURRENT_USER", "DEFINER")), AccessibleByClauseGrammar=Sequence( "ACCESSIBLE", "BY", Delimited( Bracketed( Sequence( OneOf( "FUNCTION", "PROCEDURE", "PACKAGE", "TRIGGER", "TYPE", optional=True, ), Ref("FunctionNameSegment"), ) ) ), ), DmlGrammar=OneOf( "DELETE", "INSERT", Sequence( "UPDATE", Sequence("OF", Delimited(Ref("ColumnReferenceSegment")), optional=True), ), ), IterationBoundsGrammar=OneOf( Ref("NumericLiteralSegment"), Ref("SingleIdentifierGrammar"), Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), Ref("SingleIdentifierGrammar"), ), ), IterationSteppedControlGrammar=Sequence( Ref("IterationBoundsGrammar"), Ref("DotSegment"), Ref("DotSegment"), Ref("IterationBoundsGrammar"), Sequence("BY", "STEP", optional=True), ), ParallelEnableClauseGrammar=Sequence( "PARALLEL_ENABLE", Sequence( Bracketed( "PARTITION", Ref("SingleIdentifierGrammar"), "BY", OneOf( "ANY", Sequence( OneOf("HASH", "RANGE"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence( OneOf("ORDER", "CLUSTER"), Ref("ExpressionSegment"), "BY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), ), Sequence("VALUE", Bracketed(Ref("ColumnReferenceSegment"))), ), ), optional=True, ), ), ResultCacheClauseGrammar=Sequence( "RESULT_CACHE", Sequence( "RELIES_ON", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), optional=True, ), ), PipelinedClauseGrammar=Sequence( "PIPELINED", OneOf( Sequence("USING", Ref("ObjectReferenceSegment"), optional=True), Sequence( OneOf("ROW", "TABLE"), "POLYMORPHIC", Sequence("USING", Ref("ObjectReferenceSegment"), optional=True), ), ), ), ElementSpecificationGrammar=Sequence( AnyNumberOf( Sequence( Ref.keyword("NOT"), OneOf("OVERRIDING", "FINAL", "INSTANTIABLE"), ), optional=True, ), AnyNumberOf( Sequence( OneOf("MEMBER", "STATIC"), OneOf( Ref("CreateFunctionStatementSegment"), Ref("CreateProcedureStatementSegment"), ), ) ), ), ImplicitCursorAttributesGrammar=Sequence( Ref("SingleIdentifierGrammar"), Ref("ModuloSegment"), OneOf( "ISOPEN", "FOUND", "NOTFOUND", "ROWCOUNT", "BULK_ROWCOUNT", "BULK_EXCEPTIONS", ), ), ObjectTypeAndSubtypeDefGrammar=Sequence( OneOf("OBJECT", Sequence("UNDER", Ref("ObjectReferenceSegment"))), Bracketed( Delimited( OneOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), Ref("ElementSpecificationGrammar"), ) ), optional=True, ), AnyNumberOf( Sequence( Ref.keyword("NOT", 
optional=True), OneOf("FINAL", "INSTANTIABLE", "PERSISTABLE"), ), optional=True, ), ), VarrayAndNestedTypeSpecGrammar=Sequence( OneOf( Sequence( OneOf( "VARRAY", Sequence(Ref.keyword("VARYING", optional=True), "ARRAY"), ), Bracketed(Ref("NumericLiteralSegment")), ), "TABLE", ), "OF", OneOf( Sequence( Ref("StartBracketSegment", optional=True), Ref("DatatypeSegment"), Sequence("NOT", "NULL", optional=True), Ref("EndBracketSegment", optional=True), ), Sequence( Bracketed( Sequence( Ref("DatatypeSegment"), Sequence("NOT", "NULL", optional=True), ) ), Ref.keyword("NOT", optional=True), Ref.keyword("PERSISTABLE", optional=True), ), ), ), ForUpdateGrammar=Sequence( "FOR", "UPDATE", Sequence("OF", Ref("TableReferenceSegment"), optional=True) ), CompileClauseGrammar=Sequence( "COMPILE", Ref.keyword("DEBUG", optional=True), OneOf("PACKAGE", "SPECIFICATION", "BODY", optional=True), Delimited( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("NakedIdentifierSegment"), optional=True, ), Sequence("REUSE", "SETTINGS", optional=True), ), IdentityClauseGrammar=Sequence( "GENERATED", OneOf( "ALWAYS", Sequence( "BY", "DEFAULT", Sequence( "ON", "NULL", Sequence( "FOR", "INSERT", OneOf("ONLY", Sequence("AND", "UPDATE")), optional=True, ), optional=True, ), ), optional=True, ), "AS", "IDENTITY", Bracketed(Ref("IdentityOptionsGrammar"), optional=True), ), IdentityOptionsGrammar=AnyNumberOf( Sequence( OneOf( Sequence("START", "WITH"), Sequence("INCREMENT", "BY"), "MAXVALUE", "MINVALUE", "CACHE", ), Ref("NumericLiteralSegment"), Sequence("LIMIT", "VALUE", optional=True), ), "NOMAXVALUE", "NOMINVALUE", "CYCLE", "NOCYCLE", "NOCACHE", "ORDER", "NOORDER", ), SizeClauseGrammar=Sequence( Ref("NumericLiteralSegment"), RegexParser(r"[KMGTPE]?", LiteralSegment, type="size_prefix"), ), SlashStatementTerminatorSegment=StringParser( "/", SymbolSegment, type="statement_terminator" ), TriggerPredicatesGrammar=OneOf( "INSERTING", Sequence("UPDATING", Bracketed(Ref("QuotedLiteralSegment"), optional=True)), "DELETING", ), JSONObjectContentSegment=Sequence( OneOf(Ref("StarSegment"), Delimited(Ref("JSONEntrySegment")), optional=True), Ref("JSONOnNullClause", optional=True), Ref("JSONReturningClause", optional=True), Ref.keyword("STRICT", optional=True), Sequence("WITH", "UNIQUE", "KEYS", optional=True), ), JSONEntrySegment=OneOf( Sequence( Ref("JSONRegularEntrySegment"), Sequence("FORMAT", "JSON", optional=True), ), Ref("WildcardIdentifierSegment"), ), JSONRegularEntrySegment=Sequence( OneOf( Sequence( Ref.keyword("KEY", optional=True), Ref("QuotedLiteralSegment"), "VALUE", Ref("ExpressionSegment"), ), Sequence( Ref("ExpressionSegment"), Sequence(Ref("ColonSegment"), Ref("ExpressionSegment"), optional=True), ), Ref("ColumnReferenceSegment"), ) ), JSONOnNullClause=Sequence(OneOf("NULL", "ABSENT"), "ON", "NULL"), JSONReturningClause=Sequence( "RETURNING", OneOf( Sequence( "VARCHAR", Bracketed( Sequence( Ref("NumericLiteralSegment"), OneOf("BYTE", "CHAR", optional=True), ), optional=True, ), Sequence("WITH", "TYPENAME", optional=True), ), Sequence( OneOf("CLOB", "BLOB"), Ref("SingleIdentifierGrammar", optional=True) ), "JSON", ), ), ) oracle_dialect.replace( # https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/DROP-TABLE.html DropBehaviorGrammar=Sequence( Sequence( "CASCADE", "CONSTRAINTS", optional=True, ), Ref.keyword("PURGE", optional=True), optional=True, ), NakedIdentifierSegment=SegmentGenerator( lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z][A-Z0-9_#$]*", IdentifierSegment, type="naked_identifier", 
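            # Oracle identifiers may also contain `#` and `$` after the
            # first character, hence the pattern above.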
anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), PostFunctionGrammar=AnyNumberOf( Ref("WithinGroupClauseSegment"), Ref("FilterClauseGrammar"), Ref("OverClauseSegment", optional=True), ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("NamedArgumentSegment"), ), FunctionContentsGrammar=ansi_dialect.get_grammar("FunctionContentsGrammar").copy( insert=[Ref("ListaggOverflowClauseSegment"), Ref("JSONObjectContentSegment")] ), TemporaryGrammar=Sequence( OneOf("GLOBAL", "PRIVATE"), Ref.keyword("TEMPORARY"), optional=True, ), ParameterNameSegment=RegexParser( r'[A-Z_][A-Z0-9_$]*|"[^"]*"', CodeSegment, type="parameter" ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("SqlplusVariableGrammar"), Ref.keyword("LEVEL"), Ref.keyword("ROWNUM"), Ref.keyword("ANY"), ], before=Ref("ArrayLiteralSegment"), ), BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( insert=[ Ref("ConnectByRootGrammar"), Ref("SqlplusSubstitutionVariableSegment"), ] ), Expression_D_Grammar=Sequence( OneOf( Ref("PlusJoinGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) Ref("LocalAliasSegment"), # WHERE (LOCAL.a, LOCAL.b) IN (...) ), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("IntervalExpressionSegment"), Ref("TypedStructLiteralSegment"), Ref("ArrayExpressionSegment"), Ref("ColumnReferenceSegment"), # For triggers, we allow "NEW.*" but not just "*" nor "a.b.*" # So can't use WildcardIdentifierSegment nor WildcardExpressionSegment Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), Ref("StarSegment"), ), Sequence( Ref("StructTypeSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( Ref("DatatypeSegment"), # Don't use the full LiteralGrammar here # because only some of them are applicable. # Notably we shouldn't use QualifiedNumericLiteralSegment # here because it looks like an arithmetic operation. 
OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), ), ), Ref("LocalAliasSegment"), Ref("SqlplusSubstitutionVariableSegment"), Ref("ImplicitCursorAttributesGrammar"), terminators=[Ref("CommaSegment")], ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), Sequence( Ref("IntervalUnitsGrammar"), Sequence("TO", Ref("IntervalUnitsGrammar"), optional=True), ), ), PreTableFunctionKeywordsGrammar=OneOf("LATERAL"), ConditionalCrossJoinKeywordsGrammar=Nothing(), UnconditionalCrossJoinKeywordsGrammar=Ref.keyword("CROSS"), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[ Ref("SqlplusSubstitutionVariableSegment"), ] ), SequenceMinValueGrammar=OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), "NOMINVALUE", ), SequenceMaxValueGrammar=OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), "NOMAXVALUE", ), FunctionParameterGrammar=Sequence( Ref("ParameterNameSegment"), OneOf( Sequence( Ref.keyword("IN", optional=True), OneOf(Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment")), Sequence( OneOf( Sequence(Ref("ColonSegment"), Ref("EqualsSegment")), "DEFAULT" ), Ref("ExpressionSegment"), optional=True, ), ), Sequence( Ref.keyword("IN", optional=True), "OUT", Ref.keyword("NOCOPY", optional=True), OneOf(Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment")), ), ), ), DelimiterGrammar=Sequence( Ref("SemicolonSegment"), Ref("SlashStatementTerminatorSegment", optional=True) ), SelectClauseTerminatorGrammar=OneOf( "INTO", "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), "FETCH", ), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please keep the order below the same as Oracle's doc: """ match_grammar: Matchable = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( # @TODO all stuff inside this "Delimited" is not validated for Oracle Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), ), ), Ref("AlterTablePropertiesSegment"), Ref("AlterTableColumnClausesSegment"), Ref("AlterTableConstraintClauses"), ), ) class AlterTablePropertiesSegment(BaseSegment): """ALTER TABLE `alter_table_properties` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's alter_table_properties grammar. """ type = "alter_table_properties" # TODO: There are many more alter_table_properties to implement match_grammar = OneOf( # Rename Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), ) class AlterTableColumnClausesSegment(BaseSegment): """ALTER TABLE `column_clauses` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's column_clauses grammar. 
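
    This matches just the column clause, e.g. the `RENAME COLUMN a TO b`
    part of (illustrative) `ALTER TABLE t1 RENAME COLUMN a TO b;`.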
""" type = "alter_table_column_clauses" match_grammar = OneOf( # add_column_clause # modify_column_clause Sequence( OneOf( "ADD", "MODIFY", ), OneOf( Ref("ColumnDefinitionSegment"), Bracketed(Delimited(Ref("ColumnDefinitionSegment"))), ), ), # drop_column_clause # @TODO: extend drop_column_clause Sequence( "DROP", OneOf( Sequence("COLUMN", Ref("ColumnReferenceSegment")), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ), # @TODO: add_period_clause # @TODO: drop_period_clause # rename_column_clause Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), # @TODO: modify_collection_retrieval # @TODO: modify_LOB_storage_clause # @TODO: alter_varray_col_properties ) class AlterTableConstraintClauses(BaseSegment): """ALTER TABLE `constraint_clauses` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's constraint_clauses grammar. """ type = "alter_table_constraint_clauses" match_grammar = OneOf( Sequence( "ADD", Ref("TableConstraintSegment"), ), # @TODO MODIFY # @TODO RENAME # @TODO DROP # drop_constraint_clause Sequence( "DROP", OneOf( Sequence( "PRIMARY", "KEY", ), Sequence( "UNIQUE", Bracketed(Ref("ColumnReferenceSegment")), ), Sequence("CONSTRAINT", Ref("ObjectReferenceSegment")), ), Ref.keyword("CASCADE", optional=True), Sequence( OneOf( "KEEP", "DROP", ), "INDEX", optional=True, ), Ref.keyword("ONLINE", optional=True), ), ) class ExecuteFileSegment(BaseSegment): """A reference to an indextype.""" type = "execute_file_statement" match_grammar = Sequence( OneOf( Sequence( Ref("AtSignSegment"), Ref("AtSignSegment", optional=True), ), "START", ), # Probably should have a better file definition but this will do for now AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), Ref("SlashStatementTerminatorSegment"), ), ) class IndexTypeReferenceSegment(BaseSegment): """A reference to an indextype.""" type = "indextype_reference" match_grammar = ansi.ObjectReferenceSegment.match_grammar.copy() # Adding Oracle specific statements. class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments. Override ANSI to allow exclusion of ExecuteFileSegment. """ type = "statement" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CommentStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("AlterFunctionStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("CreateTypeBodyStatementSegment"), Ref("CreatePackageStatementSegment"), Ref("DropPackageStatementSegment"), Ref("AlterPackageStatementSegment"), Ref("AlterTriggerStatementSegment"), Ref("BeginEndSegment"), Ref("AssignmentStatementSegment"), Ref("RecordTypeDefinitionSegment"), Ref("DeclareCursorVariableSegment"), Ref("FunctionSegment"), Ref("IfExpressionStatement"), Ref("CaseExpressionSegment"), Ref("NullStatementSegment"), Ref("ForLoopStatementSegment"), Ref("WhileLoopStatementSegment"), Ref("LoopStatementSegment"), Ref("ForAllStatementSegment"), Ref("OpenStatementSegment"), Ref("CloseStatementSegment"), Ref("OpenForStatementSegment"), Ref("FetchStatementSegment"), Ref("ExitStatementSegment"), Ref("ContinueStatementSegment"), Ref("RaiseStatementSegment"), Ref("ReturnStatementSegment"), ], ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. 
This is also the default "root" segment of the dialect, and so is usually instantiated directly. Override ANSI to allow addition of ExecuteFileSegment without ending in DelimiterGrammar. """ match_grammar = AnyNumberOf( Ref("ExecuteFileSegment"), Delimited( Ref("StatementSegment"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ), ) class CommentStatementSegment(BaseSegment): """A `Comment` statement. COMMENT [text] https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_4009.htm """ type = "comment_statement" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), ), Sequence( "OPERATOR", Ref("ObjectReferenceSegment"), ), Sequence( "INDEXTYPE", Ref("IndexTypeReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) # need to ignore type due to mypy rules on type variables # see https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases # for details class TableReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a table, CTE, subquery or alias. Extended from ANSI to allow Database Link syntax using AtSignSegment. """ type = "table_reference" match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=OneOf( Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment")), Ref("AtSignSegment"), ), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ], allow_gaps=False, ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-VIEW.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence(Ref.keyword("NO", optional=True), "FORCE", optional=True), OneOf( "EDITIONING", Sequence("EDITIONABLE", Ref.keyword("EDITIONING", optional=True)), "NONEDITIONABLE", optional=True, ), Ref.keyword("MATERIALIZED", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class WithinGroupClauseSegment(BaseSegment): """A `WITHIN GROUP` clause for ordered-set aggregate functions.""" type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment", optional=False)), ) class ListaggOverflowClauseSegment(BaseSegment): """ON OVERFLOW clause of listagg function.""" type = "listagg_overflow_clause" match_grammar = Sequence( "ON", "OVERFLOW", OneOf( "ERROR", Sequence( "TRUNCATE", Ref("SingleQuotedIdentifierSegment", optional=True), OneOf("WITH", "WITHOUT", optional=True), Ref.keyword("COUNT", optional=True), ), ), ) class NamedArgumentSegment(BaseSegment): """Named argument to a function.
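For illustration (example ours): a call using named notation such as `my_function(p_value => 42)`.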
https://docs.oracle.com/en/database/oracle/oracle-database/21/lnpls/plsql-subprograms.html#GUID-A7D51201-1711-4F33-827F-70042700801F """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class CreateTableStatementSegment(BaseSegment): """A CREATE TABLE statement. https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-TABLE.html https://oracle-base.com/articles/misc/temporary-tables https://oracle-base.com/articles/18c/private-temporary-tables-18c """ type = "create_table_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), Ref("OnCommitGrammar", optional=True), ), # Create AS syntax: Sequence( Ref("OnCommitGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf( AnyNumberOf( Sequence( Ref("ColumnConstraintSegment"), Ref.keyword("ENABLE", optional=True), ) ), Sequence( Ref("DatatypeSegment"), # Column type # For types like VARCHAR(100), VARCHAR(100 BYTE), VARCHAR (100 CHAR) Bracketed( Sequence( Anything(), OneOf( "BYTE", "CHAR", optional=True, ), ), optional=True, ), AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), Ref("IdentityClauseGrammar", optional=True), ), ), ) class SqlplusVariableGrammar(BaseSegment): """SQLPlus Bind Variables :thing. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqpug/using-substitution-variables-sqlplus.html """ type = "sqlplus_variable" match_grammar = Sequence( OptionallyBracketed( Ref("ColonSegment"), Ref("ParameterNameSegment"), Sequence(Ref("DotSegment"), Ref("ParameterNameSegment"), optional=True), ) ) class ConnectByClauseSegment(BaseSegment): """`CONNECT BY` clause used in Hierarchical Queries. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "connectby_clause" match_grammar: Matchable = Sequence( "CONNECT", "BY", Ref.keyword("NOCYCLE", optional=True), Ref("ExpressionSegment"), ) class StartWithClauseSegment(BaseSegment): """`START WITH` clause used in Hierarchical Queries. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "startwith_clause" match_grammar: Matchable = Sequence( "START", "WITH", Ref("ExpressionSegment"), ) class HierarchicalQueryClauseSegment(BaseSegment): """Hierarchical Query. 
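For illustration (example ours, adapted from common usage rather than the docs): `SELECT employee_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id`.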
https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "hierarchical_query_clause" match_grammar: Matchable = OneOf( Sequence( Ref("ConnectByClauseSegment"), Ref("StartWithClauseSegment", optional=True), ), Sequence( Ref("StartWithClauseSegment"), Ref("ConnectByClauseSegment"), ), ) class OrderByClauseSegment(ansi.OrderByClauseSegment): """An `ORDER BY` clause like in `SELECT`.""" match_grammar: Matchable = ansi.OrderByClauseSegment.match_grammar.copy( insert=[Ref.keyword("SIBLINGS", optional=True)], before=Ref("ByKeywordSegment") ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations; for other use cases, we should use the main SelectStatementSegment. """ match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("HierarchicalQueryClauseSegment", optional=True), Ref("PivotSegment", optional=True), Ref("UnpivotSegment", optional=True), ], before=Ref("GroupByClauseSegment", optional=True), terminators=[ Ref("HierarchicalQueryClauseSegment"), Ref("PivotSegment", optional=True), Ref("UnpivotSegment", optional=True), ], ).copy( insert=[ Ref("IntoClauseSegment", optional=True), ], before=Ref("FromClauseSegment", optional=True), ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement.""" match_grammar: Matchable = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("IntoClauseSegment", optional=True), Ref("ForUpdateGrammar", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), Ref("ForUpdateGrammar", optional=True), ], replace_terminators=True, terminators=cast( Sequence, ansi.SelectStatementSegment.match_grammar ).terminators, ) class GreaterThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawLessThanSegment"), ), ) class LessThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawGreaterThanSegment"), ), ) class NotEqualToSegment(CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment")), Sequence(Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment")), ) class PivotSegment(BaseSegment): """Pivot clause. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/SELECT.html """ type = "pivot_clause" match_grammar: Matchable = Sequence( "PIVOT", Ref.keyword("XML", optional=True), Bracketed( Delimited( Sequence( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True) ) ), Ref("PivotForInGrammar"), ), ) class UnpivotSegment(BaseSegment): """Unpivot clause.
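For illustration (example ours): `SELECT * FROM sales UNPIVOT (quantity FOR quarter IN (q1, q2, q3, q4))`.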
https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/SELECT.html """ type = "unpivot_clause" match_grammar: Matchable = Sequence( "UNPIVOT", Ref("UnpivotNullsGrammar", optional=True), Bracketed( OptionallyBracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("PivotForInGrammar"), ), ) class ObjectReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object.""" # Allow whitespace match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=True, ) class ColumnReferenceSegment(ObjectReferenceSegment): """A reference to column, field or alias.""" type = "column_reference" class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema.""" type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name Delimited( OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("BracketedSegment")], ), delimiter=Ref("AtSignSegment"), ), allow_gaps=False, ) class SqlplusSubstitutionVariableSegment(BaseSegment): """SQLPlus Substitution Variables &thing. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqpug/using-substitution-variables-sqlplus.html """ type = "sqlplus_variable" match_grammar = Sequence( Ref("AmpersandSegment"), Ref("AmpersandSegment", optional=True), Ref("SingleIdentifierGrammar"), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause.""" match_grammar = ansi.TableExpressionSegment.match_grammar.copy( insert=[ Ref("SqlplusSubstitutionVariableSegment"), ] ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/ALTER-TABLE.html#GUID-552E7373-BF93-477D-9DA3-B2C9386F2877__I2103997 """ type = "table_constraint" # Later add support for CHECK constraint, others? # e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1) match_grammar: Matchable = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... 
] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), ) class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.""" type = "transaction_statement" match_grammar: Matchable = Sequence( OneOf("START", "COMMIT", "ROLLBACK"), OneOf("TRANSACTION", "WORK", optional=True), Sequence("NAME", Ref("SingleIdentifierGrammar"), optional=True), Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE [OR REPLACE] PROCEDURE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-PROCEDURE-statement.html """ type = "create_procedure_statement" match_grammar = Sequence( Ref.keyword("CREATE", optional=True), Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "PROCEDURE", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), Ref("SharingClauseGrammar", optional=True), AnyNumberOf( Ref("DefaultCollationClauseGrammar"), Ref("InvokerRightsClauseGrammar"), Ref("AccessibleByClauseGrammar"), optional=True, ), OneOf("IS", "AS", optional=True), AnyNumberOf(Ref("DeclareSegment"), optional=True), Ref("BeginEndSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/DROP-PROCEDURE-statement.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("FunctionNameSegment"), ) class DeclareSegment(BaseSegment): """A declaration segment in PL/SQL. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/block.html#GUID-9ACEB9ED-567E-4E1A-A16A-B8B35214FC9D__CJAIABJJ """ type = "declare_segment" match_grammar = Sequence( Ref.keyword("DECLARE", optional=True), AnyNumberOf( Delimited( OneOf( Sequence( OneOf( Sequence( Ref("SingleIdentifierGrammar"), Ref.keyword("CONSTANT", optional=True), OneOf( Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment"), Ref("RowTypeReferenceSegment"), ), ), Sequence( "PRAGMA", Ref("FunctionSegment"), ), Ref("CollectionTypeDefinitionSegment"), Ref("RecordTypeDefinitionSegment"), Ref("RefCursorTypeDefinitionSegment"), ), Sequence("NOT", "NULL", optional=True), Sequence( OneOf( Sequence(Ref("ColonSegment"), Ref("EqualsSegment")), "DEFAULT", ), Ref("ExpressionSegment"), optional=True, ), Ref("DelimiterGrammar"), ), Ref("CreateProcedureStatementSegment"), Ref("CreateFunctionStatementSegment"), Ref("DeclareCursorVariableSegment"), ), delimiter=Ref("DelimiterGrammar"), terminators=["BEGIN", "END"], ) ), ) class ColumnTypeReferenceSegment(BaseSegment): """A column type reference segment (e.g. `table_name.column_name%type`). https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/TYPE-attribute.html """ type = "column_type_reference" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("ModuloSegment"), "TYPE" ) class RowTypeReferenceSegment(BaseSegment): """A row type reference segment (e.g. `table_name%rowtype`).
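For illustration (example ours): `l_employee employees%ROWTYPE;` declares a record matching the table's row structure.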
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/ROWTYPE-attribute.html """ type = "row_type_reference" match_grammar = Sequence( Ref("TableReferenceSegment"), Ref("ModuloSegment"), "ROWTYPE" ) class CollectionTypeDefinitionSegment(BaseSegment): """A collection type definition. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/collection-variable.html """ type = "collection_type" match_grammar = Sequence( "TYPE", Ref("SingleIdentifierGrammar"), "IS", Sequence("TABLE", "OF", optional=True), OneOf( Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment"), Ref("RowTypeReferenceSegment"), ), Sequence("OF", Ref("DatatypeSegment"), optional=True), Sequence("NOT", "NULL", optional=True), Sequence("INDEX", "BY", Ref("DatatypeSegment"), optional=True), ) class RecordTypeDefinitionSegment(BaseSegment): """A `RECORD` type definition. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/record-variable-declaration.html """ type = "record_type" match_grammar = Sequence( "TYPE", Ref("SingleIdentifierGrammar"), "IS", "RECORD", Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), OneOf(Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment")), Sequence( Sequence("NOT", "NULL", optional=True), OneOf( Sequence(Ref("ColonSegment"), Ref("EqualsSegment")), "DEFAULT", ), Ref("ExpressionSegment"), optional=True, ), ) ) ), ) class RefCursorTypeDefinitionSegment(BaseSegment): """A `REF CURSOR TYPE` declaration. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/cursor-variable-declaration.html """ type = "ref_cursor_type" match_grammar = Sequence( "TYPE", Ref("SingleIdentifierGrammar"), "IS", "REF", "CURSOR", Sequence( "RETURN", OneOf( Ref("RowTypeReferenceSegment"), Ref("ColumnTypeReferenceSegment"), Ref("ObjectReferenceSegment"), ), optional=True, ), ) class DeclareCursorVariableSegment(BaseSegment): """A `CURSOR` declaration. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/explicit-cursor-declaration-and-definition.html """ type = "cursor_variable" match_grammar = Sequence( "CURSOR", Ref("SingleIdentifierGrammar"), Ref("FunctionParameterListGrammar", optional=True), Sequence( "RETURN", OneOf( Ref("ColumnTypeReferenceSegment"), Ref("RowTypeReferenceSegment"), Ref("DatatypeSegment"), ), optional=True, ), Sequence("IS", Ref("SelectStatementSegment"), optional=True), Ref("DelimiterGrammar", optional=True), ) class BeginEndSegment(BaseSegment): """A `BEGIN/END` block. Encloses multiple statements into a single statement object. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/block.html """ type = "begin_end_block" match_grammar = Sequence( Ref("DeclareSegment", optional=True), "BEGIN", Indent, Ref("OneOrMoreStatementsGrammar"), Sequence( "EXCEPTION", "WHEN", OneOf( "OTHERS", Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( "OR", Ref("SingleIdentifierGrammar"), ) ), ), ), "THEN", Ref("OneOrMoreStatementsGrammar"), optional=True, ), Dedent, "END", Ref("ObjectReferenceSegment", optional=True), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE [OR REPLACE] FUNCTION` statement.
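For illustration (example ours): `CREATE OR REPLACE FUNCTION get_count (p_id NUMBER) RETURN NUMBER IS BEGIN RETURN 0; END;`.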
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-FUNCTION-statement.html """ type = "create_function_statement" match_grammar = Sequence( Ref.keyword("CREATE", optional=True), Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), "RETURN", Ref("DatatypeSegment"), Ref("SharingClauseGrammar", optional=True), AnyNumberOf( Ref("DefaultCollationClauseGrammar"), Ref("InvokerRightsClauseGrammar"), Ref("AccessibleByClauseGrammar"), "DETERMINISTIC", "SHARD_ENABLE", Ref("ParallelEnableClauseGrammar"), Ref("ResultCacheClauseGrammar"), Sequence("AGGREGATE", "USING", Ref("ObjectReferenceSegment")), Ref("PipelinedClauseGrammar"), Sequence( "SQL_MACRO", Bracketed( Sequence("TYPE", Ref("RightArrowSegment")), OneOf("SCALAR", "TABLE"), optional=True, ), ), optional=True, ), OneOf("IS", "AS", optional=True), AnyNumberOf(Ref("DeclareSegment"), optional=True), Ref("BeginEndSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class AlterFunctionStatementSegment(BaseSegment): """An `ALTER FUNCTION` or `ALTER PROCEDURE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/ALTER-FUNCTION-statement.html https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/ALTER-PROCEDURE-statement.html """ type = "alter_function_statement" match_grammar = Sequence( "ALTER", OneOf("FUNCTION", "PROCEDURE"), Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), OneOf( Ref("CompileClauseGrammar"), "EDITIONABLE", "NONEDITIONABLE", ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` declaration. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TYPE-statement.html """ type = "create_type_statement" match_grammar = Sequence( Ref.keyword("CREATE", optional=True), Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "TYPE", Ref("IfNotExistsGrammar", optional=True), Ref("TypeReferenceSegment"), Ref.keyword("FORCE", optional=True), Sequence( "OID", Ref("SingleQuotedIdentifierSegment"), Ref("ObjectReferenceSegment"), Ref("SingleQuotedIdentifierSegment"), optional=True, ), Ref("SharingClauseGrammar", optional=True), Ref("DefaultCollationClauseGrammar", optional=True), AnyNumberOf( Ref("InvokerRightsClauseGrammar"), Ref("AccessibleByClauseGrammar"), optional=True, ), OneOf("IS", "AS", optional=True), OneOf( Ref("ObjectTypeAndSubtypeDefGrammar"), Ref("VarrayAndNestedTypeSpecGrammar"), ), ) class TypeReferenceSegment(ObjectReferenceSegment): """A reference to a type.""" type = "type_reference" class CreateTypeBodyStatementSegment(BaseSegment): """A `CREATE TYPE BODY` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TYPE-BODY-statement.html """ type = "create_type_body_statement" match_grammar = Sequence( Ref.keyword("CREATE", optional=True), Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "TYPE", "BODY", Ref("IfNotExistsGrammar", optional=True), Ref("TypeReferenceSegment"), Ref("SharingClauseGrammar", optional=True), OneOf("IS", "AS"), Ref("ElementSpecificationGrammar"), "END", ) class DropTypeStatementSegment(ansi.DropTypeStatementSegment): """A `DROP TYPE` statement. 
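For illustration (examples ours): `DROP TYPE BODY my_type;` or `DROP TYPE my_type FORCE;`.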
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/DROP-TYPE-statement.html """ type = "drop_type_statement" match_grammar: Matchable = ansi.DropTypeStatementSegment.match_grammar.copy( insert=[Ref.keyword("BODY", optional=True)], before=Ref("IfExistsGrammar", optional=True), ).copy( insert=[OneOf("FORCE", "VALIDATE", optional=True)], before=Ref("DropBehaviorGrammar", optional=True), ) class CreatePackageStatementSegment(BaseSegment): """A `CREATE PACKAGE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-PACKAGE-statement.html """ type = "create_package_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "PACKAGE", Ref.keyword("BODY", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("PackageReferenceSegment"), Ref("SharingClauseGrammar", optional=True), AnyNumberOf( Ref("DefaultCollationClauseGrammar"), Ref("InvokerRightsClauseGrammar"), Ref("AccessibleByClauseGrammar"), optional=True, ), OneOf("IS", "AS"), Ref("DeclareSegment"), "END", Ref("PackageReferenceSegment", optional=True), ) class PackageReferenceSegment(ObjectReferenceSegment): """A reference to a package.""" type = "package_reference" class AlterPackageStatementSegment(BaseSegment): """An `ALTER PACKAGE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/ALTER-PACKAGE-statement.html """ type = "alter_package_statement" match_grammar = Sequence( "ALTER", "PACKAGE", Ref("IfExistsGrammar", optional=True), Ref("PackageReferenceSegment"), OneOf(Ref("CompileClauseGrammar"), "EDITIONABLE", "NONEDITIONABLE"), ) class DropPackageStatementSegment(BaseSegment): """A `DROP PACKAGE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/DROP-PACKAGE-statement.html """ type = "drop_package_statement" match_grammar = Sequence( "DROP", "PACKAGE", Ref.keyword("BODY", optional=True), Ref("IfExistsGrammar", optional=True), Ref("PackageReferenceSegment"), ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TRIGGER-statement.html """ type = "create_trigger_statement" match_grammar: Matchable = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), OneOf("EDITIONABLE", "NONEDITIONABLE", optional=True), "TRIGGER", Ref("IfNotExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), Ref("SharingClauseGrammar", optional=True), Ref("DefaultCollationClauseGrammar", optional=True), Sequence( OneOf(OneOf("BEFORE", "AFTER"), Sequence("INSTEAD", "OF"), "FOR"), Ref("DmlEventClauseSegment"), ), Ref("ReferencingClauseSegment", optional=True), Sequence("FOR", "EACH", "ROW", optional=True), Sequence( OneOf("FORWARD", "REVERSE", optional=True), "CROSSEDITION", optional=True ), Sequence( OneOf("FOLLOWS", "PRECEDES"), Delimited(Ref("TriggerReferenceSegment")), optional=True, ), OneOf("ENABLE", "DISABLE", optional=True), Sequence("WHEN", Bracketed(Ref("ExpressionSegment")), optional=True), OneOf(Ref("CompoundTriggerBlock"), Ref("OneOrMoreStatementsGrammar")), Ref.keyword("END", optional=True), Ref("TriggerReferenceSegment", optional=True), ) class DmlEventClauseSegment(BaseSegment): """DML event clause. 
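For illustration (example ours): `INSERT OR UPDATE ON employees`.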
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TRIGGER-statement.html#GUID-AF9E33F1-64D1-4382-A6A4-EC33C36F237B__BABGDFBI """ type = "dml_event_clause" match_grammar: Matchable = Sequence( Ref("DmlGrammar"), AnyNumberOf( Sequence( "OR", Ref("DmlGrammar"), ) ), "ON", Sequence("NESTED", "TABLE", Ref("ColumnReferenceSegment"), "OF", optional=True), Ref("TableReferenceSegment"), ) class ReferencingClauseSegment(BaseSegment): """`REFERENCING` clause. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TRIGGER-statement.html#GUID-AF9E33F1-64D1-4382-A6A4-EC33C36F237B__BABEBAAB """ type = "referencing_clause" match_grammar: Matchable = Sequence( "REFERENCING", AnyNumberOf( Sequence( OneOf("OLD", "NEW", "PARENT"), Ref.keyword("AS", optional=True), Ref("NakedIdentifierSegment"), ) ), ) class CompoundTriggerBlock(BaseSegment): """A compound trigger block. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TRIGGER-statement.html#GUID-AF9E33F1-64D1-4382-A6A4-EC33C36F237B__CJACFCDJ """ type = "compound_trigger_statement" match_grammar: Matchable = Sequence( "COMPOUND", "TRIGGER", Ref("DeclareSegment", optional=True), AnyNumberOf(Ref("TimingPointSectionSegment")), ) class TimingPointSectionSegment(BaseSegment): """A timing point section. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CREATE-TRIGGER-statement.html#GUID-AF9E33F1-64D1-4382-A6A4-EC33C36F237B__GUID-2CD49225-7507-458B-8BDF-21C56AFC3527 """ type = "timing_point_section" match_grammar: Matchable = Sequence( Ref("TimingPointGrammar"), "IS", "BEGIN", Ref("OneOrMoreStatementsGrammar"), Sequence("END", Ref("TimingPointGrammar")), Ref("DelimiterGrammar"), ) class AlterTriggerStatementSegment(BaseSegment): """An `ALTER TRIGGER` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/ALTER-TRIGGER-statement.html """ type = "alter_trigger_statement" match_grammar = Sequence( "ALTER", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), OneOf( Ref("CompileClauseGrammar"), "ENABLE", "DISABLE", Sequence("RENAME", "TO", Ref("FunctionNameSegment")), "EDITIONABLE", "NONEDITIONABLE", ), ) class AssignmentStatementSegment(BaseSegment): """An assignment statement in PL/SQL. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/assignment-statement.html """ type = "assignment_segment_statement" match_grammar = Sequence( AnyNumberOf( Ref("ObjectReferenceSegment"), Bracketed(Ref("ObjectReferenceSegment"), optional=True), Ref("DotSegment", optional=True), Ref("SqlplusVariableGrammar"), optional=True, ), OneOf(Sequence(Ref("ColonSegment"), Ref("EqualsSegment")), "DEFAULT"), Ref("ExpressionSegment"), ) class IfExpressionStatement(BaseSegment): """IF-ELSE statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/IF-statement.html """ type = "if_then_statement" match_grammar = Sequence( Ref("IfClauseSegment"), Ref("OneOrMoreStatementsGrammar"), AnyNumberOf( Sequence( "ELSIF", OneOf( Ref("ExpressionSegment"), Ref("TriggerPredicatesGrammar"), ), "THEN", Ref("OneOrMoreStatementsGrammar"), ), ), Sequence( "ELSE", Ref("OneOrMoreStatementsGrammar"), optional=True, ), "END", "IF", ) class IfClauseSegment(BaseSegment): """IF clause.
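For illustration (example ours): `IF l_count > 0 THEN`.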
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/IF-statement.html """ type = "if_clause" match_grammar = Sequence( "IF", OneOf( Ref("ExpressionSegment"), Ref("TriggerPredicatesGrammar"), ), "THEN", ) class CaseExpressionSegment(BaseSegment): """A `CASE WHEN` clause. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CASE-Expressions.html """ type = "case_expression" match_grammar: Matchable = OneOf( Sequence( "CASE", ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", Ref.keyword("CASE", optional=True), Ref("SingleIdentifierGrammar", optional=True), ), Sequence( "CASE", OneOf( Ref("ExpressionSegment"), Ref("TriggerPredicatesGrammar"), ), ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", Ref.keyword("CASE", optional=True), Ref("SingleIdentifierGrammar", optional=True), ), terminators=[ Ref("ComparisonOperatorGrammar"), Ref("CommaSegment"), Ref("BinaryOperatorGrammar"), ], ) class WhenClauseSegment(BaseSegment): """A 'WHEN' clause for a 'CASE' statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CASE-Expressions.html """ type = "when_clause" match_grammar: Matchable = Sequence( "WHEN", # NOTE: The nested sequence here is to ensure the correct # placement of the meta segments when templated elements # are present. # https://github.com/sqlfluff/sqlfluff/issues/3988 Sequence( ImplicitIndent, OneOf( Ref("ExpressionSegment"), Ref("TriggerPredicatesGrammar"), ), Dedent, ), Conditional(Indent, indented_then=True), "THEN", Conditional(ImplicitIndent, indented_then_contents=True), OneOf(Ref("ExpressionSegment"), Ref("OneOrMoreStatementsGrammar")), Conditional(Dedent, indented_then_contents=True), Conditional(Dedent, indented_then=True), ) class ElseClauseSegment(BaseSegment): """An 'ELSE' clause for a 'CASE' statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CASE-Expressions.html """ type = "else_clause" match_grammar: Matchable = Sequence( "ELSE", ImplicitIndent, OneOf(Ref("ExpressionSegment"), Ref("OneOrMoreStatementsGrammar")), Dedent, ) class NullStatementSegment(BaseSegment): """A `NULL` statement inside a block.""" type = "null_statement" match_grammar = Sequence("NULL") class MergeUpdateClauseSegment(BaseSegment): """`UPDATE` clause within the `MERGE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/MERGE.html#GUID-5692CCB7-24D9-4C0E-81A7-A22436DC968F__BGBBBIDF """ type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", Indent, Ref("SetClauseListSegment"), Dedent, Ref("WhereClauseSegment", optional=True), Ref("ReturningClauseSegment", optional=True), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. 
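For illustration (example ours): `INSERT INTO employees (id, name) VALUES (1, 'Alice') RETURNING id INTO l_id;`.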
https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/INSERT.html """ type = "insert_statement" match_grammar: Matchable = Sequence( "INSERT", Ref.keyword("OVERWRITE", optional=True), "INTO", Ref("TableReferenceSegment"), OneOf( Ref("SelectableGrammar"), Sequence( Ref("BracketedColumnReferenceListGrammar"), Ref("SelectableGrammar"), ), Ref("DefaultValuesGrammar"), Sequence( "VALUES", Ref("SingleIdentifierGrammar"), Bracketed(Ref("SingleIdentifierGrammar"), optional=True), optional=True, ), ), Ref("ReturningClauseSegment", optional=True), ) class ForLoopStatementSegment(BaseSegment): """A `FOR LOOP` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/FOR-LOOP-statement.html """ type = "for_loop_statement" match_grammar: Matchable = Sequence( "FOR", Delimited( Sequence( Ref("SingleIdentifierGrammar"), OneOf("MUTABLE", "IMMUTABLE", optional=True), ) ), "IN", Delimited( Sequence( Ref.keyword("REVERSE", optional=True), OneOf( Ref("IterationSteppedControlGrammar"), Sequence( Ref.keyword("REPEAT", optional=True), Ref("ExpressionSegment") ), Sequence( OneOf("VALUES", "INDICES", "PAIRS"), "OF", Ref("SingleIdentifierGrammar"), ), Bracketed(Ref("SelectStatementSegment")), ), Sequence("WHILE", Ref("ExpressionSegment"), optional=True), Sequence("WHEN", Ref("ExpressionSegment"), optional=True), ) ), Ref("LoopStatementSegment"), ) class WhileLoopStatementSegment(BaseSegment): """A `WHILE LOOP` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/WHILE-LOOP-statement.html """ type = "while_loop_statement" match_grammar: Matchable = Sequence( "WHILE", Ref("ExpressionSegment"), Ref("LoopStatementSegment"), ) class LoopStatementSegment(BaseSegment): """A `LOOP` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/loop-statements.html """ type = "loop_statement" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar", optional=True), "LOOP", Ref("OneOrMoreStatementsGrammar"), "END", "LOOP", Ref("SingleIdentifierGrammar", optional=True), ) class ForAllStatementSegment(BaseSegment): """A `FORALL` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/FORALL-statement.html """ type = "forall_statement" match_grammar = Sequence( "FORALL", Ref("NakedIdentifierSegment"), "IN", OneOf( Ref("IterationSteppedControlGrammar"), Sequence("VALUES", "OF", Ref("SingleIdentifierGrammar")), ), Sequence("SAVE", "EXCEPTIONS", optional=True), OneOf( Ref("DeleteStatementSegment"), Ref("InsertStatementSegment"), Ref("SelectStatementSegment"), Ref("UpdateStatementSegment"), ), ) class OpenStatementSegment(BaseSegment): """An `OPEN` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/OPEN-statement.html """ type = "open_statement" match_grammar = Sequence( "OPEN", Ref("SingleIdentifierGrammar"), Ref("FunctionContentsSegment", optional=True), ) class CloseStatementSegment(BaseSegment): """A `CLOSE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CLOSE-statement.html """ type = "close_statement" match_grammar = Sequence( "CLOSE", OneOf(Ref("SingleIdentifierGrammar"), Ref("SqlplusVariableGrammar")), ) class OpenForStatementSegment(BaseSegment): """An `OPEN FOR` statement. 
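For illustration (example ours): `OPEN l_cursor FOR SELECT * FROM employees;`.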
https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/OPEN-FOR-statement.html """ type = "open_for_statement" match_grammar = Sequence( "OPEN", OneOf(Ref("SingleIdentifierGrammar"), Ref("SqlplusVariableGrammar")), "FOR", OneOf( Ref("SingleQuotedIdentifierSegment"), Ref("SelectStatementSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "USING", Delimited( Sequence( OneOf("IN", "OUT", Sequence("IN", "OUT"), optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("SingleQuotedIdentifierSegment"), ), ), optional=True, ), optional=True, ), ) class FetchStatementSegment(BaseSegment): """A `FETCH` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/FETCH-statement.html """ type = "fetch_statement" match_grammar = Sequence( "FETCH", OneOf(Ref("SingleIdentifierGrammar"), Ref("SqlplusVariableGrammar")), OneOf( Ref("IntoClauseSegment"), Sequence( Ref("BulkCollectIntoClauseSegment"), Sequence( "LIMIT", OneOf(Ref("NumericLiteralSegment"), Ref("SingleIdentifierGrammar")), optional=True, ), ), ), ) class IntoClauseSegment(BaseSegment): """Into Clause Segment. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/RETURNING-INTO-clause.html#GUID-38F735B9-1100-45AF-AE71-18FB74A899BE__CJAJDJHC """ type = "into_clause" match_grammar = Sequence( "INTO", Delimited(Ref("SingleIdentifierGrammar")), ) class BulkCollectIntoClauseSegment(BaseSegment): """A `BULK COLLECT INTO` Clause Segment. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/RETURNING-INTO-clause.html#GUID-38F735B9-1100-45AF-AE71-18FB74A899BE__CJAIAGHJ """ type = "bulk_collect_into_clause" match_grammar = Sequence( "BULK", "COLLECT", "INTO", Delimited(OneOf(Ref("SingleIdentifierGrammar"), Ref("SqlplusVariableGrammar"))), ) class ExitStatementSegment(BaseSegment): """An `EXIT` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/EXIT-statement.html """ type = "exit_statement" match_grammar = Sequence( "EXIT", Ref("SingleIdentifierGrammar", optional=True), Sequence("WHEN", Ref("ExpressionSegment"), optional=True), ) class ContinueStatementSegment(BaseSegment): """A `CONTINUE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/CONTINUE-statement.html """ type = "continue_statement" match_grammar = Sequence( "CONTINUE", Ref("SingleIdentifierGrammar", optional=True), Sequence("WHEN", Ref("ExpressionSegment"), optional=True), ) class RaiseStatementSegment(BaseSegment): """A `RAISE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/RAISE-statement.html """ type = "raise_statement" match_grammar = Sequence( "RAISE", Ref("SingleIdentifierGrammar", optional=True), ) class ReturnStatementSegment(BaseSegment): """A RETURN statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/RETURN-statement.html """ type = "return_statement" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment", optional=True), ) class CreateUserStatementSegment(BaseSegment): """A `CREATE USER` statement. 
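For illustration (example ours): `CREATE USER app_user IDENTIFIED BY app_secret PASSWORD EXPIRE;`.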
https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CREATE-USER.html """ type = "create_user_statement" match_grammar: Matchable = Sequence( "CREATE", "USER", Ref("IfNotExistsGrammar", optional=True), Ref("RoleReferenceSegment"), OneOf( Sequence( "IDENTIFIED", OneOf( Sequence( "BY", Ref("SingleIdentifierGrammar"), Sequence( Ref.keyword("HTTP", optional=True), "DIGEST", OneOf("ENABLE", "DISABLE"), optional=True, ), ), Sequence( OneOf("EXTERNALLY", "GLOBALLY"), Sequence( "AS", OneOf( Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), ), optional=True, ), ), ), ), Sequence("NO", "AUTHENTICATION"), ), AnyNumberOf( Ref("DefaultCollationClauseGrammar"), Sequence( OneOf( Sequence("DEFAULT", "TABLESPACE"), Sequence( Ref.keyword("LOCAL", optional=True), "TEMPORARY", "TABLESPACE" ), Sequence( "QUOTA", OneOf(Ref("SizeClauseGrammar"), "UNLIMITED"), "ON" ), "PROFILE", ), Ref("ObjectReferenceSegment"), ), Sequence("PASSWORD", "EXPIRE"), Sequence("ACCOUNT", OneOf("LOCK", "UNLOCK")), Sequence("ENABLE", "EDITIONS"), Sequence("CONTAINER", Ref("EqualsSegment"), OneOf("CURRENT", "ALL")), Sequence("READ", OneOf("ONLY", "WRITE")), ), ) class ReturningClauseSegment(BaseSegment): """A `RETURNING` clause. https://docs.oracle.com/en/database/oracle/oracle-database/23/lnpls/RETURNING-INTO-clause.html """ type = "returning_clause" match_grammar: Matchable = Sequence( OneOf("RETURNING", "RETURN"), Delimited( Sequence( OneOf("OLD", "NEW", optional=True), Ref("SingleIdentifierGrammar"), ), ), OneOf(Ref("IntoClauseSegment"), Ref("BulkCollectIntoClauseSegment")), ) class UpdateStatementSegment(ansi.UpdateStatementSegment): """An `UPDATE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/UPDATE.html """ match_grammar: Matchable = ansi.UpdateStatementSegment.match_grammar.copy( insert=[Ref("ReturningClauseSegment", optional=True)] ) class DeleteStatementSegment(ansi.DeleteStatementSegment): """A `DELETE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/DELETE.html """ match_grammar: Matchable = ansi.DeleteStatementSegment.match_grammar.copy( insert=[Ref("ReturningClauseSegment", optional=True)] ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_postgres.py """The PostgreSQL dialect.""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, CompositeComparisonOperatorSegment, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralKeywordSegment, LiteralSegment, Matchable, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WhitespaceSegment, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_postgres_keywords import ( get_keywords, postgres_keywords, postgres_postgis_datatype_keywords, ) ansi_dialect = load_raw_dialect("ansi") postgres_dialect = ansi_dialect.copy_as( "postgres", formatted_name="PostgreSQL", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, Identifiers: ``""``. This is based around the `PostgreSQL spec`_. Many other SQL dialects are often based on the PostgreSQL syntax. If you're running an unsupported dialect, then this is often the dialect to use (until someone makes a specific dialect). ..
_`PostgreSQL spec`: https://www.postgresql.org/docs/current/reference.html""", ) postgres_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ StringLexer("right_arrow", "=>", CodeSegment), ], before="equals", ) postgres_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ # Explanation for the regex # - (?s) Switch - .* includes newline characters # - U& - must start with U& # - '([^']|'')*' # ' Begin single quote # ([^']|'')* Any number of non-single quote # characters or two single quotes # ' End single quote # - (\s*UESCAPE\s*'[^0-9A-Fa-f'+\-\s)]')? # \s*UESCAPE\s* Whitespace, followed by UESCAPE, # followed by whitespace # '[^0-9A-Fa-f'+\-\s)]' Any character that isn't A-F, a-f, # 0-9, +-, or whitespace, in quotes # ? This last block is optional RegexLexer( "unicode_single_quote", r"(?si)U&'([^']|'')*'(\s*UESCAPE\s*'[^0-9A-Fa-f'+\-\s)]')?", CodeSegment, ), # This is similar to the Unicode regex, the key differences being: # - E - must start with E # - The final quote character must be preceded by an even number of # backslashes and an even number of single quotes (i.e. the closing # quote must not itself be escaped) RegexLexer( "escaped_single_quote", r"(?s)E(('')+?(?!')|'.*?((?<!\\)(?:\\\\)*(?<!')(?:'')*|(?<!\\)(?:\\\\)*\\(?<!')(?:'')*')'(?!'))", CodeSegment, ), # Double quote Unicode string cannot be empty, and has no single quote escapes RegexLexer( "unicode_double_quote", r'(?s)U&".+?"(\s*UESCAPE\s*\'[^0-9A-Fa-f\'+\-\s)]\')?', CodeSegment, ), RegexLexer( "json_operator", r"->>?|#>>?|@[>@?]|<@|\?[|&]?|#-", SymbolSegment, ), # L2 nearest neighbor (<->), # inner product (<#>), # cosine distance (<=>), # and L1 distance (<+>) RegexLexer( "pgvector_operator", r"<->|<#>|<=>|<\+>", SymbolSegment, ), # r"|".join( # re.escape(operator) # for operator in [ # "&&&", # "&<|", # "<<|", # "@", # "|&>", # "|>>", # "~=", # "<->", # "|=|", # "<#>", # "<<->>", # "<<#>>", # ] # ) RegexLexer( "postgis_operator", r"\&\&\&|\&<\||<<\||@|\|\&>|\|>>|\~=|<\->|\|=\||<\#>|<<\->>|<<\#>>", SymbolSegment, ), StringLexer("at", "@", CodeSegment), # https://www.postgresql.org/docs/current/sql-syntax-lexical.html RegexLexer( "bit_string_literal", # binary (e.g. b'1001') or hex (e.g. X'1FF') r"[bBxX]'[0-9a-fA-F]*'", CodeSegment, ), StringLexer("full_text_search_operator", "!!", SymbolSegment), ], before="like_operator", ) postgres_dialect.insert_lexer_matchers( [ # Explanation for the regex # \\([^(\\\r\n)])+((\\\\)|(?=\n)|(?=\r\n))? # \\ Starts with backslash # ([^\\\r\n])+ Anything that is not a newline or a # backslash # ( # (\\\\) Double backslash # | OR # (?=\n) The next character is a newline # | OR # (?=\r\n) The next 2 characters are a carriage # return and a newline # ) # ? The previous clause is optional RegexLexer( # For now we'll just treat meta syntax like comments and so just ignore # them. In future we may want to enhance this to actually parse them to # ensure they are valid meta commands. "meta_command", r"\\(?!gset|gexec)([^\\\r\n])+((\\\\)|(?=\n)|(?=\r\n))?", CommentSegment, ), RegexLexer( # pg_stat_statements which is an official postgres extension used for # storing the query logs replaces the actual literals used in the # query with $n where n is integer value. This grammar is for parsing # those literals. # ref: https://www.postgresql.org/docs/current/pgstatstatements.html "dollar_numeric_literal", r"\$\d+", LiteralSegment, ), RegexLexer( # For now we'll just treat meta syntax like comments and so just ignore # them. In future we may want to enhance this to actually parse them to # ensure they are valid meta commands.
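# (Illustrative psql inputs these meta-command lexers target, examples ours: # \d my_table, \timing on, and SELECT 1 \gset -- the \gset/\gexec forms are # what this query-buffer variant below captures.)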
"meta_command_query_buffer", r"\\([^\\\r\n])+((\\g(set|exec))|(?=\n)|(?=\r\n))?", SymbolSegment, ), ], before="word", # Final thing to search for - as psql specific ) postgres_dialect.insert_lexer_matchers( [ StringLexer("walrus_operator", ":=", CodeSegment), ], before="equals", ) postgres_dialect.patch_lexer_matchers( [ # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # In Postgres, the only escape character is ' for single quote strings RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^']|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), # In Postgres, the escape character is "" for double quote strings RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), # Patching block comments to account for nested blocks. # N.B. this syntax is only possible via the non-standard-library # (but still backwards compatible) `regex` package. # https://pypi.org/project/regex/ # Pattern breakdown: # /\* Match opening slash. # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # [^*/]+ Non forward-slash or asterisk characters. # |\*(?!\/) Negative lookahead assertion to match # asterisks not followed by a forward-slash. # |/[^*] Match lone forward-slashes not followed by an asterisk. # )* Match any number of the atomic group contents. # (?> # (?R) Recursively match the block comment pattern # to match nested block comments. # (?> # [^*/]+ # |\*(?!\/) # |/[^*] # )* # )* # \*/ Match closing slash. RegexLexer( "block_comment", r"/\*(?>[^*/]+|\*(?!\/)|/[^*])*(?>(?R)(?>[^*/]+|\*(?!\/)|/[^*])*)*\*/", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer("word", r"[a-zA-Z_][0-9a-zA-Z_$]*", WordSegment), ] ) postgres_dialect.sets("reserved_keywords").clear() postgres_dialect.sets("reserved_keywords").update( get_keywords(postgres_keywords, "reserved") ) postgres_dialect.sets("unreserved_keywords").update( get_keywords(postgres_keywords, "non-reserved") ) postgres_dialect.sets("reserved_keywords").difference_update( get_keywords(postgres_keywords, "not-keyword") ) postgres_dialect.sets("unreserved_keywords").difference_update( get_keywords(postgres_keywords, "not-keyword") ) # Add datetime units postgres_dialect.sets("datetime_units").update( [ "CENTURY", "DECADE", "DOW", "DOY", "EPOCH", "ISODOW", "ISOYEAR", "MICROSECONDS", "MILLENNIUM", "MILLISECONDS", "TIMEZONE", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", ] ) # Set the bare functions postgres_dialect.sets("bare_functions").update( [ "CURRENT_TIMESTAMP", "CURRENT_TIME", "CURRENT_DATE", "LOCALTIME", "LOCALTIMESTAMP", "CURRENT_CATALOG", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_USER", "SESSION_USER", "SYSTEM_USER", "USER", ] ) # Postgres doesn't have a dateadd function # Also according to https://www.postgresql.org/docs/14/functions-datetime.html # It quotes dateparts. So don't need this. 
postgres_dialect.sets("date_part_function_name").clear() # In Postgres, UNNEST() returns a "value table", similar to BigQuery postgres_dialect.sets("value_table_functions").update(["UNNEST", "GENERATE_SERIES"]) postgres_dialect.add( JsonOperatorSegment=TypedParser( "json_operator", SymbolSegment, type="binary_operator" ), PostgisOperatorSegment=TypedParser( "postgis_operator", SymbolSegment, type="binary_operator" ), PgvectorOperatorSegment=TypedParser( "pgvector_operator", SymbolSegment, type="binary_operator" ), SimpleGeometryGrammar=AnyNumberOf(Ref("NumericLiteralSegment")), # N.B. this MultilineConcatenateDelimiterGrammar is only created # to parse multiline-concatenated string literals # and shouldn't be used in other contexts. # In general let the parser handle newlines and whitespace. MultilineConcatenateNewline=TypedParser( "newline", NewlineSegment, type="newline", ), MultilineConcatenateDelimiterGrammar=AnyNumberOf( Ref("MultilineConcatenateNewline"), min_times=1, allow_gaps=False ), # Add a Full equivalent which also allow keywords NakedIdentifierFullSegment=TypedParser( "word", IdentifierSegment, type="naked_identifier_all", ), PropertiesNakedIdentifierSegment=TypedParser( # allows reserved keywords "word", CodeSegment, type="properties_naked_identifier", ), SingleIdentifierFullGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("NakedIdentifierFullSegment"), ), DefinitionArgumentValueGrammar=OneOf( # This comes from def_arg: # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6331 # TODO: this list is incomplete Ref("LiteralGrammar"), # This is a gross simplification of the grammar, which seems overly # permissive for the actual use cases here. Grammar says this matches # reserved keywords. Plus also unreserved keywords and IDENT: func_type --> # Typename --> SimpleTypename --> GenericType --> type_function_name --> # { unreserved_keyword | type_func_name_keyword | IDENT } # We'll just match any normal code/keyword string here to keep it simple. 
Ref("PropertiesNakedIdentifierSegment"), ), CascadeRestrictGrammar=OneOf("CASCADE", "RESTRICT"), ExtendedTableReferenceGrammar=OneOf( Ref("TableReferenceSegment"), Sequence("ONLY", OptionallyBracketed(Ref("TableReferenceSegment"))), Sequence(Ref("TableReferenceSegment"), Ref("StarSegment")), ), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), OnKeywordAsIdentifierSegment=StringParser( "ON", IdentifierSegment, type="naked_identifier" ), DollarNumericLiteralSegment=TypedParser( "dollar_numeric_literal", LiteralSegment, type="dollar_numeric_literal" ), ForeignDataWrapperGrammar=Sequence("FOREIGN", "DATA", "WRAPPER"), OptionsListGrammar=Sequence( Delimited(Ref("NakedIdentifierFullSegment"), Ref("QuotedLiteralSegment")) ), OptionsGrammar=Sequence( "OPTIONS", Bracketed(AnyNumberOf(Ref("OptionsListGrammar"))) ), CreateUserMappingGrammar=Sequence("CREATE", "USER", "MAPPING"), SessionInformationUserFunctionsGrammar=OneOf( "USER", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER" ), ImportForeignSchemaGrammar=Sequence("IMPORT", "FOREIGN", "SCHEMA"), CreateForeignTableGrammar=Sequence("CREATE", "FOREIGN", "TABLE"), IntervalUnitsGrammar=OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), MetaCommandQueryBufferSegment=TypedParser( "meta_command_query_buffer", SymbolSegment, type="meta_command" ), FullTextSearchOperatorSegment=TypedParser( "full_text_search_operator", LiteralSegment, type="full_text_search_operator" ), JsonTypeGrammar=OneOf("VALUE", "SCALAR", "ARRAY", "OBJECT"), JsonUniqueKeysGrammar=Sequence( OneOf("WITH", "WITHOUT"), "UNIQUE", Sequence("KEYS", optional=True), ), JsonTestGrammar=Sequence( "JSON", Ref("JsonTypeGrammar", optional=True), Ref("JsonUniqueKeysGrammar", optional=True), ), ) postgres_dialect.replace( LikeGrammar=OneOf("LIKE", "ILIKE", Sequence("SIMILAR", "TO")), StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment"), "COLLATE"), IsClauseGrammar=OneOf( Ref("NullLiteralSegment"), Ref("NanLiteralSegment"), Ref("UnknownLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NormalizedGrammar"), Ref("JsonTestGrammar"), ), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), Ref("OverlapSegment"), Ref("NotExtendRightSegment"), Ref("NotExtendLeftSegment"), Ref("AdjacentSegment"), Ref("PostgisOperatorSegment"), Ref("PgvectorOperatorSegment"), ), NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( # Can’t begin with $ or digits, # must only contain digits, letters, underscore or $ r"[A-Z_][A-Z0-9_$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.lower, ) ), Expression_C_Grammar=Sequence( Ref("WalrusOperatorSegment", optional=True), OneOf( ansi_dialect.get_grammar("Expression_C_Grammar"), Sequence( Ref("FullTextSearchOperatorSegment", optional=True), Ref("ShorthandCastSegment"), ), ), ), ParameterNameSegment=RegexParser( r'[A-Z_][A-Z0-9_$]*|"[^"]*"', CodeSegment, type="parameter" ), FunctionNameIdentifierSegment=RegexParser( r"[A-Z_][A-Z0-9_$]*", CodeSegment, type="function_name_identifier", ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), 
Ref("NamedArgumentSegment"), ), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), OptionallyBracketed(Ref("SetExpressionSegment")), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function # https://www.postgresql.org/docs/current/functions-string.html Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), AnySetOf( Sequence("FROM", Ref("ExpressionSegment")), Sequence("FOR", Ref("ExpressionSegment")), optional=True, ), ), # VARIADIC function call argument # https://www.postgresql.org/docs/current/xfunc-sql.html#XFUNC-SQL-VARIADIC-FUNCTIONS Sequence("VARIADIC", Ref("ExpressionSegment")), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "AggregateOrderByClause" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. Sequence(Ref.keyword("SEPARATOR"), Ref("LiteralGrammar")), # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ), Ref("IgnoreRespectNullsGrammar"), Ref("IndexColumnDefinitionSegment"), Ref("EmptyStructLiteralSegment"), Delimited( Sequence( Ref("ExpressionSegment"), OneOf("VALUE", Ref("ColonSegment")), Ref("ExpressionSegment"), ) ), ), QuotedLiteralSegment=OneOf( # Postgres allows newline-concatenated string literals (#1488). # Since these string literals can have comments between them, # we use grammar to handle this. 
# Note we CANNOT use Delimited as it's greedy and swallows the # last Newline - see #2495 Sequence( OneOf( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), TypedParser( "escaped_single_quote", LiteralSegment, type="quoted_literal", ), ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), ), ), Sequence( TypedParser( "bit_string_literal", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), RegexParser( r"(?i)'[0-9a-f]*'", LiteralSegment, type="quoted_literal", ), ), ), Sequence( TypedParser( "unicode_single_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), RegexParser( r"'([^']|'')*'", LiteralSegment, type="quoted_literal", ), ), Sequence( "UESCAPE", RegexParser( r"'[^0-9A-Fa-f'+\-\s)]'", CodeSegment, "unicode_escape_value" ), optional=True, ), ), Delimited( TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), ), ), QuotedIdentifierSegment=OneOf( TypedParser("double_quote", IdentifierSegment, type="quoted_identifier"), TypedParser( "unicode_double_quote", IdentifierSegment, type="quoted_identifier" ), ), PostFunctionGrammar=AnyNumberOf( Ref("WithinGroupClauseSegment"), Ref("OverClauseSegment"), # Filter clause supported by both Postgres and SQLite Ref("FilterClauseGrammar"), ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add JSON operators Ref("JsonOperatorSegment"), ), FunctionParameterGrammar=Sequence( OneOf("IN", "OUT", "INOUT", "VARIADIC", optional=True), OneOf( Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), OneOf("IN", "OUT", "INOUT", "VARIADIC", optional=True), OneOf(Ref("DatatypeSegment"), Ref("ColumnTypeReferenceSegment")), ), ), Sequence( OneOf("DEFAULT", Ref("EqualsSegment"), Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), optional=True, ), ), FrameClauseUnitGrammar=OneOf("RANGE", "ROWS", "GROUPS"), # Postgres supports the non-standard ISNULL and NOTNULL comparison operators.
    # https://www.postgresql.org/docs/14/functions-comparison.html
    IsNullGrammar=Ref.keyword("ISNULL"),
    NotNullGrammar=Ref.keyword("NOTNULL"),
    PreTableFunctionKeywordsGrammar=OneOf("LATERAL"),
    ConditionalCrossJoinKeywordsGrammar=Nothing(),
    UnconditionalCrossJoinKeywordsGrammar=Ref.keyword("CROSS"),
    SelectClauseTerminatorGrammar=OneOf(
        "INTO",
        "FROM",
        "WHERE",
        Sequence("ORDER", "BY"),
        "LIMIT",
        "RETURNING",
        Ref("CommaSegment"),
        Ref("SetOperatorSegment"),
        Ref("MetaCommandQueryBufferSegment"),
    ),
    LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy(
        insert=[
            Ref("DollarNumericLiteralSegment"),
            Ref("PsqlVariableGrammar"),
        ],
        before=Ref("ArrayLiteralSegment"),
    ),
    FromClauseTerminatorGrammar=ansi_dialect.get_grammar(
        "FromClauseTerminatorGrammar"
    ).copy(
        insert=[Ref("ForClauseSegment")],
    ),
    WhereClauseTerminatorGrammar=OneOf(
        "LIMIT",
        Sequence("GROUP", "BY"),
        Sequence("ORDER", "BY"),
        "HAVING",
        "QUALIFY",
        "WINDOW",
        "OVERLAPS",
        "RETURNING",
        Sequence("ON", "CONFLICT"),
        Ref("ForClauseSegment"),
    ),
    OrderByClauseTerminators=OneOf(
        "LIMIT",
        "HAVING",
        "QUALIFY",
        # For window functions
        "WINDOW",
        Ref("FrameClauseUnitGrammar"),
        "SEPARATOR",
        Sequence("WITH", "DATA"),
        Ref("ForClauseSegment"),
    ),
    AccessorGrammar=AnyNumberOf(
        Ref("ArrayAccessorSegment"),
        # Add in semi structured expressions
        Ref("SemiStructuredAccessorSegment"),
    ),
    # PostgreSQL supports the non-standard "RETURNING" keyword, and therefore the
    # INSERT/UPDATE/DELETE statements can also be used in subqueries.
    NonWithSelectableGrammar=OneOf(
        Ref("SetExpressionSegment"),
        OptionallyBracketed(Ref("SelectStatementSegment")),
        Ref("NonSetSelectableGrammar"),
        # moved from NonWithNonSelectableGrammar:
        Ref("UpdateStatementSegment"),
        Ref("InsertStatementSegment"),
        Ref("DeleteStatementSegment"),
    ),
    NonWithNonSelectableGrammar=OneOf(),
    # https://www.postgresql.org/docs/current/functions-datetime.html
    DateTimeLiteralGrammar=Sequence(
        OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"),
        TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"),
        Sequence(
            Ref("IntervalUnitsGrammar"),
            Sequence("TO", Ref("IntervalUnitsGrammar"), optional=True),
        ),
    ),
    BracketedSetExpressionGrammar=Bracketed(Ref("SetExpressionSegment")),
    ReferentialActionGrammar=OneOf(
        "CASCADE",
        Sequence(
            "SET",
            OneOf("DEFAULT", "NULL"),
            Bracketed(
                Delimited(Ref("ColumnReferenceSegment")),
                optional=True,
            ),
        ),
        "RESTRICT",
        Sequence("NO", "ACTION"),
    ),
    UnknownLiteralSegment=StringParser(
        "UNKNOWN", LiteralKeywordSegment, type="null_literal"
    ),
    NormalizedGrammar=Sequence(
        OneOf("NFC", "NFD", "NFKC", "NFKD", optional=True),
        "NORMALIZED",
    ),
)


class OverlapSegment(CompositeComparisonOperatorSegment):
    """Overlaps range operator."""

    match_grammar = Sequence(
        Ref("AmpersandSegment"), Ref("AmpersandSegment"), allow_gaps=False
    )


class NotExtendRightSegment(CompositeComparisonOperatorSegment):
    """Not extend right range operator."""

    match_grammar = Sequence(
        Ref("AmpersandSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False
    )


class NotExtendLeftSegment(CompositeComparisonOperatorSegment):
    """Not extend left range operator."""

    match_grammar = Sequence(
        Ref("AmpersandSegment"), Ref("RawLessThanSegment"), allow_gaps=False
    )


class AdjacentSegment(CompositeComparisonOperatorSegment):
    """Adjacent range operator."""

    match_grammar = Sequence(
        Ref("MinusSegment"), Ref("PipeSegment"), Ref("MinusSegment"), allow_gaps=False
    )


class PsqlVariableGrammar(BaseSegment):
    """PSQL variables :thing, :'thing', :"thing"."""

    type = "psql_variable"

    match_grammar = Sequence(
        OptionallyBracketed(
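            # For illustration (hypothetical names): this aims to match psql
            # substitution variables such as :myvar, :'myvar' and :"myvar".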
Ref("ColonSegment"), OneOf( Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), ), ) ) class ArrayAccessorSegment(ansi.ArrayAccessorSegment): """Overwrites Array Accessor in ANSI to allow n many consecutive brackets. Postgres can also have array access like python [:2] or [2:] so numbers on either side of the slice segment are optional. """ match_grammar = Bracketed( OneOf( # These three are for a single element access: [n] Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), # This is for slice access: [n:m], [:m], [n:], and [:] Sequence( OneOf( Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), optional=True, ), Ref("SliceSegment"), OneOf( Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), optional=True, ), ), ), bracket_type="square", ) class DateTimeTypeIdentifier(BaseSegment): """Date Time Type.""" type = "datetime_type_identifier" match_grammar = OneOf( "DATE", Ref("TimeWithTZGrammar"), Sequence( OneOf("INTERVAL", "TIMETZ", "TIMESTAMPTZ"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), ) class DateTimeLiteralGrammar(BaseSegment): """Literal Date Time.""" type = "datetime_literal" match_grammar = Sequence( Ref("DateTimeTypeIdentifier", optional=True), Ref("QuotedLiteralSegment"), ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. """ match_grammar = Sequence( # Some dialects allow optional qualification of data types with schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), OneOf( Ref("WellKnownTextGeometrySegment"), Ref("DateTimeTypeIdentifier"), Ref("StructTypeSegment"), Ref("MapTypeSegment"), Sequence( OneOf( # numeric types "SMALLINT", "INTEGER", "INT", "INT2", "INT4", "INT8", "BIGINT", "FLOAT4", "FLOAT8", "REAL", Sequence("DOUBLE", "PRECISION"), "SMALLSERIAL", "SERIAL", "SERIAL2", "SERIAL4", "SERIAL8", "BIGSERIAL", # numeric types [(precision)] Sequence( OneOf("FLOAT"), Ref("BracketedArguments", optional=True), ), # numeric types [precision ["," scale])] Sequence( OneOf("DECIMAL", "NUMERIC"), Ref("BracketedArguments", optional=True), ), # monetary type "MONEY", # character types OneOf( Sequence( OneOf( "BPCHAR", "CHAR", # CHAR VARYING is not documented, but it's # in the real grammar: # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L14262 Sequence("CHAR", "VARYING"), "CHARACTER", Sequence("CHARACTER", "VARYING"), "VARCHAR", ), Ref("BracketedArguments", optional=True), ), "TEXT", ), # binary type "BYTEA", # boolean types OneOf("BOOLEAN", "BOOL"), # geometric types OneOf("POINT", "LINE", "LSEG", "BOX", "PATH", "POLYGON", "CIRCLE"), # network address types OneOf("CIDR", "INET", "MACADDR", "MACADDR8"), # text search types OneOf("TSVECTOR", "TSQUERY"), # bit string types Sequence( "BIT", OneOf("VARYING", optional=True), Ref("BracketedArguments", optional=True), ), # uuid type "UUID", # xml type "XML", # json types OneOf("JSON", "JSONB"), # range types "INT4RANGE", "INT8RANGE", "NUMRANGE", "TSRANGE", "TSTZRANGE", "DATERANGE", # pg_lsn type "PG_LSN", # pgvector types Sequence( "VECTOR", Ref("BracketedArguments", optional=True), ), ), ), # user defined data types Ref("DatatypeIdentifierSegment"), ), # array types OneOf( AnyNumberOf( Bracketed( Ref("ExpressionSegment", optional=True), bracket_type="square" ) ), Ref("ArrayTypeSegment"), 
Ref("SizedArrayTypeSegment"), optional=True, ), ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Ref.keyword("ARRAY") class IndexAccessMethodSegment(BaseSegment): """Index access method (e.g. `USING gist`).""" type = "index_access_method" match_grammar = Ref("SingleIdentifierGrammar") class OperatorClassReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an operator class.""" type = "operator_class_reference" class DefinitionParameterSegment(BaseSegment): """A single definition parameter. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6320 """ type = "definition_parameter" match_grammar: Matchable = Sequence( Ref("PropertiesNakedIdentifierSegment"), Sequence( Ref("EqualsSegment"), # could also contain ParameterNameSegment: Ref("DefinitionArgumentValueGrammar"), optional=True, ), ) class DefinitionParametersSegment(BaseSegment): """List of definition parameters. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6313 """ type = "definition_parameters" match_grammar: Matchable = Bracketed( Delimited( Ref("DefinitionParameterSegment"), ) ) class CreateCastStatementSegment(ansi.CreateCastStatementSegment): """A `CREATE CAST` statement. https://www.postgresql.org/docs/15/sql-createcast.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8951 """ match_grammar: Matchable = Sequence( "CREATE", "CAST", Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), OneOf( Sequence( "WITH", "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), Sequence("WITHOUT", "FUNCTION"), Sequence("WITH", "INOUT"), ), OneOf( Sequence("AS", "ASSIGNMENT", optional=True), Sequence("AS", "IMPLICIT", optional=True), optional=True, ), ) class DropCastStatementSegment(ansi.DropCastStatementSegment): """A `DROP CAST` statement. https://www.postgresql.org/docs/15/sql-dropcast.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8995 """ match_grammar: Matchable = Sequence( "DROP", "CAST", Ref("IfExistsGrammar", optional=True), Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), Ref("DropBehaviorGrammar", optional=True), ) class DropAggregateStatementSegment(BaseSegment): """A `DROP AGGREGATE` statement. https://www.postgresql.org/docs/15/sql-dropaggregate.html """ type = "drop_aggregate_statement" match_grammar: Matchable = Sequence( "DROP", "AGGREGATE", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("ObjectReferenceSegment"), OneOf( Ref("FunctionParameterListGrammar"), # TODO: Is this too permissive? Anything(), Ref("StarSegment"), ), ), ), Ref("DropBehaviorGrammar", optional=True), ) class CreateAggregateStatementSegment(BaseSegment): """A `CREATE AGGREGATE` statement. https://www.postgresql.org/docs/16/sql-createaggregate.html """ type = "create_aggregate_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "AGGREGATE", Ref("ObjectReferenceSegment"), Bracketed( # TODO: Is this too permissive? Anything(), ), Ref("FunctionParameterListGrammar"), ) class AlterAggregateStatementSegment(BaseSegment): """A `ALTER AGGREGATE` statement. 
https://www.postgresql.org/docs/current/sql-alteraggregate.html """ type = "alter_aggregate_statement" match_grammar: Matchable = Sequence( "ALTER", "AGGREGATE", Ref("ObjectReferenceSegment"), Bracketed( OneOf( Ref("FunctionParameterListGrammar"), Anything(), Ref("StarSegment"), ) ), OneOf( Sequence( "RENAME", "TO", Ref("FunctionNameSegment"), ), Sequence( "OWNER", "TO", OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", Ref("RoleReferenceSegment"), ), ), Sequence( "SET", "SCHEMA", Ref("SchemaReferenceSegment"), ), ), ) class RelationOptionSegment(BaseSegment): """Relation option element from reloptions. It is very similar to DefinitionParameterSegment except that it allows qualified names (e.g. namespace.attr = 5). https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3016-L3035 """ type = "relation_option" match_grammar: Matchable = Sequence( Ref("PropertiesNakedIdentifierSegment"), Sequence( Ref("DotSegment"), Ref("PropertiesNakedIdentifierSegment"), optional=True, ), Sequence( Ref("EqualsSegment"), # could also contain ParameterNameSegment: Ref("DefinitionArgumentValueGrammar"), optional=True, ), ) class RelationOptionsSegment(BaseSegment): """List of relation options. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3003-L3014 """ type = "relation_options" match_grammar: Matchable = Bracketed( Delimited( Ref("RelationOptionSegment"), ) ) class CreateFunctionStatementSegment(ansi.CreateFunctionStatementSegment): """A `CREATE FUNCTION` statement. This version in the ANSI dialect should be a "common subset" of the structure of the code for those dialects. postgres: https://www.postgresql.org/docs/13/sql-createfunction.html """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("DatatypeSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ) ), optional=True, ), Sequence( "SETOF", Ref("DatatypeSegment"), ), Ref("DatatypeSegment"), ), optional=True, ), Ref("FunctionDefinitionGrammar"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement. As per the specification: https://www.postgresql.org/docs/14/sql-dropfunction.html """ type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), Ref("DropBehaviorGrammar", optional=True), ) class AlterFunctionStatementSegment(BaseSegment): """A `ALTER FUNCTION` statement. 
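    For illustration, the sort of statement this aims to match (example from
    the Postgres docs; names are placeholders):

        ALTER FUNCTION sqrt(integer) RENAME TO square_root;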
As per the specification: https://www.postgresql.org/docs/14/sql-alterfunction.html """ type = "alter_function_statement" match_grammar = Sequence( "ALTER", "FUNCTION", Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), OneOf( Ref("AlterFunctionActionSegment", optional=True), Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class AlterFunctionActionSegment(BaseSegment): """Alter Function Action Segment. https://www.postgresql.org/docs/14/sql-alterfunction.html """ type = "alter_function_action_segment" match_grammar = Sequence( OneOf( OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", ), OneOf("IMMUTABLE", "STABLE", "VOLATILE"), Sequence(Ref.keyword("NOT", optional=True), "LEAKPROOF"), Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("DEFINER", "INVOKER"), ), Sequence("PARALLEL", OneOf("UNSAFE", "RESTRICTED", "SAFE")), Sequence("COST", Ref("NumericLiteralSegment")), Sequence("ROWS", Ref("NumericLiteralSegment")), Sequence("SUPPORT", Ref("ParameterNameSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), "DEFAULT", ), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), ), Ref.keyword("RESTRICT", optional=True), ) class AlterProcedureActionSegment(BaseSegment): """Alter Procedure Action Segment. https://www.postgresql.org/docs/14/sql-alterprocedure.html """ type = "alter_procedure_action_segment" match_grammar = Sequence( OneOf( Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("DEFINER", "INVOKER"), ), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), "DEFAULT", ), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), ), Ref.keyword("RESTRICT", optional=True), ) class AlterProcedureStatementSegment(BaseSegment): """An `ALTER PROCEDURE` statement. 
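    For illustration (example from the Postgres docs; names are placeholders):

        ALTER PROCEDURE insert_data(integer, integer) SET SCHEMA accounting;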
https://www.postgresql.org/docs/14/sql-alterprocedure.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), OneOf( Ref("AlterProcedureActionSegment", optional=True), Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), Delimited( OneOf( Ref("ParameterNameSegment"), Ref("LiteralGrammar"), ), ), ), Sequence("FROM", "CURRENT"), ), ), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class OffsetClauseSegment(ansi.OffsetClauseSegment): """A `OFFSET` clause like in `SELECT`.""" type = "offset_clause" match_grammar: Matchable = Sequence( "OFFSET", Indent, OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), ), Dedent, ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://www.postgresql.org/docs/14/sql-createprocedure.html TODO: Just a basic statement for now, without full syntax. based on CreateFunctionStatementSegment without a return type. """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Ref("FunctionDefinitionGrammar"), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://www.postgresql.org/docs/11/sql-dropprocedure.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), ), OneOf( "CASCADE", "RESTRICT", optional=True, ), ) class WellKnownTextGeometrySegment(BaseSegment): """A Data Type Segment to identify Well Known Text Geometric Data Types. As specified in https://postgis.net/stuff/postgis-3.1.pdf This approach is to maximise 'accepted code' for the parser, rather than be overly restrictive. 
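    For illustration (PostGIS syntax; coordinates and SRID are placeholders),
    this aims to accept forms such as:

        POINT(0 0)
        GEOMETRY(POINT, 4326)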
""" type = "wkt_geometry_type" _geometry_type_keywords = [x[0] for x in postgres_postgis_datatype_keywords] match_grammar = OneOf( Sequence( OneOf(*_geometry_type_keywords), Bracketed( Delimited( OptionallyBracketed(Delimited(Ref("SimpleGeometryGrammar"))), # 2D Arrays of coordinates - to specify surfaces Bracketed( Delimited(Bracketed(Delimited(Ref("SimpleGeometryGrammar")))) ), Ref("WellKnownTextGeometrySegment"), ) ), ), Sequence( OneOf("GEOMETRY", "GEOGRAPHY"), Bracketed( Sequence( OneOf(*_geometry_type_keywords, "GEOMETRY", "GEOGRAPHY"), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True, ), ), ), ), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment.""" type = "semi_structured_expression" match_grammar = Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement. https://www.postgresql.org/docs/13/sql-createfunction.html """ match_grammar = Sequence( AnyNumberOf( Ref("LanguageClauseSegment"), Sequence("TRANSFORM", "FOR", "TYPE", Ref("ParameterNameSegment")), Ref.keyword("WINDOW"), OneOf("IMMUTABLE", "STABLE", "VOLATILE"), Sequence(Ref.keyword("NOT", optional=True), "LEAKPROOF"), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", ), Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("INVOKER", "DEFINER"), ), Sequence("PARALLEL", OneOf("UNSAFE", "RESTRICTED", "SAFE")), Sequence("COST", Ref("NumericLiteralSegment")), Sequence("ROWS", Ref("NumericLiteralSegment")), Sequence("SUPPORT", Ref("ParameterNameSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), Delimited( OneOf( Ref("ParameterNameSegment"), Ref("LiteralGrammar"), ), ), ), Sequence("FROM", "CURRENT"), ), ), Sequence( "AS", OneOf( Ref("QuotedLiteralSegment"), Sequence( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "RETURN", Ref("ExpressionSegment"), ), Sequence( "BEGIN", "ATOMIC", AnyNumberOf( Sequence( Ref("InsertStatementSegment"), Ref("SemicolonSegment"), ), Sequence( Ref("UpdateStatementSegment"), Ref("SemicolonSegment"), ), Sequence( OneOf( Ref("WithCompoundStatementSegment"), Ref("SelectStatementSegment"), ), Ref("SemicolonSegment"), ), Sequence( "RETURN", Ref("ExpressionSegment"), Ref("SemicolonSegment"), ), ), "END", ), ), Sequence( "WITH", Bracketed(Delimited(Ref("ParameterNameSegment"))), optional=True, ), ) class IntoClauseSegment(BaseSegment): """Into Clause Segment. As specified in https://www.postgresql.org/docs/14/sql-selectinto.html """ type = "into_clause" match_grammar = Sequence( "INTO", OneOf("TEMPORARY", "TEMP", "UNLOGGED", optional=True), Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ) class ForClauseSegment(BaseSegment): """`FOR ...` clause in `SELECT` statements. As specified in https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE. 
""" type = "for_clause" match_grammar = Sequence( "FOR", OneOf( "UPDATE", Sequence("NO", "KEY", "UPDATE"), "SHARE", Sequence("KEY", "SHARE"), ), Sequence( "OF", Delimited( Ref("TableReferenceSegment"), ), optional=True, ), OneOf( "NOWAIT", Sequence("SKIP", "LOCKED"), optional=True, ), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Overrides ANSI Statement, to allow for SELECT INTO statements.""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("IntoClauseSegment", optional=True), ], before=Ref("FromClauseSegment", optional=True), terminators=[ Sequence("WITH", Ref.keyword("NO", optional=True), "DATA"), Sequence("ON", "CONFLICT"), Ref.keyword("RETURNING"), Ref("WithCheckOptionSegment"), Ref("MetaCommandQueryBufferSegment"), ], ) class SelectStatementSegment(ansi.SelectStatementSegment): """Overrides ANSI as the parse grammar copy needs to be reapplied. As per https://www.postgresql.org/docs/current/sql-select.html """ # Inherit most of the parse grammar from the unordered version. match_grammar: Matchable = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("NamedWindowSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("OffsetClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("ForClauseSegment", optional=True), ], replace_terminators=True, terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), Sequence("ON", "CONFLICT"), Ref.keyword("RETURNING"), Ref("WithCheckOptionSegment"), Ref("MetaCommandQueryBufferSegment"), ], ) class SelectClauseSegment(ansi.SelectClauseSegment): """Overrides ANSI to allow INTO as a terminator.""" match_grammar = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref("SelectClauseElementSegment"), # In Postgres you don't need an element so make it optional optional=True, allow_trailing=True, ), Dedent, terminators=[ "INTO", "FROM", "WHERE", Sequence("ORDER", "BY"), Sequence("ON", "CONFLICT"), "LIMIT", "RETURNING", "OVERLAPS", Ref("SetOperatorSegment"), Sequence("WITH", Ref.keyword("NO", optional=True), "DATA"), Ref("WithCheckOptionSegment"), Ref("MetaCommandQueryBufferSegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = OneOf( Sequence( "DISTINCT", Sequence( "ON", Bracketed(Delimited(Ref("ExpressionSegment"))), optional=True, ), ), "ALL", ) class WithinGroupClauseSegment(BaseSegment): """An WITHIN GROUP clause for window functions. https://www.postgresql.org/docs/current/functions-aggregate.html. 
""" type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment", optional=True)), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), Bracketed(), # Allows empty parentheses ), terminators=[ Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), ], ), Dedent, ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. As per: https://www.postgresql.org/docs/current/sql-createrole.html """ type = "create_role_statement" match_grammar = Sequence( "CREATE", OneOf("ROLE", "USER"), Ref("RoleReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), AnySetOf( OneOf("SUPERUSER", "NOSUPERUSER"), OneOf("CREATEDB", "NOCREATEDB"), OneOf("CREATEROLE", "NOCREATEROLE"), OneOf("INHERIT", "NOINHERIT"), OneOf("LOGIN", "NOLOGIN"), OneOf("REPLICATION", "NOREPLICATION"), OneOf("BYPASSRLS", "NOBYPASSRLS"), Sequence("CONNECTION", "LIMIT", Ref("NumericLiteralSegment")), Sequence( Ref.keyword("ENCRYPTED", optional=True), "PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "NULL"), ), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), Sequence("IN", "ROLE", Ref("RoleReferenceSegment")), Sequence("IN", "GROUP", Ref("RoleReferenceSegment")), Sequence("ROLE", Ref("RoleReferenceSegment")), Sequence("ADMIN", Ref("RoleReferenceSegment")), Sequence("USER", Ref("RoleReferenceSegment")), Sequence("SYSID", Ref("NumericLiteralSegment")), ), optional=True, ), ) class AlterRoleStatementSegment(BaseSegment): """An `ALTER ROLE` statement. 
As per: https://www.postgresql.org/docs/current/sql-alterrole.html """ type = "alter_role_statement" match_grammar = Sequence( "ALTER", OneOf("ROLE", "USER"), OneOf( # role_specification Sequence( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", Ref("RoleReferenceSegment"), ), Ref.keyword("WITH", optional=True), AnySetOf( OneOf("SUPERUSER", "NOSUPERUSER"), OneOf("CREATEDB", "NOCREATEDB"), OneOf("CREATEROLE", "NOCREATEROLE"), OneOf("INHERIT", "NOINHERIT"), OneOf("LOGIN", "NOLOGIN"), OneOf("REPLICATION", "NOREPLICATION"), OneOf("BYPASSRLS", "NOBYPASSRLS"), Sequence("CONNECTION", "LIMIT", Ref("NumericLiteralSegment")), Sequence( Ref.keyword("ENCRYPTED", optional=True), "PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "NULL"), ), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), ), ), # name only Sequence( Ref("RoleReferenceSegment"), Sequence("RENAME", "TO", Ref("RoleReferenceSegment")), ), # role_specification | all Sequence( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", "ALL", Ref("RoleReferenceSegment"), ), Sequence( "IN", "DATABASE", Ref("DatabaseReferenceSegment"), optional=True, ), OneOf( Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( "DEFAULT", Delimited( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815 Ref("OnKeywordAsIdentifierSegment"), ), ), ), Sequence( "FROM", "CURRENT", ), ), ), Sequence("RESET", OneOf(Ref("ParameterNameSegment"), "ALL")), ), ), ), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. EXPLAIN [ ( option [, ...] ) ] statement EXPLAIN [ ANALYZE ] [ VERBOSE ] statement https://www.postgresql.org/docs/14/sql-explain.html """ match_grammar = Sequence( "EXPLAIN", OneOf( Sequence( OneOf( "ANALYZE", "ANALYSE", optional=True, ), Ref.keyword("VERBOSE", optional=True), ), Bracketed(Delimited(Ref("ExplainOptionSegment"))), optional=True, ), ansi.ExplainStatementSegment.explainable_stmt, ) class ExplainOptionSegment(BaseSegment): """An `Explain` statement option. ANALYZE [ boolean ] VERBOSE [ boolean ] COSTS [ boolean ] SETTINGS [ boolean ] BUFFERS [ boolean ] WAL [ boolean ] TIMING [ boolean ] SUMMARY [ boolean ] FORMAT { TEXT | XML | JSON | YAML } https://www.postgresql.org/docs/14/sql-explain.html """ type = "explain_option" match_grammar = OneOf( Sequence( OneOf( "ANALYZE", "ANALYSE", "VERBOSE", "COSTS", "SETTINGS", "BUFFERS", "WAL", "TIMING", "SUMMARY", ), Ref("BooleanLiteralGrammar", optional=True), ), Sequence( "FORMAT", OneOf("TEXT", "XML", "JSON", "YAML"), ), ) class SecurityLabelStatementSegment(BaseSegment): """A `SECURITY LABEL` statement. 
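    For illustration (example from the Postgres docs, assuming the sepgsql
    label provider is installed):

        SECURITY LABEL FOR selinux ON TABLE mytable
            IS 'system_u:object_r:sepgsql_table_t:s0';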
https://www.postgresql.org/docs/current/sql-security-label.html """ type = "security_label_statement" match_grammar = Sequence( "SECURITY", "LABEL", # Optional FOR provider clause Sequence( "FOR", Ref("ObjectReferenceSegment"), optional=True, ), "ON", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), ), Sequence( "AGGREGATE", Ref("FunctionNameSegment"), Bracketed( Ref("FunctionParameterListGrammar", optional=True), ), ), Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), Sequence( "DOMAIN", Ref("ObjectReferenceSegment"), ), Sequence( "EVENT", "TRIGGER", Ref("ObjectReferenceSegment"), ), Sequence( "FOREIGN", "TABLE", Ref("TableReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), OptionallyBracketed( Ref("FunctionParameterGrammar", optional=True), ), ), Sequence( "LARGE", "OBJECT", Ref("NumericLiteralSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), ), Sequence( Ref.keyword("PROCEDURAL", optional=True), "LANGUAGE", Ref("ObjectReferenceSegment"), ), Sequence( "PROCEDURE", Ref("FunctionNameSegment"), OptionallyBracketed( Ref("FunctionParameterGrammar", optional=True), ), ), Sequence( "PUBLICATION", Ref("ObjectReferenceSegment"), ), Sequence( "ROLE", Ref("RoleReferenceSegment"), ), Sequence( "ROUTINE", Ref("FunctionNameSegment"), OptionallyBracketed( Ref("FunctionParameterGrammar", optional=True), ), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), Sequence( "SEQUENCE", Ref("ObjectReferenceSegment"), ), Sequence( "SUBSCRIPTION", Ref("ObjectReferenceSegment"), ), Sequence( "TABLESPACE", Ref("ObjectReferenceSegment"), ), Sequence( "TYPE", Ref("ObjectReferenceSegment"), ), Sequence( "VIEW", Ref("TableReferenceSegment"), ), ), "IS", OneOf( Ref("QuotedLiteralSegment"), "NULL", ), ) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. https://www.postgresql.org/docs/15/sql-createschema.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1493 """ match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), OneOf( Sequence( # schema name defaults to role if not provided Ref("SchemaReferenceSegment", optional=True), "AUTHORIZATION", Ref("RoleReferenceSegment"), ), Ref("SchemaReferenceSegment"), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. 
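    For illustration (names are placeholders):

        CREATE UNLOGGED TABLE IF NOT EXISTS measurement (
            city_id int NOT NULL,
            logdate date NOT NULL
        ) PARTITION BY RANGE (logdate);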
As specified in https://www.postgresql.org/docs/13/sql-createtable.html """ match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( # A single COLLATE segment can come before or # after constraint segments OneOf( Ref("ColumnConstraintSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), ), ), ), ), Ref("TableConstraintSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), ), optional=True, ) ), Sequence( "INHERITS", Bracketed(Delimited(Ref("TableReferenceSegment"))), optional=True, ), ), # Create OF syntax: Sequence( "OF", Ref("ParameterNameSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), ), # Create PARTITION OF syntax Sequence( "PARTITION", "OF", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), ), AnyNumberOf( Sequence( "PARTITION", "BY", OneOf("RANGE", "LIST", "HASH"), Bracketed( AnyNumberOf( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("FunctionSegment"), ), AnyNumberOf( Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("ParameterNameSegment", optional=True), ), ), ) ) ), ), Sequence("USING", Ref("ParameterNameSegment")), OneOf( Sequence("WITH", Ref("RelationOptionsSegment")), Sequence("WITHOUT", "OIDS"), ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment")), ), ) class CreateTableAsStatementSegment(BaseSegment): """A `CREATE TABLE AS` statement. 
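    For illustration (example from the Postgres docs):

        CREATE TABLE films_recent AS
            SELECT * FROM films WHERE date_prod >= '2002-01-01';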
As specified in https://www.postgresql.org/docs/13/sql-createtableas.html """ type = "create_table_as_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar"), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Bracketed( Delimited(Ref("ColumnReferenceSegment")), optional=True, ), Sequence("USING", Ref("ParameterNameSegment"), optional=True), OneOf( Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), ), optional=True, ), ) ) ), ), Sequence("WITHOUT", "OIDS"), optional=True, ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), optional=True, ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), ), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. Matches the definition in https://www.postgresql.org/docs/13/sql-altertable.html """ match_grammar = Sequence( "ALTER", "TABLE", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), Ref("StarSegment", optional=True), OneOf( Delimited(Ref("AlterTableActionSegment")), Sequence( "RENAME", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "RENAME", "CONSTRAINT", Ref("ParameterNameSegment"), "TO", Ref("ParameterNameSegment"), ), ), ), Sequence( Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "ATTACH", "PARTITION", Ref("ParameterNameSegment"), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), Sequence( "DETACH", "PARTITION", Ref("ParameterNameSegment"), Ref.keyword("CONCURRENTLY", optional=True), Ref.keyword("FINALIZE", optional=True), ), ), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("ObjectReferenceSegment")), optional=True, ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Ref.keyword("NOWAIT", optional=True), ), ), ) class AlterTableActionSegment(BaseSegment): """Alter Table Action Segment. 
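    For illustration, the action fragments this covers include (names are
    placeholders):

        ADD COLUMN address varchar(30)
        ALTER COLUMN address SET NOT NULL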
https://www.postgresql.org/docs/13/sql-altertable.html """ type = "alter_table_action_segment" match_grammar = OneOf( Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( Sequence("SET", "DATA", optional=True), "TYPE", Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True ), Sequence("USING", OneOf(Ref("ExpressionSegment")), optional=True), ), Sequence( "SET", "DEFAULT", OneOf( OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ) ), ), Sequence("DROP", "DEFAULT"), Sequence(OneOf("SET", "DROP", optional=True), "NOT", "NULL"), Sequence("DROP", "EXPRESSION", Ref("IfExistsGrammar", optional=True)), Sequence( "ADD", "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), "AS", "IDENTITY", Bracketed( AnyNumberOf(Ref("AlterSequenceOptionsSegment")), optional=True ), ), Sequence( OneOf( Sequence( "SET", "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), ), Sequence("SET", Ref("AlterSequenceOptionsSegment")), Sequence( "RESTART", Sequence("WITH", Ref("NumericLiteralSegment")) ), ) ), Sequence( "DROP", "IDENTITY", Ref("IfExistsGrammar", optional=True), ), Sequence("SET", "STATISTICS", Ref("NumericLiteralSegment")), Sequence("SET", Ref("RelationOptionsSegment")), # Documentation says you can only provide keys in RESET options, but the # actual grammar lets you pass in values too. Sequence("RESET", Ref("RelationOptionsSegment")), Sequence( "SET", "STORAGE", OneOf("PLAIN", "EXTERNAL", "EXTENDED", "MAIN") ), ), ), Sequence("ADD", Ref("TableConstraintSegment")), Sequence("ADD", Ref("TableConstraintUsingIndexSegment")), Sequence( "ALTER", "CONSTRAINT", Ref("ParameterNameSegment"), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ), Sequence("VALIDATE", "CONSTRAINT", Ref("ParameterNameSegment")), Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ParameterNameSegment"), Ref("DropBehaviorGrammar", optional=True), ), Sequence( OneOf("ENABLE", "DISABLE"), "TRIGGER", OneOf(Ref("ParameterNameSegment"), "ALL", "USER"), ), Sequence( "ENABLE", OneOf("REPLICA", "ALWAYS"), "TRIGGER", Ref("ParameterNameSegment") ), Sequence( OneOf( "ENABLE", "DISABLE", Sequence("ENABLE", "REPLICA"), Sequence("ENABLE", "RULE"), ), "RULE", Ref("ParameterNameSegment"), ), Sequence( OneOf("DISABLE", "ENABLE", "FORCE", Sequence("NO", "FORCE")), "ROW", "LEVEL", "SECURITY", ), Sequence("CLUSTER", "ON", Ref("ParameterNameSegment")), Sequence("SET", "WITHOUT", OneOf("CLUSTER", "OIDS")), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence("SET", OneOf("LOGGED", "UNLOGGED")), Sequence("SET", Ref("RelationOptionsSegment")), # Documentation says you can only provide keys in RESET options, but the # actual grammar lets you pass in values too. 
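        # e.g. (illustrative): both RESET (fillfactor) and
        # RESET (fillfactor = 70) are accepted by this branch.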
Sequence("RESET", Ref("RelationOptionsSegment")), Sequence( Ref.keyword("NO", optional=True), "INHERIT", Ref("TableReferenceSegment") ), Sequence("OF", Ref("ParameterNameSegment")), Sequence("NOT", "OF"), Sequence( "OWNER", "TO", OneOf( Ref("ParameterNameSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "REPLICA", "IDENTITY", OneOf( "DEFAULT", Sequence("USING", "INDEX", Ref("IndexReferenceSegment")), "FULL", "NOTHING", ), ), ) class VersionIdentifierSegment(BaseSegment): """A reference to an version.""" type = "version_identifier" # match grammar (don't allow whitespace) match_grammar: Matchable = OneOf( Ref("QuotedLiteralSegment"), Ref("NakedIdentifierSegment"), ) class CreateExtensionStatementSegment(BaseSegment): """A `CREATE EXTENSION` statement. https://www.postgresql.org/docs/9.1/sql-createextension.html """ type = "create_extension_statement" match_grammar: Matchable = Sequence( "CREATE", "EXTENSION", Ref("IfNotExistsGrammar", optional=True), Ref("ExtensionReferenceSegment"), Ref.keyword("WITH", optional=True), Sequence("SCHEMA", Ref("SchemaReferenceSegment"), optional=True), Sequence("VERSION", Ref("VersionIdentifierSegment"), optional=True), Sequence("FROM", Ref("VersionIdentifierSegment"), optional=True), Ref.keyword("CASCADE", optional=True), ) class DropExtensionStatementSegment(BaseSegment): """A `DROP EXTENSION` statement. https://www.postgresql.org/docs/14/sql-dropextension.html """ type = "drop_extension_statement" match_grammar: Matchable = Sequence( "DROP", "EXTENSION", Ref("IfExistsGrammar", optional=True), Ref("ExtensionReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class AlterExtensionStatementSegment(BaseSegment): """An `ALTER EXTENSION` statement. https://www.postgresql.org/docs/16/sql-alterextension.html """ type = "alter_extension_statement" match_grammar: Matchable = Sequence( "ALTER", "EXTENSION", Ref("ExtensionReferenceSegment"), OneOf( Sequence( "UPDATE", Sequence( "TO", Ref("LiteralGrammar"), optional=True, ), ), Sequence( "SET", "SCHEMA", OneOf(Ref("SchemaReferenceSegment"), "CURRENT_SCHEMA"), ), Sequence( OneOf( "ADD", "DROP", ), OneOf( Sequence( OneOf( Sequence("ACCESS", "METHOD"), "COLLATION", "CONVERSION", "DOMAIN", Sequence("EVENT", "TRIGGER"), Sequence("FOREIGN", "DATA", "WRAPPER"), Sequence("FOREIGN", "TABLE"), Sequence( Ref.keyword("PROCEDURAL", optional=True), "LANGUAGE", ), "SCHEMA", "SEQUENCE", "SERVER", Sequence( "TEXT", "SEARCH", OneOf( "CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE", ), ), "TYPE", ), Ref("ObjectReferenceSegment"), ), Sequence( OneOf( Sequence("MATERIALIZED", "VIEW"), "TABLE", "VIEW", ), Ref("TableReferenceSegment"), ), Sequence( "AGGREGATE", Ref("ObjectReferenceSegment"), Bracketed( Sequence( # TODO: Is this too permissive? 
Anything(), optional=True, ), optional=True, ), ), Sequence( "CAST", Bracketed( Sequence( Ref("ObjectReferenceSegment"), "AS", Ref("ObjectReferenceSegment"), ), ), ), Sequence( OneOf( "FUNCTION", "PROCEDURE", "ROUTINE", ), Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), ), ), Sequence( "OPERATOR", OneOf( Sequence( Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), ), ), Sequence( OneOf("CLASS", "FAMILY"), Ref("ObjectReferenceSegment"), "USING", Ref("IndexAccessMethodSegment"), ), ), ), Sequence("TRANSFORM", "FOR", "TYPE", Ref("ParameterNameSegment")), ), ), ), ) class CreateForeignDataWrapperStatementSegment(BaseSegment): """A CREATE FOREIGN DATA WRAPPER Statement. Docs: https://fdw.dev/catalog/ """ type = "create_foreign_data_wrapper" match_grammar: Matchable = Sequence( "CREATE", Ref("ForeignDataWrapperGrammar"), Ref("SingleIdentifierGrammar"), Indent, "HANDLER", Ref("SingleIdentifierGrammar"), Dedent, Indent, "VALIDATOR", Ref("SingleIdentifierGrammar"), Dedent, ) class SubscriptionReferenceSegment(ansi.ObjectReferenceSegment): """A subscription reference.""" type = "subscription_reference" class PublicationReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a publication.""" type = "publication_reference" match_grammar: Matchable = Ref("SingleIdentifierGrammar") class PublicationTableSegment(BaseSegment): """Specification for a single table object in a publication.""" type = "publication_table" match_grammar: Matchable = Sequence( Ref("ExtendedTableReferenceGrammar"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True), ) class PublicationObjectsSegment(BaseSegment): """Specification for one or more objects in a publication. Unlike the underlying PG grammar which has one object per PublicationObjSpec and so requires one to track the previous object type if it's a "continuation object type", this grammar groups together the continuation objects, e.g. "TABLE a, b, TABLE c, d" results in two segments: one containing references "a, b", and the other containing "c, d". https://www.postgresql.org/docs/15/sql-createpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10435-L10530 """ type = "publication_objects" match_grammar: Matchable = OneOf( Sequence( "TABLE", Delimited( Ref("PublicationTableSegment"), terminators=[Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES"))], ), ), Sequence( "TABLES", "IN", "SCHEMA", Delimited( OneOf(Ref("SchemaReferenceSegment"), "CURRENT_SCHEMA"), terminators=[Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES"))], ), ), ) class CreatePublicationStatementSegment(BaseSegment): """A `CREATE PUBLICATION` statement. https://www.postgresql.org/docs/15/sql-createpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10390-L10530 """ type = "create_publication_statement" match_grammar: Matchable = Sequence( "CREATE", "PUBLICATION", Ref("PublicationReferenceSegment"), OneOf( Sequence("FOR", "ALL", "TABLES"), Sequence("FOR", Delimited(Ref("PublicationObjectsSegment"))), optional=True, ), Sequence( "WITH", Ref("DefinitionParametersSegment"), optional=True, ), ) class AlterPublicationStatementSegment(BaseSegment): """A `ALTER PUBLICATION` statement. 
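    For illustration (example from the Postgres docs; names are placeholders):

        ALTER PUBLICATION mypublication ADD TABLE users, departments;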
https://www.postgresql.org/docs/15/sql-alterpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10549 """ type = "alter_publication_statement" match_grammar: Matchable = Sequence( "ALTER", "PUBLICATION", Ref("PublicationReferenceSegment"), OneOf( Sequence("SET", Ref("DefinitionParametersSegment")), Sequence("ADD", Delimited(Ref("PublicationObjectsSegment"))), Sequence("SET", Delimited(Ref("PublicationObjectsSegment"))), Sequence("DROP", Delimited(Ref("PublicationObjectsSegment"))), Sequence("RENAME", "TO", Ref("PublicationReferenceSegment")), Sequence( "OWNER", "TO", OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), ), ) class DropPublicationStatementSegment(BaseSegment): """A `DROP PUBLICATION` statement. https://www.postgresql.org/docs/15/sql-droppublication.html """ type = "drop_publication_statement" match_grammar: Matchable = Sequence( "DROP", "PUBLICATION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("PublicationReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-creatematerializedview.html """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("USING", Ref("ParameterNameSegment"), optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class AlterMaterializedViewStatementSegment(BaseSegment): """A `ALTER MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-altermaterializedview.html """ type = "alter_materialized_view_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Delimited(Ref("AlterMaterializedViewActionSegment")), Sequence( "RENAME", Sequence("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), ), ), Sequence( Ref("TableReferenceSegment"), Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("ObjectReferenceSegment")), optional=True, ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence("NOWAIT", optional=True), ), ), ) class AlterMaterializedViewActionSegment(BaseSegment): """Alter Materialized View Action Segment. 
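    For illustration, the action fragments this covers include (names and
    values are placeholders):

        ALTER COLUMN ts SET STATISTICS 1000
        OWNER TO new_owner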
https://www.postgresql.org/docs/14/sql-altermaterializedview.html """ type = "alter_materialized_view_action_segment" match_grammar = OneOf( Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence("SET", "STATISTICS", Ref("NumericLiteralSegment")), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Sequence( "SET", "STORAGE", OneOf("PLAIN", "EXTERNAL", "EXTENDED", "MAIN") ), Sequence("SET", "COMPRESSION", Ref("ParameterNameSegment")), ), ), Sequence("CLUSTER", "ON", Ref("ParameterNameSegment")), Sequence("SET", "WITHOUT", "CLUSTER"), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), ) class RefreshMaterializedViewStatementSegment(BaseSegment): """A `REFRESH MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-refreshmaterializedview.html """ type = "refresh_materialized_view_statement" match_grammar = Sequence( "REFRESH", "MATERIALIZED", "VIEW", Ref.keyword("CONCURRENTLY", optional=True), Ref("TableReferenceSegment"), Ref("WithDataClauseSegment", optional=True), ) class DropMaterializedViewStatementSegment(BaseSegment): """A `DROP MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-dropmaterializedview.html """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class WithCheckOptionSegment(BaseSegment): """WITH [ CASCADED | LOCAL ] CHECK OPTION for Postgres' CREATE VIEWS. https://www.postgresql.org/docs/14/sql-createview.html """ type = "with_check_option" match_grammar: Matchable = Sequence( "WITH", OneOf("CASCADED", "LOCAL"), "CHECK", "OPTION" ) class AlterPolicyStatementSegment(BaseSegment): """An ALTER POLICY statement. As specified in https://www.postgresql.org/docs/current/sql-alterpolicy.html """ type = "alter_policy_statement" match_grammar = Sequence( "ALTER", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), AnySetOf( Sequence( "TO", Delimited( OneOf( Ref("RoleReferenceSegment"), "PUBLIC", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ) ), ), Sequence("USING", Bracketed(Ref("ExpressionSegment"))), Sequence( "WITH", "CHECK", Bracketed(Ref("ExpressionSegment")), ), min_times=1, ), ), ) class CreateViewStatementSegment(BaseSegment): """An `Create VIEW` statement. 
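    For illustration (example adapted from the Postgres docs):

        CREATE OR REPLACE VIEW comedies AS
            SELECT * FROM films WHERE kind = 'Comedy'
        WITH LOCAL CHECK OPTION;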
As specified in https://www.postgresql.org/docs/14/sql-createview.html """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("RECURSIVE", optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Ref("ValuesClauseSegment"), ), Ref("WithCheckOptionSegment", optional=True), ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-alterview.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( "SET", "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), Sequence("DROP", "DEFAULT"), ), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "RENAME", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), ), ) class DropViewStatementSegment(ansi.DropViewStatementSegment): """A `DROP VIEW` statement. https://www.postgresql.org/docs/15/sql-dropview.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719 """ match_grammar: Matchable = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-createdatabase.html """ match_grammar = Sequence( "CREATE", "DATABASE", Ref("DatabaseReferenceSegment"), Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence( "OWNER", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( "TEMPLATE", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( "ENCODING", Ref("EqualsSegment", optional=True), OneOf(Ref("QuotedLiteralSegment"), "DEFAULT"), ), OneOf( # LOCALE This is a shortcut for setting LC_COLLATE and LC_CTYPE at once. # If you specify this, you cannot specify either of those parameters. 
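                # e.g. (illustrative, from the Postgres docs):
                #     CREATE DATABASE music LOCALE 'sv_SE.utf8';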
Sequence( "LOCALE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), AnyNumberOf( Sequence( "LC_COLLATE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "LC_CTYPE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "TABLESPACE", Ref("EqualsSegment", optional=True), OneOf(Ref("TablespaceReferenceSegment"), "DEFAULT"), ), Sequence( "ALLOW_CONNECTIONS", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), Sequence( "CONNECTION", "LIMIT", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), ), Sequence( "IS_TEMPLATE", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """A `ALTER DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-alterdatabase.html """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", "DATABASE", Ref("DatabaseReferenceSegment"), OneOf( Sequence( Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence("ALLOW_CONNECTIONS", Ref("BooleanLiteralGrammar")), Sequence( "CONNECTION", "LIMIT", Ref("NumericLiteralSegment"), ), Sequence("IS_TEMPLATE", Ref("BooleanLiteralGrammar")), min_times=1, ), ), Sequence("RENAME", "TO", Ref("DatabaseReferenceSegment")), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( "DEFAULT", Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), ), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), optional=True, ), ) class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment): """A `DROP DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-dropdatabase.html """ match_grammar = Sequence( "DROP", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), Bracketed("FORCE"), optional=True, ), ) class CreateSubscriptionStatementSegment(BaseSegment): """A `CREATE SUBSCRIPTION` statement. https://www.postgresql.org/docs/current/sql-createsubscription.html """ type = "create_subscription" match_grammar = Sequence( "CREATE", "SUBSCRIPTION", Ref("SubscriptionReferenceSegment"), "CONNECTION", Ref("QuotedLiteralSegment"), "PUBLICATION", Delimited(Ref("PublicationReferenceSegment")), Sequence( "WITH", Ref("DefinitionParametersSegment"), optional=True, ), ) class AlterSubscriptionStatementSegment(BaseSegment): """An `ALTER SUBSCRIPTION` statement. 
    https://www.postgresql.org/docs/current/sql-altersubscription.html
    """

    type = "alter_subscription"

    match_grammar = Sequence(
        "ALTER",
        "SUBSCRIPTION",
        Ref("SubscriptionReferenceSegment"),
        OneOf(
            Sequence("CONNECTION", Ref("QuotedLiteralSegment")),
            Sequence(
                OneOf(
                    "SET",
                    "ADD",
                    "DROP",
                ),
                "PUBLICATION",
                Delimited(Ref("PublicationReferenceSegment")),
                Sequence(
                    "WITH",
                    Ref("DefinitionParametersSegment"),
                    optional=True,
                ),
            ),
            Sequence(
                "REFRESH",
                "PUBLICATION",
                Sequence(
                    "WITH",
                    Ref("DefinitionParametersSegment"),
                    optional=True,
                ),
            ),
            "ENABLE",
            "DISABLE",
            Sequence(
                "SET",
                Ref("DefinitionParametersSegment"),
            ),
            Sequence(
                "SKIP",
                Bracketed(
                    Ref("ParameterNameSegment"),
                    Ref("RawEqualsSegment"),
                    Ref("ExpressionSegment"),
                ),
            ),
            Sequence(
                "OWNER",
                "TO",
                OneOf(
                    Ref("ObjectReferenceSegment"),
                    "CURRENT_ROLE",
                    "CURRENT_USER",
                    "CURRENT_SESSION",
                ),
            ),
            Sequence("RENAME", "TO", Ref("SubscriptionReferenceSegment")),
        ),
    )


class DropSubscriptionStatementSegment(BaseSegment):
    """A `DROP SUBSCRIPTION` statement.

    https://www.postgresql.org/docs/current/sql-dropsubscription.html
    """

    type = "drop_subscription"

    match_grammar = Sequence(
        "DROP",
        "SUBSCRIPTION",
        Ref("IfExistsGrammar", optional=True),
        Ref("SubscriptionReferenceSegment"),
        OneOf("CASCADE", "RESTRICT", optional=True),
    )


class VacuumStatementSegment(BaseSegment):
    """A `VACUUM` statement.

    https://www.postgresql.org/docs/15/sql-vacuum.html
    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L11658
    """

    type = "vacuum_statement"

    match_grammar = Sequence(
        "VACUUM",
        OneOf(
            Sequence(
                Ref.keyword("FULL", optional=True),
                Ref.keyword("FREEZE", optional=True),
                Ref.keyword("VERBOSE", optional=True),
                OneOf("ANALYZE", "ANALYSE", optional=True),
            ),
            Bracketed(
                Delimited(
                    Sequence(
                        OneOf(
                            "FULL",
                            "FREEZE",
                            "VERBOSE",
                            "ANALYZE",
                            "ANALYSE",
                            "DISABLE_PAGE_SKIPPING",
                            "SKIP_LOCKED",
                            "INDEX_CLEANUP",
                            "PROCESS_TOAST",
                            "TRUNCATE",
                            "PARALLEL",
                        ),
                        OneOf(
                            Ref("LiteralGrammar"),
                            Ref("NakedIdentifierSegment"),
                            # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815
                            Ref("OnKeywordAsIdentifierSegment"),
                            optional=True,
                        ),
                    ),
                ),
            ),
            optional=True,
        ),
        Delimited(
            Sequence(
                Ref("TableReferenceSegment"),
                Ref("BracketedColumnReferenceListGrammar", optional=True),
            ),
            optional=True,
        ),
    )


class LikeOptionSegment(BaseSegment):
    """Like Option Segment.

    As specified in https://www.postgresql.org/docs/13/sql-createtable.html
    """

    type = "like_option_segment"

    match_grammar = Sequence(
        OneOf("INCLUDING", "EXCLUDING"),
        OneOf(
            "COMMENTS",
            "CONSTRAINTS",
            "DEFAULTS",
            "GENERATED",
            "IDENTITY",
            "INDEXES",
            "STATISTICS",
            "STORAGE",
            "ALL",
        ),
    )


class ColumnConstraintSegment(ansi.ColumnConstraintSegment):
    """A column option; each CREATE TABLE column can have 0 or more.
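
    Illustrative example (the column constraints `UNIQUE`, `DEFAULT 9.99` and
    `CHECK (price > 0)` are the parts this segment covers; names are
    placeholders):

        CREATE TABLE products (
            product_no integer UNIQUE,
            price numeric DEFAULT 9.99 CHECK (price > 0)
        );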
https://www.postgresql.org/docs/13/sql-altertable.html """ # Column constraint from # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), Sequence( # DEFAULT "DEFAULT", OneOf( Ref("ShorthandCastSegment"), Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), Sequence("GENERATED", "ALWAYS", "AS", Ref("ExpressionSegment"), "STORED"), Sequence( "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), "AS", "IDENTITY", Bracketed( AnyNumberOf(Ref("AlterSequenceOptionsSegment")), optional=True ), ), Sequence( "UNIQUE", Sequence( "NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True, ), Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ), Sequence( "PRIMARY", "KEY", Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ), Ref("ReferenceDefinitionGrammar"), # REFERENCES reftable [ ( refcolumn) ] ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class ForeignTableColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option for a foreign table. Each CREATE FOREIGN TABLE column can have 0 or more. https://www.postgresql.org/docs/16/sql-createforeigntable.html """ match_grammar = Sequence( # [ CONSTRAINT constraint_name ] Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( # NOT NULL | NULL Sequence(Ref.keyword("NOT", optional=True), "NULL"), # CHECK ( expression ) [ NO INHERIT ] Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), # DEFAULT default_expr Sequence( "DEFAULT", OneOf( Ref("ShorthandCastSegment"), Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), # GENERATED ALWAYS AS ( generation_expr ) STORED Sequence("GENERATED", "ALWAYS", "AS", Ref("ExpressionSegment"), "STORED"), ), ) class PartitionBoundSpecSegment(BaseSegment): """Partition bound spec. As per https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "partition_bound_spec" match_grammar = OneOf( Sequence( "IN", Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( "FROM", Bracketed( Delimited( OneOf(Ref("ExpressionSegment"), "MINVALUE", "MAXVALUE"), ) ), "TO", Bracketed( Delimited( OneOf(Ref("ExpressionSegment"), "MINVALUE", "MAXVALUE"), ) ), ), Sequence( "WITH", Bracketed( Sequence( "MODULUS", Ref("NumericLiteralSegment"), Ref("CommaSegment"), "REMAINDER", Ref("NumericLiteralSegment"), ) ), ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE. As specified in https://www.postgresql.org/docs/13/sql-altertable.html """ match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), Sequence( # UNIQUE ( column_name [, ... 
] ) "UNIQUE", Sequence( "NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True, ), Ref("BracketedColumnReferenceListGrammar"), Ref("IndexParametersSegment", optional=True), ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref("IndexParametersSegment", optional=True), ), Sequence( "EXCLUDE", Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True), Bracketed(Delimited(Ref("ExclusionConstraintElementSegment"))), Ref("IndexParametersSegment", optional=True), Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] "FOREIGN", "KEY", # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), AnyNumberOf( OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE")), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE") ), Sequence("NOT", "VALID"), Sequence("NO", "INHERIT"), ), ) class ForeignTableTableConstraintSegment(ansi.TableConstraintSegment): """A table constraint on a foreign table, e.g. for CREATE FOREIGN TABLE. As specified in https://www.postgresql.org/docs/16/sql-createforeigntable.html """ match_grammar = Sequence( # [ CONSTRAINT constraint_name ] Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), # CHECK ( expression ) [ NO INHERIT ] Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), ) class TableConstraintUsingIndexSegment(BaseSegment): """table_constraint_using_index. As specified in: https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), Sequence( OneOf("UNIQUE", Ref("PrimaryKeyGrammar")), "USING", "INDEX", Ref("IndexReferenceSegment"), ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class SetConstraintsStatementSegment(BaseSegment): """`SET CONSTRAINTS` statement. https://www.postgresql.org/docs/current/sql-set-constraints.html """ type = "set_constraint_statement" match_grammar = Sequence( "SET", "CONSTRAINTS", OneOf("ALL", Delimited(Ref("ObjectReferenceSegment"))), OneOf("DEFERRED", "IMMEDIATE"), ) class IndexParametersSegment(BaseSegment): """index_parameters. As specified in https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "index_parameters" match_grammar = Sequence( Sequence("INCLUDE", Ref("BracketedColumnReferenceListGrammar"), optional=True), Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ) class IndexElementOptionsSegment(BaseSegment): """Index element options segment. 
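
    For illustration, this segment covers the `COLLATE "C" DESC NULLS LAST`
    portion of a statement like (names are placeholders):

        CREATE INDEX title_idx ON films (title COLLATE "C" DESC NULLS LAST);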
https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8057 """ type = "index_element_options" match_grammar = Sequence( Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), Sequence( Ref( "OperatorClassReferenceSegment", exclude=Sequence("NULLS", OneOf("FIRST", "LAST")), ), Ref("RelationOptionsSegment", optional=True), # args for opclass optional=True, ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ) class IndexElementSegment(BaseSegment): """Index element segment. As found in https://www.postgresql.org/docs/15/sql-altertable.html. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8089 """ type = "index_element" match_grammar = Sequence( OneOf( Ref("ColumnReferenceSegment"), # TODO: This is still not perfect. This corresponds to # func_expr_windowless in the grammar and we don't currently # implement everything it provides. Ref("FunctionSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("IndexElementOptionsSegment", optional=True), ) class ExclusionConstraintElementSegment(BaseSegment): """Exclusion constraint element segment. As found in https://www.postgresql.org/docs/15/sql-altertable.html. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L4277 """ type = "exclusion_constraint_element" match_grammar = Sequence( Ref("IndexElementSegment"), "WITH", Ref("ComparisonOperatorGrammar"), ) class AlterDefaultPrivilegesStatementSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` statement. ``` ALTER DEFAULT PRIVILEGES [ FOR { ROLE | USER } target_role [, ...] ] [ IN SCHEMA schema_name [, ...] ] abbreviated_grant_or_revoke ``` https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_statement" match_grammar = Sequence( "ALTER", "DEFAULT", "PRIVILEGES", Sequence( "FOR", OneOf("ROLE", "USER"), Delimited( Ref("ObjectReferenceSegment"), terminators=["IN", "GRANT", "REVOKE"], ), optional=True, ), Sequence( "IN", "SCHEMA", Delimited( Ref("SchemaReferenceSegment"), terminators=["GRANT", "REVOKE"], ), optional=True, ), OneOf( Ref("AlterDefaultPrivilegesGrantSegment"), Ref("AlterDefaultPrivilegesRevokeSegment"), ), ) class AlterDefaultPrivilegesObjectPrivilegesSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` object privileges. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_object_privilege" match_grammar = OneOf( Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), Delimited( "CREATE", "DELETE", "EXECUTE", "INSERT", "REFERENCES", "SELECT", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", terminators=["ON"], ), ) class AlterDefaultPrivilegesSchemaObjectsSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` schema object types. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_schema_object" match_grammar = OneOf( "TABLES", "FUNCTIONS", "ROUTINES", "SEQUENCES", "TYPES", "SCHEMAS", ) class AlterDefaultPrivilegesToFromRolesSegment(BaseSegment): """The segment after `TO` / `FROM` in `ALTER DEFAULT PRIVILEGES`. 
`{ [ GROUP ] role_name | PUBLIC } [, ...]` https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_to_from_roles" match_grammar = OneOf( Sequence( Ref.keyword("GROUP", optional=True), Ref("RoleReferenceSegment"), ), "PUBLIC", ) class AlterDefaultPrivilegesGrantSegment(BaseSegment): """`GRANT` for `ALTER DEFAULT PRIVILEGES`. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_grant" match_grammar = Sequence( "GRANT", Ref("AlterDefaultPrivilegesObjectPrivilegesSegment"), "ON", Ref("AlterDefaultPrivilegesSchemaObjectsSegment"), "TO", Delimited( Ref("AlterDefaultPrivilegesToFromRolesSegment"), terminators=["WITH"], ), Sequence("WITH", "GRANT", "OPTION", optional=True), ) class AlterDefaultPrivilegesRevokeSegment(BaseSegment): """`REVOKE` for `ALTER DEFAULT PRIVILEGES`. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_revoke" match_grammar = Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), Ref("AlterDefaultPrivilegesObjectPrivilegesSegment"), "ON", Ref("AlterDefaultPrivilegesSchemaObjectsSegment"), "FROM", Delimited( Ref("AlterDefaultPrivilegesToFromRolesSegment"), terminators=["RESTRICT", "CASCADE"], ), Ref("DropBehaviorGrammar", optional=True), ) class DropOwnedStatementSegment(BaseSegment): """A `DROP OWNED` statement. https://www.postgresql.org/docs/15/sql-drop-owned.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6667 """ type = "drop_owned_statement" match_grammar = Sequence( "DROP", "OWNED", "BY", Delimited( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), Ref("DropBehaviorGrammar", optional=True), ) class ReassignOwnedStatementSegment(BaseSegment): """A `REASSIGN OWNED` statement. https://www.postgresql.org/docs/15/sql-reassign-owned.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6678 """ type = "reassign_owned_statement" match_grammar = Sequence( "REASSIGN", "OWNED", "BY", Delimited( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), "TO", OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ) class CommentOnStatementSegment(BaseSegment): """`COMMENT ON` statement. https://www.postgresql.org/docs/13/sql-comment.html """ type = "comment_clause" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( OneOf( "TABLE", # TODO: Create a ViewReferenceSegment "VIEW", ), Ref("TableReferenceSegment"), ), Sequence( "CAST", Bracketed( Sequence( Ref("ObjectReferenceSegment"), "AS", Ref("ObjectReferenceSegment"), ), ), ), Sequence( "COLUMN", # TODO: Does this correctly emit a Table Reference? 
Ref("ColumnReferenceSegment"), ), Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), Sequence( "ON", Ref.keyword("DOMAIN", optional=True), Ref("ObjectReferenceSegment"), ), ), Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), Sequence( "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), Sequence(Ref("FunctionParameterListGrammar"), optional=True), ), Sequence( "INDEX", Ref("IndexReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), # TODO: Split out individual items if they have references Sequence( OneOf( "COLLATION", "CONVERSION", "DOMAIN", "LANGUAGE", "POLICY", "PUBLICATION", "ROLE", "RULE", "SEQUENCE", "SERVER", "STATISTICS", "SUBSCRIPTION", "TABLESPACE", "TRIGGER", "TYPE", Sequence("ACCESS", "METHOD"), Sequence("EVENT", "TRIGGER"), Sequence("FOREIGN", "DATA", "WRAPPER"), Sequence("FOREIGN", "TABLE"), Sequence("MATERIALIZED", "VIEW"), Sequence("TEXT", "SEARCH", "CONFIGURATION"), Sequence("TEXT", "SEARCH", "DICTIONARY"), Sequence("TEXT", "SEARCH", "PARSER"), Sequence("TEXT", "SEARCH", "TEMPLATE"), ), Ref("ObjectReferenceSegment"), Sequence("ON", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( OneOf( "AGGREGATE", "PROCEDURE", "ROUTINE", ), Ref("ObjectReferenceSegment"), Bracketed( Sequence( # TODO: Is this too permissive? Anything(), optional=True, ), optional=True, ), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. As specified in https://www.postgresql.org/docs/13/sql-createindex.html """ match_grammar = Sequence( "CREATE", Ref.keyword("UNIQUE", optional=True), "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Sequence( Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), optional=True, ), "ON", Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True), Bracketed(Delimited(Ref("IndexElementSegment"))), Sequence( "INCLUDE", Bracketed(Delimited(Ref("IndexElementSegment"))), optional=True ), Sequence("NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ) class AlterIndexStatementSegment(BaseSegment): """An ALTER INDEX segment. 
As per https://www.postgresql.org/docs/14/sql-alterindex.html """ type = "alter_index_statement" match_grammar = Sequence( "ALTER", "INDEX", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("IndexReferenceSegment")), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence("ATTACH", "PARTITION", Ref("IndexReferenceSegment")), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))) ), Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("NumericLiteralSegment"), "SET", "STATISTICS", Ref("NumericLiteralSegment"), ), ), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("RoleReferenceSegment")), optional=True ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Ref.keyword("NOWAIT", optional=True), ), ), ) class ReindexStatementSegment(BaseSegment): """A Reindex Statement Segment. As per https://www.postgresql.org/docs/14/sql-reindex.html """ type = "reindex_statement_segment" match_grammar = Sequence( "REINDEX", Bracketed( Delimited( Sequence("CONCURRENTLY", Ref("BooleanLiteralGrammar", optional=True)), Sequence( "TABLESPACE", Ref("TablespaceReferenceSegment"), ), Sequence("VERBOSE", Ref("BooleanLiteralGrammar", optional=True)), ), optional=True, ), OneOf( Sequence( "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Ref("IndexReferenceSegment"), ), Sequence( "TABLE", Ref.keyword("CONCURRENTLY", optional=True), Ref("TableReferenceSegment"), ), Sequence( "SCHEMA", Ref.keyword("CONCURRENTLY", optional=True), Ref("SchemaReferenceSegment"), ), Sequence( OneOf("DATABASE", "SYSTEM"), Ref.keyword("CONCURRENTLY", optional=True), Ref("DatabaseReferenceSegment"), ), ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. https://www.postgresql.org/docs/15/sql-dropindex.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719 https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6808-L6829 """ match_grammar: Matchable = Sequence( "DROP", "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Ref("IfExistsGrammar", optional=True), Delimited(Ref("IndexReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. As specified in https://www.postgresql.org/docs/13/sql-expressions.html """ _frame_extent = ansi.FrameClauseSegment._frame_extent _frame_exclusion = Sequence( "EXCLUDE", OneOf(Sequence("CURRENT", "ROW"), "GROUP", "TIES", Sequence("NO", "OTHERS")), optional=True, ) match_grammar = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), _frame_exclusion, ) class CreateSequenceOptionsSegment(ansi.CreateSequenceOptionsSegment): """Options for Create Sequence statement. 
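
    For illustration, each option after the sequence name in a statement such
    as the following is matched by this segment (names are placeholders):

        CREATE SEQUENCE serial START WITH 101 INCREMENT BY 2 NO CYCLE;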
As specified in https://www.postgresql.org/docs/13/sql-createsequence.html """ match_grammar = OneOf( Sequence("AS", Ref("DatatypeSegment")), Sequence( "INCREMENT", Ref.keyword("BY", optional=True), Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), OneOf( Sequence( "MINVALUE", Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("NO", "MINVALUE"), ), OneOf( Sequence( "MAXVALUE", Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("NO", "MAXVALUE"), ), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("CACHE", Ref("NumericLiteralSegment")), OneOf("CYCLE", Sequence("NO", "CYCLE")), Sequence("OWNED", "BY", OneOf("NONE", Ref("ColumnReferenceSegment"))), ) class CreateSequenceStatementSegment(BaseSegment): """Create Sequence Statement. As specified in https://www.postgresql.org/docs/13/sql-createsequence.html """ type = "create_sequence_statement" match_grammar = Sequence( "CREATE", Ref("TemporaryGrammar", optional=True), "SEQUENCE", Ref("IfNotExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("CreateSequenceOptionsSegment"), optional=True), ) class AlterSequenceOptionsSegment(ansi.AlterSequenceOptionsSegment): """Dialect-specific options for ALTER SEQUENCE statement. As specified in https://www.postgresql.org/docs/13/sql-altersequence.html """ match_grammar = OneOf( Sequence("AS", Ref("DatatypeSegment")), Sequence( "INCREMENT", Ref.keyword("BY", optional=True), Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), OneOf( Sequence( "MINVALUE", Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("NO", "MINVALUE"), ), OneOf( Sequence( "MAXVALUE", Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("NO", "MAXVALUE"), ), # N.B. The SEQUENCE NAME keywords are undocumented but are produced # by the pg_dump utility. See discussion in issue #1857. Sequence("SEQUENCE", "NAME", Ref("SequenceReferenceSegment")), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence( "RESTART", Ref.keyword("WITH", optional=True), Ref("SignedSegmentGrammar", optional=True), Ref("NumericLiteralSegment"), ), Sequence("CACHE", Ref("NumericLiteralSegment")), Sequence(Ref.keyword("NO", optional=True), "CYCLE"), Sequence("OWNED", "BY", OneOf("NONE", Ref("ColumnReferenceSegment"))), ) class AlterSequenceStatementSegment(ansi.AlterSequenceStatementSegment): """Alter Sequence Statement. As specified in https://www.postgresql.org/docs/13/sql-altersequence.html """ match_grammar = Sequence( "ALTER", "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), OneOf( AnyNumberOf(Ref("AlterSequenceOptionsSegment", optional=True)), Sequence( "OWNER", "TO", OneOf(Ref("ParameterNameSegment"), "CURRENT_USER", "SESSION_USER"), ), Sequence("RENAME", "TO", Ref("SequenceReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), ), ) class DropSequenceStatementSegment(ansi.DropSequenceStatementSegment): """Drop Sequence Statement. 
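
    Illustrative examples (names are placeholders):

        DROP SEQUENCE serial;
        DROP SEQUENCE IF EXISTS serial CASCADE;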
    As specified in https://www.postgresql.org/docs/13/sql-dropsequence.html
    """

    match_grammar = Sequence(
        "DROP",
        "SEQUENCE",
        Ref("IfExistsGrammar", optional=True),
        Delimited(Ref("SequenceReferenceSegment")),
        Ref("DropBehaviorGrammar", optional=True),
    )


class StatisticsReferenceSegment(ansi.ObjectReferenceSegment):
    """Statistics Reference."""

    type = "statistics_reference"


class CreateStatisticsStatementSegment(BaseSegment):
    """Create Statistics Segment.

    As specified in https://www.postgresql.org/docs/16/sql-createstatistics.html
    """

    type = "create_statistics_statement"

    match_grammar = Sequence(
        "CREATE",
        "STATISTICS",
        Sequence(
            Ref("IfNotExistsGrammar", optional=True),
            Ref("StatisticsReferenceSegment"),
            optional=True,
        ),
        Bracketed(
            Delimited(
                "DEPENDENCIES",
                "MCV",
                "NDISTINCT",
            ),
            optional=True,
        ),
        "ON",
        Delimited(
            Ref("ColumnReferenceSegment"),
            Ref("ExpressionSegment"),
        ),
        "FROM",
        Ref("TableReferenceSegment"),
    )


class AlterStatisticsStatementSegment(BaseSegment):
    """Alter Statistics Segment.

    As specified in https://www.postgresql.org/docs/16/sql-alterstatistics.html
    """

    type = "alter_statistics_statement"

    match_grammar = Sequence(
        "ALTER",
        "STATISTICS",
        Ref("StatisticsReferenceSegment"),
        OneOf(
            Sequence(
                "OWNER",
                "TO",
                OneOf(
                    OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")),
                    "CURRENT_ROLE",
                    "CURRENT_USER",
                    "SESSION_USER",
                ),
            ),
            Sequence(
                "RENAME",
                "TO",
                Ref("StatisticsReferenceSegment"),
            ),
            Sequence(
                "SET",
                OneOf(
                    Sequence(
                        "SCHEMA",
                        Ref("SchemaReferenceSegment"),
                    ),
                    Sequence(
                        "STATISTICS",
                        Ref("NumericLiteralSegment"),
                    ),
                ),
            ),
        ),
    )


class DropStatisticsStatementSegment(BaseSegment):
    """Drop Statistics Segment.

    As specified in https://www.postgresql.org/docs/16/sql-dropstatistics.html
    """

    type = "drop_statistics_statement"

    match_grammar = Sequence(
        "DROP",
        "STATISTICS",
        Ref("IfExistsGrammar", optional=True),
        Delimited(Ref("StatisticsReferenceSegment")),
        OneOf(
            "CASCADE",
            "RESTRICT",
            optional=True,
        ),
    )


class AnalyzeStatementSegment(BaseSegment):
    """Analyze Statement Segment.
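
    For illustration, statements of this shape are the target (names are
    placeholders):

        ANALYZE VERBOSE my_table;
        ANALYZE (VERBOSE, SKIP_LOCKED) my_table (col1, col2);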
As specified in https://www.postgresql.org/docs/13/sql-analyze.html """ type = "analyze_statement" _option = Sequence( OneOf("VERBOSE", "SKIP_LOCKED"), Ref("BooleanLiteralGrammar", optional=True) ) _tables_and_columns = Sequence( Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), ) match_grammar = Sequence( OneOf("ANALYZE", "ANALYSE"), OneOf(Bracketed(Delimited(_option)), "VERBOSE", optional=True), Delimited(_tables_and_columns, optional=True), ) # Adding PostgreSQL specific statements class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterDefaultPrivilegesStatementSegment"), Ref("DropOwnedStatementSegment"), Ref("ReassignOwnedStatementSegment"), Ref("CommentOnStatementSegment"), Ref("AnalyzeStatementSegment"), Ref("CreateTableAsStatementSegment"), Ref("AlterTriggerStatementSegment"), Ref("SetStatementSegment"), Ref("AlterPolicyStatementSegment"), Ref("CreatePolicyStatementSegment"), Ref("DropPolicyStatementSegment"), Ref("CreateDomainStatementSegment"), Ref("AlterDomainStatementSegment"), Ref("DropDomainStatementSegment"), Ref("CreateMaterializedViewStatementSegment"), Ref("AlterMaterializedViewStatementSegment"), Ref("DropMaterializedViewStatementSegment"), Ref("RefreshMaterializedViewStatementSegment"), Ref("AlterDatabaseStatementSegment"), Ref("DropDatabaseStatementSegment"), Ref("VacuumStatementSegment"), Ref("AlterFunctionStatementSegment"), Ref("CreateViewStatementSegment"), Ref("AlterViewStatementSegment"), Ref("ListenStatementSegment"), Ref("NotifyStatementSegment"), Ref("UnlistenStatementSegment"), Ref("LoadStatementSegment"), Ref("ResetStatementSegment"), Ref("DiscardStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("CopyStatementSegment"), Ref("DoStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("ReindexStatementSegment"), Ref("AlterRoleStatementSegment"), Ref("CreateExtensionStatementSegment"), Ref("DropExtensionStatementSegment"), Ref("AlterExtensionStatementSegment"), Ref("CreateSubscriptionStatementSegment"), Ref("AlterSubscriptionStatementSegment"), Ref("DropSubscriptionStatementSegment"), Ref("CreatePublicationStatementSegment"), Ref("AlterPublicationStatementSegment"), Ref("DropPublicationStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("AlterTypeStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("LockTableStatementSegment"), Ref("ClusterStatementSegment"), Ref("CreateCollationStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("CreateServerStatementSegment"), Ref("CreateUserMappingStatementSegment"), Ref("ImportForeignSchemaStatementSegment"), Ref("CreateForeignTableStatementSegment"), Ref("DropAggregateStatementSegment"), Ref("CreateAggregateStatementSegment"), Ref("AlterAggregateStatementSegment"), Ref("CreateStatisticsStatementSegment"), Ref("AlterStatisticsStatementSegment"), Ref("DropStatisticsStatementSegment"), Ref("ShowStatementSegment"), Ref("SetConstraintsStatementSegment"), Ref("CreateForeignDataWrapperStatementSegment"), Ref("MetaCommandQueryBufferStatement"), Ref("DropForeignTableStatement"), Ref("CreateOperatorStatementSegment"), Ref("AlterForeignTableStatementSegment"), Ref("SecurityLabelStatementSegment"), Ref("PrepareStatementSegment"), Ref("ExecuteStatementSegment"), Ref("DeallocateStatementSegment"), Ref("SetSessionAuthorizationStatementSegment"), 
Ref("ResetSessionAuthorizationStatementSegment"), ], ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-createtrigger.html """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("CONSTRAINT", optional=True), "TRIGGER", Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF")), Delimited( "INSERT", "DELETE", "TRUNCATE", Sequence( "UPDATE", Sequence( "OF", Delimited( Ref("ColumnReferenceSegment"), terminators=["OR", "ON"], ), optional=True, ), ), delimiter="OR", ), "ON", Ref("TableReferenceSegment"), AnyNumberOf( Sequence("FROM", Ref("TableReferenceSegment")), OneOf( Sequence("NOT", "DEFERRABLE"), Sequence( Ref.keyword("DEFERRABLE", optional=True), OneOf( Sequence("INITIALLY", "IMMEDIATE"), Sequence("INITIALLY", "DEFERRED"), ), ), ), Sequence( "REFERENCING", OneOf("OLD", "NEW"), "TABLE", "AS", Ref("TableReferenceSegment"), Sequence( OneOf("OLD", "NEW"), "TABLE", "AS", Ref("TableReferenceSegment"), optional=True, ), ), Sequence( "FOR", Ref.keyword("EACH", optional=True), OneOf("ROW", "STATEMENT") ), Sequence("WHEN", Bracketed(Ref("ExpressionSegment"))), ), Sequence( "EXECUTE", OneOf("FUNCTION", "PROCEDURE"), Ref("FunctionSegment"), ), ) class AlterTriggerStatementSegment(BaseSegment): """Alter Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-altertrigger.html """ type = "alter_trigger" match_grammar = Sequence( "ALTER", "TRIGGER", Ref("TriggerReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TriggerReferenceSegment")), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class DropTriggerStatementSegment(ansi.DropTriggerStatementSegment): """Drop Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-droptrigger.html """ match_grammar = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. """ match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed( Delimited( Sequence(Ref("ParameterNameSegment"), Ref("DatatypeSegment")) ) ), ), ), Dedent, ) class AsAliasExpressionSegment(BaseSegment): """A reference to an object with an `AS` clause. This is used in `InsertStatementSegment` in Postgres since the `AS` is not optional in this context. N.B. We keep as a separate segment since the `alias_expression` type is required for rules to interpret the alias. """ type = "alias_expression" match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=False), Ref("SingleIdentifierGrammar"), Dedent, ) class OperationClassReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an operation class.""" type = "operation_class_reference" class ConflictActionSegment(BaseSegment): """A Conflict Action Statement used within an INSERT statement. 
As specified in https://www.postgresql.org/docs/14/sql-insert.html """ type = "conflict_action" match_grammar = Sequence( "DO", OneOf( "NOTHING", Sequence( "UPDATE", Indent, "SET", ImplicitIndent, Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), OneOf(Ref("ExpressionSegment"), "DEFAULT"), ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("EqualsSegment"), Ref.keyword("ROW", optional=True), Bracketed( Delimited(OneOf(Ref("ExpressionSegment"), "DEFAULT")) ), ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("EqualsSegment"), Bracketed(Ref("SelectableGrammar")), ), ) ), Dedent, Sequence("WHERE", Ref("ExpressionSegment"), optional=True), Dedent, ), ), ) class ConflictTargetSegment(BaseSegment): """A Conflict Target Statement used within an INSERT statement. As specified in https://www.postgresql.org/docs/14/sql-insert.html """ type = "conflict_target" match_grammar = OneOf( Sequence( Bracketed( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Bracketed(Ref("ExpressionSegment")), Ref("FunctionSegment"), ), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("OperationClassReferenceSegment", optional=True), ) ) ), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ), Sequence("ON", "CONSTRAINT", Ref("ParameterNameSegment")), ) class InsertStatementSegment(ansi.InsertStatementSegment): """An `INSERT` statement. https://www.postgresql.org/docs/14/sql-insert.html """ match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), Ref("AsAliasExpressionSegment", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("OVERRIDING", OneOf("SYSTEM", "USER"), "VALUE", optional=True), OneOf( Sequence("DEFAULT", "VALUES"), Ref("SelectableGrammar"), ), Sequence( "ON", "CONFLICT", Ref("ConflictTargetSegment", optional=True), Ref("ConflictActionSegment"), optional=True, ), Sequence( "RETURNING", Indent, OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AsAliasExpressionSegment", optional=True), ), ), ), Dedent, optional=True, ), ) class DropTypeStatementSegment(ansi.DropTypeStatementSegment): """Drop Type Statement. As specified in https://www.postgresql.org/docs/14/sql-droptype.html """ match_grammar = Sequence( "DROP", "TYPE", Ref("IfExistsGrammar", optional=True), Delimited(Ref("DatatypeSegment")), Ref("DropBehaviorGrammar", optional=True), ) class SetStatementSegment(BaseSegment): """Set Statement. As specified in https://www.postgresql.org/docs/14/sql-set.html Also: https://www.postgresql.org/docs/15/sql-set-role.html (still a VariableSetStmt) https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1584 """ type = "set_statement" match_grammar = Sequence( "SET", OneOf("SESSION", "LOCAL", optional=True), OneOf( Sequence( Ref("ParameterNameSegment"), OneOf("TO", Ref("EqualsSegment")), OneOf( "DEFAULT", Delimited( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815 Ref("OnKeywordAsIdentifierSegment"), ), ), ), Sequence( "TIME", "ZONE", OneOf(Ref("QuotedLiteralSegment"), "LOCAL", "DEFAULT") ), Sequence("SCHEMA", Ref("QuotedLiteralSegment")), Sequence("ROLE", OneOf("NONE", Ref("RoleReferenceSegment"))), ), ) class CreatePolicyStatementSegment(BaseSegment): """A `CREATE POLICY` statement. 
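
    Illustrative example (adapted from the PostgreSQL docs; names are
    placeholders):

        CREATE POLICY account_managers ON accounts TO managers
            USING (manager = current_user);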
As Specified in https://www.postgresql.org/docs/14/sql-createpolicy.html """ type = "create_policy_statement" match_grammar = Sequence( "CREATE", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence("AS", OneOf("PERMISSIVE", "RESTRICTIVE"), optional=True), Sequence( "FOR", OneOf("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"), optional=True ), Sequence( "TO", Delimited( OneOf( Ref("ObjectReferenceSegment"), "PUBLIC", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ) ), optional=True, ), Sequence("USING", Bracketed(Ref("ExpressionSegment")), optional=True), Sequence("WITH", "CHECK", Bracketed(Ref("ExpressionSegment")), optional=True), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://www.postgresql.org/docs/14/sql-call.html """ type = "call_statement" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class CreateDomainStatementSegment(BaseSegment): """A `CREATE Domain` statement. As Specified in https://www.postgresql.org/docs/current/sql-createdomain.html """ type = "create_domain_statement" match_grammar = Sequence( "CREATE", "DOMAIN", Ref("ObjectReferenceSegment"), Sequence("AS", optional=True), Ref("DatatypeSegment"), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), Sequence("DEFAULT", Ref("ExpressionSegment"), optional=True), AnyNumberOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Ref("ExpressionSegment")), ), ), ), ) class AlterDomainStatementSegment(BaseSegment): """An `ALTER DOMAIN` statement. As Specified in https://www.postgresql.org/docs/current/sql-alterdomain.html """ type = "alter_domain_statement" match_grammar: Matchable = Sequence( "ALTER", "DOMAIN", Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", "DEFAULT", Ref("ExpressionSegment"), ), Sequence( "DROP", "DEFAULT", ), Sequence(OneOf("SET", "DROP"), "NOT", "NULL"), Sequence( "ADD", Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Ref("ExpressionSegment")), ), Sequence("NOT", "VALID", optional=True), ), Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf("RESTRICT", "CASCADE", optional=True), ), Sequence( "RENAME", "CONSTRAINT", Ref("ObjectReferenceSegment"), "TO", Ref("ObjectReferenceSegment"), ), Sequence( "VALIDATE", "CONSTRAINT", Ref("ObjectReferenceSegment"), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "SET", "SCHEMA", Ref("ObjectReferenceSegment"), ), ), ) class DropDomainStatementSegment(BaseSegment): """Drop Domain Statement. As Specified in https://www.postgresql.org/docs/current/sql-dropdomain.html """ type = "drop_domain_statement" match_grammar = Sequence( "DROP", "DOMAIN", Ref("IfExistsGrammar", optional=True), Delimited(Ref("ObjectReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class DropPolicyStatementSegment(BaseSegment): """A `DROP POLICY` statement. 
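
    Illustrative example (names are placeholders):

        DROP POLICY p1 ON my_table;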
    As Specified in https://www.postgresql.org/docs/14/sql-droppolicy.html
    """

    type = "drop_policy_statement"

    match_grammar = Sequence(
        "DROP",
        "POLICY",
        Ref("IfExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
        "ON",
        Ref("TableReferenceSegment"),
        Ref("DropBehaviorGrammar", optional=True),
    )


class LoadStatementSegment(BaseSegment):
    """A `LOAD` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-load.html
    """

    type = "load_statement"

    match_grammar = Sequence(
        "LOAD",
        Ref("QuotedLiteralSegment"),
    )


class ResetStatementSegment(BaseSegment):
    """A `RESET` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-reset.html

    Also, RESET ROLE from:
    https://www.postgresql.org/docs/15/sql-set-role.html
    """

    type = "reset_statement"

    match_grammar = Sequence(
        "RESET",
        OneOf("ALL", "ROLE", Ref("ParameterNameSegment")),
    )


class DiscardStatementSegment(BaseSegment):
    """A `DISCARD` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-discard.html
    """

    type = "discard_statement"

    match_grammar = Sequence(
        "DISCARD",
        OneOf(
            "ALL",
            "PLANS",
            "SEQUENCES",
            "TEMPORARY",
            "TEMP",
        ),
    )


class ListenStatementSegment(BaseSegment):
    """A `LISTEN` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-listen.html
    """

    type = "listen_statement"

    match_grammar = Sequence("LISTEN", Ref("SingleIdentifierGrammar"))


class NotifyStatementSegment(BaseSegment):
    """A `NOTIFY` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-notify.html
    """

    type = "notify_statement"

    match_grammar = Sequence(
        "NOTIFY",
        Ref("SingleIdentifierGrammar"),
        Sequence(
            Ref("CommaSegment"),
            Ref("QuotedLiteralSegment"),
            optional=True,
        ),
    )


class UnlistenStatementSegment(BaseSegment):
    """An `UNLISTEN` statement.

    As Specified in https://www.postgresql.org/docs/14/sql-unlisten.html
    """

    type = "unlisten_statement"

    match_grammar = Sequence(
        "UNLISTEN",
        OneOf(
            Ref("SingleIdentifierGrammar"),
            Ref("StarSegment"),
        ),
    )


class TruncateStatementSegment(ansi.TruncateStatementSegment):
    """`TRUNCATE TABLE` statement.

    https://www.postgresql.org/docs/14/sql-truncate.html
    """

    match_grammar = Sequence(
        "TRUNCATE",
        Ref.keyword("TABLE", optional=True),
        Delimited(
            OneOf(
                Sequence(
                    Ref.keyword("ONLY", optional=True),
                    Ref("TableReferenceSegment"),
                ),
                Sequence(
                    Ref("TableReferenceSegment"),
                    Ref("StarSegment", optional=True),
                ),
            ),
        ),
        Sequence(
            OneOf("RESTART", "CONTINUE"),
            "IDENTITY",
            optional=True,
        ),
        Ref(
            "DropBehaviorGrammar",
            optional=True,
        ),
    )


class CopyStatementSegment(BaseSegment):
    """A `COPY` statement.
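
    Illustrative examples (adapted from the PostgreSQL docs; names and paths
    are placeholders):

        COPY country TO STDOUT (DELIMITER '|');
        COPY country FROM '/usr1/proj/bray/sql/country_data';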
As Specified in https://www.postgresql.org/docs/14/sql-copy.html """ type = "copy_statement" _target_subset = OneOf( Ref("QuotedLiteralSegment"), Sequence("PROGRAM", Ref("QuotedLiteralSegment")) ) _table_definition = Sequence( Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), ) _option = Sequence( Ref.keyword("WITH", optional=True), Bracketed( Delimited( AnySetOf( Sequence("FORMAT", Ref("SingleIdentifierGrammar")), Sequence("FREEZE", Ref("BooleanLiteralGrammar", optional=True)), Sequence("DELIMITER", Ref("QuotedLiteralSegment")), Sequence("NULL", Ref("QuotedLiteralSegment")), Sequence("HEADER", Ref("BooleanLiteralGrammar", optional=True)), Sequence("QUOTE", Ref("QuotedLiteralSegment")), Sequence("ESCAPE", Ref("QuotedLiteralSegment")), Sequence( "FORCE_QUOTE", OneOf( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("StarSegment"), ), ), Sequence( "FORCE_NOT_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "FORCE_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence("ENCODING", Ref("QuotedLiteralSegment")), ) ) ), optional=True, ) _postgres9_compatible_stdin_options = Sequence( Ref.keyword("WITH", optional=True), AnySetOf( Sequence("BINARY"), Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "NULL", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment") ), Sequence( "CSV", OneOf( "HEADER", Sequence( "QUOTE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "ESCAPE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FORCE", "NOT", "NULL", Delimited(Ref("ColumnReferenceSegment")), ), optional=True, ), ), optional=True, ), optional=True, ) _postgres9_compatible_stdout_options = Sequence( Ref.keyword("WITH", optional=True), AnySetOf( Sequence("BINARY"), Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "NULL", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment") ), Sequence( "CSV", OneOf( "HEADER", Sequence( "QUOTE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "ESCAPE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FORCE", "QUOTE", OneOf( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("StarSegment"), ), ), optional=True, ), ), optional=True, ), optional=True, ) match_grammar = Sequence( "COPY", OneOf( Sequence( _table_definition, "FROM", OneOf( _target_subset, Sequence("STDIN"), ), _option, Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ), Sequence( _table_definition, "FROM", OneOf( Ref("QuotedLiteralSegment"), Sequence("STDIN"), ), _postgres9_compatible_stdin_options, ), Sequence( OneOf( _table_definition, Bracketed(Ref("UnorderedSelectStatementSegment")) ), "TO", OneOf( _target_subset, Sequence("STDOUT"), ), _option, ), Sequence( OneOf( _table_definition, Bracketed(Ref("UnorderedSelectStatementSegment")) ), "TO", OneOf( Ref("QuotedLiteralSegment"), Sequence("STDOUT"), ), _postgres9_compatible_stdout_options, ), ), ) class LanguageClauseSegment(BaseSegment): """Clause specifying language used for executing anonymous code blocks.""" type = "language_clause" match_grammar = Sequence( "LANGUAGE", OneOf(Ref("NakedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment")), ) class DoStatementSegment(BaseSegment): """A `DO` statement for executing anonymous code blocks. 
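
    For illustration, a statement of this shape is the target (the body is a
    placeholder):

        DO LANGUAGE plpgsql 'BEGIN RAISE NOTICE ''hello''; END';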
    As specified in https://www.postgresql.org/docs/14/sql-do.html
    """

    type = "do_statement"

    match_grammar = Sequence(
        "DO",
        OneOf(
            Sequence(
                Ref("LanguageClauseSegment", optional=True),
                Ref("QuotedLiteralSegment"),
            ),
            Sequence(
                Ref("QuotedLiteralSegment"),
                Ref("LanguageClauseSegment", optional=True),
            ),
        ),
    )


class CTEDefinitionSegment(ansi.CTEDefinitionSegment):
    """A CTE Definition from a WITH statement.

    https://www.postgresql.org/docs/14/queries-with.html

    TODO: Data-Modifying Statements (INSERT, UPDATE, DELETE) in WITH
    """

    match_grammar = Sequence(
        Ref("SingleIdentifierGrammar"),
        Ref("CTEColumnList", optional=True),
        "AS",
        Sequence(Ref.keyword("NOT", optional=True), "MATERIALIZED", optional=True),
        Bracketed(
            Ref("SelectableGrammar"),
            parse_mode=ParseMode.GREEDY,
        ),
        OneOf(
            Sequence(
                "SEARCH",
                OneOf(
                    "BREADTH",
                    "DEPTH",
                ),
                "FIRST",
                "BY",
                Ref("ColumnReferenceSegment"),
                "SET",
                Ref("ColumnReferenceSegment"),
            ),
            Sequence(
                "CYCLE",
                Ref("ColumnReferenceSegment"),
                "SET",
                Ref("ColumnReferenceSegment"),
                "USING",
                Ref("ColumnReferenceSegment"),
            ),
            optional=True,
        ),
    )


class ValuesClauseSegment(ansi.ValuesClauseSegment):
    """A `VALUES` clause within a `WITH` or `SELECT` statement."""

    match_grammar = Sequence(
        "VALUES",
        Delimited(
            Bracketed(
                Delimited(
                    Ref("ExpressionSegment"),
                    # DEFAULT keyword used in
                    # INSERT INTO statement.
                    "DEFAULT",
                ),
                parse_mode=ParseMode.GREEDY,
            ),
        ),
        Ref("AliasExpressionSegment", optional=True),
        Ref("OrderByClauseSegment", optional=True),
        Ref("LimitClauseSegment", optional=True),
    )


class DeleteStatementSegment(ansi.DeleteStatementSegment):
    """A `DELETE` statement.

    https://www.postgresql.org/docs/14/sql-delete.html
    """

    match_grammar = Sequence(
        "DELETE",
        "FROM",
        Ref.keyword("ONLY", optional=True),
        Ref("TableReferenceSegment"),
        Ref("StarSegment", optional=True),
        Ref("AliasExpressionSegment", optional=True),
        Sequence(
            "USING",
            Indent,
            Delimited(
                Sequence(
                    Ref("TableExpressionSegment"),
                    Ref("AliasExpressionSegment", optional=True),
                ),
            ),
            Dedent,
            optional=True,
        ),
        Ref("JoinClauseSegment", optional=True),
        OneOf(
            Sequence("WHERE", "CURRENT", "OF", Ref("ObjectReferenceSegment")),
            Ref("WhereClauseSegment"),
            optional=True,
        ),
        Sequence(
            "RETURNING",
            Indent,
            OneOf(
                Ref("StarSegment"),
                Delimited(
                    Sequence(
                        Ref("ExpressionSegment"),
                        Ref("AliasExpressionSegment", optional=True),
                    ),
                ),
            ),
            Dedent,
            optional=True,
        ),
    )


class SetClauseSegment(BaseSegment):
    """SQL 1992 set clause.

    <set clause> ::=
        <object column> <equals operator> <update source>

    <update source> ::=
        <value expression>
        | <null specification>
        | DEFAULT

    <object column> ::= <column name>
    """

    type = "set_clause"

    match_grammar: Matchable = Sequence(
        OneOf(
            Sequence(
                Ref("ColumnReferenceSegment"),
                Ref("ArrayAccessorSegment", optional=True),
                Ref("EqualsSegment"),
                OneOf(
                    Ref("LiteralGrammar"),
                    Ref("BareFunctionSegment"),
                    Ref("FunctionSegment"),
                    Ref("ColumnReferenceSegment"),
                    Ref("ExpressionSegment"),
                    "DEFAULT",
                ),
                AnyNumberOf(Ref("ShorthandCastSegment")),
            ),
            Sequence(
                Bracketed(
                    Delimited(
                        Ref("ColumnReferenceSegment"),
                    ),
                ),
                Ref("EqualsSegment"),
                Bracketed(
                    OneOf(
                        # Potentially a bracketed SELECT
                        Ref("SelectableGrammar"),
                        # Or a delimited list of literals
                        Delimited(
                            Sequence(
                                OneOf(
                                    Ref("LiteralGrammar"),
                                    Ref("BareFunctionSegment"),
                                    Ref("FunctionSegment"),
                                    Ref("ColumnReferenceSegment"),
                                    Ref("ExpressionSegment"),
                                    "DEFAULT",
                                ),
                                AnyNumberOf(Ref("ShorthandCastSegment")),
                            ),
                        ),
                    ),
                ),
            ),
        ),
    )


class UpdateStatementSegment(BaseSegment):
    """An `UPDATE` statement.

    https://www.postgresql.org/docs/current/sql-update.html
    """

    type = "update_statement"

    match_grammar: Matchable = Sequence(
        # TODO add [ WITH [ RECURSIVE ] with_query [, ...]
] "UPDATE", Ref.keyword("ONLY", optional=True), Indent, Ref("TableReferenceSegment"), # SET is not a reserved word in all dialects (e.g. RedShift) # So specifically exclude as an allowed implicit alias to avoid parsing errors Ref("AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True), Dedent, Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), OneOf( Sequence("WHERE", "CURRENT", "OF", Ref("ObjectReferenceSegment")), Ref("WhereClauseSegment"), optional=True, ), Sequence( "RETURNING", Indent, OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), ), Dedent, optional=True, ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement. https://www.postgresql.org/docs/current/sql-createtype.html """ type = "create_type_statement" match_grammar: Matchable = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), Sequence("AS", OneOf("ENUM", "RANGE", optional=True), optional=True), Bracketed(Delimited(Anything(), optional=True), optional=True), ) class AlterTypeStatementSegment(BaseSegment): """An `ALTER TYPE` statement. https://www.postgresql.org/docs/current/sql-altertype.html """ type = "alter_type_statement" match_grammar: Matchable = Sequence( "ALTER", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence( "OWNER", "TO", OneOf( "CURRENT_USER", "SESSION_USER", "CURRENT_ROLE", Ref("ObjectReferenceSegment"), ), ), Sequence( "RENAME", "VALUE", Ref("QuotedLiteralSegment"), "TO", Ref("QuotedLiteralSegment"), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "SET", "SCHEMA", Ref("SchemaReferenceSegment"), ), Delimited( Sequence( "ADD", "ATTRIBUTE", Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "ALTER", "ATTRIBUTE", Ref("ColumnReferenceSegment"), Sequence("SET", "DATA", optional=True), "TYPE", Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "DROP", "ATTRIBUTE", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "RENAME", "ATTRIBUTE", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), Ref("CascadeRestrictGrammar", optional=True), ), ), Sequence( "ADD", "VALUE", Ref("IfNotExistsGrammar", optional=True), Ref("QuotedLiteralSegment"), Sequence( OneOf("BEFORE", "AFTER"), Ref("QuotedLiteralSegment"), optional=True ), ), ), ) class CreateCollationStatementSegment(BaseSegment): """A `CREATE COLLATION` statement. 
    https://www.postgresql.org/docs/current/sql-createcollation.html
    """

    type = "create_collation_statement"

    match_grammar: Matchable = Sequence(
        "CREATE",
        "COLLATION",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
        OneOf(
            Bracketed(
                Delimited(
                    Sequence(
                        "LOCALE",
                        Ref("EqualsSegment"),
                        Ref("QuotedLiteralSegment"),
                    ),
                    Sequence(
                        "LC_COLLATE",
                        Ref("EqualsSegment"),
                        Ref("QuotedLiteralSegment"),
                    ),
                    Sequence(
                        "LC_CTYPE",
                        Ref("EqualsSegment"),
                        Ref("QuotedLiteralSegment"),
                    ),
                    Sequence(
                        "PROVIDER",
                        Ref("EqualsSegment"),
                        OneOf("ICU", "LIBC"),
                    ),
                    Sequence(
                        "DETERMINISTIC",
                        Ref("EqualsSegment"),
                        Ref("BooleanLiteralGrammar"),
                    ),
                    Sequence(
                        "VERSION",
                        Ref("EqualsSegment"),
                        Ref("QuotedLiteralSegment"),
                    ),
                )
            ),
            Sequence(
                "FROM",
                Ref("ObjectReferenceSegment"),
            ),
        ),
    )


class AlterSchemaStatementSegment(BaseSegment):
    """An `ALTER SCHEMA` statement.

    https://www.postgresql.org/docs/current/sql-alterschema.html
    """

    type = "alter_schema_statement"

    match_grammar = Sequence(
        "ALTER",
        "SCHEMA",
        Ref("SchemaReferenceSegment"),
        OneOf(
            Sequence(
                "RENAME",
                "TO",
                Ref("SchemaReferenceSegment"),
            ),
            Sequence(
                "OWNER",
                "TO",
                Ref("RoleReferenceSegment"),
            ),
        ),
    )


class LockTableStatementSegment(BaseSegment):
    """A `LOCK TABLE` statement.

    https://www.postgresql.org/docs/14/sql-lock.html
    """

    type = "lock_table_statement"

    match_grammar: Matchable = Sequence(
        "LOCK",
        Ref.keyword("TABLE", optional=True),
        Ref.keyword("ONLY", optional=True),
        OneOf(
            Delimited(
                Ref("TableReferenceSegment"),
            ),
            Ref("StarSegment"),
        ),
        Sequence(
            "IN",
            OneOf(
                Sequence("ACCESS", "SHARE"),
                Sequence("ROW", "SHARE"),
                Sequence("ROW", "EXCLUSIVE"),
                Sequence("SHARE", "UPDATE", "EXCLUSIVE"),
                "SHARE",
                Sequence("SHARE", "ROW", "EXCLUSIVE"),
                "EXCLUSIVE",
                Sequence("ACCESS", "EXCLUSIVE"),
            ),
            "MODE",
            optional=True,
        ),
        Ref.keyword("NOWAIT", optional=True),
    )


class ClusterStatementSegment(BaseSegment):
    """A `CLUSTER` statement.

    https://www.postgresql.org/docs/current/sql-cluster.html
    """

    type = "cluster_statement"

    match_grammar = Sequence(
        "CLUSTER",
        Ref.keyword("VERBOSE", optional=True),
        OneOf(
            Sequence(
                Ref("TableReferenceSegment"),
                Sequence("USING", Ref("IndexReferenceSegment"), optional=True),
            ),
            Sequence(Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment")),
            optional=True,
        ),
    )


class ColumnReferenceSegment(ansi.ObjectReferenceSegment):
    """A reference to column, field or alias.

    We override this for Postgres to allow keywords in fully qualified column
    names (using Full segments), similar to how this is done in BigQuery.
    """

    type = "column_reference"

    match_grammar: Matchable = Sequence(
        Ref("SingleIdentifierGrammar"),
        Sequence(
            OneOf(Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))),
            Delimited(
                Ref("SingleIdentifierFullGrammar"),
                delimiter=OneOf(
                    Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))
                ),
                terminators=[
                    "ON",
                    "AS",
                    "USING",
                    Ref("CommaSegment"),
                    Ref("CastOperatorSegment"),
                    Ref("StartSquareBracketSegment"),
                    Ref("StartBracketSegment"),
                    Ref("BinaryOperatorGrammar"),
                    Ref("ColonSegment"),
                    Ref("DelimiterGrammar"),
                    Ref("JoinLikeClauseGrammar"),
                    BracketedSegment,
                ],
                allow_gaps=False,
            ),
            allow_gaps=False,
            optional=True,
        ),
        allow_gaps=False,
    )


class NamedArgumentSegment(BaseSegment):
    """Named argument to a function.
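
    For illustration, this segment covers each `name => value` pair in a call
    like (function and argument names are placeholders):

        SELECT concat_lower_or_upper(a => 'Hello', b => 'World');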
https://www.postgresql.org/docs/current/sql-syntax-calling-funcs.html#SQL-SYNTAX-CALLING-FUNCS-NAMED """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), OneOf(Ref("RightArrowSegment"), Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Override from ANSI to allow optional WITH ORDINALITY clause """ match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Sequence( Ref("FunctionSegment"), Sequence("WITH", "ORDINALITY", optional=True), ), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), ) class ServerReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a server.""" type = "server_reference" class CreateServerStatementSegment(BaseSegment): """Create server statement. https://www.postgresql.org/docs/15/sql-createserver.html """ type = "create_server_statement" match_grammar: Matchable = Sequence( "CREATE", "SERVER", Ref("IfNotExistsGrammar", optional=True), Ref("ServerReferenceSegment"), Sequence("TYPE", Ref("QuotedLiteralSegment"), optional=True), Sequence("VERSION", Ref("VersionIdentifierSegment"), optional=True), Ref("ForeignDataWrapperGrammar"), Ref("ObjectReferenceSegment"), Ref("OptionsGrammar", optional=True), ) class CreateUserMappingStatementSegment(BaseSegment): """Create user mapping statement. https://www.postgresql.org/docs/15/sql-createusermapping.html """ type = "create_user_mapping_statement" match_grammar: Matchable = Sequence( Ref("CreateUserMappingGrammar"), Ref("IfNotExistsGrammar", optional=True), "FOR", OneOf( Ref("SingleIdentifierGrammar"), Ref("SessionInformationUserFunctionsGrammar"), "PUBLIC", ), "SERVER", Ref("ServerReferenceSegment"), Ref("OptionsGrammar", optional=True), ) class ImportForeignSchemaStatementSegment(BaseSegment): """Import foreign schema statement. https://www.postgresql.org/docs/15/sql-importforeignschema.html """ type = "import_foreign_schema_statement" match_grammar: Matchable = Sequence( Ref("ImportForeignSchemaGrammar"), Ref("SchemaReferenceSegment"), Sequence( OneOf(Sequence("LIMIT", "TO"), "EXCEPT"), Bracketed(Delimited(Ref("NakedIdentifierFullSegment"))), optional=True, ), "FROM", "SERVER", Ref("ServerReferenceSegment"), "INTO", Ref("SchemaReferenceSegment"), Ref("OptionsGrammar", optional=True), ) class CreateForeignTableStatementSegment(BaseSegment): """Create foreign table statement. 
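
    Illustrative example (adapted from the PostgreSQL docs; names are
    placeholders):

        CREATE FOREIGN TABLE films (
            code char(5) NOT NULL,
            title varchar(40) NOT NULL
        ) SERVER film_server;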


class CreateForeignTableStatementSegment(BaseSegment):
    """Create foreign table statement.

    https://www.postgresql.org/docs/current/sql-createforeigntable.html
    """

    type = "create_foreign_table_statement"
    match_grammar: Matchable = OneOf(
        Sequence(
            Ref("CreateForeignTableGrammar"),
            Ref("IfNotExistsGrammar", optional=True),
            Ref("TableReferenceSegment"),
            Bracketed(
                Delimited(
                    OneOf(
                        Sequence(
                            Ref("ColumnReferenceSegment"),
                            Ref("DatatypeSegment"),
                            Ref("OptionsGrammar", optional=True),
                            Sequence(
                                "COLLATE",
                                Ref("CollationReferenceSegment"),
                                optional=True,
                            ),
                            AnyNumberOf(Ref("ForeignTableColumnConstraintSegment")),
                        ),
                        Ref("ForeignTableTableConstraintSegment"),
                    ),
                ),
                optional=True,
            ),
            Sequence(
                "INHERITS",
                Bracketed(Delimited(Ref("TableReferenceSegment"))),
                optional=True,
            ),
            Sequence("SERVER", Ref("ServerReferenceSegment")),
            Ref("OptionsGrammar", optional=True),
        ),
        Sequence(
            Ref("CreateForeignTableGrammar"),
            Ref("IfNotExistsGrammar", optional=True),
            Ref("TableReferenceSegment"),
            Sequence(
                "PARTITION",
                "OF",
                Ref("TableReferenceSegment"),
                Bracketed(
                    Delimited(
                        OneOf(
                            Sequence(
                                Ref("ColumnReferenceSegment"),
                                Sequence("WITH", "OPTIONS", optional=True),
                                AnyNumberOf(Ref("ForeignTableColumnConstraintSegment")),
                            ),
                            Ref("ForeignTableTableConstraintSegment"),
                        )
                    ),
                    optional=True,
                ),
                OneOf(
                    Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")),
                    "DEFAULT",
                ),
            ),
            Sequence("SERVER", Ref("ServerReferenceSegment")),
            Ref("OptionsGrammar", optional=True),
        ),
    )


class OverlapsClauseSegment(ansi.OverlapsClauseSegment):
    """An `OVERLAPS` clause.

    https://www.postgresql.org/docs/current/functions-datetime.html
    """

    match_grammar: Matchable = Sequence(
        OneOf(
            Sequence(
                Bracketed(
                    OneOf(
                        Ref("ColumnReferenceSegment"),
                        Ref("DateTimeLiteralGrammar"),
                        Ref("ShorthandCastSegment"),
                        Ref("FunctionContentsExpressionGrammar"),
                    ),
                    Ref("CommaSegment"),
                    OneOf(
                        Ref("ColumnReferenceSegment"),
                        Ref("DateTimeLiteralGrammar"),
                        Ref("ShorthandCastSegment"),
                        Ref("FunctionContentsExpressionGrammar"),
                    ),
                )
            ),
            Ref("ColumnReferenceSegment"),
        ),
        "OVERLAPS",
        OneOf(
            Sequence(
                Bracketed(
                    OneOf(
                        Ref("ColumnReferenceSegment"),
                        Ref("DateTimeLiteralGrammar"),
                        Ref("ShorthandCastSegment"),
                        Ref("FunctionContentsExpressionGrammar"),
                    ),
                    Ref("CommaSegment"),
                    OneOf(
                        Ref("ColumnReferenceSegment"),
                        Ref("DateTimeLiteralGrammar"),
                        Ref("ShorthandCastSegment"),
                        Ref("FunctionContentsExpressionGrammar"),
                    ),
                )
            ),
            Ref("ColumnReferenceSegment"),
        ),
    )


class ShowStatementSegment(BaseSegment):
    """A SHOW statement.

    As per https://www.postgresql.org/docs/16/sql-show.html
    """

    type = "show_statement"
    match_grammar = Sequence(
        "SHOW",
        OneOf(
            "ALL",
            "IS_SUPERUSER",
            "LC_COLLATE",
            "LC_CTYPE",
            "SERVER_ENCODING",
            "SERVER_VERSION",
            Ref("ParameterNameSegment"),
        ),
    )


class MetaCommandQueryBufferStatement(BaseSegment):
    """A statement which uses meta-commands to change the query buffer.

    E.g. ``gset`` and ``gexec``.

    https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-META-COMMAND-GEXEC
    """

    type = "meta_command_statement"
    match_grammar = Sequence(
        AnyNumberOf(
            Sequence(
                Ref("SelectStatementSegment"),
                Ref("MetaCommandQueryBufferSegment", optional=True),
            )
        )
    )
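# The psql ``\gexec`` meta-command sends the current query buffer to the
# server and then executes each value in the result as a new statement,
# e.g. (adapted from the example in the psql documentation):
#
#     SELECT format('create index on my_table(%I)', attname)
#     FROM pg_attribute
#     WHERE attrelid = 'my_table'::regclass AND attnum > 0
#     ORDER BY attnum \gexec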


class DropForeignTableStatement(BaseSegment):
    """A `DROP FOREIGN TABLE` statement.

    https://www.postgresql.org/docs/current/sql-dropforeigntable.html
    """

    type = "drop_foreign_table_statement"
    match_grammar = Sequence(
        "DROP",
        "FOREIGN",
        "TABLE",
        Ref("IfExistsGrammar", optional=True),
        Delimited(Ref("TableReferenceSegment")),
        Ref("CascadeRestrictGrammar", optional=True),
    )


class ColumnTypeReferenceSegment(BaseSegment):
    """A column type reference segment (e.g. `table_name.column_name%type`).

    https://www.postgresql.org/docs/current/sql-createfunction.html
    """

    type = "column_type_reference"
    match_grammar = Sequence(
        Ref("ColumnReferenceSegment"), Ref("ModuloSegment"), "TYPE"
    )


class CreateOperatorStatementSegment(BaseSegment):
    """A `CREATE OPERATOR` statement.

    As specified in https://www.postgresql.org/docs/17/sql-createoperator.html
    """

    type = "create_operator_statement"
    match_grammar = Sequence(
        "CREATE",
        "OPERATOR",
        AnyNumberOf(
            RegexParser(r"^[+\-*/<>=~!@#%^&|`?]+$", SymbolSegment, "commutator"),
        ),
        Bracketed(
            Delimited(
                Sequence(
                    OneOf("LEFTARG", "RIGHTARG"),
                    Ref("EqualsSegment"),
                    Ref("ObjectReferenceSegment"),
                    optional=True,
                ),
                Sequence(
                    "COMMUTATOR",
                    Ref("EqualsSegment"),
                    AnyNumberOf(
                        RegexParser(
                            r"^[+\-*/<>=~!@#%^&|`?]+$", SymbolSegment, "commutator"
                        ),
                    ),
                    optional=True,
                ),
                Sequence(
                    "NEGATOR",
                    Ref("EqualsSegment"),
                    AnyNumberOf(
                        RegexParser(
                            r"^[+\-*/<>=~!@#%^&|`?]+$", SymbolSegment, "negator"
                        ),
                    ),
                    optional=True,
                ),
                Sequence(
                    OneOf("RESTRICT", "JOIN", OneOf("PROCEDURE", "FUNCTION")),
                    Ref("EqualsSegment"),
                    Ref("FunctionNameSegment"),
                    optional=True,
                ),
                Ref.keyword("HASHES", optional=True),
                Ref.keyword("MERGES", optional=True),
            )
        ),
    )


class AlterForeignTableStatementSegment(BaseSegment):
    """An `ALTER FOREIGN TABLE` statement.

    https://www.postgresql.org/docs/17/sql-alterforeigntable.html
    """

    type = "alter_foreign_table_statement"
    match_grammar = Sequence(
        "ALTER",
        "FOREIGN",
        "TABLE",
        Sequence(
            Ref("IfExistsGrammar", optional=True),
            Ref.keyword("ONLY", optional=True),
            Ref("TableReferenceSegment"),
            Ref("StarSegment", optional=True),
            OneOf(
                Delimited(Ref("AlterForeignTableActionSegment")),
                Sequence(
                    "RENAME",
                    Ref.keyword("COLUMN", optional=True),
                    Ref("ColumnReferenceSegment"),
                    "TO",
                    Ref("ColumnReferenceSegment"),
                ),
            ),
        ),
    )


class AlterForeignTableActionSegment(AlterTableActionSegment):
    """Alter Foreign Table Action Segment.

    https://www.postgresql.org/docs/17/sql-alterforeigntable.html
    """

    type = "alter_foreign_table_action_segment"
    match_grammar = AlterTableActionSegment.match_grammar.copy(
        insert=[
            Sequence(
                Sequence(
                    "ALTER",
                    Ref.keyword("COLUMN", optional=True),
                    Ref("ColumnReferenceSegment"),
                    optional=True,
                ),
                "OPTIONS",
                Bracketed(
                    Delimited(
                        Sequence(
                            OneOf("ADD", "SET", "DROP", optional=True),
                            Ref("SingleIdentifierGrammar"),
                            Ref("QuotedLiteralSegment", optional=True),
                        )
                    )
                ),
            )
        ]
    )


class PrepareStatementSegment(BaseSegment):
    """A `PREPARE` statement.

    https://www.postgresql.org/docs/current/sql-prepare.html
    """

    type = "prepare_statement"
    match_grammar = Sequence(
        "PREPARE",
        Ref("ObjectReferenceSegment"),
        Bracketed(Delimited(Ref("DatatypeSegment")), optional=True),
        "AS",
        OneOf(
            Ref("SelectableGrammar"),
            Ref("MergeStatementSegment"),
        ),
    )


class ExecuteStatementSegment(BaseSegment):
    """An `EXECUTE` statement.

    https://www.postgresql.org/docs/current/sql-execute.html
    """

    type = "execute_statement"
    match_grammar = Sequence(
        "EXECUTE",
        Ref("ObjectReferenceSegment"),
        Bracketed(Delimited(Ref("ExpressionSegment")), optional=True),
    )
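# For example (taken from the examples in the PostgreSQL PREPARE docs):
#
#     PREPARE fooplan (int, text, bool, numeric) AS
#         INSERT INTO foo VALUES ($1, $2, $3, $4);
#     EXECUTE fooplan(1, 'Hunter Valley', 't', 200.00);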


class DeallocateStatementSegment(BaseSegment):
    """A `DEALLOCATE` statement.

    https://www.postgresql.org/docs/current/sql-deallocate.html
    """

    type = "deallocate_statement"
    match_grammar = Sequence(
        "DEALLOCATE",
        Ref.keyword("PREPARE", optional=True),
        OneOf(
            Ref("ObjectReferenceSegment"),
            "ALL",
        ),
    )


class TypedArrayLiteralSegment(ansi.TypedArrayLiteralSegment):
    """An array literal segment."""

    type = "typed_array_literal"
    match_grammar = ansi.TypedArrayLiteralSegment.match_grammar.copy(
        insert=[
            Sequence(
                Ref.keyword("VARIADIC"),
                Sequence(
                    Ref("NakedIdentifierSegment"),
                    Ref("WalrusOperatorSegment"),
                    optional=True,
                ),
                optional=True,
            )
        ],
        before=Ref("ArrayTypeSegment"),
    )


class SetSessionAuthorizationStatementSegment(BaseSegment):
    """A `SET SESSION AUTHORIZATION` statement.

    https://www.postgresql.org/docs/current/sql-set-session-authorization.html
    """

    type = "set_session_authorization_statement"
    match_grammar = Sequence(
        "SET",
        OneOf(
            Sequence(Ref.keyword("LOCAL", optional=True), "SESSION"),
            Sequence(Ref.keyword("SESSION", optional=True), "SESSION"),
        ),
        "AUTHORIZATION",
        OneOf(Ref("RoleReferenceSegment"), "DEFAULT"),
    )


class ResetSessionAuthorizationStatementSegment(BaseSegment):
    """A `RESET SESSION AUTHORIZATION` statement.

    https://www.postgresql.org/docs/current/sql-set-session-authorization.html
    """

    type = "reset_session_authorization_statement"
    match_grammar = Sequence("RESET", "SESSION", "AUTHORIZATION")
sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_postgres_keywords.py000066400000000000000000001114211503426445100254030ustar00rootroot00000000000000"""Keywords in the Postgres Dialect.

Most of the keywords come from
https://www.postgresql.org/docs/13/sql-keywords-appendix.html

Here, "not-keyword" refers to a word which is not actually a keyword and
which will be removed from any default keyword definition; such words are,
or have been, ANSI keywords.

There are also some keywords which are (or aren't) supported as types and
functions, but there isn't support for that distinction at present.
"""


def priority_keyword_merge(*args: list[tuple[str, str]]) -> list[tuple[str, str]]:
    """Merge keyword lists, giving priority to entries in later lists.

    *args is a list of keyword lists; these lists should contain tuples
    in the form (keyword, type).
    """
    keyword_lists = [*args]
    base_list = []
    if len(keyword_lists) == 1:
        return keyword_lists[0]
    while len(keyword_lists) > 1:
        base_list, priority_list = keyword_lists[0], keyword_lists[1]
        keyword_set = {x[0] for x in base_list}
        for item in priority_list:
            if item[0] in keyword_set:
                for index, keyword in enumerate(base_list):
                    if keyword[0] == item[0]:
                        base_list.pop(index)
                        break
            base_list.append(item)
        keyword_lists.pop(1)
    return base_list


def get_keywords(keyword_list: list[tuple[str, str]], keyword_type: str) -> list[str]:
    """Get a list of keywords of the required type.

    keyword_type should be one of "not-keyword", "reserved", "non-reserved"
    """
    keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]
    return keywords
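# A minimal usage sketch (hypothetical values, not part of the module):
#
#     merged = priority_keyword_merge(
#         [("FOO", "reserved")],
#         [("FOO", "non-reserved")],
#     )
#     # -> [("FOO", "non-reserved")] : the later list wins.
#
#     get_keywords(merged, "non-reserved")
#     # -> ["FOO"] ; note this is a prefix match, so it also picks up
#     # entries typed "non-reserved-(cannot-be-function-or-type)".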


postgres_docs_keywords = [
    ("A", "not-keyword"), ("ABORT", "non-reserved"), ("ABS", "not-keyword"),
    ("ABSENT", "not-keyword"), ("ABSOLUTE", "non-reserved"), ("ACCESS", "non-reserved"),
    ("ACCORDING", "not-keyword"), ("ACOS", "not-keyword"), ("ACTION", "non-reserved"),
    ("ADA", "not-keyword"), ("ADD", "non-reserved"), ("ADMIN", "non-reserved"),
    ("AFTER", "non-reserved"), ("AGGREGATE", "non-reserved"), ("ALL", "reserved"),
    ("ALLOCATE", "not-keyword"), ("ALSO", "non-reserved"), ("ALTER", "non-reserved"),
    ("ALWAYS", "non-reserved"), ("ANALYSE", "reserved"), ("ANALYZE", "reserved"),
    ("AND", "reserved"), ("ANY", "reserved"), ("ARE", "not-keyword"),
    ("ARRAY", "reserved"), ("ARRAY_AGG", "not-keyword"),
    ("ARRAY_MAX_CARDINALITY", "not-keyword"), ("AS", "reserved"), ("ASC", "reserved"),
    ("ASENSITIVE", "not-keyword"), ("ASIN", "not-keyword"),
    ("ASSERTION", "non-reserved"), ("ASSIGNMENT", "non-reserved"),
    ("ASYMMETRIC", "reserved"), ("AT", "non-reserved"), ("ATAN", "not-keyword"),
    ("ATOMIC", "non-reserved"), ("ATTACH", "non-reserved"),
    ("ATTRIBUTE", "non-reserved"), ("ATTRIBUTES", "not-keyword"),
    ("AUTHORIZATION", "reserved-(can-be-function-or-type)"), ("AVG", "not-keyword"),
    ("BACKWARD", "non-reserved"), ("BASE64", "not-keyword"),
    ("BEFORE", "non-reserved"), ("BEGIN", "non-reserved"),
    ("BEGIN_FRAME", "not-keyword"), ("BEGIN_PARTITION", "not-keyword"),
    ("BERNOULLI", "non-reserved"),
    ("BETWEEN", "non-reserved-(cannot-be-function-or-type)"),
    ("BIGINT", "non-reserved-(cannot-be-function-or-type)"),
    ("BIGSERIAL", "non-reserved-(cannot-be-function-or-type)"),
    ("BINARY", "reserved-(can-be-function-or-type)"),
    ("BIT", "non-reserved-(cannot-be-function-or-type)"),
    ("BIT_LENGTH", "not-keyword"), ("BLOB", "not-keyword"),
    ("BLOCKED", "not-keyword"), ("BOM", "not-keyword"),
    ("BOOLEAN", "non-reserved-(cannot-be-function-or-type)"),
    ("BOOL", "non-reserved-(cannot-be-function-or-type)"), ("BOTH", "reserved"),
    ("BOX", "non-reserved-(cannot-be-function-or-type)"),
    ("BPCHAR", "non-reserved-(cannot-be-function-or-type)"),
    ("BREADTH", "not-keyword"), ("BY", "non-reserved"),
    ("BYTEA", "non-reserved-(cannot-be-function-or-type)"), ("C", "not-keyword"),
    ("CACHE", "non-reserved"), ("CALL", "non-reserved"), ("CALLED", "non-reserved"),
    ("CARDINALITY", "not-keyword"), ("CASCADE", "non-reserved"),
    ("CASCADED", "non-reserved"), ("CASE", "reserved"), ("CAST", "reserved"),
    ("CATALOG", "non-reserved"), ("CATALOG_NAME", "not-keyword"),
    ("CEIL", "not-keyword"), ("CEILING", "not-keyword"), ("CHAIN", "non-reserved"),
    ("CHAINING", "not-keyword"),
    ("CHAR", "non-reserved-(cannot-be-function-or-type)"),
    ("CHARACTER", "non-reserved-(cannot-be-function-or-type)"),
    ("CHARACTERISTICS", "non-reserved"), ("CHARACTERS", "not-keyword"),
    ("CHARACTER_LENGTH", "not-keyword"), ("CHARACTER_SET_CATALOG", "not-keyword"),
    ("CHARACTER_SET_NAME", "not-keyword"), ("CHARACTER_SET_SCHEMA", "not-keyword"),
    ("CHAR_LENGTH", "not-keyword"), ("CHECK", "reserved"),
    ("CHECKPOINT", "non-reserved"),
    ("CIDR", "non-reserved-(cannot-be-function-or-type)"),
    ("CIRCLE", "non-reserved-(cannot-be-function-or-type)"),
    ("CLASS", "non-reserved"), ("CLASSIFIER", "not-keyword"),
    ("CLASS_ORIGIN", "not-keyword"), ("CLOB", "not-keyword"),
    ("CLOSE", "non-reserved"), ("CLUSTER", "non-reserved"),
    ("COALESCE", "non-reserved-(cannot-be-function-or-type)"),
    ("COBOL", "not-keyword"), ("COLLATE", "reserved"), ("COLLATION",
"non-reserved"), ("COLLATION_CATALOG", "not-keyword"), ("COLLATION_NAME", "not-keyword"), ("COLLATION_SCHEMA", "not-keyword"), ("COLLECT", "not-keyword"), ("COLUMN", "reserved"), ("COLUMNS", "non-reserved"), ("COLUMN_NAME", "not-keyword"), ("COMMAND_FUNCTION", "not-keyword"), ("COMMAND_FUNCTION_CODE", "not-keyword"), ("COMMENT", "non-reserved"), ("COMMENTS", "non-reserved"), ("COMMIT", "non-reserved"), ("COMMITTED", "non-reserved"), ("COMMUTATOR", "reserved-(can-be-function-or-type)"), ("COMPRESSION", "non-reserved"), ("CONCURRENTLY", "reserved-(can-be-function-or-type)"), ("CONDITION", "not-keyword"), ("CONDITIONAL", "not-keyword"), ("CONDITION_NUMBER", "not-keyword"), ("CONFIGURATION", "non-reserved"), ("CONFLICT", "non-reserved"), ("CONNECT", "not-keyword"), ("CONNECTION", "non-reserved"), ("CONNECTION_NAME", "not-keyword"), ("CONSTRAINT", "reserved"), ("CONSTRAINTS", "non-reserved"), ("CONSTRAINT_CATALOG", "not-keyword"), ("CONSTRAINT_NAME", "not-keyword"), ("CONSTRAINT_SCHEMA", "not-keyword"), ("CONSTRUCTOR", "not-keyword"), ("CONTAINS", "not-keyword"), ("CONTENT", "non-reserved"), ("CONTINUE", "non-reserved"), ("CONTROL", "not-keyword"), ("CONVERSION", "non-reserved"), ("CONVERT", "not-keyword"), ("COPY", "non-reserved"), ("CORR", "not-keyword"), ("CORRESPONDING", "not-keyword"), ("COS", "not-keyword"), ("COSH", "not-keyword"), ("COST", "non-reserved"), ("COUNT", "not-keyword"), ("COVAR_POP", "not-keyword"), ("COVAR_SAMP", "not-keyword"), ("CREATE", "reserved"), ("CROSS", "reserved-(can-be-function-or-type)"), ("CSV", "non-reserved"), ("CUBE", "non-reserved"), ("CUME_DIST", "not-keyword"), ("CURRENT", "non-reserved"), ("CURRENT_CATALOG", "reserved"), ("CURRENT_DATE", "reserved"), ("CURRENT_DEFAULT_TRANSFORM_GROUP", "not-keyword"), ("CURRENT_PATH", "not-keyword"), ("CURRENT_ROLE", "reserved"), ("CURRENT_ROW", "not-keyword"), ("CURRENT_SCHEMA", "reserved-(can-be-function-or-type)"), ("CURRENT_TIME", "reserved"), ("CURRENT_TIMESTAMP", "reserved"), ("CURRENT_TRANSFORM_GROUP_FOR_TYPE", "not-keyword"), ("CURRENT_USER", "reserved"), ("CURSOR", "non-reserved"), ("CURSOR_NAME", "not-keyword"), ("CYCLE", "non-reserved"), ("DATA", "non-reserved"), ("DATABASE", "non-reserved"), ("DATALINK", "not-keyword"), ("DATE", "not-keyword"), ("DATERANGE", "non-reserved-(cannot-be-function-or-type)"), ("DATETIME_INTERVAL_CODE", "not-keyword"), ("DATETIME_INTERVAL_PRECISION", "not-keyword"), ("DAY", "non-reserved"), ("DB", "not-keyword"), ("DEALLOCATE", "non-reserved"), ("DEC", "non-reserved-(cannot-be-function-or-type)"), ("DECFLOAT", "not-keyword"), ("DECIMAL", "non-reserved-(cannot-be-function-or-type)"), ("DECLARE", "non-reserved"), ("DEFAULT", "reserved"), ("DEFAULTS", "non-reserved"), ("DEFERRABLE", "reserved"), ("DEFERRED", "non-reserved"), ("DEFINE", "not-keyword"), ("DEFINED", "not-keyword"), ("DEFINER", "non-reserved"), ("DEGREE", "not-keyword"), ("DELETE", "non-reserved"), ("DELIMITER", "non-reserved"), ("DELIMITERS", "non-reserved"), ("DENSE_RANK", "not-keyword"), ("DEPENDS", "non-reserved"), ("DEPTH", "not-keyword"), ("DEREF", "not-keyword"), ("DERIVED", "not-keyword"), ("DESC", "reserved"), ("DESCRIBE", "not-keyword"), ("DESCRIPTOR", "not-keyword"), ("DETACH", "non-reserved"), ("DETERMINISTIC", "not-keyword"), ("DIAGNOSTICS", "not-keyword"), ("DICTIONARY", "non-reserved"), ("DISABLE", "non-reserved"), ("DISCARD", "non-reserved"), ("DISCONNECT", "not-keyword"), ("DISPATCH", "not-keyword"), ("DISTINCT", "reserved"), ("DLNEWCOPY", "not-keyword"), ("DLPREVIOUSCOPY", "not-keyword"), 
("DLURLCOMPLETE", "not-keyword"), ("DLURLCOMPLETEONLY", "not-keyword"), ("DLURLCOMPLETEWRITE", "not-keyword"), ("DLURLPATH", "not-keyword"), ("DLURLPATHONLY", "not-keyword"), ("DLURLPATHWRITE", "not-keyword"), ("DLURLSCHEME", "not-keyword"), ("DLURLSERVER", "not-keyword"), ("DLVALUE", "not-keyword"), ("DO", "reserved"), ("DOCUMENT", "non-reserved"), ("DOMAIN", "non-reserved"), ("DOUBLE", "non-reserved"), ("DROP", "non-reserved"), ("DYNAMIC", "not-keyword"), ("DYNAMIC_FUNCTION", "not-keyword"), ("DYNAMIC_FUNCTION_CODE", "not-keyword"), ("EACH", "non-reserved"), ("ELEMENT", "not-keyword"), ("ELSE", "reserved"), ("EMPTY", "not-keyword"), ("ENABLE", "non-reserved"), ("ENCODING", "non-reserved"), ("ENCRYPTED", "non-reserved"), ("END", "reserved"), ("END-EXEC", "not-keyword"), ("END_FRAME", "not-keyword"), ("END_PARTITION", "not-keyword"), ("ENFORCED", "not-keyword"), ("ENUM", "non-reserved"), ("EQUALS", "not-keyword"), ("ERROR", "not-keyword"), ("ESCAPE", "non-reserved"), ("EVENT", "non-reserved"), ("EVERY", "not-keyword"), ("EXCEPT", "reserved"), ("EXCEPTION", "not-keyword"), ("EXCLUDE", "non-reserved"), ("EXCLUDING", "non-reserved"), ("EXCLUSIVE", "non-reserved"), ("EXEC", "not-keyword"), ("EXECUTE", "non-reserved"), ("EXISTS", "non-reserved-(cannot-be-function-or-type)"), ("EXP", "not-keyword"), ("EXPLAIN", "non-reserved"), ("EXPRESSION", "non-reserved"), ("EXTENSION", "non-reserved"), ("EXTERNAL", "non-reserved"), ("EXTRACT", "non-reserved-(cannot-be-function-or-type)"), ("FALSE", "reserved"), ("FAMILY", "non-reserved"), ("FETCH", "reserved"), ("FILE", "not-keyword"), ("FILTER", "non-reserved"), ("FINAL", "not-keyword"), ("FINALIZE", "non-reserved"), ("FINISH", "not-keyword"), ("FIRST", "non-reserved"), ("FIRST_VALUE", "not-keyword"), ("FLAG", "not-keyword"), ("FLOAT", "non-reserved-(cannot-be-function-or-type)"), ("FLOOR", "not-keyword"), ("FOLLOWING", "non-reserved"), ("FOR", "reserved"), ("FORCE", "non-reserved"), ("FOREIGN", "reserved"), ("FORMAT", "not-keyword"), ("FORTRAN", "not-keyword"), ("FORWARD", "non-reserved"), ("FOUND", "not-keyword"), ("FRAME_ROW", "not-keyword"), ("FREE", "not-keyword"), ("FREEZE", "reserved-(can-be-function-or-type)"), ("FROM", "reserved"), ("FS", "not-keyword"), ("FULFILL", "not-keyword"), ("FULL", "reserved-(can-be-function-or-type)"), ("FUNCTION", "non-reserved"), ("FUNCTIONS", "non-reserved"), ("FUSION", "not-keyword"), ("G", "not-keyword"), ("GENERAL", "not-keyword"), ("GENERATED", "non-reserved"), ("GET", "not-keyword"), ("GLOBAL", "non-reserved"), ("GO", "not-keyword"), ("GOTO", "not-keyword"), ("GRANT", "reserved"), ("GRANTED", "non-reserved"), ("GREATEST", "non-reserved-(cannot-be-function-or-type)"), ("GROUP", "reserved"), ("GROUPING", "non-reserved-(cannot-be-function-or-type)"), ("GROUPS", "non-reserved"), ("HANDLER", "non-reserved"), ("HASHES", "reserved-(can-be-function-or-type)"), ("HAVING", "reserved"), ("HEADER", "non-reserved"), ("HEX", "not-keyword"), ("HIERARCHY", "not-keyword"), ("HOLD", "non-reserved"), ("HOUR", "non-reserved"), ("ID", "not-keyword"), ("IDENTITY", "non-reserved"), ("IF", "non-reserved"), ("IGNORE", "not-keyword"), ("ILIKE", "reserved-(can-be-function-or-type)"), ("IMMEDIATE", "non-reserved"), ("IMMEDIATELY", "not-keyword"), ("IMMUTABLE", "non-reserved"), ("IMPLEMENTATION", "not-keyword"), ("IMPLICIT", "non-reserved"), ("IMPORT", "non-reserved"), ("IN", "reserved"), ("INCLUDE", "non-reserved"), ("INCLUDING", "non-reserved"), ("INCREMENT", "non-reserved"), ("INDENT", "not-keyword"), ("INDEX", "non-reserved"), ("INDEXES", 
"non-reserved"), ("INET", "non-reserved-(cannot-be-function-or-type)"), ("INDICATOR", "not-keyword"), ("INHERIT", "non-reserved"), ("INHERITS", "non-reserved"), ("INITIAL", "not-keyword"), ("INITIALLY", "reserved"), ("INLINE", "non-reserved"), ("INNER", "reserved-(can-be-function-or-type)"), ("INOUT", "non-reserved-(cannot-be-function-or-type)"), ("INPUT", "non-reserved"), ("INSENSITIVE", "non-reserved"), ("INSERT", "non-reserved"), ("INSTANCE", "not-keyword"), ("INSTANTIABLE", "not-keyword"), ("INSTEAD", "non-reserved"), ("INT", "non-reserved-(cannot-be-function-or-type)"), ("INT2", "non-reserved-(cannot-be-function-or-type)"), ("INT4", "non-reserved-(cannot-be-function-or-type)"), ("INT4RANGE", "non-reserved-(cannot-be-function-or-type)"), ("INT8", "non-reserved-(cannot-be-function-or-type)"), ("INT8RANGE", "non-reserved-(cannot-be-function-or-type)"), ("INTEGER", "non-reserved-(cannot-be-function-or-type)"), ("INTEGRITY", "not-keyword"), ("INTERSECT", "reserved"), ("INTERSECTION", "not-keyword"), ("INTERVAL", "non-reserved-(cannot-be-function-or-type)"), ("INTO", "reserved"), ("INVOKER", "non-reserved"), ("IS", "reserved-(can-be-function-or-type)"), ("ISNULL", "reserved-(can-be-function-or-type)"), ("IS_SUPERUSER", "non-reserved"), ("ISOLATION", "non-reserved"), ("JOIN", "reserved-(can-be-function-or-type)"), ("JSON", "not-keyword"), ("JSON_ARRAY", "not-keyword"), ("JSON_ARRAYAGG", "not-keyword"), ("JSON_EXISTS", "not-keyword"), ("JSON_OBJECT", "not-keyword"), ("JSON_OBJECTAGG", "not-keyword"), ("JSON_QUERY", "not-keyword"), ("JSON_TABLE", "not-keyword"), ("JSON_TABLE_PRIMITIVE", "not-keyword"), ("JSON_VALUE", "not-keyword"), ("JSONB", "non-reserved-(cannot-be-function-or-type)"), ("K", "not-keyword"), ("KEEP", "not-keyword"), ("KEY", "non-reserved"), ("KEYS", "not-keyword"), ("KEY_MEMBER", "not-keyword"), ("KEY_TYPE", "not-keyword"), ("LABEL", "non-reserved"), ("LAG", "not-keyword"), ("LANGUAGE", "non-reserved"), ("LARGE", "non-reserved"), ("LAST", "non-reserved"), ("LAST_VALUE", "not-keyword"), ("LATERAL", "reserved"), ("LC_COLLATE", "non-reserved"), ("LC_CTYPE", "non-reserved"), ("LEAD", "not-keyword"), ("LEADING", "reserved"), ("LEAKPROOF", "non-reserved"), ("LEAST", "non-reserved-(cannot-be-function-or-type)"), ("LEFT", "reserved-(can-be-function-or-type)"), ("LEFTARG", "non-reserved-(cannot-be-function-or-type)"), ("LENGTH", "not-keyword"), ("LEVEL", "non-reserved"), ("LIBRARY", "not-keyword"), ("LIKE", "reserved-(can-be-function-or-type)"), ("LIKE_REGEX", "not-keyword"), ("LIMIT", "reserved"), ("LINE", "non-reserved-(cannot-be-function-or-type)"), ("LINK", "not-keyword"), ("LISTAGG", "not-keyword"), ("LISTEN", "non-reserved"), ("LN", "not-keyword"), ("LOAD", "non-reserved"), ("LOCAL", "non-reserved"), ("LOCALTIME", "reserved"), ("LOCALTIMESTAMP", "reserved"), ("LOCATION", "non-reserved"), ("LOCATOR", "not-keyword"), ("LOCK", "non-reserved"), ("LOCKED", "non-reserved"), ("LOG", "not-keyword"), ("LOG10", "not-keyword"), ("LOGGED", "non-reserved"), ("LOWER", "not-keyword"), ("LSEG", "non-reserved-(cannot-be-function-or-type)"), ("M", "not-keyword"), ("MACADDR", "non-reserved-(cannot-be-function-or-type)"), ("MACADDR8", "non-reserved-(cannot-be-function-or-type)"), ("MAP", "not-keyword"), ("MAPPING", "non-reserved"), ("MATCH", "non-reserved"), ("MATCHED", "non-reserved"), ("MATCHES", "not-keyword"), ("MATCH_NUMBER", "not-keyword"), ("MATCH_RECOGNIZE", "not-keyword"), ("MATERIALIZED", "non-reserved"), ("MAX", "not-keyword"), ("MAXVALUE", "non-reserved"), ("MEASURES", "not-keyword"), 
("MEMBER", "not-keyword"), ("MERGE", "non-reserved"), ("MERGES", "reserved-(can-be-function-or-type)"), ("MESSAGE_LENGTH", "not-keyword"), ("MESSAGE_OCTET_LENGTH", "not-keyword"), ("MESSAGE_TEXT", "not-keyword"), ("METHOD", "non-reserved"), ("MIN", "not-keyword"), ("MINUTE", "non-reserved"), ("MINVALUE", "non-reserved"), ("MOD", "not-keyword"), ("MODE", "non-reserved"), ("MODIFIES", "not-keyword"), ("MODULE", "not-keyword"), ("MONEY", "non-reserved-(cannot-be-function-or-type)"), ("MONTH", "non-reserved"), ("MORE", "not-keyword"), ("MOVE", "non-reserved"), ("MULTISET", "not-keyword"), ("MUMPS", "not-keyword"), ("NAME", "non-reserved"), ("NAMES", "non-reserved"), ("NAMESPACE", "not-keyword"), ("NATIONAL", "non-reserved-(cannot-be-function-or-type)"), ("NATURAL", "reserved-(can-be-function-or-type)"), ("NCHAR", "non-reserved-(cannot-be-function-or-type)"), ("NCLOB", "not-keyword"), ("NEGATOR", "reserved-(can-be-function-or-type)"), ("NESTED", "not-keyword"), ("NESTING", "not-keyword"), ("NEW", "non-reserved"), ("NEXT", "non-reserved"), ("NFC", "non-reserved"), ("NFD", "non-reserved"), ("NFKC", "non-reserved"), ("NFKD", "non-reserved"), ("NIL", "not-keyword"), ("NO", "non-reserved"), ("NONE", "non-reserved-(cannot-be-function-or-type)"), ("NORMALIZE", "non-reserved-(cannot-be-function-or-type)"), ("NORMALIZED", "non-reserved"), ("NOT", "reserved"), ("NOTHING", "non-reserved"), ("NOTIFY", "non-reserved"), ("NOTNULL", "reserved-(can-be-function-or-type)"), ("NOWAIT", "non-reserved"), ("NTH_VALUE", "not-keyword"), ("NTILE", "not-keyword"), ("NULL", "reserved"), ("NULLABLE", "not-keyword"), ("NULLIF", "non-reserved-(cannot-be-function-or-type)"), ("NULLS", "non-reserved"), ("NUMBER", "not-keyword"), ("NUMERIC", "non-reserved-(cannot-be-function-or-type)"), ("NUMRANGE", "non-reserved-(cannot-be-function-or-type)"), ("OBJECT", "non-reserved"), ("OCCURRENCES_REGEX", "not-keyword"), ("OCTETS", "not-keyword"), ("OCTET_LENGTH", "not-keyword"), ("OF", "non-reserved"), ("OFF", "non-reserved"), ("OFFSET", "reserved"), ("OIDS", "non-reserved"), ("OLD", "non-reserved"), ("OMIT", "not-keyword"), ("ON", "reserved"), ("ONE", "not-keyword"), ("ONLY", "reserved"), ("OPEN", "not-keyword"), ("OPERATOR", "non-reserved"), ("OPTION", "non-reserved"), ("OPTIONS", "non-reserved"), ("OR", "reserved"), ("ORDER", "reserved"), ("ORDERING", "not-keyword"), ("ORDINALITY", "non-reserved"), ("OTHERS", "non-reserved"), ("OUT", "non-reserved-(cannot-be-function-or-type)"), ("OUTER", "reserved-(can-be-function-or-type)"), ("OUTPUT", "not-keyword"), ("OVER", "non-reserved"), ("OVERFLOW", "not-keyword"), ("OVERLAPS", "reserved-(can-be-function-or-type)"), ("OVERLAY", "non-reserved-(cannot-be-function-or-type)"), ("OVERRIDING", "non-reserved"), ("OWNED", "non-reserved"), ("OWNER", "non-reserved"), ("P", "not-keyword"), ("PAD", "not-keyword"), ("PARALLEL", "non-reserved"), ("PARAMETER", "not-keyword"), ("PARAMETER_MODE", "not-keyword"), ("PARAMETER_NAME", "not-keyword"), ("PARAMETER_ORDINAL_POSITION", "not-keyword"), ("PARAMETER_SPECIFIC_CATALOG", "not-keyword"), ("PARAMETER_SPECIFIC_NAME", "not-keyword"), ("PARAMETER_SPECIFIC_SCHEMA", "not-keyword"), ("PARSER", "non-reserved"), ("PARTIAL", "non-reserved"), ("PARTITION", "non-reserved"), ("PASCAL", "not-keyword"), ("PASS", "not-keyword"), ("PASSING", "non-reserved"), ("PASSTHROUGH", "not-keyword"), ("PASSWORD", "non-reserved"), ("PAST", "not-keyword"), ("PATH", "non-reserved-(cannot-be-function-or-type)"), ("PATTERN", "not-keyword"), ("PER", "not-keyword"), ("PERCENT", 
"not-keyword"), ("PERCENTILE_CONT", "not-keyword"), ("PERCENTILE_DISC", "not-keyword"), ("PERCENT_RANK", "not-keyword"), ("PERIOD", "not-keyword"), ("PERMISSION", "not-keyword"), ("PERMISSIVE", "non-reserved"), ("PERMUTE", "not-keyword"), ("PG_LSN", "non-reserved-(cannot-be-function-or-type)"), ("PLACING", "reserved"), ("PLAN", "not-keyword"), ("PLANS", "non-reserved"), ("PLI", "not-keyword"), ("POINT", "non-reserved-(cannot-be-function-or-type)"), ("POLICY", "non-reserved"), ("POLYGON", "non-reserved-(cannot-be-function-or-type)"), ("PORTION", "not-keyword"), ("POSITION", "non-reserved-(cannot-be-function-or-type)"), ("POSITION_REGEX", "not-keyword"), ("POWER", "not-keyword"), ("PRECEDES", "not-keyword"), ("PRECEDING", "non-reserved"), ("PRECISION", "non-reserved-(cannot-be-function-or-type)"), ("PREPARE", "non-reserved"), ("PREPARED", "non-reserved"), ("PRESERVE", "non-reserved"), ("PRIMARY", "reserved"), ("PRIOR", "non-reserved"), ("PRIVATE", "not-keyword"), ("PRIVILEGES", "non-reserved"), ("PROCEDURAL", "non-reserved"), ("PROCEDURE", "non-reserved"), ("PROCEDURES", "non-reserved"), ("PROGRAM", "non-reserved"), ("PRUNE", "not-keyword"), ("PTF", "not-keyword"), ("PUBLIC", "not-keyword"), ("PUBLICATION", "non-reserved"), ("QUOTE", "non-reserved"), ("QUOTES", "not-keyword"), ("RANGE", "non-reserved"), ("RANK", "not-keyword"), ("READ", "non-reserved"), ("READS", "not-keyword"), ("REAL", "non-reserved-(cannot-be-function-or-type)"), ("REASSIGN", "non-reserved"), ("RECHECK", "non-reserved"), ("RECOVERY", "not-keyword"), ("RECURSIVE", "non-reserved"), ("REF", "non-reserved"), ("REFERENCES", "reserved"), ("REFERENCING", "non-reserved"), ("REFRESH", "non-reserved"), ("REGR_AVGX", "not-keyword"), ("REGR_AVGY", "not-keyword"), ("REGR_COUNT", "not-keyword"), ("REGR_INTERCEPT", "not-keyword"), ("REGR_R2", "not-keyword"), ("REGR_SLOPE", "not-keyword"), ("REGR_SXX", "not-keyword"), ("REGR_SXY", "not-keyword"), ("REGR_SYY", "not-keyword"), ("REINDEX", "non-reserved"), ("RELATIVE", "non-reserved"), ("RELEASE", "non-reserved"), ("RENAME", "non-reserved"), ("REPEATABLE", "non-reserved"), ("REPLACE", "non-reserved"), ("REPLICA", "non-reserved"), ("REQUIRING", "not-keyword"), ("RESET", "non-reserved"), ("RESPECT", "not-keyword"), ("RESTART", "non-reserved"), ("RESTORE", "not-keyword"), ("RESTRICT", "non-reserved"), ("RESTRICTIVE", "non-reserved"), ("RESULT", "not-keyword"), ("RETRIEVE", "non-reserved"), ("RETURN", "non-reserved"), ("RETURNED_CARDINALITY", "not-keyword"), ("RETURNED_LENGTH", "not-keyword"), ("RETURNED_OCTET_LENGTH", "not-keyword"), ("RETURNED_SQLSTATE", "not-keyword"), ("RETURNING", "reserved"), ("RETURNS", "non-reserved"), ("REVOKE", "non-reserved"), ("RIGHT", "reserved-(can-be-function-or-type)"), ("RIGHTARG", "reserved-(can-be-function-or-type)"), ("ROLE", "non-reserved"), ("ROLLBACK", "non-reserved"), ("ROLLUP", "non-reserved"), ("ROUTINE", "non-reserved"), ("ROUTINES", "non-reserved"), ("ROUTINE_CATALOG", "not-keyword"), ("ROUTINE_NAME", "not-keyword"), ("ROUTINE_SCHEMA", "not-keyword"), ("ROW", "non-reserved-(cannot-be-function-or-type)"), ("ROWS", "non-reserved"), ("ROW_COUNT", "not-keyword"), ("ROW_NUMBER", "not-keyword"), ("RULE", "non-reserved"), ("RUNNING", "not-keyword"), ("SAVEPOINT", "non-reserved"), ("SCALAR", "not-keyword"), ("SCALE", "not-keyword"), ("SCHEMA", "non-reserved"), ("SCHEMAS", "non-reserved"), ("SCHEMA_NAME", "not-keyword"), ("SCOPE", "not-keyword"), ("SCOPE_CATALOG", "not-keyword"), ("SCOPE_NAME", "not-keyword"), ("SCOPE_SCHEMA", "not-keyword"), ("SCROLL", 
"non-reserved"), ("SEARCH", "non-reserved"), ("SECOND", "non-reserved"), ("SECTION", "not-keyword"), ("SECURITY", "non-reserved"), ("SEEK", "not-keyword"), ("SELECT", "reserved"), ("SELECTIVE", "not-keyword"), ("SELF", "not-keyword"), ("SENSITIVE", "not-keyword"), ("SEQUENCE", "non-reserved"), ("SEQUENCES", "non-reserved"), ("SERIAL", "non-reserved-(cannot-be-function-or-type)"), ("SERIAL2", "non-reserved-(cannot-be-function-or-type)"), ("SERIAL4", "non-reserved-(cannot-be-function-or-type)"), ("SERIAL8", "non-reserved-(cannot-be-function-or-type)"), ("SERIALIZABLE", "non-reserved"), ("SERVER", "non-reserved"), ("SERVER_ENCODING", "non-reserved"), ("SERVER_NAME", "not-keyword"), ("SERVER_VERSION", "non-reserved"), ("SESSION", "non-reserved"), ("SESSION_USER", "reserved"), ("SET", "non-reserved"), ("SETOF", "non-reserved-(cannot-be-function-or-type)"), ("SETS", "non-reserved"), ("SHARE", "non-reserved"), ("SHOW", "non-reserved"), ("SIMILAR", "reserved-(can-be-function-or-type)"), ("SIMPLE", "non-reserved"), ("SIN", "not-keyword"), ("SINH", "not-keyword"), ("SIZE", "not-keyword"), ("SKIP", "non-reserved"), ("SMALLINT", "non-reserved-(cannot-be-function-or-type)"), ("SMALLSERIAL", "non-reserved-(cannot-be-function-or-type)"), ("SNAPSHOT", "non-reserved"), ("SOME", "reserved"), ("SOURCE", "not-keyword"), ("SPACE", "not-keyword"), ("SPECIFIC", "not-keyword"), ("SPECIFICTYPE", "not-keyword"), ("SPECIFIC_NAME", "not-keyword"), ("SQL", "non-reserved"), ("SQLCODE", "not-keyword"), ("SQLERROR", "not-keyword"), ("SQLEXCEPTION", "not-keyword"), ("SQLSTATE", "not-keyword"), ("SQLWARNING", "not-keyword"), ("SQRT", "not-keyword"), ("STABLE", "non-reserved"), ("STANDALONE", "non-reserved"), ("START", "non-reserved"), ("STATE", "not-keyword"), ("STATEMENT", "non-reserved"), ("STATIC", "not-keyword"), ("STATISTICS", "non-reserved"), ("STDDEV_POP", "not-keyword"), ("STDDEV_SAMP", "not-keyword"), ("STDIN", "non-reserved"), ("STDOUT", "non-reserved"), ("STORAGE", "non-reserved"), ("STORED", "non-reserved"), ("STRICT", "non-reserved"), ("STRING", "not-keyword"), ("STRIP", "non-reserved"), ("STRUCTURE", "not-keyword"), ("STYLE", "not-keyword"), ("SUBCLASS_ORIGIN", "not-keyword"), ("SUBMULTISET", "not-keyword"), ("SUBSCRIPTION", "non-reserved"), ("SUBSET", "not-keyword"), ("SUBSTRING", "non-reserved-(cannot-be-function-or-type)"), ("SUBSTRING_REGEX", "not-keyword"), ("SUCCEEDS", "not-keyword"), ("SUM", "not-keyword"), ("SUPPORT", "non-reserved"), ("SYMMETRIC", "reserved"), ("SYSID", "non-reserved"), ("SYSTEM", "non-reserved"), ("SYSTEM_TIME", "not-keyword"), ("SYSTEM_USER", "not-keyword"), ("T", "not-keyword"), ("TABLE", "non-reserved"), ("TABLES", "non-reserved"), ("TABLESAMPLE", "reserved-(can-be-function-or-type)"), ("TABLESPACE", "non-reserved"), ("TABLE_NAME", "not-keyword"), ("TAN", "not-keyword"), ("TANH", "not-keyword"), ("TEMP", "non-reserved"), ("TEMPLATE", "non-reserved"), ("TEMPORARY", "non-reserved"), ("TEXT", "non-reserved"), ("THEN", "reserved"), ("THROUGH", "not-keyword"), ("TIES", "non-reserved"), ("TIME", "non-reserved-(cannot-be-function-or-type)"), ("TIMESTAMP", "non-reserved-(cannot-be-function-or-type)"), ("TIMEZONE_HOUR", "not-keyword"), ("TIMEZONE_MINUTE", "not-keyword"), ("TO", "reserved"), ("TOKEN", "not-keyword"), ("TOP_LEVEL_COUNT", "not-keyword"), ("TRAILING", "reserved"), ("TRANSACTION", "non-reserved"), ("TRANSACTIONS_COMMITTED", "not-keyword"), ("TRANSACTIONS_ROLLED_BACK", "not-keyword"), ("TRANSACTION_ACTIVE", "not-keyword"), ("TRANSFORM", "non-reserved"), ("TRANSFORMS", 
"not-keyword"), ("TRANSLATE", "not-keyword"), ("TRANSLATE_REGEX", "not-keyword"), ("TRANSLATION", "not-keyword"), ("TREAT", "non-reserved-(cannot-be-function-or-type)"), ("TRIGGER", "non-reserved"), ("TRIGGER_CATALOG", "not-keyword"), ("TRIGGER_NAME", "not-keyword"), ("TRIGGER_SCHEMA", "not-keyword"), ("TRIM", "non-reserved-(cannot-be-function-or-type)"), ("TRIM_ARRAY", "not-keyword"), ("TRUE", "reserved"), ("TRUNCATE", "non-reserved"), ("TRUSTED", "non-reserved"), ("TSQUERY", "non-reserved-(cannot-be-function-or-type)"), ("TSRANGE", "non-reserved-(cannot-be-function-or-type)"), ("TSTZRANGE", "non-reserved-(cannot-be-function-or-type)"), ("TSVECTOR", "non-reserved-(cannot-be-function-or-type)"), ("TYPE", "non-reserved"), ("TYPES", "non-reserved"), ("UESCAPE", "non-reserved"), ("UNBOUNDED", "non-reserved"), ("UNCOMMITTED", "non-reserved"), ("UNCONDITIONAL", "not-keyword"), ("UNDER", "not-keyword"), ("UNENCRYPTED", "non-reserved"), ("UNION", "reserved"), ("UNIQUE", "reserved"), ("UNKNOWN", "non-reserved"), ("UNLINK", "not-keyword"), ("UNLISTEN", "non-reserved"), ("UNLOGGED", "non-reserved"), ("UNMATCHED", "not-keyword"), ("UNNAMED", "not-keyword"), ("UNNEST", "not-keyword"), ("UNTIL", "non-reserved"), ("UNTYPED", "not-keyword"), ("UPDATE", "non-reserved"), ("UPPER", "not-keyword"), ("URI", "not-keyword"), ("USAGE", "not-keyword"), ("USER", "non-reserved"), ("USER_DEFINED_TYPE_CATALOG", "not-keyword"), ("USER_DEFINED_TYPE_CODE", "not-keyword"), ("USER_DEFINED_TYPE_NAME", "not-keyword"), ("USER_DEFINED_TYPE_SCHEMA", "not-keyword"), ("USING", "reserved"), ("UTF16", "not-keyword"), ("UTF32", "not-keyword"), ("UTF8", "not-keyword"), ("UUID", "non-reserved-(cannot-be-function-or-type)"), ("VACUUM", "non-reserved"), ("VALID", "non-reserved"), ("VALIDATE", "non-reserved"), ("VALIDATOR", "non-reserved"), ("VALUE", "non-reserved"), ("VALUES", "non-reserved-(cannot-be-function-or-type)"), ("VALUE_OF", "not-keyword"), ("VARBINARY", "not-keyword"), ("VARCHAR", "non-reserved-(cannot-be-function-or-type)"), ("VARIADIC", "reserved"), ("VARYING", "non-reserved"), ("VAR_POP", "not-keyword"), ("VAR_SAMP", "not-keyword"), ("VERBOSE", "reserved-(can-be-function-or-type)"), ("VERSION", "non-reserved"), ("VERSIONING", "not-keyword"), ("VIEW", "non-reserved"), ("VIEWS", "non-reserved"), ("VOLATILE", "non-reserved"), ("WHEN", "reserved"), ("WHENEVER", "not-keyword"), ("WHERE", "reserved"), ("WHITESPACE", "non-reserved"), ("WIDTH_BUCKET", "not-keyword"), ("WINDOW", "reserved"), ("WITH", "reserved"), ("WITHIN", "non-reserved"), ("WITHOUT", "non-reserved"), ("WORK", "non-reserved"), ("WRAPPER", "non-reserved"), ("WRITE", "non-reserved"), ("XML", "non-reserved"), ("XMLAGG", "not-keyword"), ("XMLATTRIBUTES", "non-reserved-(cannot-be-function-or-type)"), ("XMLBINARY", "not-keyword"), ("XMLCAST", "not-keyword"), ("XMLCOMMENT", "not-keyword"), ("XMLCONCAT", "non-reserved-(cannot-be-function-or-type)"), ("XMLDECLARATION", "not-keyword"), ("XMLDOCUMENT", "not-keyword"), ("XMLELEMENT", "non-reserved-(cannot-be-function-or-type)"), ("XMLEXISTS", "non-reserved-(cannot-be-function-or-type)"), ("XMLFOREST", "non-reserved-(cannot-be-function-or-type)"), ("XMLITERATE", "not-keyword"), ("XMLNAMESPACES", "non-reserved-(cannot-be-function-or-type)"), ("XMLPARSE", "non-reserved-(cannot-be-function-or-type)"), ("XMLPI", "non-reserved-(cannot-be-function-or-type)"), ("XMLQUERY", "not-keyword"), ("XMLROOT", "non-reserved-(cannot-be-function-or-type)"), ("XMLSCHEMA", "not-keyword"), ("XMLSERIALIZE", 
"non-reserved-(cannot-be-function-or-type)"), ("XMLTABLE", "non-reserved-(cannot-be-function-or-type)"), ("XMLTEXT", "not-keyword"), ("XMLVALIDATE", "not-keyword"), ("YEAR", "non-reserved"), ("YES", "non-reserved"), ("ZONE", "non-reserved"), ] postgres_nondocs_keywords = [ ("ALLOW_CONNECTIONS", "non-reserved"), ("BREADTH", "non-reserved"), ("BUFFERS", "non-reserved"), ("BYPASSRLS", "non-reserved"), ("CONNECT", "reserved"), ("COSTS", "non-reserved"), ("CURRENT_USER", "non-reserved"), ("CREATEDB", "non-reserved"), ("CREATEROLE", "non-reserved"), ("DATE", "non-reserved"), ("DEPENDENCIES", "non-reserved"), ("DEPTH", "non-reserved"), ("DESCRIBE", "non-reserved"), ("DETERMINISTIC", "non-reserved"), ("DISABLE_PAGE_SKIPPING", "non-reserved"), ("EXECUTION", "not-keyword"), ("EXTENDED", "non-reserved"), ("FILE", "non-reserved"), ("FORCE_NOT_NULL", "non-reserved"), ("FORCE_NULL", "non-reserved"), ("FORCE_QUOTE", "non-reserved"), ("FORMAT", "non-reserved"), ("HASH", "non-reserved"), ("ICU", "non-reserved"), ("IGNORE", "non-reserved"), ("INDEX_CLEANUP", "non-reserved"), ("IS_TEMPLATE", "non-reserved"), ("JSON", "non-reserved"), ("KEYS", "non-reserved"), ("LC_COLLATE", "non-reserved"), ("LC_CTYPE", "non-reserved"), ("LIBC", "non-reserved"), ("LIST", "non-reserved"), ("LOGIN", "non-reserved"), ("LOCALE", "non-reserved"), ("MAIN", "non-reserved"), ("MCV", "non-reserved"), ("MODULUS", "non-reserved"), ("NDISTINCT", "non-reserved"), ("NOBYPASSRLS", "non-reserved"), ("NOCREATEDB", "non-reserved"), ("NOCREATEROLE", "non-reserved"), ("NOINHERIT", "non-reserved"), ("NOLOGIN", "non-reserved"), ("NOREPLICATION", "non-reserved"), ("NOSUPERUSER", "non-reserved"), ("PLAIN", "non-reserved"), ("PROCESS_TOAST", "non-reserved"), ("PROVIDER", "non-reserved"), ("PUBLIC", "non-reserved"), ("REMAINDER", "non-reserved"), ("REPLICATION", "non-reserved"), ("RESPECT", "non-reserved"), ("RESTRICTED", "non-reserved"), ("SAFE", "non-reserved"), ("SCALAR", "non-reserved"), ("SETTINGS", "non-reserved"), ("SKIP_LOCKED", "non-reserved"), ("SUMMARY", "non-reserved"), ("SUPERUSER", "non-reserved"), ("TIMETZ", "non-reserved"), ("TIMESTAMPTZ", "non-reserved"), ("TIMING", "non-reserved"), ("UNSAFE", "non-reserved"), ("USAGE", "non-reserved"), ("WAL", "non-reserved"), ] postgres_postgis_datatype_keywords = [ ("POINT", "non-reserved"), ("LINESTRING", "non-reserved"), ("POLYGON", "non-reserved"), ("MULTIPOINT", "non-reserved"), ("MULTILINESTRING", "non-reserved"), ("MULTIPOLYGON", "non-reserved"), ("GEOMETRYCOLLECTION", "non-reserved"), ("POINTZ", "non-reserved"), ("LINESTRINGZ", "non-reserved"), ("POLYGONZ", "non-reserved"), ("MULTIPOINTZ", "non-reserved"), ("MULTILINESTRINGZ", "non-reserved"), ("MULTIPOLYGONZ", "non-reserved"), ("GEOMETRYCOLLECTIONZ", "non-reserved"), ("POINTM", "non-reserved"), ("LINESTRINGM", "non-reserved"), ("POLYGONM", "non-reserved"), ("MULTIPOINTM", "non-reserved"), ("MULTILINESTRINGM", "non-reserved"), ("MULTIPOLYGONM", "non-reserved"), ("GEOMETRYCOLLECTIONM", "non-reserved"), ("POINTZM", "non-reserved"), ("LINESTRINGZM", "non-reserved"), ("POLYGONZM", "non-reserved"), ("MULTIPOINTZM", "non-reserved"), ("MULTILINESTRINGZM", "non-reserved"), ("MULTIPOLYGONZM", "non-reserved"), ("GEOMETRYCOLLECTIONZM", "non-reserved"), ("CIRCULARSTRING", "non-reserved"), ("COMPOUNDCURVE", "non-reserved"), ("CURVEPOLYGON", "non-reserved"), ("MULTICURVE", "non-reserved"), ("MULTISURFACE", "non-reserved"), ("POLYHEDRALSURFACE", "non-reserved"), ("TRIANGLE", "non-reserved"), ("TIN", "non-reserved"), ] postgres_postgis_other_keywords = [ 
("GEOMETRY", "non-reserved"), ("GEOGRAPHY", "non-reserved"), ("EMPTY", "non-reserved"), ] postgres_pgvector_keywords = [ ("VECTOR", "non-reserved"), ] postgres_keywords = priority_keyword_merge( postgres_docs_keywords, postgres_nondocs_keywords, postgres_postgis_datatype_keywords, postgres_postgis_other_keywords, postgres_pgvector_keywords, ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_redshift.py000066400000000000000000002265551503426445100234350ustar00rootroot00000000000000"""The Amazon Redshift dialect. This is based on postgres dialect, since it was initially based off of Postgres 8. We should monitor in future and see if it should be rebased off of ANSI """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralKeywordSegment, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_postgres as postgres from sqlfluff.dialects.dialect_redshift_keywords import ( redshift_reserved_keywords, redshift_unreserved_keywords, ) postgres_dialect = load_raw_dialect("postgres") ansi_dialect = load_raw_dialect("ansi") redshift_dialect = postgres_dialect.copy_as( "redshift", formatted_name="AWS Redshift", docstring="""**Default Casing**: ``lowercase`` (unless configured to be case sensitive with all identifiers using the :code:`enable_case_sensitive_identifier` configuration value, see the `Redshift Names & Identifiers Docs`_). **Quotes**: String Literals: ``''``, Identifiers: ``""``. The dialect for `Redshift`_ on Amazon Web Services (AWS). .. _`Redshift`: https://aws.amazon.com/redshift/ .. 
sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_redshift.py000066400000000000000000002265551503426445100234310ustar00rootroot00000000000000"""The Amazon Redshift dialect.

This is based on the postgres dialect, since Redshift was initially based
off of Postgres 8. We should monitor in future and see if it should be
rebased off of ANSI.
"""

from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser import (
    AnyNumberOf,
    AnySetOf,
    Anything,
    BaseSegment,
    Bracketed,
    Dedent,
    Delimited,
    IdentifierSegment,
    ImplicitIndent,
    Indent,
    LiteralKeywordSegment,
    Matchable,
    Nothing,
    OneOf,
    OptionallyBracketed,
    Ref,
    RegexLexer,
    RegexParser,
    SegmentGenerator,
    Sequence,
    StringParser,
    WordSegment,
)
from sqlfluff.dialects import dialect_ansi as ansi
from sqlfluff.dialects import dialect_postgres as postgres
from sqlfluff.dialects.dialect_redshift_keywords import (
    redshift_reserved_keywords,
    redshift_unreserved_keywords,
)

postgres_dialect = load_raw_dialect("postgres")
ansi_dialect = load_raw_dialect("ansi")

redshift_dialect = postgres_dialect.copy_as(
    "redshift",
    formatted_name="AWS Redshift",
    docstring="""**Default Casing**: ``lowercase`` (unless configured
to be case sensitive with all identifiers using the
:code:`enable_case_sensitive_identifier` configuration value, see the
`Redshift Names & Identifiers Docs`_).

**Quotes**: String Literals: ``''``, Identifiers: ``""``.

The dialect for `Redshift`_ on Amazon Web Services (AWS).

.. _`Redshift`: https://aws.amazon.com/redshift/
.. _`Redshift Names & Identifiers Docs`: https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
""",  # noqa: E501
)

# Set Keywords
redshift_dialect.sets("unreserved_keywords").clear()
redshift_dialect.update_keywords_set_from_multiline_string(
    "unreserved_keywords", redshift_unreserved_keywords
)

redshift_dialect.sets("reserved_keywords").clear()
redshift_dialect.update_keywords_set_from_multiline_string(
    "reserved_keywords", redshift_reserved_keywords
)

redshift_dialect.sets("bare_functions").clear()
redshift_dialect.sets("bare_functions").update(
    [
        "current_date",
        "sysdate",
        "current_time",
        "current_timestamp",
        "user",
        "current_user",
        "current_aws_account",
        "current_namespace",
        "current_user_id",
    ]
)

redshift_dialect.sets("date_part_function_name").update(
    ["DATEADD", "DATEDIFF", "EXTRACT", "DATE_PART"]
)

# Add datetime units
# https://docs.aws.amazon.com/redshift/latest/dg/r_Dateparts_for_datetime_functions.html
redshift_dialect.sets("datetime_units").update(
    [
        # millennium
        "MILLENNIUM", "MILLENNIA", "MIL", "MILS",
        # century
        "CENTURY", "CENTURIES", "C", "CENT", "CENTS",
        # decade
        "DECADE", "DECADES", "DEC", "DECS",
        # epoch
        "EPOCH",
        # year
        "YEAR", "YEARS", "Y", "YR", "YRS",
        # quarter
        "QUARTER", "QUARTERS", "QTR", "QTRS",
        # month
        "MONTH", "MONTHS", "MON", "MONS",
        # week
        "WEEK", "WEEKS", "W",
        # day of week
        "DAYOFWEEK", "DOW", "DW", "WEEKDAY",
        # day of year
        "DAYOFYEAR", "DOY", "DY", "YEARDAY",
        # day
        "DAY", "DAYS", "D",
        # hour
        "HOUR", "HOURS", "H", "HR", "HRS",
        # minute
        "MINUTE", "MINUTES", "M", "MIN", "MINS",
        # second
        "SECOND", "SECONDS", "S", "SEC", "SECS",
        # millisec
        "MILLISECOND", "MILLISECONDS", "MS", "MSEC", "MSECS",
        "MSECOND", "MSECONDS", "MILLISEC", "MILLISECS", "MILLISECON",
        # microsec
        "MICROSECOND", "MICROSECONDS", "MICROSEC", "MICROSECS", "MICROSECOND",
        "USECOND", "USECONDS", "US", "USEC", "USECS",
        # timezone
        "TIMEZONE", "TIMEZONE_HOUR", "TIMEZONE_MINUTE",
    ]
)

redshift_dialect.replace(
    WellKnownTextGeometrySegment=Nothing(),
    JoinLikeClauseGrammar=Sequence(
        AnySetOf(
            Ref("FromPivotExpressionSegment"),
            Ref("FromUnpivotExpressionSegment"),
            min_times=1,
        ),
        Ref("AliasExpressionSegment", optional=True),
    ),
    NakedIdentifierSegment=SegmentGenerator(
        lambda dialect: RegexParser(
            # Optionally begins with # for temporary tables. Otherwise
            # must only contain digits, letters, underscore, and $ but
            # can’t be all digits.
            r"#?([A-Z_]+|[0-9]+[A-Z_$])[A-Z0-9_$]*",
            IdentifierSegment,
            type="naked_identifier",
            anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
            casefold=str.lower,
        )
    ),
    LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy(
        insert=[
            Ref("MaxLiteralSegment"),
            Ref("DollarNumericLiteralSegment"),
        ]
    ),
)

redshift_dialect.patch_lexer_matchers(
    [
        # add optional leading # to word for temporary tables
        RegexLexer(
            "word",
            r"#?[0-9a-zA-Z_]+[0-9a-zA-Z_$]*",
            WordSegment,
        ),
    ]
)

redshift_dialect.add(
    CompressionTypeGrammar=OneOf(
        "BZIP2",
        "GZIP",
        "LZOP",
        "ZSTD",
    ),
    ArgModeGrammar=OneOf(
        "IN",
        "OUT",
        "INOUT",
    ),
    ColumnEncodingGrammar=OneOf(
        "RAW",
        "AZ64",
        "BYTEDICT",
        "DELTA",
        "DELTA32K",
        "LZO",
        "MOSTLY8",
        "MOSTLY16",
        "MOSTLY32",
        "RUNLENGTH",
        "TEXT255",
        "TEXT32K",
        "ZSTD",
    ),
    QuotaGrammar=Sequence(
        "QUOTA",
        OneOf(
            Sequence(
                Ref("NumericLiteralSegment"),
                OneOf("MB", "GB", "TB"),
            ),
            "UNLIMITED",
        ),
    ),
    MaxLiteralSegment=StringParser("max", LiteralKeywordSegment, type="max_literal"),
)
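# Under the patched lexer and identifier rules above, names such as
# ``#staging_table`` (temporary-table prefix), ``_load$1`` and ``2nd_pass``
# lex as naked identifiers, while ``123`` does not (identifiers can't be
# all digits). These example names are hypothetical; matching here is
# assumed to be case-insensitive, as for other sqlfluff identifier rules.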
""" type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Sequence( OneOf("INCLUDE", "EXCLUDE"), "NULLS", optional=True, ), Bracketed( Sequence( Ref("ColumnReferenceSegment"), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("AliasExpressionSegment", optional=True), ) ), ), ), ), ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression. See https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html for details. """ type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Sequence( OptionallyBracketed(Ref("FunctionSegment")), Ref("AliasExpressionSegment", optional=True), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed( Delimited( Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), ), ), ), ) class DateTimeTypeIdentifier(BaseSegment): """A Date Time type.""" type = "datetime_type_identifier" match_grammar = OneOf( "DATE", "DATETIME", Ref("TimeWithTZGrammar"), OneOf("TIMETZ", "TIMESTAMPTZ"), # INTERVAL types are not Datetime types under Redshift: # https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( # The brackets might be empty for some cases... Delimited( OneOf( Ref("LiteralGrammar"), # In redshift, character types offer on optional MAX # keyword in their parameters. "MAX", ), optional=True, ), ) class DatatypeSegment(BaseSegment): """A data type segment. Indicates a data type. https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html """ type = "data_type" match_grammar = OneOf( # numeric types "SMALLINT", "INT2", "INTEGER", "INT", "INT4", "BIGINT", "INT8", "REAL", "FLOAT4", Sequence("DOUBLE", "PRECISION"), "FLOAT8", "FLOAT", # numeric types [precision ["," scale])] Sequence( OneOf("DECIMAL", "NUMERIC"), Ref("BracketedArguments", optional=True), ), # character types OneOf( Sequence( OneOf( "CHAR", "CHARACTER", "NCHAR", "VARCHAR", Sequence("CHARACTER", "VARYING"), "NVARCHAR", ), Ref("BracketedArguments", optional=True), ), "BPCHAR", "TEXT", ), Ref("DateTimeTypeIdentifier"), # INTERVAL is a data type *only* for conversion operations "INTERVAL", # boolean types OneOf("BOOLEAN", "BOOL"), # hllsketch type "HLLSKETCH", # super type "SUPER", # spatial data "GEOMETRY", "GEOGRAPHY", # binary type Sequence( OneOf( "VARBYTE", "VARBINARY", Sequence("BINARY", "VARYING"), ), Ref("BracketedArguments", optional=True), ), "ANYELEMENT", Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), Ref("DatatypeIdentifierSegment"), allow_gaps=False, ), ) class DataFormatSegment(BaseSegment): """DataFormat segment. Indicates data format available for COPY commands. 


class DateTimeTypeIdentifier(BaseSegment):
    """A Date Time type."""

    type = "datetime_type_identifier"
    match_grammar = OneOf(
        "DATE",
        "DATETIME",
        Ref("TimeWithTZGrammar"),
        OneOf("TIMETZ", "TIMESTAMPTZ"),
        # INTERVAL types are not Datetime types under Redshift:
        # https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html
    )


class BracketedArguments(ansi.BracketedArguments):
    """A series of bracketed arguments.

    e.g. the bracketed part of numeric(1, 3)
    """

    match_grammar = Bracketed(
        # The brackets might be empty for some cases...
        Delimited(
            OneOf(
                Ref("LiteralGrammar"),
                # In redshift, character types offer an optional MAX
                # keyword in their parameters.
                "MAX",
            ),
            optional=True,
        ),
    )


class DatatypeSegment(BaseSegment):
    """A data type segment.

    Indicates a data type.

    https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html
    """

    type = "data_type"
    match_grammar = OneOf(
        # numeric types
        "SMALLINT",
        "INT2",
        "INTEGER",
        "INT",
        "INT4",
        "BIGINT",
        "INT8",
        "REAL",
        "FLOAT4",
        Sequence("DOUBLE", "PRECISION"),
        "FLOAT8",
        "FLOAT",
        # numeric types [(precision [, scale])]
        Sequence(
            OneOf("DECIMAL", "NUMERIC"),
            Ref("BracketedArguments", optional=True),
        ),
        # character types
        OneOf(
            Sequence(
                OneOf(
                    "CHAR",
                    "CHARACTER",
                    "NCHAR",
                    "VARCHAR",
                    Sequence("CHARACTER", "VARYING"),
                    "NVARCHAR",
                ),
                Ref("BracketedArguments", optional=True),
            ),
            "BPCHAR",
            "TEXT",
        ),
        Ref("DateTimeTypeIdentifier"),
        # INTERVAL is a data type *only* for conversion operations
        "INTERVAL",
        # boolean types
        OneOf("BOOLEAN", "BOOL"),
        # hllsketch type
        "HLLSKETCH",
        # super type
        "SUPER",
        # spatial data
        "GEOMETRY",
        "GEOGRAPHY",
        # binary type
        Sequence(
            OneOf(
                "VARBYTE",
                "VARBINARY",
                Sequence("BINARY", "VARYING"),
            ),
            Ref("BracketedArguments", optional=True),
        ),
        "ANYELEMENT",
        Sequence(
            Ref("SingleIdentifierGrammar"),
            Ref("DotSegment"),
            Ref("DatatypeIdentifierSegment"),
            allow_gaps=False,
        ),
    )


class DataFormatSegment(BaseSegment):
    """DataFormat segment.

    Indicates the data formats available for COPY commands.

    https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html
    """

    type = "data_format_segment"
    match_grammar = Sequence(
        Sequence(
            "FORMAT",
            Ref.keyword("AS", optional=True),
            optional=True,
        ),
        OneOf(
            Sequence(
                "CSV",
                Sequence(
                    "QUOTE",
                    Ref.keyword("AS", optional=True),
                    Ref("QuotedLiteralSegment"),
                    optional=True,
                ),
            ),
            Sequence(
                "SHAPEFILE",
                Sequence(
                    "SIMPLIFY",
                    Ref.keyword("AUTO", optional=True),
                    Ref("NumericLiteralSegment", optional=True),
                    optional=True,
                ),
            ),
            Sequence(
                OneOf("AVRO", "JSON"),
                Sequence(
                    Ref.keyword("AS", optional=True),
                    Ref("QuotedLiteralSegment"),
                    optional=True,
                ),
            ),
            "PARQUET",
            "ORC",
            "RCFILE",
            "SEQUENCEFILE",
        ),
    )
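# DataFormatSegment covers the format portion of COPY statements such as
# the following (a hedged sketch; the bucket path and role ARN are
# placeholders):
#
#     COPY favoritemovies
#     FROM 's3://mybucket/data/'
#     IAM_ROLE 'arn:aws:iam::0123456789012:role/MyRedshiftRole'
#     FORMAT AS AVRO 'auto';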


class AuthorizationSegment(BaseSegment):
    """Authorization segment.

    Specifies authorization to access data in another AWS resource.

    https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html
    """

    type = "authorization_segment"
    match_grammar = AnySetOf(
        OneOf(
            Sequence(
                "IAM_ROLE",
                OneOf(
                    "DEFAULT",
                    Ref("QuotedLiteralSegment"),
                ),
            ),
            Sequence(
                Ref.keyword("WITH", optional=True),
                "CREDENTIALS",
                Ref.keyword("AS", optional=True),
                Ref("QuotedLiteralSegment"),
            ),
            Sequence(
                "ACCESS_KEY_ID",
                Ref("QuotedLiteralSegment"),
                "SECRET_ACCESS_KEY",
                Ref("QuotedLiteralSegment"),
                Sequence(
                    "SESSION_TOKEN",
                    Ref("QuotedLiteralSegment"),
                    optional=True,
                ),
            ),
            optional=False,
        ),
        Sequence(
            "KMS_KEY_ID",
            Ref("QuotedLiteralSegment"),
            optional=True,
        ),
        Sequence(
            "MASTER_SYMMETRIC_KEY",
            Ref("QuotedLiteralSegment"),
            optional=True,
        ),
    )


class ColumnAttributeSegment(BaseSegment):
    """Redshift specific column attributes.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "column_attribute_segment"
    match_grammar = AnySetOf(
        Sequence("DEFAULT", Ref("ExpressionSegment")),
        Sequence(
            "IDENTITY",
            Bracketed(Delimited(Ref("NumericLiteralSegment")), optional=True),
        ),
        Sequence(
            "GENERATED",
            "BY",
            "DEFAULT",
            "AS",
            "IDENTITY",
            Bracketed(Delimited(Ref("NumericLiteralSegment")), optional=True),
        ),
        Sequence("ENCODE", Ref("ColumnEncodingGrammar")),
        "DISTKEY",
        "SORTKEY",
        Sequence("COLLATE", OneOf("CASE_SENSITIVE", "CASE_INSENSITIVE")),
    )


class ColumnConstraintSegment(BaseSegment):
    """Redshift specific column constraints.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "column_constraint_segment"
    match_grammar = AnySetOf(
        OneOf(Sequence("NOT", "NULL"), "NULL"),
        OneOf("UNIQUE", Ref("PrimaryKeyGrammar")),
        Sequence(
            "REFERENCES",
            Ref("TableReferenceSegment"),
            Bracketed(Ref("ColumnReferenceSegment"), optional=True),
        ),
    )


class AlterTableActionSegment(BaseSegment):
    """Alter Table Action Segment.

    https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_TABLE.html
    """

    type = "alter_table_action_segment"
    match_grammar = OneOf(
        Sequence(
            "ADD",
            Ref("TableConstraintSegment"),
            Sequence("NOT", "VALID", optional=True),
        ),
        Sequence("VALIDATE", "CONSTRAINT", Ref("ParameterNameSegment")),
        Sequence(
            "DROP",
            "CONSTRAINT",
            Ref("ParameterNameSegment"),
            Ref("DropBehaviorGrammar", optional=True),
        ),
        Sequence(
            "OWNER",
            "TO",
            OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")),
        ),
        Sequence(
            "RENAME",
            "TO",
            OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")),
        ),
        Sequence(
            "RENAME",
            "COLUMN",
            Ref("ColumnReferenceSegment"),
            "TO",
            Ref("ColumnReferenceSegment"),
        ),
        Sequence(
            "ALTER",
            Ref.keyword("COLUMN", optional=True),
            Ref("ColumnReferenceSegment"),
            OneOf(
                Sequence("TYPE", Ref("DatatypeSegment")),
                Sequence(
                    "ENCODE",
                    Delimited(Ref("ColumnEncodingGrammar")),
                ),
            ),
        ),
        Sequence("ALTER", "DISTKEY", Ref("ColumnReferenceSegment")),
        Sequence(
            "ALTER",
            "DISTSTYLE",
            OneOf(
                "ALL",
                "EVEN",
                Sequence("KEY", "DISTKEY", Ref("ColumnReferenceSegment")),
                "AUTO",
            ),
        ),
        Sequence(
            "ALTER",
            Ref.keyword("COMPOUND", optional=True),
            "SORTKEY",
            Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
        ),
        Sequence(
            "ALTER",
            "SORTKEY",
            OneOf("AUTO", "NONE"),
        ),
        Sequence("ALTER", "ENCODE", "AUTO"),
        Sequence(
            "ADD",
            Ref.keyword("COLUMN", optional=True),
            Ref("ColumnReferenceSegment"),
            Ref("DatatypeSegment"),
            Sequence("DEFAULT", Ref("ExpressionSegment"), optional=True),
            Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True),
            AnyNumberOf(Ref("ColumnConstraintSegment")),
        ),
        Sequence(
            "DROP",
            Ref.keyword("COLUMN", optional=True),
            Ref("ColumnReferenceSegment"),
            Ref("DropBehaviorGrammar", optional=True),
        ),
        Sequence(
            "APPEND",
            "FROM",
            Ref("TableReferenceSegment"),
            Ref.keyword("IGNOREEXTRA", optional=True),
            Ref.keyword("FILLTARGET", optional=True),
        ),
        Sequence("SET", "LOCATION", Ref("QuotedLiteralSegment")),
    )


class TableAttributeSegment(BaseSegment):
    """Redshift specific table attributes.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "table_constraint"
    match_grammar = AnySetOf(
        Sequence("DISTSTYLE", OneOf("AUTO", "EVEN", "KEY", "ALL"), optional=True),
        Sequence("DISTKEY", Bracketed(Ref("ColumnReferenceSegment")), optional=True),
        OneOf(
            Sequence(
                OneOf("COMPOUND", "INTERLEAVED", optional=True),
                "SORTKEY",
                Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
            ),
            Sequence("SORTKEY", "AUTO"),
            optional=True,
        ),
        Sequence("ENCODE", "AUTO", optional=True),
    )


class TableConstraintSegment(BaseSegment):
    """Redshift specific table constraints.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "table_constraint"
    match_grammar = Sequence(
        Sequence(  # [ CONSTRAINT ]
            "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True
        ),
        OneOf(
            Sequence("UNIQUE", Bracketed(Delimited(Ref("ColumnReferenceSegment")))),
            Sequence(
                "PRIMARY",
                "KEY",
                Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
            ),
            Sequence(
                "FOREIGN",
                "KEY",
                Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
                "REFERENCES",
                Ref("TableReferenceSegment"),
                Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
            ),
        ),
    )


class LikeOptionSegment(BaseSegment):
    """Like Option Segment.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "like_option_segment"
    match_grammar = Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS")
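# Taken together, the attribute and constraint grammars above cover table
# tuning clauses like the following (an illustrative sketch; the table and
# column names are hypothetical):
#
#     CREATE TABLE sales (
#         salesid INTEGER NOT NULL ENCODE az64,
#         saletime TIMESTAMP
#     )
#     DISTSTYLE KEY DISTKEY (salesid)
#     COMPOUND SORTKEY (salesid, saletime);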


class CreateTableStatementSegment(BaseSegment):
    """A `CREATE TABLE` statement.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
    """

    type = "create_table_statement"
    match_grammar = Sequence(
        "CREATE",
        Ref.keyword("LOCAL", optional=True),
        Ref("TemporaryGrammar", optional=True),
        "TABLE",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        Bracketed(
            Delimited(
                # Columns and comment syntax:
                OneOf(
                    Sequence(
                        Ref("ColumnReferenceSegment"),
                        Ref("DatatypeSegment"),
                        AnyNumberOf(
                            Ref("ColumnAttributeSegment"),
                            Ref("ColumnConstraintSegment"),
                            optional=True,
                        ),
                    ),
                    Ref("TableConstraintSegment"),
                    Sequence(
                        "LIKE",
                        Ref("TableReferenceSegment"),
                        AnyNumberOf(Ref("LikeOptionSegment"), optional=True),
                    ),
                ),
            )
        ),
        Sequence("BACKUP", OneOf("YES", "NO", optional=True), optional=True),
        AnyNumberOf(Ref("TableAttributeSegment"), optional=True),
    )


class CreateTableAsStatementSegment(BaseSegment):
    """A `CREATE TABLE AS` statement.

    As specified in
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_AS.html
    """

    type = "create_table_as_statement"
    match_grammar = Sequence(
        "CREATE",
        Sequence(
            Ref.keyword("LOCAL", optional=True),
            OneOf("TEMPORARY", "TEMP"),
            optional=True,
        ),
        "TABLE",
        Ref("ObjectReferenceSegment"),
        Bracketed(
            Delimited(Ref("ColumnReferenceSegment")),
            optional=True,
        ),
        Sequence("BACKUP", OneOf("YES", "NO"), optional=True),
        Ref("TableAttributeSegment", optional=True),
        "AS",
        OptionallyBracketed(Ref("SelectableGrammar")),
    )


class CreateModelStatementSegment(BaseSegment):
    """A `CREATE MODEL` statement.

    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_MODEL.html

    NB: the order of keywords matters.
    """

    type = "create_model_statement"
    match_grammar = Sequence(
        "CREATE",
        "MODEL",
        Ref("ObjectReferenceSegment"),
        Sequence(
            "FROM",
            OneOf(
                Ref("QuotedLiteralSegment"),
                Bracketed(Ref("SelectableGrammar")),
                Ref("ObjectReferenceSegment"),
            ),
            optional=True,
        ),
        Sequence("TARGET", Ref("ColumnReferenceSegment"), optional=True),
        Sequence(
            "FUNCTION",
            Ref("ObjectReferenceSegment"),
            Bracketed(Delimited(Ref("DatatypeSegment")), optional=True),
        ),
        Sequence("RETURNS", Ref("DatatypeSegment"), optional=True),
        Sequence("SAGEMAKER", Ref("QuotedLiteralSegment"), optional=True),
        Sequence(
            "IAM_ROLE",
            OneOf(
                "DEFAULT",
                Ref("QuotedLiteralSegment"),
            ),
        ),
        Sequence("AUTO", OneOf("ON", "OFF"), optional=True),
        Sequence(
            "MODEL_TYPE",
            OneOf("XGBOOST", "MLP", "KMEANS"),
            optional=True,
        ),
        Sequence(
            "PROBLEM_TYPE",
            OneOf(
                "REGRESSION",
                "BINARY_CLASSIFICATION",
                "MULTICLASS_CLASSIFICATION",
            ),
            optional=True,
        ),
        Sequence("OBJECTIVE", Ref("QuotedLiteralSegment"), optional=True),
        Sequence("PREPROCESSORS", Ref("QuotedLiteralSegment"), optional=True),
        Sequence(
            "HYPERPARAMETERS",
            "DEFAULT",
            Sequence(
                "EXCEPT",
                Bracketed(Delimited(Anything())),
                optional=True,
            ),
            optional=True,
        ),
        Sequence(
            "SETTINGS",
            Bracketed(
                Sequence(
                    "S3_BUCKET",
                    Ref("QuotedLiteralSegment"),
                    Sequence("KMS_KEY_ID", Ref("QuotedLiteralSegment"), optional=True),
                    Sequence(
                        "S3_GARBAGE_COLLECT",
                        OneOf("ON", "OFF"),
                        optional=True,
                    ),
                    Sequence("MAX_CELLS", Ref("NumericLiteralSegment"), optional=True),
                    Sequence(
                        "MAX_RUNTIME", Ref("NumericLiteralSegment"), optional=True
                    ),
                ),
            ),
            optional=True,
        ),
    )
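# A typical CREATE MODEL call that this grammar accepts (a sketch modelled
# on the AWS docs; the names, ARN and bucket are placeholders):
#
#     CREATE MODEL customer_churn
#     FROM (SELECT state, area_code, churn FROM customer_activity)
#     TARGET churn
#     FUNCTION ml_fn_customer_churn
#     IAM_ROLE 'arn:aws:iam::0123456789012:role/MyRedshiftRole'
#     SETTINGS (S3_BUCKET 'my-bucket');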
As specified in: https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_MODEL.html """ type = "show_model_statement" match_grammar = Sequence( "SHOW", "MODEL", OneOf( "ALL", Ref("ObjectReferenceSegment"), ), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Bracketed( # Columns and comment syntax: Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ), Ref("PartitionedBySegment", optional=True), Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Ref("RowFormatDelimitedSegment"), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Sequence( "WITH", "SERDEPROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ), ), optional=True, ), "STORED", "AS", OneOf( "PARQUET", "RCFILE", "SEQUENCEFILE", "TEXTFILE", "ORC", "AVRO", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), "LOCATION", Ref("QuotedLiteralSegment"), Sequence( "TABLE", "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ) class CreateExternalTableAsStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE AS` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionedBySegment", optional=True), Sequence( "ROW", "FORMAT", "DELIMITED", Ref("RowFormatDelimitedSegment"), optional=True, ), "STORED", "AS", OneOf( "PARQUET", "TEXTFILE", ), "LOCATION", Ref("QuotedLiteralSegment"), Sequence( "TABLE", "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class CreateExternalSchemaStatementSegment(BaseSegment): """A `CREATE EXTERNAL SCHEMA` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_SCHEMA.html """ type = "create_external_schema_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), "FROM", OneOf( Sequence("DATA", "CATALOG"), Sequence("HIVE", "METASTORE"), "POSTGRES", "MYSQL", "KINESIS", "MSK", "REDSHIFT", "KAFKA", ), AnySetOf( Sequence("DATABASE", Ref("QuotedLiteralSegment")), Sequence("REGION", Ref("QuotedLiteralSegment")), Sequence("SCHEMA", Ref("QuotedLiteralSegment")), Sequence( "URI", Ref("QuotedLiteralSegment"), Sequence("PORT", Ref("NumericLiteralSegment"), optional=True), ), Sequence( "IAM_ROLE", OneOf( "DEFAULT", Ref("QuotedLiteralSegment"), ), ), Sequence("AUTHENTICATION", OneOf("NONE", "IAM", "MTLS")), OneOf( Sequence("AUTHENTICATION_ARN", Ref("QuotedLiteralSegment")), Sequence("SECRET_ARN", Ref("QuotedLiteralSegment")), ), Sequence("CATALOG_ROLE", Ref("QuotedLiteralSegment")), Sequence("CREATE", "EXTERNAL", "DATABASE", "IF", "NOT", "EXISTS"), optional=True, ), ) class CreateLibraryStatementSegment(BaseSegment): """A `CREATE LIBRARY` statement. 
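    For illustration, a statement of this shape should parse (the bucket
    and library names are invented, and the `IAM_ROLE` clause assumes the
    `AuthorizationSegment` defined earlier in this file accepts it):

        CREATE OR REPLACE LIBRARY urlparse3
        LANGUAGE plpythonu
        FROM 's3://my-bucket/urlparse3-1.0.3.zip'
        IAM_ROLE DEFAULT
        REGION AS 'us-east-1';
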
    https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_LIBRARY.html
    """

    type = "create_library_statement"
    match_grammar = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "LIBRARY",
        Ref("ObjectReferenceSegment"),
        "LANGUAGE",
        "PLPYTHONU",
        "FROM",
        Ref("QuotedLiteralSegment"),
        AnySetOf(
            Ref("AuthorizationSegment", optional=False),
            Sequence(
                "REGION",
                Ref.keyword("AS", optional=True),
                Ref("QuotedLiteralSegment"),
                optional=True,
            ),
        ),
    )


class UnloadStatementSegment(BaseSegment):
    """An `UNLOAD` statement.

    https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
    """

    type = "unload_statement"
    match_grammar = Sequence(
        "UNLOAD",
        Bracketed(Ref("QuotedLiteralSegment")),
        "TO",
        Ref("QuotedLiteralSegment"),
        AnySetOf(
            Ref("AuthorizationSegment", optional=False),
            Sequence(
                "REGION",
                Ref.keyword("AS", optional=True),
                Ref("QuotedLiteralSegment"),
                optional=True,
            ),
            Ref("CompressionTypeGrammar", optional=True),
            Sequence(
                Sequence(
                    "FORMAT",
                    Ref.keyword("AS", optional=True),
                    optional=True,
                ),
                OneOf(
                    "CSV",
                    "JSON",
                    "PARQUET",
                ),
                optional=True,
            ),
            Sequence(
                "PARTITION",
                "BY",
                Ref("BracketedColumnReferenceListGrammar"),
                Ref.keyword("INCLUDE", optional=True),
            ),
            Sequence(
                "PARALLEL",
                OneOf(
                    "PRESET",
                    "ON",
                    "OFF",
                    "TRUE",
                    "FALSE",
                    optional=True,
                ),
                optional=True,
            ),
            Sequence(
                "EXTENSION",
                Ref("QuotedLiteralSegment"),
                Sequence(
                    "PARALLEL",
                    OneOf(
                        "ON",
                        "OFF",
                        "TRUE",
                        "FALSE",
                    ),
                    optional=True,
                ),
                optional=True,
            ),
            OneOf(
                Sequence(
                    "DELIMITER",
                    Ref.keyword("AS", optional=True),
                    Ref("QuotedLiteralSegment"),
                ),
                Sequence(
                    "FIXEDWIDTH",
                    Ref.keyword("AS", optional=True),
                    Ref("QuotedLiteralSegment"),
                ),
                optional=True,
            ),
            Sequence(
                "MANIFEST",
                Ref.keyword("VERBOSE", optional=True),
                optional=True,
            ),
            Sequence(
                "NULL",
                "AS",
                Ref("QuotedLiteralSegment"),
                optional=True,
            ),
            AnySetOf(
                OneOf(
                    "MAXFILESIZE",
                    "ROWGROUPSIZE",
                ),
                Ref.keyword("AS", optional=True),
                Ref("NumericLiteralSegment"),
                OneOf(
                    "MB",
                    "GB",
                ),
                optional=True,
            ),
            Sequence(
                "ENCRYPTED",
                Ref.keyword("AUTO", optional=True),
                optional=True,
            ),
            Ref.keyword("ALLOWOVERWRITE", optional=True),
            Ref.keyword("CLEANPATH", optional=True),
            Ref.keyword("ESCAPE", optional=True),
            Ref.keyword("ADDQUOTES", optional=True),
            Ref.keyword("HEADER", optional=True),
        ),
    )


class CopyStatementSegment(postgres.CopyStatementSegment):
    """A `COPY` statement.
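    For illustration, a statement of this shape should parse (names are
    invented; the `IAM_ROLE` clause assumes the `AuthorizationSegment`
    defined earlier in this file, and `FORMAT AS CSV` assumes the
    `DataFormatSegment` covers it):

        COPY sales (sale_id, amount)
        FROM 's3://my-bucket/data/sales/'
        IAM_ROLE 'arn:aws:iam::123456789012:role/MyRedshiftRole'
        FORMAT AS CSV
        DELIMITER AS '|'
        IGNOREHEADER 1
        ACCEPTANYDATE;
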
: - https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html - https://docs.aws.amazon.com/redshift/latest/dg/r_COPY-parameters.html """ type = "copy_statement" match_grammar = Sequence( "COPY", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "FROM", Ref("QuotedLiteralSegment"), AnySetOf( Ref("AuthorizationSegment", optional=False), Sequence( "REGION", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CompressionTypeGrammar", optional=True), Ref("DataFormatSegment", optional=True), OneOf( Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FIXEDWIDTH", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), optional=True, ), Sequence( "ENCRYPTED", Ref.keyword("AUTO", optional=True), optional=True, ), Ref.keyword("MANIFEST", optional=True), Sequence( "COMPROWS", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAXERROR", Ref.keyword("AS", optional=True), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "COMPUPDATE", OneOf( "PRESET", "ON", "OFF", "TRUE", "FALSE", optional=True, ), optional=True, ), Sequence( "STATUPDATE", OneOf( "ON", "OFF", "TRUE", "FALSE", optional=True, ), optional=True, ), Ref.keyword("NOLOAD", optional=True), Ref.keyword("ACCEPTANYDATE", optional=True), Sequence( "ACCEPTINVCHARS", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment", optional=True), optional=True, ), Ref.keyword("BLANKSASNULL", optional=True), Sequence( "DATEFORMAT", Ref.keyword("AS", optional=True), OneOf( "AUTO", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref.keyword("EMPTYASNULL", optional=True), Sequence( "ENCODING", Ref.keyword("AS", optional=True), OneOf( "UTF8", "UTF16", "UTF16BE", "UTF16LE", ), optional=True, ), Ref.keyword("ESCAPE", optional=True), Ref.keyword("EXPLICIT_IDS", optional=True), Ref.keyword("FILLRECORD", optional=True), Ref.keyword("IGNOREBLANKLINES", optional=True), Sequence( "IGNOREHEADER", Ref.keyword("AS", optional=True), Ref("LiteralGrammar"), optional=True, ), Sequence( "NULL", "AS", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "READRATIO", Ref("NumericLiteralSegment"), optional=True, ), Ref.keyword("REMOVEQUOTES", optional=True), Ref.keyword("ROUNDEC", optional=True), Sequence( "TIMEFORMAT", Ref.keyword("AS", optional=True), OneOf( "AUTO", "EPOCHSECS", "EPOCHMILLISECS", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref.keyword("TRIMBLANKS", optional=True), Ref.keyword("TRUNCATECOLUMNS", optional=True), ), ) class InsertStatementSegment(BaseSegment): """An`INSERT` statement. Redshift has two versions of insert statements: - https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_30.html - https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_external_table.html """ # TODO: This logic can be streamlined. However, there are some odd parsing issues. # See https://github.com/sqlfluff/sqlfluff/pull/1896 type = "insert_statement" match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Sequence("DEFAULT", "VALUES"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectableGrammar")), ), ), ), ) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement. 
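    For illustration, a statement of this shape should parse (names are
    invented, and the trailing quota clause assumes the `QuotaGrammar`
    defined earlier in this file):

        CREATE SCHEMA IF NOT EXISTS sales_schema
        AUTHORIZATION dw_admin
        QUOTA 50 GB;
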
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_SCHEMA.html TODO: support optional SCHEMA_ELEMENT (should mostly be provided by ansi) """ type = "create_schema_statement" match_grammar = Sequence( "CREATE", "SCHEMA", OneOf( Sequence( Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), ), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), ), ), Ref("QuotaGrammar", optional=True), ) class ProcedureParameterListSegment(BaseSegment): """The parameters for a procedure. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_PROCEDURE.html """ type = "procedure_parameter_list" # Odd syntax, but prevents eager parameters being confused for data types _param_type = OneOf("REFCURSOR", Ref("DatatypeSegment")) match_grammar = Bracketed( Delimited( Sequence( AnyNumberOf( Ref( "ParameterNameSegment", exclude=OneOf(_param_type, Ref("ArgModeGrammar")), optional=True, ), Ref("ArgModeGrammar", optional=True), max_times_per_element=1, ), _param_type, ), optional=True, ), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://www.postgresql.org/docs/14/sql-createprocedure.html TODO: Just a basic statement for now, without full syntax. based on CreateFunctionStatementSegment without a return type. """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment"), Ref("FunctionDefinitionGrammar"), ) class AlterProcedureStatementSegment(BaseSegment): """An `ALTER PROCEDURE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_PROCEDURE.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment", optional=True), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_USER", "SESSION_USER", ), ), ), ) class DropProcedureStatementSegment(BaseSegment): """An `DROP PROCEDURE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_PROCEDURE.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment", optional=True), ), ), ) class AlterDefaultPrivilegesSchemaObjectsSegment( postgres.AlterDefaultPrivilegesSchemaObjectsSegment ): """`ALTER DEFAULT PRIVILEGES` schema object types. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DEFAULT_PRIVILEGES.html """ match_grammar = ( postgres.AlterDefaultPrivilegesSchemaObjectsSegment.match_grammar.copy( insert=[Sequence("PROCEDURES")] ) ) class DeclareStatementSegment(BaseSegment): """A `DECLARE` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/declare.html """ type = "declare_statement" match_grammar = Sequence( "DECLARE", Ref("ObjectReferenceSegment"), "CURSOR", "FOR", Ref("SelectableGrammar"), ) class FetchStatementSegment(BaseSegment): """A `FETCH` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/fetch.html """ type = "fetch_statement" match_grammar = Sequence( "fetch", OneOf( "NEXT", "ALL", Sequence( "FORWARD", OneOf( "ALL", Ref("NumericLiteralSegment"), ), ), ), "FROM", Ref("ObjectReferenceSegment"), ) class CloseStatementSegment(BaseSegment): """A `CLOSE` statement. 
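    For illustration, `CLOSE` is the final step of the cursor lifecycle
    parsed by the `DECLARE` and `FETCH` segments above (the cursor and
    table names are invented):

        DECLARE my_cursor CURSOR FOR SELECT * FROM sales;
        FETCH FORWARD 10 FROM my_cursor;
        CLOSE my_cursor;
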
As specified in https://docs.aws.amazon.com/redshift/latest/dg/close.html """ type = "close_statement" match_grammar = Sequence( "CLOSE", Ref("ObjectReferenceSegment"), ) class AltereDatashareStatementSegment(BaseSegment): """An `ALTER DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DATASHARE.html """ type = "create_datashare_statement" match_grammar = Sequence( "ALTER", "DATASHARE", Ref("ObjectReferenceSegment"), OneOf( # add or remove objects to the datashare Sequence( OneOf( "ADD", "REMOVE", ), OneOf( Sequence( "TABLE", Delimited(Ref("TableReferenceSegment")), ), Sequence( "SCHEMA", Delimited(Ref("SchemaReferenceSegment")), ), Sequence( "FUNCTION", Delimited(Ref("FunctionNameSegment")), ), Sequence( "ALL", OneOf("TABLES", "FUNCTIONS"), "IN", "SCHEMA", Delimited(Ref("SchemaReferenceSegment")), ), ), ), # configure the properties of the datashare Sequence( "SET", OneOf( Sequence( "PUBLICACCESSIBLE", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), Sequence( "INCLUDENEW", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), "FOR", "SCHEMA", Ref("SchemaReferenceSegment"), ), ), ), ), ) class CreateDatashareStatementSegment(BaseSegment): """A `CREATE DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_DATASHARE.html """ type = "create_datashare_statement" match_grammar = Sequence( "CREATE", "DATASHARE", Ref("ObjectReferenceSegment"), Sequence( Ref.keyword("SET", optional=True), "PUBLICACCESSIBLE", Ref("EqualsSegment", optional=True), OneOf( "TRUE", "FALSE", ), optional=True, ), ) class DescDatashareStatementSegment(BaseSegment): """A `DESC DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DESC_DATASHARE.html """ type = "desc_datashare_statement" match_grammar = Sequence( "DESC", "DATASHARE", Ref("ObjectReferenceSegment"), Sequence( "OF", Sequence( "ACCOUNT", Ref("QuotedLiteralSegment"), optional=True, ), "NAMESPACE", Ref("QuotedLiteralSegment"), optional=True, ), ) class DropDatashareStatementSegment(BaseSegment): """A `DROP DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_DATASHARE.html """ type = "drop_datashare_statement" match_grammar = Sequence( "DROP", "DATASHARE", Ref("ObjectReferenceSegment"), ) class ShowDatasharesStatementSegment(BaseSegment): """A `SHOW DATASHARES` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_DATASHARES.html """ type = "show_datashares_statement" match_grammar = Sequence( "SHOW", "DATASHARES", Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ) class GrantUsageDatashareStatementSegment(BaseSegment): """A `GRANT DATASHARES` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_GRANT.html section "Granting datashare permissions" Note: According to documentation, multiple accounts and namespaces can be specified. However, tests using redshift instance showed this causes a syntax error. """ type = "grant_datashare_statement" match_grammar = Sequence( OneOf("GRANT", "REVOKE"), "USAGE", "ON", "DATASHARE", Ref("ObjectReferenceSegment"), OneOf("TO", "FROM"), OneOf( Sequence("NAMESPACE", Ref("QuotedLiteralSegment")), Sequence( "ACCOUNT", Sequence( Ref("QuotedLiteralSegment"), Sequence("VIA", "DATA", "CATALOG", optional=True), ), ), ), ) class CreateRlsPolicyStatementSegment(BaseSegment): """A `CREATE RLS POLICY` statement. 
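    For illustration, a statement of this shape should parse (the policy
    and column names follow the AWS documentation example):

        CREATE RLS POLICY policy_concerts
        WITH (catgroup VARCHAR(10))
        USING (catgroup = 'Concerts');
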
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_RLS_POLICY.html """ type = "create_rls_policy_statement" match_grammar = Sequence( "CREATE", "RLS", "POLICY", Ref("ObjectReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ), Sequence( Ref.keyword("AS", optional=True), Ref("AliasExpressionSegment"), optional=True, ), optional=True, ), Sequence( "USING", Bracketed(Ref("ExpressionSegment")), ), ) class ManageRlsPolicyStatementSegment(BaseSegment): """An `ATTACH/DETACH RLS POLICY` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ATTACH_RLS_POLICY.html https://docs.aws.amazon.com/redshift/latest/dg/r_DETACH_RLS_POLICY.html """ # 1 statement for both ATTACH and DETACH since same syntax type = "manage_rls_policy_statement" match_grammar = Sequence( OneOf("ATTACH", "DETACH"), "RLS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref.keyword("TABLE", optional=True), Delimited( Ref("TableReferenceSegment"), ), OneOf("TO", "FROM"), Delimited( OneOf( Sequence( Ref.keyword("ROLE", optional=True), Ref("RoleReferenceSegment"), ), "PUBLIC", ), ), ) class DropRlsPolicyStatementSegment(BaseSegment): """A `DROP RLS POLICY` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_RLS_POLICY.html """ type = "drop_rls_policy_statement" match_grammar = Sequence( "DROP", "RLS", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( "CASCADE", "RESTRICT", optional=True, ), ) class AnalyzeCompressionStatementSegment(BaseSegment): """An `ANALYZE COMPRESSION` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ANALYZE_COMPRESSION.html """ type = "analyze_compression_statement" match_grammar = Sequence( OneOf("ANALYZE", "ANALYSE"), "COMPRESSION", Sequence( Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence( "COMPROWS", Ref("NumericLiteralSegment"), optional=True, ), optional=True, ), ) class VacuumStatementSegment(postgres.VacuumStatementSegment): """A `VACUUM` statement. 
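    For illustration, statements of this shape should parse (the table
    names are invented):

        VACUUM FULL sales TO 75 PERCENT BOOST;
        VACUUM DELETE ONLY event;
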
https://docs.aws.amazon.com/redshift/latest/dg/r_VACUUM_command.html """ match_grammar = Sequence( "VACUUM", OneOf( "FULL", "REINDEX", "RECLUSTER", Sequence( OneOf( "SORT", "DELETE", ), "ONLY", ), optional=True, ), Ref("TableReferenceSegment", optional=True), Sequence( "TO", Ref("NumericLiteralSegment"), "PERCENT", optional=True, ), Ref.keyword("BOOST", optional=True), ) # Adding Redshift specific statements class StatementSegment(postgres.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = postgres.StatementSegment.match_grammar.copy( insert=[ Ref("CreateLibraryStatementSegment"), Ref("CreateGroupStatementSegment"), Ref("AlterUserStatementSegment"), Ref("AlterGroupStatementSegment"), Ref("CreateExternalTableAsStatementSegment"), Ref("CreateExternalTableStatementSegment"), Ref("CreateExternalSchemaStatementSegment"), Ref("DataFormatSegment"), Ref("UnloadStatementSegment"), Ref("CopyStatementSegment"), Ref("ShowModelStatementSegment"), Ref("CreateDatashareStatementSegment"), Ref("DescDatashareStatementSegment"), Ref("DropDatashareStatementSegment"), Ref("ShowDatasharesStatementSegment"), Ref("AltereDatashareStatementSegment"), Ref("DeclareStatementSegment"), Ref("FetchStatementSegment"), Ref("CloseStatementSegment"), Ref("AnalyzeCompressionStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("CallStatementSegment"), Ref("CreateRlsPolicyStatementSegment"), Ref("ManageRlsPolicyStatementSegment"), Ref("DropRlsPolicyStatementSegment"), Ref("CreateExternalFunctionStatementSegment"), Ref("GrantUsageDatashareStatementSegment"), ], remove=[ Ref("ShowStatementSegment"), ], ) class PartitionedBySegment(BaseSegment): """Partitioned By Segment. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "partitioned_by_segment" match_grammar = Sequence( Ref.keyword("PARTITIONED"), "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment", optional=True), ), ), ), ) class RowFormatDelimitedSegment(BaseSegment): """Row Format Delimited Segment. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "row_format_delimited_segment" match_grammar = AnySetOf( Sequence( "FIELDS", "TERMINATED", "BY", Ref("QuotedLiteralSegment"), ), Sequence( "LINES", "TERMINATED", "BY", Ref("QuotedLiteralSegment"), ), optional=True, ) class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html """ match_grammar = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), Ref.keyword("WITH", optional=True), "PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "DISABLE"), AnySetOf( OneOf( "CREATEDB", "NOCREATEDB", ), OneOf( "CREATEUSER", "NOCREATEUSER", ), Sequence( "SYSLOG", "ACCESS", OneOf( "RESTRICTED", "UNRESTRICTED", ), ), Sequence("IN", "GROUP", Delimited(Ref("ObjectReferenceSegment"))), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), Sequence( "CONNECTION", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), ), Sequence( "SESSION", "TIMEOUT", Ref("NumericLiteralSegment"), ), ), ) class CreateGroupStatementSegment(BaseSegment): """`CREATE GROUP` statement. 
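    For illustration, a statement of this shape should parse (the group
    and user names are invented):

        CREATE GROUP admin_group WITH USER alice, bob;
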
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_GROUP.html """ type = "create_group" match_grammar = Sequence( "CREATE", "GROUP", Ref("ObjectReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), "USER", Delimited( Ref("ObjectReferenceSegment"), ), optional=True, ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_USER.html """ type = "alter_user_statement" match_grammar = Sequence( "ALTER", "USER", Ref("RoleReferenceSegment"), Ref.keyword("WITH", optional=True), AnySetOf( OneOf( "CREATEDB", "NOCREATEDB", ), OneOf( "CREATEUSER", "NOCREATEUSER", ), Sequence( "SYSLOG", "ACCESS", OneOf( "RESTRICTED", "UNRESTRICTED", ), ), Sequence( "PASSWORD", OneOf( Ref("QuotedLiteralSegment"), "DISABLE", ), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "CONNECTION", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), ), OneOf( Sequence( "SESSION", "TIMEOUT", Ref("NumericLiteralSegment"), ), Sequence( "RESET", "SESSION", "TIMEOUT", ), ), OneOf( Sequence( "SET", Ref("ObjectReferenceSegment"), OneOf( "TO", Ref("EqualsSegment"), ), OneOf( "DEFAULT", Ref("LiteralGrammar"), ), ), Sequence( "RESET", Ref("ObjectReferenceSegment"), ), ), min_times=1, ), ) class AlterGroupStatementSegment(BaseSegment): """`ALTER GROUP` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_GROUP.html """ type = "alter_group" match_grammar = Sequence( "ALTER", "GROUP", Ref("ObjectReferenceSegment"), OneOf( Sequence( OneOf("ADD", "DROP"), "USER", Delimited( Ref("ObjectReferenceSegment"), ), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), ), ) class TransactionStatementSegment(BaseSegment): """A `BEGIN|START`, `COMMIT|END` or `ROLLBACK|ABORT` transaction statement. https://docs.aws.amazon.com/redshift/latest/dg/r_BEGIN.html """ type = "transaction_statement" match_grammar = Sequence( OneOf("BEGIN", "START", "COMMIT", "END", "ROLLBACK", "ABORT"), OneOf("TRANSACTION", "WORK", optional=True), Sequence( "ISOLATION", "LEVEL", OneOf( "SERIALIZABLE", Sequence("READ", "COMMITTED"), Sequence("READ", "UNCOMMITTED"), Sequence("REPEATABLE", "READ"), ), optional=True, ), OneOf( Sequence("READ", "ONLY"), Sequence("READ", "WRITE"), optional=True, ), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_SCHEMA.html """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("SchemaReferenceSegment"), ), Sequence( "OWNER", "TO", Ref("RoleReferenceSegment"), ), Ref("QuotaGrammar"), ), ) class LockTableStatementSegment(BaseSegment): """An `LOCK TABLE` statement. https://www.postgresql.org/docs/14/sql-lock.html """ type = "lock_table_statement" match_grammar: Matchable = Sequence( "LOCK", Ref.keyword("TABLE", optional=True), Delimited( Ref("TableReferenceSegment"), ), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Override to add Object unpivoting. """ match_grammar = ansi.TableExpressionSegment.match_grammar.copy( insert=[ Ref("ObjectUnpivotSegment", optional=True), Ref("ArrayUnnestSegment", optional=True), ], before=Ref("TableReferenceSegment"), ) class ObjectUnpivotSegment(BaseSegment): """Object unpivoting. 
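    For illustration, the `UNPIVOT` expression below should parse as a
    table expression (names are invented; `c_orders` is assumed to be a
    SUPER column):

        SELECT attr, val
        FROM customer_orders c, UNPIVOT c.c_orders AS val AT attr;
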
https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unpivoting """ type = "object_unpivoting" match_grammar: Matchable = Sequence( "UNPIVOT", Ref("ObjectReferenceSegment"), "AS", Ref("SingleIdentifierGrammar"), "AT", Ref("SingleIdentifierGrammar"), ) class ArrayAccessorSegment(ansi.ArrayAccessorSegment): """Array element accessor. Redshift allows multiple levels of array access, like Postgres, but it * doesn't allow ranges like `myarray[1:2]` * does allow function or column expressions `myarray[idx]` """ match_grammar = Sequence( AnyNumberOf( Bracketed( OneOf(Ref("NumericLiteralSegment"), Ref("ExpressionSegment")), bracket_type="square", ) ) ) class ArrayUnnestSegment(BaseSegment): """Array unnesting. https://docs.aws.amazon.com/redshift/latest/dg/query-super.html """ type = "array_unnesting" match_grammar: Matchable = Sequence( Ref("ObjectReferenceSegment"), "AS", Ref("SingleIdentifierGrammar"), "AT", Ref("SingleIdentifierGrammar"), ) class CallStatementSegment(BaseSegment): """A `CALL` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CALL_procedure.html """ type = "call_statement" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class SelectClauseModifierSegment(postgres.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = postgres.SelectClauseModifierSegment.match_grammar.copy( insert=[Sequence("TOP", Ref("NumericLiteralSegment"))], ) class ConvertFunctionNameSegment(BaseSegment): """CONVERT function name segment. Function taking a data type identifier and an expression. An alternative to CAST. """ type = "function_name" match_grammar = Sequence("CONVERT") class FunctionSegment(ansi.FunctionSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now we treat them the same because they look the same for our purposes. """ type = "function" match_grammar: Matchable = OneOf( Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. 
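            # e.g. in `DATEADD(month, 3, start_date)`, the first argument
            # is parsed as a date part rather than as a column reference
            # (illustrative example; `start_date` is an invented column).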
Sequence( Ref("DatePartFunctionNameSegment"), Ref("DateTimeFunctionContentsSegment"), ), ), Sequence( Sequence( OneOf( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), Ref("ConvertFunctionNameSegment"), ), ), Sequence( Ref.keyword("APPROXIMATE"), Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), Ref("ConvertFunctionNameSegment"), ), ), ), ), Ref("FunctionContentsSegment"), ), Ref("PostFunctionGrammar", optional=True), ), Sequence( Ref("ConvertFunctionNameSegment"), Ref("ConvertFunctionContentsSegment"), ), ) class ConvertFunctionContentsSegment(BaseSegment): """Convert Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("DatatypeSegment"), Ref("CommaSegment"), Ref("ExpressionSegment"), ), ) class FromClauseSegment(ansi.FromClauseSegment): """Slightly modified version which allows for using brackets for content of FROM.""" match_grammar = Sequence( "FROM", Delimited( OptionallyBracketed(Ref("FromExpressionSegment")), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://crate.io/docs/sql-99/en/latest/chapters/18.html#create-view-statement # https://dev.mysql.com/doc/refman/8.0/en/create-view.html # https://www.postgresql.org/docs/12/sql-createview.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class CreateMaterializedViewStatementSegment( postgres.CreateMaterializedViewStatementSegment ): """A `CREATE MATERIALIZED VIEW` statement. # https://docs.aws.amazon.com/redshift/latest/dg/materialized-view-create-sql-command.html """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), Sequence("BACKUP", OneOf("YES", "NO"), optional=True), Ref("TableAttributeSegment", optional=True), Sequence("AUTO", "REFRESH", OneOf("YES", "NO"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class CreateExternalFunctionStatementSegment(BaseSegment): """A `CREATE EXTERNAL FUNCTION` segment. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_FUNCTION.html """ type = "create_external_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "EXTERNAL", "FUNCTION", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), "RETURNS", Ref("DatatypeSegment"), OneOf("VOLATILE", "STABLE", "IMMUTABLE"), OneOf("LAMBDA", "SAGEMAKER"), Ref("QuotedLiteralSegment"), "IAM_ROLE", OneOf("DEFAULT", Ref("QuotedLiteralSegment")), Sequence( "RETRY_TIMEOUT", Ref("NumericLiteralSegment"), optional=True, ), ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`. 
    https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html
    """

    type = "qualify_clause"
    match_grammar = Sequence(
        "QUALIFY",
        ImplicitIndent,
        Ref("ExpressionSegment"),
        Dedent,
    )


class SelectStatementSegment(postgres.SelectStatementSegment):
    """A Redshift `SELECT` statement including optional Qualify.

    https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html
    """

    type = "select_statement"
    match_grammar = postgres.SelectStatementSegment.match_grammar.copy(
        insert=[Ref("QualifyClauseSegment", optional=True)],
        before=Ref("OrderByClauseSegment", optional=True),
        terminators=[Ref("SetOperatorSegment")],
    )


class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment):
    """A Redshift unordered `SELECT` statement including optional Qualify.

    https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html
    """

    type = "select_statement"
    match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy(
        insert=[Ref("QualifyClauseSegment", optional=True)],
        before=Ref("OverlapsClauseSegment", optional=True),
    )


class WildcardExpressionSegment(ansi.WildcardExpressionSegment):
    """An extension of the star expression for Redshift."""

    match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy(
        insert=[
            # Optional Exclude
            Ref("ExcludeClauseSegment", optional=True),
        ]
    )


class ExcludeClauseSegment(BaseSegment):
    """A Redshift SELECT EXCLUDE clause.

    https://docs.aws.amazon.com/redshift/latest/dg/r_EXCLUDE_list.html
    """

    type = "select_exclude_clause"
    match_grammar = Sequence(
        "EXCLUDE",
        OneOf(
            Bracketed(Delimited(Ref("SingleIdentifierGrammar"))),
            Ref("SingleIdentifierGrammar"),
        ),
    )


class GroupByClauseSegment(postgres.GroupByClauseSegment):
    """A `GROUP BY` clause like in `SELECT`."""

    type = "groupby_clause"
    match_grammar = Sequence(
        "GROUP",
        "BY",
        Indent,
        Delimited(
            OneOf(
                "ALL",
                Ref("ColumnReferenceSegment"),
                # Can `GROUP BY 1`
                Ref("NumericLiteralSegment"),
                Ref("CubeRollupClauseSegment"),
                Ref("GroupingSetsClauseSegment"),
                # Can `GROUP BY coalesce(col, 1)`
                Ref("ExpressionSegment"),
                Bracketed(),  # Allows empty parentheses
            ),
            terminators=[
                Sequence("ORDER", "BY"),
                "LIMIT",
                "HAVING",
                "QUALIFY",
                "WINDOW",
                Ref("SetOperatorSegment"),
            ],
        ),
        Dedent,
    )


class MergeStatementSegment(ansi.MergeStatementSegment):
    """A `MERGE` statement.

    https://docs.aws.amazon.com/redshift/latest/dg/r_MERGE.html
    """

    match_grammar = ansi.MergeStatementSegment.match_grammar.copy(
        insert=[OneOf(Ref("MergeMatchSegment"), Sequence("REMOVE", "DUPLICATES"))],
        remove=[
            Ref("MergeMatchSegment"),
        ],
    )


class PrepareStatementSegment(postgres.PrepareStatementSegment):
    """A `PREPARE` statement.

    https://docs.aws.amazon.com/redshift/latest/dg/r_PREPARE.html
    """

    type = "prepare_statement"
    match_grammar = Sequence(
        "PREPARE",
        Ref("ObjectReferenceSegment"),
        Bracketed(Delimited(Ref("DatatypeSegment")), optional=True),
        "AS",
        Ref("SelectableGrammar"),
    )


class DeallocateStatementSegment(postgres.DeallocateStatementSegment):
    """A `DEALLOCATE` statement.
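    For illustration, this deallocates a statement created by the
    `PREPARE` segment above (the statement name is invented; the
    `PREPARE` keyword is optional):

        PREPARE prep_sales AS SELECT COUNT(*) FROM sales;
        DEALLOCATE PREPARE prep_sales;
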
https://docs.aws.amazon.com/redshift/latest/dg/r_DEALLOCATE.html """ type = "deallocate_statement" match_grammar = Sequence( "DEALLOCATE", Ref.keyword("PREPARE", optional=True), Ref("ObjectReferenceSegment"), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_redshift_keywords.py000066400000000000000000000230051503426445100253450ustar00rootroot00000000000000"""A list of all SQL key words.""" redshift_reserved_keywords = """AES128 AES256 ALL ALLOWOVERWRITE ANALYSE ANALYZE AND ANY APPEND ARRAY AS ASC AUTHORIZATION AZ64 BETWEEN BINARY BLANKSASNULL BOTH BYTEDICT CASE CAST CHECK COLLATE COLUMN COMPROWS COMPUPDATE CONSTRAINT CREATE CREDENTIALS CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURRENT_USER_ID DATETIME DEFAULT DEFERRABLE DEFRAG DELIMITERS DELTA DELTA32K DESC DISABLE DISTINCT DO ELSE EMPTYASNULL ENABLE ENCRYPT ENCRYPTION END EXCEPT EXPLICIT_IDS FALSE FILLRECORD FOR FOREIGN FREEZE FROM FULL GLOBALDICT256 GLOBALDICT64K GRANT GROUP HAVING IDENTITY IGNORE IGNOREBLANKLINES IGNOREHEADER ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP LUN LUNS LZO MINUS MOSTLY16 MOSTLY32 MOSTLY8 NATURAL NEW NOT NOTNULL NULL NULLS OFF OFFSET OID OLD ON ONLY OPEN OR ORDER OUTER OVERLAPS PARALLEL PARTITION PERCENT PERMISSIONS PIVOT PLACING PRIMARY RAW READRATIO RECOVER REFERENCES RESPECT REJECTLOG RESORT RESTORE RIGHT RUNLENGTH SELECT SESSION_USER SIMILAR SNAPSHOT SOME SYSDATE SYSTEM TABLE TAG TDES TEXT255 TEXT32K THEN TIMESTAMP TO TOP TRAILING TRUE TRUNCATECOLUMNS UNION UNIQUE UNNEST UNPIVOT USER USING VERBOSE WHEN WHERE WITH WITHIN WITHOUT""" redshift_unreserved_keywords = """A ABORT ABS ABSENT ABSOLUTE ACCEPTANYDATE ACCEPTINVCHARS ACCESS ACCESS_KEY_ID ACCORDING ACCOUNT ACOS ACTION ADA ADD ADDQUOTES ADMIN AFTER AGGREGATE ALLOCATE ALSO ALTER ALWAYS ANYELEMENT APPLY APPROXIMATE ARE ARRAY_AGG ARRAY_MAX_CARDINALITY ASENSITIVE ASIN ASSERTION ASSIGNMENT ASYMMETRIC AT ATAN ATOMIC ATTACH ATTRIBUTE ATTRIBUTES AUTHENTICATION AUTHENTICATION_ARN AUTO AUTO_INCREMENT AVG AVRO BACKUP BACKWARD BASE64 BEFORE BEGIN BEGIN_FRAME BEGIN_PARTITION BERNOULLI BIGINT BINARY_CLASSIFICATION BINDING BIT BIT_LENGTH BLANKSASNULL BLOB BLOCKED BOM BOOL BOOLEAN BOOST BPCHAR BREADTH BUFFERS BY BYPASSRLS BZIP2 C CACHE CALL CALLED CARDINALITY CASCADE CASCADED CASE_INSENSITIVE CASE_SENSITIVE CATALOG CATALOG_NAME CATALOG_ROLE CEIL CEILING CHAIN CHAINING CHAR CHARACTER CHARACTERISTICS CHARACTERS CHARACTER_LENGTH CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA CHAR_LENGTH CHECKPOINT CLASS CLASSIFIER CLASS_ORIGIN CLEANPATH CLOB CLOSE CLUSTER COALESCE COBOL COLLATION COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA COLLECT COLUMNS COLUMN_NAME COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT COMMENTS COMMIT COMMITTED COMPOUND COMPRESSION CONCURRENTLY CONDITION CONDITIONAL CONDITION_NUMBER CONFIGURATION CONFLICT CONNECT CONNECTION CONNECTION_NAME CONSTRAINTS CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRUCTOR CONTAINS CONTENT CONTINUE CONTROL CONVERSION CONVERT COPY CORR CORRESPONDING COS COSH COST COSTS COUNT COVAR_POP COVAR_SAMP CREATEDB CREATEUSER CREATEROLE CSV CUBE CUME_DIST CURRENT CURRENT_CATALOG CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_ROW CURRENT_SCHEMA CURRENT_TRANSFORM_GROUP_FOR_TYPE CURSOR CURSOR_NAME CYCLE DATA DATABASE DATALINK DATASHARE DATASHARES DATE DATEFORMAT DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY DAYOFYEAR DB DEALLOCATE DEC DECFLOAT DECIMAL DECLARE DEFAULTS DEFERRED DEFINE DEFINED DEFINER DEFLATE 
DEGREE DELETE DELIMITED DELIMITER DENSE_RANK DEPENDS DEPTH DEREF DERIVED DESCRIBE DESCRIPTOR DETACH DETERMINISTIC DIAGNOSTICS DICTIONARY DISCARD DISCONNECT DISPATCH DISTKEY DISTSTYLE DLNEWCOPY DLPREVIOUSCOPY DLURLCOMPLETE DLURLCOMPLETEONLY DLURLCOMPLETEWRITE DLURLPATH DLURLPATHONLY DLURLPATHWRITE DLURLSCHEME DLURLSERVER DLVALUE DOCUMENT DOMAIN DOUBLE DROP DUPLICATES DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH ELEMENT EMPTY ENCODE ENCODING ENCRYPTED END-EXEC END_FRAME END_PARTITION ENFORCED ENUM EPOCH EPOCHSECS EPOCHMILLISECS EQUALS ERROR ESCAPE EVEN EVENT EVERY EXCEPTION EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXECUTION EXISTS EXP EXPLAIN EXPLICIT EXPRESSION EXTENDED EXTENSION EXTERNAL EXTRACT FAMILY FETCH FIELDS FILE FILLTARGET FILTER FINAL FINALIZE FINISH FIRST FIRST_VALUE FIXEDWIDTH FLAG FLOAT FLOAT4 FLOAT8 FLOOR FOLLOWING FORCE FORMAT FORTRAN FORWARD FOUND FRAME_ROW FREE FS FULFILL FUNCTION FUNCTIONS FUSION FUTURE G GB GENERAL GENERATED GEOGRAPHY GEOMETRY GET GLOBAL GO GOTO GRANTED GRANTS GREATEST GROUPING GROUPS GZIP HANDLER HASH HEADER HEX HIERARCHY HIVE HLLSKETCH HOLD HOUR HYPERPARAMETERS IAM IAM_ROLE ID IF IGNOREEXTRA IMMEDIATE IMMEDIATELY IMMUTABLE IMPLEMENTATION IMPLICIT IMPORT IMPORTED INCLUDE INCLUDENEW INCLUDING INCREMENT INDENT INDEX INDEXES INDICATOR INHERIT INHERITS INITIAL INLINE INOUT INPUT INPUTFORMAT INSENSITIVE INSERT INSTANCE INSTANTIABLE INSTEAD INT INT2 INT4 INT8 INTEGER INTEGRATION INTEGRITY INTERLEAVED INTERSECTION INTERVAL INVOKER ISOLATION JSON JSON_ARRAY JSON_ARRAYAGG JSON_EXISTS JSON_OBJECT JSON_OBJECTAGG JSON_QUERY JSON_TABLE JSON_TABLE_PRIMITIVE JSON_VALUE K KAFKA KEEP KEY KEYS KEY_MEMBER KEY_TYPE KINESIS KMEANS KMS_KEY_ID LABEL LAG LAMBDA LANGUAGE LARGE LAST LAST_VALUE LATERAL LEAD LEAKPROOF LEAST LENGTH LEVEL LIBRARY LIKE_REGEX LINES LINK LIST LISTAGG LISTEN LN LOAD LOCAL LOCATION LOCATOR LOCK LOCKED LOG LOG10 LOGGED LOGIN LOWER LZOP M MAIN MANAGE MANIFEST MAP MAPPING MASKING MASTER_SYMMETRIC_KEY MATCH MATCHED MATCHES MATCH_NUMBER MATCH_RECOGNIZE MATERIALIZED MAX MAXERROR MAXFILESIZE MAXVALUE MAX_CELLS MAX_RUNTIME MB MEASURES MEMBER MERGE MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METASTORE METHOD MILLISECOND MIN MINUTE MINVALUE ML MLP MOD MODE MODEL MODEL_TYPE MODIFIES MODIFY MODULE MODULUS MONITOR MONTH MORE MOVE MSK MTLS MULTICLASS_CLASSIFICATION MULTISET MYSQL MUMPS NAME NAMES NAMESPACE NAN NATIONAL NCHAR NCLOB NESTED NESTING NEXT NFC NFD NFKC NFKD NIL NO NOBYPASSRLS NOCACHE NOCREATEDB NOCREATEROLE NOCREATEUSER NOCYCLE NOINHERIT NOLOAD NOLOGIN NOREPLICATION NOSUPERUSER NONE NOORDER NORMALIZE OUTPUTFORMAT NORMALIZED NOTHING NOTIFY NOWAIT NTH_VALUE NTILE NULLABLE NULLIF NUMBER NUMERIC NVARCHAR OBJECT OBJECTIVE OCCURRENCES_REGEX OCTET_LENGTH OCTETS OF OFFLINE OIDS OMIT ONE OPERATE OPERATOR OPTION OPTIONS ORC ORDERING ORDINALITY OTHERS OUT OUTPUT OVER OVERFLOW OVERLAY OVERRIDING OVERWRITE OWNED OWNER OWNERSHIP P PAD PARAMETER PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PARQUET PARSER PARTIAL PARTITIONED PASCAL PASS PASSING PASSTHROUGH PASSWORD PAST PATH PATTERN PER PERCENT_RANK PERCENTILE_CONT PERCENTILE_DISC PERIOD PERMISSION PERMUTE PIPE PLAIN PLAN PLANS PLI POLICY PORT PORTION POSITION POSITION_REGEX POSTGRES POWER PRECEDES PRECEDING PRECISION PREPARE PREPARED PREPROCESSORS PRESERVE PRESET PRIOR PRIVATE PRIVILEGES PROBLEM_TYPE PROCEDURAL PROCEDURE PROCEDURES PROGRAM PROPERTIES PRUNE PTF PUBLIC PUBLICACCESSIBLE PUBLICATION PLPYTHONU QUALIFY QUARTER 
QUOTA QUOTE QUOTES RANGE RANK RCFILE READ READRATIO READS REAL REASSIGN RECHECK RECLUSTER RECOVERY RECURSIVE REDSHIFT REF REFCURSOR REFERENCE_USAGE REFERENCING REFRESH REGION REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY REGRESSION REINDEX RELATIVE RELEASE REMAINDER REMOVE REMOVEQUOTES RENAME REPEATABLE REPLACE REPLICA REPLICATION REQUIRING RESET RESOURCE RESTART RESTRICT RESTRICTED RESULT RETRY_TIMEOUT RETURN RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNING RETURNS REVOKE RLIKE RLS ROLE ROLLBACK ROLLUP ROUNDEC ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA ROUTINES ROW ROW_COUNT ROW_NUMBER ROWGROUPSIZE ROWS RULE RUNNING S3_BUCKET S3_GARBAGE_COLLECT SAFE SAGEMAKER SAVEPOINT SCALAR SCALE SCHEMA SCHEMA_NAME SCHEMAS SCOPE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA SCROLL SEARCH SECOND SECRET_ACCESS_KEY SECRET_ARN SECTION SECURITY SEEK SELECTIVE SELF SENSITIVE SEPARATOR SEQUENCE SEQUENCEFILE SEQUENCES SERDE SERDEPROPERTIES SERIALIZABLE SERVER SERVER_NAME SESSION SESSION_TOKEN SET SETTINGS SETOF SETS SHAPEFILE SHARE SHOW SIMPLE SIMPLIFY SIN SINH SIZE SKIP SMALLINT SORT SORTKEY SOURCE SPACE SPECIFIC SPECIFIC_NAME SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING SQRT STABLE STAGE STAGES STANDALONE START STATE STATEMENT STATIC STATISTICS STATUPDATE STDDEV_POP STDDEV_SAMP STDIN STDOUT STORAGE STORED STREAM STREAMS STRICT STRING STRIP STRUCTURE STYLE SUBCLASS_ORIGIN SUBMULTISET SUBSCRIPTION SUBSET SUBSTRING SUBSTRING_REGEX SUCCEEDS SUM SUPER SUPERUSER SUPPORT SYMMETRIC SYSID SYSLOG SYSTEM_TIME SYSTEM_USER T TABLE_NAME TABLES TABLESAMPLE TABLESPACE TAN TANH TARGET TASK TASKS TB TEMP TEMPLATE TEMPORARY TERMINATED TEXT TEXTFILE THROUGH TIES TIME TIMEFORMAT TIMEOUT TIMETZ TIMESTAMPTZ TIMEZONE_HOUR TIMEZONE_MINUTE TOKEN TOP_LEVEL_COUNT TRANSACTION TRANSACTION_ACTIVE TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSFORM TRANSFORMS TRANSIENT TRANSLATE TRANSLATE_REGEX TRANSLATION TREAT TRIGGER TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA TRIM TRIMBLANKS TRIM_ARRAY TRUNCATE TRUNCATECOLUMNS TRUSTED TYPE TYPES UESCAPE UNBOUNDED UNCOMMITTED UNCONDITIONAL UNDER UNENCRYPTED UNKNOWN UNLIMITED UNLINK UNLISTEN UNLOAD UNLOGGED UNMATCHED UNNAMED UNRESTRICTED UNSAFE UNSIGNED UNTIL UNTYPED UPDATE UPPER URI USE_ANY_ROLE USAGE USE USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA UTF16 UTF16BE UTF16LE UTF32 UTF8 VACUUM VALID VALIDATE VALIDATOR VALUE VALUE_OF VALUES VAR_POP VAR_SAMP VARBINARY VARBYTE VARCHAR VARIADIC VARYING VERSION VERSIONING VIA VIEW VIEWS VOLATILE WALLET WAREHOUSE WEEK WEEKDAY WHENEVER WHITESPACE WIDTH_BUCKET WINDOW WORK WRAPPER WRITE XGBOOST XML XMLAGG XMLATTRIBUTES XMLBINARY XMLCAST XMLCOMMENT XMLCONCAT XMLDECLARATION XMLDOCUMENT XMLELEMENT XMLEXISTS XMLFOREST XMLITERATE XMLNAMESPACES XMLPARSE XMLPI XMLQUERY XMLROOT XMLSCHEMA XMLSERIALIZE XMLTABLE XMLTEXT XMLVALIDATE YAML YEAR YES ZONE ZSTD""" sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_snowflake.py000066400000000000000000011234571503426445100236140ustar00rootroot00000000000000"""The Snowflake dialect. Inherits from ANSI. 
Based on https://docs.snowflake.com/en/sql-reference-commands.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralSegment, Matchable, MultiStringParser, Nothing, OneOf, OptionallyBracketed, OptionallyDelimited, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_snowflake_keywords import ( snowflake_reserved_keywords, snowflake_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") snowflake_dialect = ansi_dialect.copy_as( "snowflake", formatted_name="Snowflake", docstring="""**Default Casing**: ``UPPERCASE`` **Quotes**: String Literals: ``''``, Identifiers: ``""`` The dialect for `Snowflake `_, which has much of its syntax inherited from :ref:`postgres_dialect_ref`.""", ) snowflake_dialect.patch_lexer_matchers( [ # In snowflake, a double single quote resolves as a single quote in the string. # https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants RegexLexer( "single_quote", r"'([^'\\]|\\.|'')*'", CodeSegment, ), RegexLexer( "inline_comment", r"(--|#|//)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--", "#", "//")}, ), ] ) snowflake_dialect.insert_lexer_matchers( [ # Keyword assigner needed for keyword functions. StringLexer("parameter_assigner", "=>", CodeSegment), StringLexer("right_arrow", "->", CodeSegment), RegexLexer("stage_path", r"(?:@[^\s;)]+|'@[^']+')", CodeSegment), # Column selector # https://docs.snowflake.com/en/sql-reference/sql/select.html#parameters RegexLexer("column_selector", r"\$[0-9]+", CodeSegment), RegexLexer( "dollar_quote", r"\$\$.*\$\$", CodeSegment, ), RegexLexer( "dollar_literal", r"[$][a-zA-Z0-9_.]*", CodeSegment, ), RegexLexer( "inline_dollar_sign", r"[a-zA-Z_][a-zA-Z0-9_$]*\$[a-zA-Z0-9_$]*", CodeSegment, ), RegexLexer( # For use with https://docs.snowflake.com/en/sql-reference/sql/get.html # Accepts unquoted file paths that begin file://. # Unquoted file paths cannot include special characters. 
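            # e.g. file:///tmp/data/load_01.csv or file://C:/temp/load_01.csv
            # (illustrative paths which the regex below should match as a
            # single token).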
"unquoted_file_path", r"file://(?:[a-zA-Z]+:|/)+(?:[0-9a-zA-Z\\/_*?-]+)(?:\.[0-9a-zA-Z]+)?", CodeSegment, ), StringLexer("question_mark", "?", CodeSegment), StringLexer("exclude_bracket_open", "{-", CodeSegment), StringLexer("exclude_bracket_close", "-}", CodeSegment), ], before="like_operator", ) # Check for ":=" operator before the equals operator to correctly parse walrus operator # for Snowflake scripting block statements # https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables snowflake_dialect.insert_lexer_matchers( [ StringLexer("walrus_operator", ":=", CodeSegment), ], before="equals", ) snowflake_dialect.bracket_sets("bracket_pairs").add( ("exclude", "StartExcludeBracketSegment", "EndExcludeBracketSegment", True) ) # Set the bare functions snowflake_dialect.sets("bare_functions").clear() snowflake_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "LOCALTIME", "LOCALTIMESTAMP", ] ) # Add all Snowflake compression types snowflake_dialect.sets("compression_types").clear() snowflake_dialect.sets("compression_types").update( [ "AUTO", "AUTO_DETECT", "GZIP", "BZ2", "BROTLI", "ZSTD", "DEFLATE", "RAW_DEFLATE", "LZO", "NONE", "SNAPPY", ], ) # Add all Snowflake supported file types snowflake_dialect.sets("files_types").clear() snowflake_dialect.sets("files_types").update( ["CSV", "JSON", "AVRO", "ORC", "PARQUET", "XML"], ) snowflake_dialect.sets("warehouse_types").clear() snowflake_dialect.sets("warehouse_types").update( [ "STANDARD", "SNOWPARK-OPTIMIZED", ], ) snowflake_dialect.sets("warehouse_sizes").clear() snowflake_dialect.sets("warehouse_sizes").update( [ "XSMALL", "SMALL", "MEDIUM", "LARGE", "XLARGE", "XXLARGE", "X2LARGE", "XXXLARGE", "X3LARGE", "X4LARGE", "X5LARGE", "X6LARGE", "X-SMALL", "X-LARGE", "2X-LARGE", "3X-LARGE", "4X-LARGE", "5X-LARGE", "6X-LARGE", ], ) snowflake_dialect.sets("warehouse_scaling_policies").clear() snowflake_dialect.sets("warehouse_scaling_policies").update( [ "STANDARD", "ECONOMY", ], ) snowflake_dialect.sets("refreshmode_types").clear() snowflake_dialect.sets("refreshmode_types").update( ["AUTO", "FULL", "INCREMENTAL"], ) snowflake_dialect.sets("initialize_types").clear() snowflake_dialect.sets("initialize_types").update( ["ON_CREATE", "ON_SCHEDULE"], ) snowflake_dialect.add( # In snowflake, these are case sensitive even though they're not quoted # so they need a different `name` and `type` so they're not picked up # by other rules. ParameterAssignerSegment=StringParser( "=>", SymbolSegment, type="parameter_assigner" ), LambdaArrowSegment=StringParser("->", SymbolSegment, type="lambda_arrow"), FunctionAssignerSegment=StringParser("->", SymbolSegment, type="function_assigner"), # Walrus operator for Snowflake scripting block statements WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), QuotedStarSegment=StringParser( "'*'", IdentifierSegment, type="quoted_star", trim_chars=("'",), ), # Any identifier is valid as a semi-structured element in Snowflake # as long as it's not a reserved keyword # https://docs.snowflake.com/en/sql-reference/identifiers-syntax NakedSemiStructuredElementSegment=RegexParser( r"[a-zA-Z_][a-zA-Z0-9_$]*", CodeSegment, type="semi_structured_element", ), QuotedSemiStructuredElementSegment=TypedParser( "double_quote", CodeSegment, type="semi_structured_element", ), # Normally, double quotes can't be used for literals. But in a few # cases they can (e.g. Tags, Comments). 
DoubleQuotedLiteralSegment=TypedParser( "double_quote", LiteralSegment, type="quoted_literal" ), ColumnIndexIdentifierSegment=RegexParser( r"\$[0-9]+", IdentifierSegment, type="column_index_identifier_segment", ), LocalVariableNameSegment=RegexParser( r"[a-zA-Z0-9_]*", CodeSegment, type="variable", ), SnowflakeVariableNameSegment=RegexParser( r":[a-zA-Z0-9_]*", CodeSegment, type="variable", ), ReferencedVariableNameSegment=RegexParser( r"\$[A-Z_][A-Z0-9_]*", CodeSegment, type="variable", trim_chars=("$",), ), # We use a RegexParser instead of keywords as some (those with dashes) require # quotes: WarehouseType=OneOf( MultiStringParser( [ type for type in snowflake_dialect.sets("warehouse_types") if "-" not in type ], CodeSegment, type="warehouse_size", ), MultiStringParser( [f"'{type}'" for type in snowflake_dialect.sets("warehouse_types")], CodeSegment, type="warehouse_size", ), ), WarehouseSize=OneOf( MultiStringParser( [ size for size in snowflake_dialect.sets("warehouse_sizes") if "-" not in size ], CodeSegment, type="warehouse_size", ), MultiStringParser( [f"'{size}'" for size in snowflake_dialect.sets("warehouse_sizes")], CodeSegment, type="warehouse_size", ), ), RefreshModeType=OneOf( MultiStringParser( snowflake_dialect.sets("refreshmode_types"), KeywordSegment, ) ), InitializeType=OneOf( MultiStringParser( snowflake_dialect.sets("initialize_types"), KeywordSegment, type="initialize_type", ) ), CompressionType=OneOf( MultiStringParser( snowflake_dialect.sets("compression_types"), KeywordSegment, type="compression_type", ), MultiStringParser( [ f"'{compression}'" for compression in snowflake_dialect.sets("compression_types") ], KeywordSegment, type="compression_type", ), ), ScalingPolicy=OneOf( MultiStringParser( snowflake_dialect.sets("warehouse_scaling_policies"), KeywordSegment, type="scaling_policy", ), MultiStringParser( [ f"'{scaling_policy}'" for scaling_policy in snowflake_dialect.sets( "warehouse_scaling_policies" ) ], KeywordSegment, type="scaling_policy", ), ), ValidationModeOptionSegment=RegexParser( r"'?RETURN_(?:\d+_ROWS|ERRORS|ALL_ERRORS)'?", CodeSegment, type="validation_mode_option", ), CopyOptionOnErrorSegment=RegexParser( r"'?CONTINUE'?|'?SKIP_FILE(?:_[0-9]+%?)?'?|'?ABORT_STATEMENT'?", LiteralSegment, type="copy_on_error_option", ), DynamicTableLagIntervalSegment=RegexParser( r"DYNAMIC|'.*'", LiteralSegment, type="dynamic_table_lag_interval_segment", ), DoubleQuotedUDFBody=TypedParser( "double_quote", CodeSegment, type="udf_body", trim_chars=('"',), ), SingleQuotedUDFBody=TypedParser( "single_quote", CodeSegment, type="udf_body", trim_chars=("'",), ), DollarQuotedUDFBody=TypedParser( "dollar_quote", CodeSegment, type="udf_body", trim_chars=("$",), ), StagePath=RegexParser( r"(?:@[^\s;)]+|'@[^']+')", IdentifierSegment, type="stage_path", ), S3Path=RegexParser( # https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html r"'s3://.*'", CodeSegment, type="bucket_path", ), GCSPath=RegexParser( # https://cloud.google.com/storage/docs/naming-buckets r"'gcs://.*", CodeSegment, type="bucket_path", ), AzureBlobStoragePath=RegexParser( # https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules#microsoftstorage r"'azure://.*", CodeSegment, type="bucket_path", ), UnquotedFilePath=TypedParser( "unquoted_file_path", CodeSegment, type="unquoted_file_path", ), SnowflakeEncryptionOption=MultiStringParser( ["'SNOWFLAKE_FULL'", "'SNOWFLAKE_SSE'"], CodeSegment, type="stage_encryption_option", ), 
S3EncryptionOption=MultiStringParser( ["'AWS_CSE'", "'AWS_SSE_S3'", "'AWS_SSE_KMS'", "'NONE'"], CodeSegment, type="stage_encryption_option", ), GCSEncryptionOption=MultiStringParser( ["'GCS_SSE_KMS'", "'NONE'"], CodeSegment, type="stage_encryption_option", ), AzureBlobStorageEncryptionOption=StringParser( "'AZURE_CSE'", CodeSegment, type="stage_encryption_option", ), FileType=OneOf( MultiStringParser( snowflake_dialect.sets("file_types"), CodeSegment, type="file_type", ), MultiStringParser( [f"'{file_type}'" for file_type in snowflake_dialect.sets("file_types")], CodeSegment, type="file_type", ), ), IntegerSegment=RegexParser( # An unquoted integer that can be passed as an argument to Snowflake functions. r"[0-9]+", LiteralSegment, type="integer_literal", ), SystemFunctionName=RegexParser( r"SYSTEM\$([A-Za-z0-9_]*)", CodeSegment, type="system_function_name", ), GroupByContentsGrammar=Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), terminators=[ "ORDER", "LIMIT", "FETCH", "OFFSET", "HAVING", "QUALIFY", "WINDOW", ], ), LimitLiteralGrammar=OneOf( Ref("NumericLiteralSegment"), "NULL", # '' and $$$$ are allowed as alternatives to NULL. Ref("QuotedLiteralSegment"), ), DynamicTableTargetLagSegment=OneOf( Ref("DynamicTableLagIntervalSegment"), "DOWNSTREAM", ), StartExcludeBracketSegment=StringParser( "{-", SymbolSegment, type="start_exclude_bracket" ), EndExcludeBracketSegment=StringParser( "-}", SymbolSegment, type="end_exclude_bracket" ), QuestionMarkSegment=StringParser("?", SymbolSegment, type="question_mark"), CaretSegment=StringParser("^", SymbolSegment, type="caret"), DollarSegment=StringParser("$", SymbolSegment, type="dollar"), PatternQuantifierGrammar=Sequence( OneOf( Ref("PositiveSegment"), Ref("StarSegment"), Ref("QuestionMarkSegment"), Bracketed( OneOf( Ref("NumericLiteralSegment"), Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), ), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), ), bracket_type="curly", bracket_pairs_set="bracket_pairs", ), ), # To put a quantifier into “reluctant mode”. Ref("QuestionMarkSegment", optional=True), allow_gaps=False, ), PatternSymbolGrammar=Sequence( Ref("SingleIdentifierGrammar"), Ref("PatternQuantifierGrammar", optional=True), allow_gaps=False, ), PatternOperatorGrammar=OneOf( Ref("PatternSymbolGrammar"), Sequence( OneOf( Bracketed( OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), bracket_type="exclude", bracket_pairs_set="bracket_pairs", ), Bracketed( OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), ), Sequence( "PERMUTE", Bracketed( Delimited( Ref("PatternSymbolGrammar"), ), ), ), ), # Operators can also be followed by a quantifier. 
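            # e.g. `PATTERN (PERMUTE(A, B)+ {- C -}?)` in MATCH_RECOGNIZE
            # (illustrative; A, B and C are pattern symbols).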
Ref("PatternQuantifierGrammar", optional=True), allow_gaps=False, ), ), ContextHeadersGrammar=OneOf( "CURRENT_ACCOUNT", "CURRENT_CLIENT", "CURRENT_DATABASE", "CURRENT_DATE", "CURRENT_IP_ADDRESS", "CURRENT_REGION", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_SCHEMAS", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSACTION", "CURRENT_USER", "CURRENT_VERSION", "CURRENT_WAREHOUSE", "LAST_QUERY_ID", "LAST_TRANSACTION", "LOCALTIME", "LOCALTIMESTAMP", ), ExceptionCodeSegment=Sequence( Ref("NegativeSegment"), RegexParser( r"20[0-9]{3}", LiteralSegment, type="exception_code", ), ), # https://docs.snowflake.com/en/sql-reference/sql/create-table-constraint InlineConstraintGrammar=AnySetOf( Sequence(Ref.keyword("NOT", optional=True), "ENFORCED"), Sequence(Ref.keyword("NOT", optional=True), "DEFERRABLE"), Sequence("INITIALLY", OneOf("DEFERRED", "IMMEDIATE")), OneOf("ENABLE", "DISABLE"), OneOf("VALIDATE", "NOVALIDATE"), OneOf("RELY", "NORELY"), ), ForeignKeyConstraintGrammar=AnySetOf( Sequence("MATCH", OneOf("FULL", "SIMPLE", "PARTIAL")), Sequence( AnyNumberOf( "ON", OneOf("UPDATE", "DELETE"), OneOf( "CASCADE", Sequence(Ref.keyword("SET"), Ref.keyword("NULL")), Sequence(Ref.keyword("SET"), Ref.keyword("DEFAULT")), "RESTRICT", Sequence("NO", "ACTION"), ), ) ), ), AlterOrReplaceGrammar=OneOf(Sequence("OR", "ALTER"), Ref("OrReplaceGrammar")), ) snowflake_dialect.replace( NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( # See https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html r"[a-zA-Z_][a-zA-Z0-9_$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ReferencedVariableNameSegment"), ] ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), PreTableFunctionKeywordsGrammar=OneOf(Ref("LateralKeywordSegment")), FunctionContentsExpressionGrammar=OneOf( Ref("DatetimeUnitSegment"), Ref("NamedParameterExpressionSegment"), Ref("ReferencedVariableNameSegment"), Ref("LambdaExpressionSegment"), Sequence( Ref("ExpressionSegment"), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), ), ), JoinLikeClauseGrammar=Sequence( AnySetOf( Ref("MatchRecognizeClauseSegment"), Ref("ChangesClauseSegment"), Ref("ConnectByClauseSegment"), Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), Ref("FromPivotExpressionSegment"), AnyNumberOf(Ref("FromUnpivotExpressionSegment")), Ref("SamplingExpressionSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("ColumnIndexIdentifierSegment"), Ref("ReferencedVariableNameSegment"), Ref("StagePath"), Sequence( "IDENTIFIER", Bracketed( OneOf( Ref("SingleQuotedIdentifierSegment"), Ref("ReferencedVariableNameSegment"), Ref("BindVariableSegment"), ), ), ), ), PostFunctionGrammar=Sequence( Ref("WithinGroupClauseSegment", optional=True), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), Ref("OverClauseSegment", optional=True), ), TemporaryGrammar=Sequence( OneOf("LOCAL", "GLOBAL", optional=True), OneOf("TEMP", "TEMPORARY", optional=True), Sequence("VOLATILE", optional=True), optional=True, ), BaseExpressionElementGrammar=ansi_dialect.get_grammar( 
"BaseExpressionElementGrammar" ) .copy( insert=[ # Allow use of CONNECT_BY_ROOT pseudo-columns. # https://docs.snowflake.com/en/sql-reference/constructs/connect-by.html#:~:text=Snowflake%20supports%20the%20CONNECT_BY_ROOT,the%20Examples%20section%20below. Sequence("CONNECT_BY_ROOT", Ref("ColumnReferenceSegment")), Sequence("PRIOR", Ref("ColumnReferenceSegment")), ], before=Ref("LiteralGrammar"), ) .copy( insert=[ Ref("SnowflakeVariableNameSegment"), ], before=Ref("LiteralGrammar"), ), QuotedLiteralSegment=OneOf( # https://docs.snowflake.com/en/sql-reference/data-types-text.html#string-constants TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), LikeGrammar=OneOf( # https://docs.snowflake.com/en/sql-reference/functions/like.html Sequence("LIKE", OneOf("ALL", "ANY", optional=True)), "RLIKE", Sequence("ILIKE", Ref.keyword("ANY", optional=True)), "REGEXP", ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "FETCH", "OFFSET", Ref("SetOperatorSegment"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", "FETCH", "OFFSET", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", "FETCH", "OFFSET", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", "FETCH", "OFFSET", "MEASURES", ), TrimParametersGrammar=Nothing(), GroupByClauseTerminatorGrammar=OneOf( "ORDER", "LIMIT", "FETCH", "OFFSET", "HAVING", "QUALIFY", "WINDOW" ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "QUALIFY", "WINDOW", "FETCH", "OFFSET", ), NonStandardJoinTypeKeywordsGrammar=OneOf("ASOF"), UnconditionalJoinKeywordsGrammar=OneOf( Ref("NaturalJoinKeywordsGrammar"), Ref("UnconditionalCrossJoinKeywordsGrammar"), Ref("HorizontalJoinKeywordsGrammar"), Ref("NonStandardJoinTypeKeywordsGrammar"), ), FunctionParameterGrammar=Sequence( OneOf( Ref("DatatypeSegment"), Sequence(Ref("ParameterNameSegment"), Ref("DatatypeSegment")), ), Sequence( "DEFAULT", Ref("ExpressionSegment"), optional=True, ), ), CollateGrammar=Sequence("COLLATE", Ref("CollationReferenceSegment")), ) # Add all Snowflake keywords snowflake_dialect.sets("unreserved_keywords").clear() snowflake_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", snowflake_unreserved_keywords ) snowflake_dialect.sets("reserved_keywords").clear() snowflake_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", snowflake_reserved_keywords ) # Add datetime units and their aliases from # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts snowflake_dialect.sets("datetime_units").clear() snowflake_dialect.sets("datetime_units").update( [ "YEAR", "Y", "YY", "YYY", "YYYY", "YR", "YEARS", "YRS", "MONTH", "MM", "MON", "MONS", "MONTHS", "DAY", "D", "DD", "DAYS", "DAYOFMONTH", "DAYOFWEEK", "WEEKDAY", "DOW", "DW", "DAYOFWEEKISO", "WEEKDAY_ISO", "DOW_ISO", "DW_ISO", "DAYOFYEAR", "YEARDAY", "DOY", "DY", "WEEK", "W", "WK", "WEEKOFYEAR", "WOY", "WY", "WEEKISO", "WEEK_ISO", "WEEKOFYEARISO", "WEEKOFYEAR_ISO", "QUARTER", "Q", "QTR", "QTRS", "QUARTERS", "YEAROFWEEK", "YEAROFWEEKISO", "HOUR", "H", "HH", "HR", "HOURS", 
"HRS", "MINUTE", "M", "MI", "MIN", "MINUTES", "MINS", "SECOND", "S", "SEC", "SECONDS", "SECS", "MILLISECOND", "MS", "MSEC", "MILLISECONDS", "MICROSECOND", "US", "USEC", "MICROSECONDS", "NANOSECOND", "NS", "NSEC", "NANOSEC", "NSECOND", "NANOSECONDS", "NANOSECS", "NSECONDS", "EPOCH_SECOND", "EPOCH", "EPOCH_SECONDS", "EPOCH_MILLISECOND", "EPOCH_MILLISECONDS", "EPOCH_MICROSECOND", "EPOCH_MICROSECONDS", "EPOCH_NANOSECOND", "EPOCH_NANOSECONDS", "TIMEZONE_HOUR", "TZH", "TIMEZONE_MINUTE", "TZM", ] ) class FunctionNameSegment(ansi.FunctionNameSegment): """Function name, including any prefix bits, e.g. project or schema. Overriding FunctionNameSegment to support Snowflake's IDENTIFIER pseudo-function. """ type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), # Snowflake's IDENTIFIER pseudo-function # https://docs.snowflake.com/en/sql-reference/identifier-literal.html Sequence( "IDENTIFIER", Bracketed( OneOf( Ref("SingleQuotedIdentifierSegment"), Ref("ReferencedVariableNameSegment"), ), ), ), ), allow_gaps=False, ) class DatabaseRoleReferenceSegment(ansi.ObjectReferenceSegment): """Database role reference ([database_name.]rolename). See https://docs.snowflake.com/en/sql-reference/sql/create-database-role (the item of the "Required parameters" section). """ type = "database_role_reference" match_grammar: Matchable = OneOf( Sequence( Sequence(Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True), Ref("SingleIdentifierGrammar"), ), ) class ExternalVolumeReferenceSegment(ansi.ObjectReferenceSegment): """External Volume reference.""" type = "external_volume_reference" class DropExternalVolumeStatementSegment(BaseSegment): """Drop External Volume Statement. As per https://docs.snowflake.com/en/sql-reference/sql/drop-external-volume """ type = "drop_external_volume_statement" match_grammar = Sequence( "DROP", "EXTERNAL", "VOLUME", Ref("IfExistsGrammar", optional=True), Ref("ExternalVolumeReferenceSegment"), ) class CreateExternalVolumeStatementSegment(BaseSegment): """Create External Volume Statement. 
As per https://docs.snowflake.com/en/sql-reference/sql/create-external-volume """ type = "create_external_volume_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "EXTERNAL", "VOLUME", Ref("IfNotExistsGrammar", optional=True), Ref("ExternalVolumeReferenceSegment"), "STORAGE_LOCATIONS", Ref("EqualsSegment"), Bracketed( Delimited( Bracketed( "NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), AnySetOf( Sequence( "STORAGE_PROVIDER", Ref("EqualsSegment"), OneOf("S3", "AZURE", "GCS", Ref("QuotedLiteralSegment")), ), Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_BASE_URL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_EXTERNAL_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( "TYPE", Ref("EqualsSegment"), OneOf( Ref("S3EncryptionOption"), Ref("GCSEncryptionOption"), ), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), ), ) ), ), AnySetOf( Sequence( "ALLOW_WRITES", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Ref("CommentEqualsClauseSegment"), optional=True, ), ) class AlterExternalVolumeStatementSegment(BaseSegment): """Alter External Volume Statement. As per https://docs.snowflake.com/en/sql-reference/sql/alter-external-volume """ type = "alter_external_volume_statement" match_grammar = Sequence( "ALTER", "EXTERNAL", "VOLUME", Ref("IfExistsGrammar", optional=True), Ref("ExternalVolumeReferenceSegment"), OneOf( Sequence( "ADD", "STORAGE_LOCATION", Ref("EqualsSegment"), Bracketed( "NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), AnySetOf( Sequence( "STORAGE_PROVIDER", Ref("EqualsSegment"), OneOf("S3", "AZURE", "GCS", Ref("QuotedLiteralSegment")), ), Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_BASE_URL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_EXTERNAL_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( "TYPE", Ref("EqualsSegment"), OneOf( Ref("S3EncryptionOption"), Ref("GCSEncryptionOption"), ), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), ), ), ), Sequence( "REMOVE", "STORAGE_LOCATION", Ref("QuotedLiteralSegment"), ), Sequence( "SET", "ALLOW_WRITES", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "SET", Ref("CommentEqualsClauseSegment"), ), ), ) class ConnectByClauseSegment(BaseSegment): """A `CONNECT BY` clause. https://docs.snowflake.com/en/sql-reference/constructs/connect-by.html """ type = "connectby_clause" match_grammar = OneOf( Sequence( "START", "WITH", Ref("ExpressionSegment"), "CONNECT", "BY", Delimited( OptionallyBracketed(Ref("ExpressionSegment")), ), ), Sequence( "CONNECT", "BY", OptionallyBracketed(Ref("ExpressionSegment")), Sequence( "START", "WITH", Ref("ExpressionSegment"), optional=True, ), ), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """A `GROUP BY` clause like in `SELECT`. 
Snowflake supports Cube, Rollup, and Grouping Sets https://docs.snowflake.com/en/sql-reference/constructs/group-by.html """ match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, OneOf( Sequence( OneOf("CUBE", "ROLLUP", Sequence("GROUPING", "SETS")), Bracketed( Ref("GroupByContentsGrammar"), ), ), "ALL", Ref("GroupByContentsGrammar"), ), Dedent, ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause like in `INSERT`.""" match_grammar = Sequence( "VALUES", Delimited( Bracketed( Delimited( # DEFAULT and NULL keywords used in # INSERT INTO statement. "DEFAULT", "NULL", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), ), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. https://docs.snowflake.com/en/sql-reference/sql/insert.html https://docs.snowflake.com/en/sql-reference/sql/insert-multi-table.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", Ref.keyword("OVERWRITE", optional=True), OneOf( # Single table INSERT INTO. Sequence( "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("SelectableGrammar"), ), # Unconditional multi-table INSERT INTO. Sequence( "ALL", AnyNumberOf( Sequence( "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment", optional=True), ), min_times=1, ), Ref("SelectStatementSegment"), ), # Conditional multi-table INSERT INTO. Sequence( OneOf( "FIRST", "ALL", ), AnyNumberOf( Sequence( "WHEN", Ref("ExpressionSegment"), "THEN", AnyNumberOf( Sequence( "INTO", Ref("TableReferenceSegment"), Ref( "BracketedColumnReferenceListGrammar", optional=True ), Ref("ValuesClauseSegment", optional=True), ), min_times=1, ), ), min_times=1, ), Sequence( "ELSE", "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment", optional=True), optional=True, ), Ref("SelectStatementSegment"), ), ), ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement.""" match_grammar = Sequence( "AS", Ref("QuotedLiteralSegment"), Sequence( "LANGUAGE", Ref("NakedIdentifierSegment"), optional=True, ), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( # NOTE: The Scripting Block segment must be tried before # we get to the transaction statement (from the ansi dialect) # because they both start with BEGIN. 
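# e.g. "BEGIN SELECT 1; END;" is a scripting block, whereas a bare # "BEGIN;" (or "BEGIN WORK;") starts a transaction (illustrative).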
insert=[ Ref("ScriptingBlockStatementSegment"), ], before=Ref("TransactionStatementSegment"), ).copy( insert=[ Ref("AccessStatementSegment"), Ref("CreateStatementSegment"), Ref("CreateTaskSegment"), Ref("CreateUserSegment"), Ref("CreateCloneStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("ScriptingLetStatementSegment"), Ref("ScriptingDeclareStatementSegment"), Ref("ReturnStatementSegment"), Ref("ShowStatementSegment"), Ref("AlterAccountStatementSegment"), Ref("AlterUserStatementSegment"), Ref("AlterSessionStatementSegment"), Ref("AlterTaskStatementSegment"), Ref("SetAssignmentStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("MergeStatementSegment"), Ref("CopyIntoTableStatementSegment"), Ref("CopyIntoLocationStatementSegment"), Ref("CopyFilesIntoLocationStatementSegment"), Ref("FormatTypeOptions"), Ref("AlterWarehouseStatementSegment"), Ref("AlterShareStatementSegment"), Ref("CreateExternalTableSegment"), Ref("AlterExternalTableStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("CreateFunctionStatementSegment"), Ref("AlterFunctionStatementSegment"), Ref("CreateExternalFunctionStatementSegment"), Ref("CreateStageSegment"), Ref("AlterStageSegment"), Ref("CreateStreamStatementSegment"), Ref("CreateStreamlitStatementSegment"), Ref("CreateCortexSearchServiceStatementSegment"), Ref("AlterStreamStatementSegment"), Ref("AlterStreamlitStatementSegment"), Ref("AlterCortexSearchServiceStatementSegment"), Ref("UnsetStatementSegment"), Ref("UndropStatementSegment"), Ref("CommentStatementSegment"), Ref("CallStatementSegment"), Ref("AlterViewStatementSegment"), Ref("AlterMaterializedViewStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("DropExternalTableStatementSegment"), Ref("DropMaterializedViewStatementSegment"), Ref("DropObjectStatementSegment"), Ref("CreateFileFormatSegment"), Ref("AlterFileFormatSegment"), Ref("AlterPipeSegment"), Ref("ListStatementSegment"), Ref("GetStatementSegment"), Ref("PutStatementSegment"), Ref("RemoveStatementSegment"), Ref("CreateDatabaseFromShareStatementSegment"), Ref("CreateDatabaseRoleStatementSegment"), Ref("AlterRoleStatementSegment"), Ref("AlterStorageIntegrationSegment"), Ref("ExecuteImmediateClauseSegment"), Ref("ExecuteTaskClauseSegment"), Ref("CreateResourceMonitorStatementSegment"), Ref("AlterResourceMonitorStatementSegment"), Ref("CreateSequenceStatementSegment"), Ref("AlterSequenceStatementSegment"), Ref("AlterDatabaseSegment"), Ref("AlterMaskingPolicySegment"), Ref("AlterNetworkPolicyStatementSegment"), Ref("CreateExternalVolumeStatementSegment"), Ref("DropExternalVolumeStatementSegment"), Ref("AlterExternalVolumeStatementSegment"), Ref("ForInLoopSegment"), Ref("CreateEventTableStatementSegment"), Ref("CreatePasswordPolicyStatementSegment"), Ref("AlterPasswordPolicyStatementSegment"), Ref("DropPasswordPolicyStatementSegment"), Ref("CreateRowAccessPolicyStatementSegment"), Ref("AlterRowAccessPolicyStatmentSegment"), Ref("AlterTagStatementSegment"), Ref("ExceptionBlockStatementSegment"), Ref("DropDynamicTableSegment"), Ref("DropIcebergTableStatementSegment"), Ref("CreateAuthenticationPolicySegment"), Ref("DropResourceMonitorStatementSegment"), ], remove=[ Ref("CreateIndexStatementSegment"), Ref("DropIndexStatementSegment"), ], ) class SetAssignmentStatementSegment(BaseSegment): """A `SET` statement. 
https://docs.snowflake.com/en/sql-reference/sql/set.html """ type = "set_statement" match_grammar = OneOf( Sequence( "SET", Ref("LocalVariableNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), Sequence( "SET", Bracketed(Delimited(Ref("LocalVariableNameSegment"))), Ref("EqualsSegment"), Bracketed( Delimited( Ref("ExpressionSegment"), ), ), ), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://docs.snowflake.com/en/sql-reference/sql/call.html """ type = "call_segment" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class WithinGroupClauseSegment(BaseSegment): """A WITHIN GROUP clause for window functions. https://docs.snowflake.com/en/sql-reference/functions/listagg.html. https://docs.snowflake.com/en/sql-reference/functions/array_agg.html. """ type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed( Ref("OrderByClauseSegment", optional=True), parse_mode=ParseMode.GREEDY ), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. https://docs.snowflake.com/en/sql-reference/constructs/from """ type = "from_expression_element" match_grammar = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), Ref("ChangesClauseSegment"), Ref("JoinLikeClauseGrammar"), "CROSS", ), optional=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True), Ref("SamplingExpressionSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) class MatchConditionSegment(ansi.MatchConditionSegment): """A match condition for an ASOF join.""" type = "match_condition" match_grammar = Sequence("MATCH_CONDITION", Bracketed(Ref("ExpressionSegment"))) class PatternSegment(BaseSegment): """A `PATTERN` expression. https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html """ type = "pattern_expression" match_grammar = Sequence( # https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html#pattern-specifying-the-pattern-to-match Ref("CaretSegment", optional=True), OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), Ref("DollarSegment", optional=True), ) class MatchRecognizeClauseSegment(BaseSegment): """A `MATCH_RECOGNIZE` clause. https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html """ type = "match_recognize_clause" match_grammar = Sequence( "MATCH_RECOGNIZE", Bracketed( Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Sequence( "MEASURES", Delimited( Sequence( # The edges of the window frame can be specified # by using either RUNNING or FINAL semantics.
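# e.g. MEASURES FINAL LAST(price) AS final_price, RUNNING COUNT(*) AS cnt # (illustrative measure expressions).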
# https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html#expressions-in-define-and-measures-clauses OneOf( "FINAL", "RUNNING", optional=True, ), Ref("ExpressionSegment"), Ref("AliasExpressionSegment"), ), ), optional=True, ), OneOf( Sequence( "ONE", "ROW", "PER", "MATCH", ), Sequence( "ALL", "ROWS", "PER", "MATCH", OneOf( Sequence( "SHOW", "EMPTY", "MATCHES", ), Sequence( "OMIT", "EMPTY", "MATCHES", ), Sequence( "WITH", "UNMATCHED", "ROWS", ), optional=True, ), ), optional=True, ), Sequence( "AFTER", "MATCH", "SKIP", OneOf( Sequence( "PAST", "LAST", "ROW", ), Sequence( "TO", "NEXT", "ROW", ), Sequence( "TO", OneOf("FIRST", "LAST", optional=True), Ref("SingleIdentifierGrammar"), ), ), optional=True, ), "PATTERN", Bracketed( Ref("PatternSegment"), ), "DEFINE", Delimited( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("ExpressionSegment"), ), ), ), ) class ChangesClauseSegment(BaseSegment): """A `CHANGES` clause. https://docs.snowflake.com/en/sql-reference/constructs/changes.html """ type = "changes_clause" match_grammar = Sequence( "CHANGES", Bracketed( "INFORMATION", Ref("ParameterAssignerSegment"), OneOf("DEFAULT", "APPEND_ONLY"), parse_mode=ParseMode.GREEDY, ), OneOf( Sequence( "AT", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT", "STREAM"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), parse_mode=ParseMode.GREEDY, ), ), Sequence( "BEFORE", Bracketed( "STATEMENT", Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), parse_mode=ParseMode.GREEDY, ), ), ), Sequence( "END", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), parse_mode=ParseMode.GREEDY, ), optional=True, ), ) class FromAtExpressionSegment(BaseSegment): """An AT expression.""" type = "from_at_expression" match_grammar = Sequence( "AT", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), ), ) class FromBeforeExpressionSegment(BaseSegment): """A BEFORE expression.""" type = "from_before_expression" match_grammar = Sequence( "BEFORE", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), parse_mode=ParseMode.GREEDY, ), ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression. 
https://docs.snowflake.com/en/sql-reference/constructs/pivot.html """ type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Ref("FunctionSegment"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed( OneOf( Delimited(Ref("LiteralGrammar")), Sequence("ANY", Ref("OrderByClauseSegment", optional=True)), Ref("SelectStatementSegment"), ) ), Sequence( "DEFAULT", "ON", "NULL", Bracketed(Ref("LiteralGrammar")), optional=True ), ), ) class FromUnpivotExpressionSegment(BaseSegment): """An UNPIVOT expression.""" type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Sequence(OneOf("INCLUDE", "EXCLUDE"), "NULLS", optional=True), Bracketed( Ref("SingleIdentifierGrammar"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ), ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A sampling expression.""" match_grammar = Sequence( OneOf("SAMPLE", "TABLESAMPLE"), OneOf("BERNOULLI", "ROW", "SYSTEM", "BLOCK", optional=True), Bracketed( OneOf(Ref("NumericLiteralSegment"), Ref("ReferencedVariableNameSegment")), Ref.keyword("ROWS", optional=True), ), Sequence( OneOf("REPEATABLE", "SEED"), Bracketed(Ref("NumericLiteralSegment")), optional=True, ), ) class NamedParameterExpressionSegment(BaseSegment): """A keyword expression. e.g. 'input => custom_fields' """ type = "snowflake_keyword_expression" match_grammar = Sequence( Ref("ParameterNameSegment"), Ref("ParameterAssignerSegment"), OneOf( Ref("LiteralGrammar"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment. https://docs.snowflake.com/en/user-guide/semistructured-considerations.html """ type = "semi_structured_expression" match_grammar = Sequence( OneOf( # If a field is already a VARIANT, this could # be initiated by a colon or a dot. This is particularly # useful when a field is an ARRAY of objects. Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Ref("QuotedSemiStructuredElementSegment"), ), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( OneOf( # Can be delimited by dots or colons Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Ref("QuotedSemiStructuredElementSegment"), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, OneOf( Bracketed( Ref("ExpressionSegment"), ), Ref("ExpressionSegment"), ), Dedent, ) class SelectStatementSegment(ansi.SelectStatementSegment): """A snowflake `SELECT` statement including optional Qualify. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "select_statement" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) class SelectClauseElementSegment(ansi.SelectClauseElementSegment): """Inherit from ansi but also allow for Snowflake System Functions. 
https://docs.snowflake.com/en/sql-reference/functions-system """ match_grammar = ansi.SelectClauseElementSegment.match_grammar.copy( insert=[ Sequence( Ref("SystemFunctionName"), Bracketed(Delimited(Ref("LiteralGrammar"))), ) ], before=Ref("WildcardExpressionSegment"), ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Snowflake.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional Exclude or Rename clause Ref("ExcludeClauseSegment", optional=True), Ref("ReplaceClauseSegment", optional=True), Ref("RenameClauseSegment", optional=True), ] ) class ExcludeClauseSegment(BaseSegment): """A snowflake SELECT EXCLUDE clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_exclude_clause" match_grammar = Sequence( "EXCLUDE", OneOf( Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), Ref("SingleIdentifierGrammar"), ), ) class RenameClauseSegment(BaseSegment): """A snowflake SELECT RENAME clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_rename_clause" match_grammar = Sequence( "RENAME", OneOf( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("SingleIdentifierGrammar"), ), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("SingleIdentifierGrammar"), ) ) ), ), ) class ReplaceClauseSegment(BaseSegment): """A snowflake SELECT REPLACE clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_replace_clause" match_grammar = Sequence( "REPLACE", Bracketed( Delimited( Sequence( Ref("ExpressionSegment"), "AS", Ref("SingleIdentifierGrammar"), ) ) ), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns, specifically for Snowflake. https://docs.snowflake.com/en/sql-reference/constructs.html """ match_grammar = Sequence( OneOf("DISTINCT", "ALL", optional=True), # TOP N is unique to Snowflake, and we can optionally add DISTINCT/ALL in front # of it. Sequence("TOP", Ref("NumericLiteralSegment"), optional=True), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html If possible, please keep the order below the same as Snowflake's doc: """ match_grammar: Matchable = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Rename Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # Swap With Sequence( "SWAP", "WITH", Ref("TableReferenceSegment"), ), # searchOptimizationAction # N.B. Since SEARCH and OPTIMIZATION are unreserved keywords # we move this above AlterTableTableColumnActionSegment # in order to avoid matching these as columns. Sequence( OneOf( "ADD", "DROP", ), "SEARCH", "OPTIMIZATION", ), Ref("AlterTableClusteringActionSegment"), Ref("AlterTableConstraintActionSegment"), # @TODO: constraintAction # @TODO: extTableColumnAction # SET Table options # @TODO: Restrict the list of parameters supported per Snowflake doc. 
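# e.g. ALTER TABLE t1 SET DATA_RETENTION_TIME_IN_DAYS = 30 # (illustrative table name and parameter).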
Sequence( Ref.keyword("SET"), OneOf( Ref("ParameterNameSegment"), Ref.keyword("COMMENT"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), ), # @TODO: add more constraint actions Sequence( "DROP", Ref("PrimaryKeyGrammar"), ), Sequence( "ADD", Ref("PrimaryKeyGrammar"), Bracketed(Delimited(Ref("ColumnReferenceSegment"), optional=True)), ), Ref("AlterTableTableColumnActionSegment"), # UNSET Table options Sequence( Ref.keyword("UNSET"), Delimited( OneOf( Ref("ParameterNameSegment"), Ref.keyword("COMMENT"), ), ), ), Ref("DataGovernancePolicyTagActionSegment"), ), ) class DataGovernancePolicyTagActionSegment(BaseSegment): """The dataGovnPolicyTagAction segment for alter table parsing.""" type = "data_governance_policy_tag_action_segment" match_grammar = OneOf( Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "UNSET", Ref("TagEqualsSegment"), ), Sequence( "ADD", "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed( Delimited( Ref("ObjectReferenceSegment"), ), ), ), Sequence( "DROP", "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), Sequence( Ref("CommaSegment"), "ADD", "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed( Delimited( Ref("ObjectReferenceSegment"), ), ), optional=True, ), ), Sequence( "DROP", "ALL", "ROW", "ACCESS", "POLICIES", ), Sequence( "SET", "AGGREGATION", "POLICY", Ref("ObjectReferenceSegment"), Sequence( "ENTITY", "KEY", Bracketed( Delimited( Ref("ObjectReferenceSegment"), ), ), optional=True, ), Sequence( "FORCE", optional=True, ), ), Sequence( "UNSET", "AGGREGATION", "POLICY", ), Sequence( "SET", "JOIN", "POLICY", Ref("ObjectReferenceSegment"), Sequence( "FORCE", optional=True, ), ), Sequence( "UNSET", "JOIN", "POLICY", ), ) class AlterTableTableColumnActionSegment(BaseSegment): """ALTER TABLE `tableColumnAction` per defined in Snowflake's grammar. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html https://docs.snowflake.com/en/sql-reference/sql/alter-table-column.html If possible, please match the order of this sequence with what's defined in Snowflake's tableColumnAction grammar. 
""" type = "alter_table_table_column_action" match_grammar = OneOf( # Add Column Sequence( "ADD", Ref.keyword("COLUMN", optional=True), # @TODO: Cannot specify IF NOT EXISTS if also specifying # DEFAULT, AUTOINCREMENT, IDENTITY UNIQUE, PRIMARY KEY, FOREIGN KEY, AS Ref("IfNotExistsGrammar", optional=True), # Handle Multiple Columns Delimited( Sequence( Ref("IfNotExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence("NOT", "NULL", optional=True), OneOf( # Default & AS (virtual columns) Sequence( OneOf("DEFAULT", "AS"), Ref("ExpressionSegment"), ), # Auto-increment/identity column Sequence( OneOf( "AUTOINCREMENT", "IDENTITY", ), OneOf( # ( , ) Bracketed( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), # START INCREMENT Sequence( "START", Ref("NumericLiteralSegment"), "INCREMENT", Ref("NumericLiteralSegment"), ), optional=True, ), ), optional=True, ), # @TODO: Add support for `inlineConstraint` Sequence( Ref.keyword("WITH", optional=True), "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), optional=True, ), Ref("CommentClauseSegment", optional=True), ), ), ), # Rename column Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), # Alter/Modify column(s) Sequence( OneOf("ALTER", "MODIFY"), OptionallyBracketed( Delimited( OneOf( # Add things Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence("DROP", "DEFAULT"), Sequence( "SET", "DEFAULT", Ref("NakedIdentifierSegment"), Ref("DotSegment"), "NEXTVAL", ), Sequence( OneOf("SET", "DROP", optional=True), "NOT", "NULL", ), Sequence( Sequence( Sequence("SET", "DATA", optional=True), "TYPE", optional=True, ), Ref("DatatypeSegment"), ), Ref("CommentClauseSegment"), ), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "SET", "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), Ref.keyword("FORCE", optional=True), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "MASKING", "POLICY", ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "SET", "TAG", Ref("TagReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "TAG", Ref("TagReferenceSegment"), ), ), ), ), ), # Drop column Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Delimited( Sequence( Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), ) ), ), # @TODO: Drop columns # vvvvv COPIED FROM ANSI vvvvv # @TODO: Removed these once `tableColumnAction` is properly supported. Sequence( OneOf("ADD", "MODIFY"), Ref.keyword("COLUMN", optional=True), Ref("ColumnDefinitionSegment"), OneOf( Sequence(OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment")), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), Sequence( "DROP", "CONSTRAINT", Ref("ObjectReferenceSegment"), ), ) class AlterTableClusteringActionSegment(BaseSegment): """ALTER TABLE `clusteringAction` per defined in Snowflake's grammar. 
https://docs.snowflake.com/en/sql-reference/sql/alter-table.html#clustering-actions-clusteringaction """ type = "alter_table_clustering_action" match_grammar = OneOf( Sequence( "CLUSTER", "BY", OneOf( Ref("FunctionSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), ), # N.B. RECLUSTER is deprecated: # https://docs.snowflake.com/en/user-guide/tables-clustering-manual.html Sequence( "RECLUSTER", Sequence( "MAX_SIZE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Ref("WhereClauseSegment", optional=True), ), Sequence( OneOf( "SUSPEND", "RESUME", ), "RECLUSTER", ), Sequence( "DROP", "CLUSTERING", "KEY", ), ) class AlterTableConstraintActionSegment(BaseSegment): """ALTER TABLE `constraintAction` as defined in Snowflake's grammar. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html#constraint-actions-constraintaction """ type = "alter_table_constraint_action" match_grammar = OneOf( # Add constraint Sequence( "ADD", Ref("OutOfLineConstraintPropertiesSegment"), ), Sequence( "DROP", Sequence("CONSTRAINT", Ref("NakedIdentifierSegment"), optional=True), OneOf( Ref("PrimaryKeyGrammar"), Ref("ForeignKeyGrammar"), Ref("UniqueKeyGrammar"), ), Delimited(Ref("ColumnReferenceSegment")), ), Sequence( "RENAME", "CONSTRAINT", Ref("NakedIdentifierSegment"), "TO", Ref("NakedIdentifierSegment"), ), ) class AlterWarehouseStatementSegment(BaseSegment): """An `ALTER WAREHOUSE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html """ type = "alter_warehouse_statement" match_grammar = Sequence( "ALTER", "WAREHOUSE", Ref("IfExistsGrammar", optional=True), OneOf( Sequence( Ref("ObjectReferenceSegment", optional=True), OneOf( "SUSPEND", Sequence( "RESUME", Sequence("IF", "SUSPENDED", optional=True), ), ), ), Sequence( Ref("ObjectReferenceSegment", optional=True), Sequence( "ABORT", "ALL", "QUERIES", ), ), Sequence( Ref("ObjectReferenceSegment"), "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( Ref("ObjectReferenceSegment", optional=True), "SET", OneOf( AnyNumberOf( Ref("CommaSegment", optional=True), Ref("WarehouseObjectPropertiesSegment"), Ref("CommentEqualsClauseSegment"), Ref("WarehouseObjectParamsSegment"), ), Ref("TagEqualsSegment"), ), ), Sequence( Ref("ObjectReferenceSegment"), "UNSET", OneOf( Delimited(Ref("NakedIdentifierSegment")), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), ), ), ) class AlterShareStatementSegment(BaseSegment): """An `ALTER SHARE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-share.html """ type = "alter_share_statement" match_grammar = Sequence( "ALTER", "SHARE", Ref("IfExistsGrammar", optional=True), Ref("NakedIdentifierSegment"), OneOf( Sequence( OneOf( "ADD", "REMOVE", ), "ACCOUNTS", Ref("EqualsSegment"), Delimited(Ref("ObjectReferenceSegment")), Sequence( "SHARE_RESTRICTIONS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), Sequence( "SET", "ACCOUNTS", Ref("EqualsSegment"), Delimited(Ref("ObjectReferenceSegment")), Ref("CommentEqualsClauseSegment", optional=True), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "UNSET", "TAG", Ref("TagReferenceSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TagReferenceSegment"), optional=True ), ), Sequence("UNSET", "COMMENT"), ), ) class AlterStorageIntegrationSegment(BaseSegment): """An `ALTER STORAGE INTEGRATION` statement.
https://docs.snowflake.com/en/sql-reference/sql/alter-storage-integration """ type = "alter_storage_integration_statement" match_grammar = Sequence( "ALTER", Ref.keyword("STORAGE", optional=True), "INTEGRATION", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", OneOf( Ref("TagEqualsSegment", optional=True), AnySetOf( Ref("CommentEqualsClauseSegment"), Sequence( "ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "USE_PRIVATELINK_ENDPOINT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), OneOf( AnySetOf( Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_OBJECT_ACL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), AnySetOf( Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "STORAGE_ALLOWED_LOCATIONS", Ref("EqualsSegment"), OneOf( Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), Bracketed( Ref("QuotedStarSegment"), ), ), ), Sequence( "STORAGE_BLOCKED_LOCATIONS", Ref("EqualsSegment"), Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), ), ), ), ), Sequence( "UNSET", OneOf( Sequence( "TAG", Delimited(Ref("TagReferenceSegment")), optional=True ), "COMMENT", "ENABLED", "STORAGE_BLOCKED_LOCATIONS", ), ), ), ) class AlterExternalTableStatementSegment(BaseSegment): """An `ALTER EXTERNAL TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-external-table.html """ type = "alter_external_table_statement" match_grammar = Sequence( "ALTER", "EXTERNAL", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence("REFRESH", Ref("QuotedLiteralSegment", optional=True)), Sequence( OneOf("ADD", "REMOVE"), "FILES", Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "SET", Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Ref("TagEqualsSegment", optional=True), ), Sequence("UNSET", Ref("TagEqualsSegment")), Sequence("DROP", "PARTITION", "LOCATION", Ref("QuotedLiteralSegment")), Sequence( "ADD", "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), "LOCATION", Ref("QuotedLiteralSegment"), ), ), ) class CommentEqualsClauseSegment(BaseSegment): """A comment clause. e.g. COMMENT = 'view/table description' """ type = "comment_equals_clause" match_grammar = Sequence( "COMMENT", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment")), ) class TagBracketedEqualsSegment(BaseSegment): """A tag clause. e.g. TAG (tag1 = 'value1', tag2 = 'value2') """ type = "tag_bracketed_equals" match_grammar = Sequence( Sequence("WITH", optional=True), "TAG", Bracketed( Delimited( Sequence( Ref("TagReferenceSegment"), Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment") ), ) ), ), ) class LogLevelEqualsSegment(BaseSegment): """LOG_LEVEL clause. https://docs.snowflake.com/en/sql-reference/parameters#label-log-level """ type = "log_level_equals" match_grammar = Sequence( "LOG_LEVEL", Ref("EqualsSegment"), OneOf( "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL", "OFF", ), ) class TraceLevelEqualsSegment(BaseSegment): """TRACE_LEVEL clause. 
https://docs.snowflake.com/en/sql-reference/parameters#trace-level """ type = "trace_level_equals" match_grammar = Sequence( "TRACE_LEVEL", Ref("EqualsSegment"), OneOf( "ALWAYS", "ON_EVENT", "OFF", ), ) class ExternalAccessIntegrationsEqualsSegment(BaseSegment): """EXTERNAL_ACCESS_INTEGRATIONS clause. https://docs.snowflake.com/en/sql-reference/sql/alter-function https://docs.snowflake.com/en/sql-reference/sql/create-external-access-integration """ type = "external_access_integration_equals" match_grammar = Sequence( "EXTERNAL_ACCESS_INTEGRATIONS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True, ) ), Ref("SingleIdentifierGrammar"), ) ) ), ) class SecretsEqualsSegment(BaseSegment): """SECRETS clause. https://docs.snowflake.com/en/sql-reference/sql/alter-function https://docs.snowflake.com/en/sql-reference/sql/create-external-access-integration """ type = "external_access_integration_equals" match_grammar = Sequence( "SECRETS", Ref("EqualsSegment"), Bracketed( Sequence( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True, ), ), Ref("SingleIdentifierGrammar"), ) ) ) ), ) class TagEqualsSegment(BaseSegment): """A tag clause. e.g. TAG tag1 = 'value1', tag2 = 'value2' """ type = "tag_equals" match_grammar = Sequence( "TAG", Delimited( Sequence( Ref("TagReferenceSegment"), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment")), ) ), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A snowflake unordered `SELECT` statement including optional Qualify. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "select_statement" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), ) class AccessStatementSegment(BaseSegment): """A `GRANT` or `REVOKE` statement. 
Grant specific information: * https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Revoke specific information: * https://docs.snowflake.com/en/sql-reference/sql/revoke-role.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege-share.html """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ACCOUNT", "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", "SHARE", "TAG", Sequence("DATA", "EXCHANGE", "LISTING"), Sequence("NETWORK", "POLICY"), ), ), Sequence("APPLY", "MASKING", "POLICY"), Sequence("APPLY", "ROW", "ACCESS", "POLICY"), Sequence("APPLY", "SESSION", "POLICY"), Sequence("APPLY", "TAG"), Sequence("ATTACH", "POLICY"), Sequence( "EXECUTE", OneOf("ALERT", Sequence(Ref.keyword("MANAGED", optional=True), "TASK")), ), Sequence("IMPORT", "SHARE"), Sequence( "MANAGE", OneOf( "GRANTS", Sequence(OneOf("ACCOUNT", "ORGANIZATION", "USER"), "SUPPORT", "CASES"), ), ), Sequence("MONITOR", OneOf("EXECUTION", "USAGE")), Sequence("OVERRIDE", "SHARE", "RESTRICTIONS"), ) _schema_object_names = [ "TABLE", "VIEW", "STAGE", "FUNCTION", "PROCEDURE", "ROUTINE", "SEQUENCE", "STREAM", "STREAMLIT", "TASK", "PIPE", "NOTEBOOK", "MODEL", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("MATERIALIZED", "VIEW"), Sequence("EXTERNAL", "TABLE"), Sequence(OneOf("TEMP", "TEMPORARY"), "TABLE"), Sequence("FILE", "FORMAT"), Sequence("SESSION", "POLICY"), Sequence("MASKING", "POLICY"), Sequence("ROW", "ACCESS", "POLICY"), Sequence("CORTEX", "SEARCH", "SERVICE"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( Sequence( "CREATE", OneOf( "SCHEMA", # Sequence("MASKING", "POLICY"), _schema_object_types, ), ), Sequence("IMPORTED", "PRIVILEGES"), "APPLY", "CONNECT", "CREATE", "DELETE", "EXECUTE", "INSERT", "MODIFY", "MONITOR", "OPERATE", "OWNERSHIP", "READ", "REFERENCE_USAGE", "REFERENCES", "SELECT", "TEMP", "TEMPORARY", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", "USE_ANY_ROLE", "WRITE", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on. 
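# e.g. the "ON SCHEMA my_db.my_schema" or "ON ALL TABLES IN SCHEMA my_db.my_schema" # part of a GRANT/REVOKE statement (my_db and my_schema are illustrative names).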
_objects = OneOf( "ACCOUNT", Sequence( OneOf( Sequence("RESOURCE", "MONITOR"), Sequence("EXTERNAL", "VOLUME"), "WAREHOUSE", "DATABASE", "DOMAIN", "INTEGRATION", "SCHEMA", "ROLE", "USER", Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), Sequence("FUTURE", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence( OneOf("ALL", "FUTURE"), OneOf("DYNAMIC", optional=True), OneOf( _schema_object_types_plural, Sequence("MATERIALIZED", "VIEWS"), Sequence("EXTERNAL", "TABLES"), Sequence("FILE", "FORMATS"), ), "IN", OneOf("DATABASE", "SCHEMA"), ), Sequence("DATABASE", "ROLE"), optional=True, ), Delimited( Ref("ObjectReferenceSegment"), Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), terminators=["TO", "FROM"], ), ), ) match_grammar: Matchable = OneOf( # https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("DATABASE", "ROLE", Ref("DatabaseRoleReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), Sequence( "ADD", "SEARCH", "OPTIMIZATION", "ON", "SCHEMA", Ref("SchemaReferenceSegment"), ), Sequence("APPLICATION", "ROLE", Ref("ObjectReferenceSegment")), # In the case where a role is granted non-explicitly, # e.g. GRANT ROLE_NAME TO OTHER_ROLE_NAME # See https://docs.snowflake.com/en/sql-reference/sql/grant-role.html Ref("ObjectReferenceSegment"), ), "TO", OneOf( "APPLICATION", "USER", "ROLE", "SHARE", Sequence("DATABASE", "ROLE"), optional=True, ), Delimited( OneOf( Ref("RoleReferenceSegment"), Ref("FunctionSegment"), Ref("DatabaseRoleReferenceSegment"), "PUBLIC", ), ), OneOf( Sequence("WITH", "GRANT", "OPTION"), Sequence("WITH", "ADMIN", "OPTION"), Sequence(OneOf("REVOKE", "COPY"), "CURRENT", "GRANTS"), optional=True, ), Sequence( "GRANTED", "BY", OneOf( "CURRENT_USER", "SESSION_USER", Ref("ObjectReferenceSegment"), ), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("DATABASE", "ROLE", Ref("DatabaseRoleReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), ), "FROM", OneOf("USER", "ROLE", "SHARE", Sequence("DATABASE", "ROLE"), optional=True), Delimited( Ref("ObjectReferenceSegment"), ), Ref("DropBehaviorGrammar", optional=True), ), ) class CreateCloneStatementSegment(BaseSegment): """A snowflake `CREATE ... CLONE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-clone.html """ type = "create_clone_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf( "DATABASE", "SCHEMA", Sequence(Sequence("DYNAMIC", optional=True), "TABLE"), "SEQUENCE", Sequence("FILE", "FORMAT"), "STAGE", "STREAM", "TASK", ), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "CLONE", Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), ) class CreateDatabaseFromShareStatementSegment(BaseSegment): """A snowflake `CREATE ... DATABASE FROM SHARE` statement. 
https://docs.snowflake.com/en/sql-reference/sql/create-database.html """ type = "create_database_from_share_statement" match_grammar = Sequence( "CREATE", "DATABASE", Ref("ObjectReferenceSegment"), Sequence("FROM", "SHARE"), Ref("ObjectReferenceSegment"), ) class CreateProcedureStatementSegment(BaseSegment): """A snowflake `CREATE ... PROCEDURE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-procedure.html """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("SECURE", optional=True), "PROCEDURE", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence("COPY", "GRANTS", optional=True), "RETURNS", OneOf( Ref("DatatypeSegment"), Sequence( "TABLE", Bracketed(Delimited(Ref("ColumnDefinitionSegment"), optional=True)), ), ), AnySetOf( Sequence("NOT", "NULL", optional=True), Sequence( "LANGUAGE", OneOf( "JAVA", "JAVASCRIPT", "PYTHON", "SCALA", "SQL", ), optional=True, ), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Sequence( "RUNTIME_VERSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "IMPORTS", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "SECRETS", Ref("EqualsSegment"), Bracketed( Sequence( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True, ), ), Ref("SingleIdentifierGrammar"), ) ) ) ), optional=True, ), Sequence( "EXTERNAL_ACCESS_INTEGRATIONS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True, ) ), Ref("SingleIdentifierGrammar"), ) ) ), optional=True, ), Sequence( "PACKAGES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "HANDLER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "TARGET_PATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence("EXECUTE", "AS", OneOf("CALLER", "OWNER"), optional=True), optional=True, ), Sequence( "AS", OneOf( # Either a foreign programming language UDF... Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Ref("DollarQuotedUDFBody"), # ...or a SQL UDF Ref("ScriptingBlockStatementSegment"), ), optional=True, ), ) class AlterProcedureStatementSegment(BaseSegment): """A snowflake `ALTER ... PROCEDURE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-procedure.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("EXECUTE", "AS", OneOf("CALLER", "OWNER")), Sequence( OneOf( # eg. SET LOG_LEVEL = WARN Sequence( "SET", OneOf( Ref("TagEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), Ref("LogLevelEqualsSegment", optional=True), Ref("TraceLevelEqualsSegment", optional=True), Ref( "ExternalAccessIntegrationsEqualsSegment", optional=True ), Ref("SecretsEqualsSegment", optional=True), ), ), # eg. 
SET LOG_LEVEL = WARN, TRACE_LEVEL = ON_EVENT Sequence( "SET", Delimited( AnyNumberOf( Ref("TagEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), Ref("LogLevelEqualsSegment", optional=True), Ref("TraceLevelEqualsSegment", optional=True), Ref( "ExternalAccessIntegrationsEqualsSegment", optional=True, ), Ref("SecretsEqualsSegment", optional=True), ), ), ), ), ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), "COMMENT" ), ), ), ) class AlterNetworkPolicyStatementSegment(BaseSegment): """An ALTER NETWORK POLICY statement. As per https://docs.snowflake.com/en/sql-reference/sql/alter-network-policy """ type = "alter_network_policy_statement" match_grammar = Sequence( "ALTER", "NETWORK", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), OneOf( Sequence( "SET", AnySetOf( Sequence( "ALLOWED_NETWORK_RULE_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "BLOCKED_NETWORK_RULE_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "ALLOWED_IP_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "BLOCKED_IP_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Ref("CommentEqualsClauseSegment"), ), ), Sequence( "UNSET", "COMMENT", ), Sequence( OneOf( "ADD", "REMOVE", ), OneOf( "ALLOWED_NETWORK_RULE_LIST", "BLOCKED_NETWORK_RULE_LIST", ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("RENAME", "TO", Ref("SingleIdentifierGrammar")), Sequence("SET", Ref("TagEqualsSegment")), Sequence( "UNSET", "TAG", Ref("TagReferenceSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TagReferenceSegment"), optional=True ), ), ), ) class ReturnStatementSegment(BaseSegment): """A snowflake `RETURN` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/return """ type = "return_statement" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment"), ) class ScriptingBlockStatementSegment(BaseSegment): """A snowflake `BEGIN ... END` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/begin """ type = "scripting_block_statement" match_grammar = Sequence( Sequence( "BEGIN", Indent, Ref("StatementSegment"), ), AnyNumberOf( Sequence( Ref("DelimiterGrammar"), Ref("StatementSegment"), ), terminators=[ OneOf( Sequence(Ref("DelimiterGrammar"), "END"), # Don't terminate on an "END FOR", because that's a different # expression. exclude=Sequence(Ref("DelimiterGrammar"), "END", "FOR"), ), ], # NOTE: We can't be greedy because there may be nested loops. This # does make understanding any failed parsing loops difficult but I # don't think there's an easy way around that. ), Ref("DelimiterGrammar"), Dedent, "END", reset_terminators=True, ) class ScriptingLetStatementSegment(BaseSegment): """A snowflake `LET` statement for SQL scripting. 
https://docs.snowflake.com/en/sql-reference/snowflake-scripting/let https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables """ type = "scripting_let_statement" match_grammar = OneOf( # Initial declaration and assignment Sequence( "LET", Ref("LocalVariableNameSegment"), OneOf( # Variable assignment OneOf( Sequence( Ref("DatatypeSegment"), OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), Sequence( OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), ), # Cursor assignment Sequence( "CURSOR", "FOR", OneOf(Ref("LocalVariableNameSegment"), Ref("SelectableGrammar")), ), # Resultset assignment Sequence( "RESULTSET", Ref("WalrusOperatorSegment"), Bracketed(Ref("SelectableGrammar")), ), ), ), # Subsequent assignment, see # https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables Sequence( Ref("LocalVariableNameSegment"), Ref("WalrusOperatorSegment"), OneOf( # Variable reassignment Ref("ExpressionSegment"), # Cursors cannot be reassigned # no code # Resultset reassignment Bracketed(Ref("SelectableGrammar")), ), ), ) class CreateFunctionStatementSegment(BaseSegment): """A snowflake `CREATE ... FUNCTION` statement for SQL, JavaScript, Python, Java and Scala functions. https://docs.snowflake.com/en/sql-reference/sql/create-function.html """ type = "create_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf("TEMP", "TEMPORARY", optional=True), Sequence("SECURE", optional=True), Sequence("AGGREGATE", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence("COPY", "GRANTS", optional=True), "RETURNS", OneOf( Ref("DatatypeSegment"), Sequence("TABLE", Bracketed(Delimited(Ref("ColumnDefinitionSegment")))), ), AnySetOf( Sequence("NOT", "NULL", optional=True), Sequence( "LANGUAGE", OneOf("JAVASCRIPT", "SQL", "PYTHON", "JAVA", "SCALA"), optional=True, ), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Sequence( "RUNTIME_VERSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "IMPORTS", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "SECRETS", Ref("EqualsSegment"), Bracketed( Sequence( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), optional=True, ) ), Ref("SingleIdentifierGrammar"), ) ) ) ), optional=True, ), Sequence( "EXTERNAL_ACCESS_INTEGRATIONS", Ref("EqualsSegment"), Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), optional=True, ), Sequence( "PACKAGES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "HANDLER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "TARGET_PATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), Sequence( "AS", OneOf( # Either a foreign programming language UDF... Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Ref("DollarQuotedUDFBody"), # ...or a SQL UDF Ref("ScriptingBlockStatementSegment"), ), optional=True, ), ) class AlterFunctionStatementSegment(BaseSegment): """Snowflake `ALTER ... FUNCTION` and `ALTER ... EXTERNAL FUNCTION` statements. NOTE: `ALTER ...
EXTERNAL FUNCTION` statements always use the `ALTER ... FUNCTION` syntax. https://docs.snowflake.com/en/sql-reference/sql/alter-function.html https://docs.snowflake.com/en/sql-reference/sql/alter-external-function.html """ type = "alter_function_statement" match_grammar = Sequence( "ALTER", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence( "SET", OneOf( # eg. SET LOG_LEVEL = WARN Sequence( OneOf( Ref("CommentEqualsClauseSegment"), Ref("LogLevelEqualsSegment", optional=True), Ref("TraceLevelEqualsSegment", optional=True), Ref( "ExternalAccessIntegrationsEqualsSegment", optional=True ), Ref("SecretsEqualsSegment", optional=True), Ref("TagEqualsSegment", optional=True), Sequence( "API_INTEGRATION", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleQuotedIdentifierSegment"), Ref("EqualsSegment"), Ref("SingleQuotedIdentifierSegment"), ), ), ), ), Sequence( "CONTEXT_HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ContextHeadersGrammar"), ), ), ), Sequence( "MAX_BATCH_ROWS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), "SECURE", Sequence( OneOf("REQUEST_TRANSLATOR", "RESPONSE_TRANSLATOR"), Ref("EqualsSegment"), Ref("FunctionNameSegment"), ), ), ), # eg. SET LOG_LEVEL = WARN, TRACE_LEVEL = ON_EVENT Sequence( Delimited( AnyNumberOf( Ref("CommentEqualsClauseSegment"), Ref("LogLevelEqualsSegment", optional=True), Ref("TraceLevelEqualsSegment", optional=True), Ref( "ExternalAccessIntegrationsEqualsSegment", optional=True, ), Ref("SecretsEqualsSegment", optional=True), Ref("TagEqualsSegment", optional=True), Sequence( "API_INTEGRATION", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleQuotedIdentifierSegment"), Ref("EqualsSegment"), Ref("SingleQuotedIdentifierSegment"), ), ), ), ), Sequence( "CONTEXT_HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ContextHeadersGrammar"), ), ), ), Sequence( "MAX_BATCH_ROWS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), "SECURE", Sequence( OneOf("REQUEST_TRANSLATOR", "RESPONSE_TRANSLATOR"), Ref("EqualsSegment"), Ref("FunctionNameSegment"), ), ), ), ), ), ), Sequence( "UNSET", OneOf( "COMMENT", "HEADERS", "CONTEXT_HEADERS", "MAX_BATCH_ROWS", "COMPRESSION", "SECURE", "REQUEST_TRANSLATOR", "RESPONSE_TRANSLATOR", Sequence( "TAG", Ref("TagReferenceSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TagReferenceSegment"), optional=True, ), ), ), ), Sequence( "RENAME", "TO", Ref("SingleIdentifierGrammar"), ), ), ) class CreateExternalFunctionStatementSegment(BaseSegment): """A snowflake `CREATE ... EXTERNAL FUNCTION` statement for API integrations. 
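e.g. (illustrative only, placeholder names): CREATE EXTERNAL FUNCTION local_echo(string_col VARCHAR) RETURNS VARIANT API_INTEGRATION = my_api_integration AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo';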
https://docs.snowflake.com/en/sql-reference/sql/create-external-function.html """ type = "create_external_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("SECURE", optional=True), "EXTERNAL", "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), "RETURNS", Ref("DatatypeSegment"), Sequence(Ref.keyword("NOT", optional=True), "NULL", optional=True), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Ref("CommentEqualsClauseSegment", optional=True), "API_INTEGRATION", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), Sequence( "HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleQuotedIdentifierSegment"), Ref("EqualsSegment"), Ref("SingleQuotedIdentifierSegment"), ), ), ), optional=True, ), Sequence( "CONTEXT_HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ContextHeadersGrammar"), ), ), optional=True, ), Sequence( "MAX_BATCH_ROWS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), optional=True, ), Sequence( "REQUEST_TRANSLATOR", Ref("EqualsSegment"), Ref("FunctionNameSegment"), optional=True, ), Sequence( "RESPONSE_TRANSLATOR", Ref("EqualsSegment"), Ref("FunctionNameSegment"), optional=True, ), "AS", Ref("SingleQuotedIdentifierSegment"), ) class WarehouseObjectPropertiesSegment(BaseSegment): """A snowflake Warehouse Object Properties segment. https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html Note: comments are handled separately so not incorrectly marked as warehouse object. """ type = "warehouse_object_properties" match_grammar = AnySetOf( Sequence( "WAREHOUSE_TYPE", Ref("EqualsSegment"), Ref("WarehouseType"), ), Sequence( "WAREHOUSE_SIZE", Ref("EqualsSegment"), Ref("WarehouseSize"), ), Sequence( "WAIT_FOR_COMPLETION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "MAX_CLUSTER_COUNT", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MIN_CLUSTER_COUNT", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "SCALING_POLICY", Ref("EqualsSegment"), Ref("ScalingPolicy"), ), Sequence( "AUTO_SUSPEND", Ref("EqualsSegment"), OneOf( Ref("NumericLiteralSegment"), "NULL", ), ), Sequence( "AUTO_RESUME", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "INITIALLY_SUSPENDED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "RESOURCE_MONITOR", Ref("EqualsSegment"), Ref("NakedIdentifierSegment"), ), Sequence( "ENABLE_QUERY_ACCELERATION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "QUERY_ACCELERATION_MAX_SCALE_FACTOR", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), ) class WarehouseObjectParamsSegment(BaseSegment): """A snowflake Warehouse Object Param segment. 
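e.g. (illustrative values): MAX_CONCURRENCY_LEVEL = 8 STATEMENT_QUEUED_TIMEOUT_IN_SECONDS = 60 STATEMENT_TIMEOUT_IN_SECONDS = 600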
https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html """ type = "warehouse_object_properties" match_grammar = AnySetOf( Sequence( "MAX_CONCURRENCY_LEVEL", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "STATEMENT_QUEUED_TIMEOUT_IN_SECONDS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "STATEMENT_TIMEOUT_IN_SECONDS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), ) class InlineConstraintPropertiesSegment(BaseSegment): """In Line CONSTRAINT clause for CREATE TABLE or ALTER TABLE command. https://docs.snowflake.com/sql-reference/sql/create-table-constraint#syntax-for-inline-constraints """ type = "constraint_properties_segment" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref("SingleIdentifierGrammar"), optional=True, ), OneOf( Sequence( OneOf( Ref("PrimaryKeyGrammar"), Ref("UniqueKeyGrammar"), ), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), # For use in CREATE TABLE as a part of # ColumnDefinitionSegment.ColumnConstraintSegment optional=True, ), ), Sequence( Sequence( Ref("ForeignKeyGrammar"), ), "REFERENCES", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), Ref("ForeignKeyConstraintGrammar", optional=True), ), ), Ref("InlineConstraintGrammar", optional=True), ) class OutOfLineConstraintPropertiesSegment(BaseSegment): """Out of Line CONSTRAINT clause for CREATE TABLE or ALTER TABLE command. https://docs.snowflake.com/sql-reference/sql/create-table-constraint#syntax-for-out-of-line-constraints """ type = "constraint_properties_segment" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref("SingleIdentifierGrammar"), optional=True, ), OneOf( Sequence( OneOf( Ref("PrimaryKeyGrammar"), Ref("UniqueKeyGrammar"), ), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), # For use in CREATE TABLE as a part of # ColumnDefinitionSegment.ColumnConstraintSegment optional=True, ), ), Sequence( Sequence( Ref("ForeignKeyGrammar"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), "REFERENCES", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Ref("ForeignKeyConstraintGrammar", optional=True), ), ), Ref("InlineConstraintGrammar", optional=True), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. https://docs.snowflake.com/en/sql-reference/sql/create-table.html """ match_grammar: Matchable = AnySetOf( Ref("CollateGrammar"), Sequence( "DEFAULT", Ref("ExpressionSegment"), ), Sequence( OneOf("AUTOINCREMENT", "IDENTITY"), OneOf( Bracketed(Delimited(Ref("NumericLiteralSegment"))), Sequence( "START", Ref("NumericLiteralSegment"), "INCREMENT", Ref("NumericLiteralSegment"), ), optional=True, ), Ref("OrderNoOrderGrammar", optional=True), ), Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence( Sequence("WITH", optional=True), "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), ), Ref("TagBracketedEqualsSegment", optional=True), Ref("InlineConstraintPropertiesSegment"), Sequence("DEFAULT", Ref("QuotedLiteralSegment")), Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # DEFAULT "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), # ??
Ref('IntervalExpressionSegment') ), ), Sequence( # REFERENCES reftable [ ( refcolumn) ] "REFERENCES", Ref("ColumnReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar", optional=True), ), ) class CopyOptionsSegment(BaseSegment): """A Snowflake CopyOptions statement. https://docs.snowflake.com/en/sql-reference/sql/create-table.html https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html """ type = "copy_options" _copy_options_matchables = [ Sequence("ON_ERROR", Ref("EqualsSegment"), Ref("CopyOptionOnErrorSegment")), Sequence("SIZE_LIMIT", Ref("EqualsSegment"), Ref("NumericLiteralSegment")), Sequence("PURGE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "RETURN_FAILED_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence( "MATCH_BY_COLUMN_NAME", Ref("EqualsSegment"), OneOf( "CASE_SENSITIVE", "CASE_INSENSITIVE", "NONE", Ref("QuotedLiteralSegment"), ), ), Sequence( "INCLUDE_METADATA", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("EqualsSegment"), OneOf( "METADATA$FILENAME", "METADATA$FILE_ROW_NUMBER", "METADATA$FILE_CONTENT_KEY", "METADATA$FILE_LAST_MODIFIED", "METADATA$START_SCAN_TIME", ), ), ) ), ), Sequence("ENFORCE_LENGTH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("TRUNCATECOLUMNS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("FORCE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("OVERWRITE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("SINGLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("MAX_FILE_SIZE", Ref("EqualsSegment"), Ref("NumericLiteralSegment")), Sequence( "INCLUDE_QUERY_ID", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence("DETAILED_OUTPUT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "LOAD_UNCERTAIN_FILES", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), ] match_grammar = AnySetOf(*_copy_options_matchables) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. https://docs.snowflake.com/en/sql-reference/sql/create-schema.html """ type = "create_schema_statement" match_grammar = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Sequence("WITH", "MANAGED", "ACCESS", optional=True), Ref("SchemaObjectParamsSegment", optional=True), Ref("TagBracketedEqualsSegment", optional=True), ) class AlterRoleStatementSegment(BaseSegment): """An `ALTER ROLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-role.html """ type = "alter_role_statement" match_grammar = Sequence( "ALTER", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), OneOf( Sequence( "SET", OneOf( Ref("RoleReferenceSegment"), Ref("TagEqualsSegment"), Ref("CommentEqualsClauseSegment"), ), ), Sequence( "UNSET", OneOf( Ref("RoleReferenceSegment"), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), Sequence("COMMENT"), ), ), Sequence( "RENAME", "TO", OneOf( Ref("RoleReferenceSegment"), ), ), ), ) class CreateSequenceStatementSegment(BaseSegment): """A `CREATE SEQUENCE` statement. 
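e.g. (illustrative only, placeholder names): CREATE OR REPLACE SEQUENCE seq_01 START = 1 INCREMENT = 1 COMMENT = 'my sequence';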
https://docs.snowflake.com/en/sql-reference/sql/create-sequence """ type = "create_sequence_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "SEQUENCE", Ref("IfNotExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), Sequence("WITH", optional=True), Sequence( "START", Sequence("WITH", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), Sequence( "INCREMENT", Sequence("BY", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), Ref("OrderNoOrderGrammar", optional=True), Ref("CommentEqualsClauseSegment", optional=True), ) class AlterSequenceStatementSegment(BaseSegment): """An `ALTER SEQUENCE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-sequence """ type = "alter_sequence_statement" match_grammar = Sequence( "ALTER", "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), Sequence( Sequence("SET", optional=True), AnySetOf( Sequence( "INCREMENT", Sequence("BY", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), Ref("OrderNoOrderGrammar", optional=True), Ref("CommentEqualsClauseSegment"), ), optional=True, ), Sequence("UNSET", "COMMENT", optional=True), Sequence("RENAME", "TO", Ref("SequenceReferenceSegment"), optional=True), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-schema.html """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("SchemaReferenceSegment"), ), Sequence( "SWAP", "WITH", Ref("SchemaReferenceSegment"), ), Sequence( "SET", OneOf(Ref("SchemaObjectParamsSegment"), Ref("TagEqualsSegment")), ), Sequence( "UNSET", OneOf( Delimited( "DATA_RETENTION_TIME_IN_DAYS", "MAX_DATA_EXTENSION_TIME_IN_DAYS", "DEFAULT_DDL_COLLATION", "COMMENT", ), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), ), Sequence(OneOf("ENABLE", "DISABLE"), Sequence("MANAGED", "ACCESS")), ), ) class SchemaObjectParamsSegment(BaseSegment): """A Snowflake Schema Object Param segment. https://docs.snowflake.com/en/sql-reference/sql/create-schema.html https://docs.snowflake.com/en/sql-reference/sql/alter-schema.html """ type = "schema_object_properties" match_grammar = AnySetOf( Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DEFAULT_DDL_COLLATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Ref("CommentEqualsClauseSegment"), ) class CreateEventTableStatementSegment(BaseSegment): """A `CREATE EVENT TABLE` statement.
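e.g. (illustrative only, placeholder names): CREATE EVENT TABLE my_db.my_schema.my_events;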
https://docs.snowflake.com/en/sql-reference/sql/create-event-table """ type = "create_event_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("EVENT", "TABLE"), Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnySetOf( Sequence( "CLUSTER", "BY", OneOf( Ref("FunctionSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), ), Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "CHANGE_TRACKING", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "DEFAULT_DDL_COLLATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "COPY", "GRANTS", ), Sequence( "WITH", "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "WITH", Ref("CommentEqualsClauseSegment"), ), Ref("TagBracketedEqualsSegment"), optional=True, ), ) class DynamicTableOptionsSegment(BaseSegment): """A Snowflake Dynamic Table Options segment. https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table """ type = "dynamic_table_options" match_grammar = Sequence( AnySetOf( Sequence( "TARGET_LAG", Ref("EqualsSegment"), Ref("DynamicTableTargetLagSegment"), optional=True, ), Sequence( "REFRESH_MODE", Ref("EqualsSegment"), Ref("RefreshModeType"), optional=True, ), Sequence( "INITIALIZE", Ref("EqualsSegment"), Ref("InitializeType"), optional=True, ), Sequence( "WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), Sequence( "CLUSTER", "BY", Delimited(Ref("ExpressionSegment")), optional=True, ), Sequence( "BASE_LOCATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "CATALOG", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "EXTERNAL_VOLUME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( Ref.keyword("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed( Delimited(Ref("ColumnReferenceSegment")), ), optional=True, ), Ref("TagBracketedEqualsSegment", optional=True), Sequence( "REQUIRE", "USER", optional=True, ), ), ) class IcebergTableOptionsSegment(BaseSegment): """A Snowflake Iceberg Table Options segment. 
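e.g. (illustrative values): EXTERNAL_VOLUME = 'my_external_volume' CATALOG = 'SNOWFLAKE' BASE_LOCATION = 'my/relative/path/'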
https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table """ type = "iceberg_table_options" match_grammar = AnySetOf( Sequence( "EXTERNAL_VOLUME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "CATALOG", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "CATALOG_TABLE_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "CATALOG_NAMESPACE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "REPLACE_INVALID_CHARACTERS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "METADATA_FILE_PATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "BASE_LOCATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. A lot more options than ANSI https://docs.snowflake.com/en/sql-reference/sql/create-table.html https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table """ match_grammar: Matchable = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), Ref.keyword("DYNAMIC", optional=True), Ref.keyword("HYBRID", optional=True), Ref.keyword("ICEBERG", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: AnySetOf( Sequence( Bracketed( Delimited( Sequence( OneOf( Ref("OutOfLineConstraintPropertiesSegment"), Ref("ColumnDefinitionSegment"), Ref("SingleIdentifierGrammar"), Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), Bracketed( Anything(), optional=True ), # For types like VARCHAR(100) "AS", OptionallyBracketed( Ref("ExpressionSegment"), ), ), ), Ref("CommentClauseSegment", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTER", "BY", OneOf( Ref("FunctionSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), optional=True, ), Sequence( "STAGE_FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True, ), Sequence( "STAGE_COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "CHANGE_TRACKING", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "DEFAULT_DDL_COLLATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "COPY", "GRANTS", optional=True, ), Sequence( Sequence("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Ref("IcebergTableOptionsSegment", optional=True), Ref("DynamicTableOptionsSegment", optional=True), Ref("TagBracketedEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), OneOf( # Create AS syntax: Sequence( Ref("DynamicTableOptionsSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), # Create clone syntax Sequence( "CLONE", Ref("TableReferenceSegment"), ), Sequence("USING", "TEMPLATE", Ref("SelectableGrammar")), optional=True, ), ), ) class 
CreateTaskSegment(BaseSegment): """A snowflake `CREATE TASK` statement. https://docs.snowflake.com/en/sql-reference/sql/create-task.html """ type = "create_task_statement" match_grammar = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), "TASK", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Indent, AnyNumberOf( OneOf( Sequence( "WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("ReferencedVariableNameSegment"), ), ), Sequence( "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE", Ref("EqualsSegment"), Ref("WarehouseSize"), ), ), Sequence( "SCHEDULE", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), ), ), Sequence( "ALLOW_OVERLAPPING_EXECUTION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "USER_TASK_TIMEOUT_MS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), Sequence( "COPY", "GRANTS", ), Ref("CommentEqualsClauseSegment"), Ref("LogLevelEqualsSegment"), ), Sequence( "AFTER", Delimited(Ref("ObjectReferenceSegment")), optional=True, ), Dedent, Sequence( "WHEN", Indent, Ref("TaskExpressionSegment"), Dedent, optional=True, ), Sequence( Ref.keyword("AS"), Indent, Ref("StatementSegment"), Dedent, ), ) class TaskExpressionSegment(BaseSegment): """Expressions for WHEN clause in TASK. e.g. "SYSTEM$STREAM_HAS_DATA('MYSTREAM')" """ type = "snowflake_task_expression_segment" match_grammar = Sequence( Delimited( OneOf( Ref("ExpressionSegment"), Sequence( Ref("SystemFunctionName"), Bracketed(Ref("QuotedLiteralSegment")), ), ), delimiter=OneOf(Ref("BooleanBinaryOperatorGrammar")), ) ) class CreateStatementSegment(BaseSegment): """A snowflake `CREATE` statement. 
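e.g. (illustrative only, placeholder names): CREATE NETWORK POLICY my_policy ALLOWED_IP_LIST = ('192.168.1.0/24');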
https://docs.snowflake.com/en/sql-reference/sql/create.html """ type = "create_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( Ref("OrReplaceGrammar", optional=True), OneOf( Sequence("NETWORK", "POLICY"), Sequence("RESOURCE", "MONITOR"), "SHARE", "TAG", Sequence("API", "INTEGRATION"), Sequence("NOTIFICATION", "INTEGRATION"), Sequence("SECURITY", "INTEGRATION"), Sequence("STORAGE", "INTEGRATION"), Sequence("CATALOG", "INTEGRATION"), Sequence("MATERIALIZED", "VIEW"), Sequence("MASKING", "POLICY"), "PIPE", Sequence("EXTERNAL", "FUNCTION"), "SEQUENCE", ), ), Sequence( Ref("AlterOrReplaceGrammar", optional=True), OneOf("WAREHOUSE", "DATABASE"), ), ), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), # Next set are Notification Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-api-integration.html AnySetOf( Sequence( "API_PROVIDER", Ref("EqualsSegment"), OneOf( "AWS_API_GATEWAY", "AWS_PRIVATE_API_GATEWAY", "AWS_GOV_API_GATEWAY", "AWS_GOV_PRIVATE_API_GATEWAY", "AZURE_API_MANAGEMENT", "GOOGLE_API_GATEWAY", "GIT_HTTPS_API", ), ), Sequence( "API_ALLOWED_PREFIXES", Ref("EqualsSegment"), Bracketed( Ref("QuotedLiteralSegment"), ), ), Sequence( "API_BLOCKED_PREFIXES", Ref("EqualsSegment"), Bracketed( Ref("QuotedLiteralSegment"), ), ), Sequence("API_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), # AWS specific params: Sequence( "API_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), # Azure specific params: Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence( "AZURE_AD_APPLICATION_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # GCP specific params: Sequence( "GOOGLE_AUDIENCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), # Git specific params: Sequence( "ALLOWED_AUTHENTICATION_SECRETS", Ref("EqualsSegment"), Bracketed( OneOf( "ALL", "NONE", Delimited( Ref("QuotedLiteralSegment"), ), ) ), ), ), # Next set are Notification Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration.html AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "QUEUE"), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NOTIFICATION_PROVIDER", Ref("EqualsSegment"), OneOf( "AWS_SNS", "AZURE_EVENT_GRID", "GCP_PUBSUB", "AZURE_STORAGE_QUEUE", Ref("QuotedLiteralSegment"), ), ), # AWS specific params: Sequence( "AWS_SNS_TOPIC_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AWS_SNS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # Azure specific params: Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), OneOf( Sequence( "AZURE_STORAGE_QUEUE_PRIMARY_URI", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AZURE_EVENT_GRID_TOPIC_ENDPOINT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), # GCP specific params: OneOf( Sequence( "GCP_PUBSUB_SUBSCRIPTION_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "GCP_PUBSUB_TOPIC_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DIRECTION", Ref("EqualsSegment"), "OUTBOUND", optional=True, ), Ref("CommentEqualsClauseSegment"), # For tags Sequence( "ALLOWED_VALUES", Delimited( Ref("QuotedLiteralSegment"), ), ), # For network policy Sequence( "ALLOWED_IP_LIST", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), # For network policy 
Sequence( "BLOCKED_IP_LIST", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), Sequence( "ALLOWED_NETWORK_RULE_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "BLOCKED_NETWORK_RULE_LIST", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ), # Next set are Storage Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "EXTERNAL_STAGE"), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "STORAGE_PROVIDER", Ref("EqualsSegment"), OneOf("S3", "AZURE", "GCS", Ref("QuotedLiteralSegment")), ), # Azure specific params: Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), # AWS specific params: Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_OBJECT_ACL", Ref("EqualsSegment"), StringParser("'bucket-owner-full-control'", LiteralSegment), ), Sequence( "STORAGE_ALLOWED_LOCATIONS", Ref("EqualsSegment"), OneOf( Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), Bracketed( Ref("QuotedStarSegment"), ), ), ), Sequence( "STORAGE_BLOCKED_LOCATIONS", Ref("EqualsSegment"), Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), ), Ref("CommentEqualsClauseSegment"), Sequence( "USE_PRIVATELINK_ENDPOINT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), # Next set are Catalog Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-catalog-integration AnySetOf( Sequence( "CATALOG_SOURCE", Ref("EqualsSegment"), OneOf("GLUE", "POLARIS", "ICEBERG_REST", "OBJECT_STORE"), ), Sequence( "TABLE_FORMAT", Ref("EqualsSegment"), OneOf("ICEBERG", "DELTA", "ICEBERG_REST", "OBJECT_STORE"), ), Sequence( "CATALOG_NAMESPACE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "REFRESH_INTERVAL_SECONDS", Ref("EqualsSegment"), Ref("LiteralSegment"), ), Ref("CommentEqualsClauseSegment"), # AWS Glue specific params: Sequence( OneOf("GLUE_AWS_ROLE_ARN"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( OneOf("GLUE_CATALOG_ID"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( OneOf("GLUE_REGION"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # Apache Iceberg REST & Snowflake Open Catalog specific params: Sequence( "REST_CONFIG", Ref("EqualsSegment"), Bracketed( AnySetOf( Sequence( "CATALOG_URI", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "CATALOG_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "PREFIX", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence( "CATALOG_API_TYPE", Ref("EqualsSegment"), OneOf( "PUBLIC", "AWS_API_GATEWAY", "AWS_PRIVATE_API_GATEWAY", "AWS_GLUE", ), ), ) ), ), Sequence( "REST_AUTHENTICATION", Ref("EqualsSegment"), Bracketed( OneOf( # OAuth AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "OAUTH"), Sequence( "OAUTH_TOKEN_URI", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "OAUTH_CLIENT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "OAUTH_CLIENT_SECRET", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "OAUTH_ALLOWED_SCOPES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ), Sequence( Sequence("TYPE", 
Ref("EqualsSegment"), "BEARER"), Sequence( "BEARER_TOKEN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "SIGV4"), Sequence( "SIGV4_IAM_ROLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "SIGV4_SIGNING_REGION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "SIGV4_EXTERNAL_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ) ), ), ), # Next set are Pipe statements # https://docs.snowflake.com/en/sql-reference/sql/create-pipe.html Sequence( Sequence( "AUTO_INGEST", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "ERROR_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "AWS_SNS_TOPIC", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), optional=True, ), optional=True, ), # Next are WAREHOUSE options # https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html Sequence( Sequence("WITH", optional=True), AnyNumberOf( Ref("WarehouseObjectPropertiesSegment"), Ref("CommentEqualsClauseSegment"), Ref("WarehouseObjectParamsSegment"), ), Ref("TagBracketedEqualsSegment", optional=True), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Ref.keyword("AS", optional=True), OneOf( Ref("SelectStatementSegment"), Sequence( Ref("FunctionContentsSegment"), "RETURNS", Ref("DatatypeSegment"), Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), Ref( "CommentEqualsClauseSegment", optional=True, ), optional=True, ), Ref("CopyIntoTableStatementSegment"), optional=True, ), ) class CreateUserSegment(BaseSegment): """A snowflake `CREATE USER` statement. 
https://docs.snowflake.com/en/sql-reference/sql/create-user.html """ type = "create_user_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "USER", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Indent, AnyNumberOf( Sequence( "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "LOGIN_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DISPLAY_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "FIRST_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "MIDDLE_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "LAST_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "EMAIL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "MUST_CHANGE_PASSWORD", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "DISABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "DAYS_TO_EXPIRY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MINS_TO_UNLOCK", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DEFAULT_WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_NAMESPACE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_ROLE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_SECONDARY_ROLES", Ref("EqualsSegment"), Bracketed(Ref("QuotedLiteralSegment")), ), Sequence( "MINS_TO_BYPASS_MFA", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "RSA_PUBLIC_KEY", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "RSA_PUBLIC_KEY_2", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "TYPE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Ref("CommentEqualsClauseSegment"), ), Dedent, ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement, specifically for Snowflake's dialect. 
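e.g. (illustrative only, placeholder names): CREATE OR REPLACE SECURE VIEW my_view COMMENT = 'my secure view' AS SELECT col1 FROM my_table;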
https://docs.snowflake.com/en/sql-reference/sql/create-view.html """ match_grammar = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), AnySetOf( "SECURE", "RECURSIVE", ), Ref("TemporaryGrammar", optional=True), Sequence("MATERIALIZED", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnySetOf( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), optional=True, ), Ref("TagBracketedEqualsSegment", optional=True), Ref("CommentClauseSegment", optional=True), ), ), ), Sequence( Ref.keyword("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Bracketed( Delimited(Ref("ColumnReferenceSegment")), ), ), Ref("TagBracketedEqualsSegment"), Sequence("COPY", "GRANTS"), Ref("CommentEqualsClauseSegment"), ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW` statement, specifically for Snowflake's dialect. https://docs.snowflake.com/en/sql-reference/sql/alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Ref("CommentEqualsClauseSegment"), Sequence( "UNSET", "COMMENT", ), Sequence( OneOf("SET", "UNSET"), "SECURE", ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Delimited( Sequence( "ADD", "ROW", "ACCESS", "POLICY", Ref("FunctionNameSegment"), "ON", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "DROP", "ROW", "ACCESS", "POLICY", Ref("FunctionNameSegment"), ), ), Sequence( OneOf("ALTER", "MODIFY"), OneOf( Delimited( Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( "SET", "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited(Ref("ColumnReferenceSegment")) ), optional=True, ), Ref.keyword("FORCE", optional=True), ), Sequence("UNSET", "MASKING", "POLICY"), Sequence("SET", Ref("TagEqualsSegment")), ), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "TAG", Delimited(Ref("TagReferenceSegment")), ), ), ), ), ), ) class AlterMaterializedViewStatementSegment(BaseSegment): """An `ALTER MATERIALIZED VIEW` statement, specifically for Snowflake's dialect. https://docs.snowflake.com/en/sql-reference/sql/alter-materialized-view.html """ type = "alter_materialized_view_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("CLUSTER", "BY", Delimited(Ref("ExpressionSegment"))), Sequence("DROP", "CLUSTERING", "KEY"), Sequence("SUSPEND", "RECLUSTER"), Sequence("RESUME", "RECLUSTER"), "SUSPEND", "RESUME", Sequence( OneOf("SET", "UNSET"), OneOf( "SECURE", Ref("CommentEqualsClauseSegment"), Ref("TagEqualsSegment"), ), ), ), ) class CreateFileFormatSegment(BaseSegment): """A snowflake `CREATE FILE FORMAT` statement. 
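e.g. (illustrative only, placeholder names): CREATE OR REPLACE FILE FORMAT my_csv_format TYPE = CSV FIELD_DELIMITER = '|' SKIP_HEADER = 1;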
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "create_file_format_segment" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("FILE", "FORMAT"), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), # TYPE = is included in below parameter segments. # It is valid syntax to have TYPE = after other parameters. OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), Sequence( # Use a Sequence and include an optional CommaSegment here. # This allows a preceding comma when above parameters are delimited. Ref("CommaSegment", optional=True), Ref("CommentEqualsClauseSegment"), optional=True, ), ) class AlterFileFormatSegment(BaseSegment): """A snowflake `Alter FILE FORMAT` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-file-format.html """ type = "alter_file_format_segment" match_grammar = Sequence( "ALTER", Sequence("FILE", "FORMAT"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), ), ), Sequence( # Use a Sequence and include an optional CommaSegment here. # This allows a preceding comma when above parameters are delimited. Ref("CommaSegment", optional=True), Ref("CommentEqualsClauseSegment"), optional=True, ), ) class CsvFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for CSV. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "csv_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'CSV'", CodeSegment, type="file_type", ), StringParser( "CSV", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence("FILE_EXTENSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( "SKIP_HEADER", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( OneOf( "DATE_FORMAT", "TIME_FORMAT", "TIMESTAMP_FORMAT", ), Ref("EqualsSegment"), OneOf("AUTO", Ref("QuotedLiteralSegment")), ), Sequence("BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8")), Sequence( OneOf( "RECORD_DELIMITER", "FIELD_DELIMITER", "ESCAPE", "ESCAPE_UNENCLOSED_FIELD", "FIELD_OPTIONALLY_ENCLOSED_BY", ), Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"), optional=True)), ), Sequence( OneOf( "SKIP_BLANK_LINES", "ERROR_ON_COLUMN_COUNT_MISMATCH", "REPLACE_INVALID_CHARACTERS", "VALIDATE_UTF8", "EMPTY_FIELD_AS_NULL", "SKIP_BYTE_ORDER_MARK", "TRIM_SPACE", "PARSE_HEADER", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "ENCODING", Ref("EqualsSegment"), OneOf( "UTF8", Ref("QuotedLiteralSegment"), ), ), ) class JsonFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for JSON. 
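e.g. (illustrative values): TYPE = JSON COMPRESSION = AUTO STRIP_OUTER_ARRAY = TRUE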
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "json_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'JSON'", CodeSegment, type="file_type", ), StringParser( "JSON", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "DATE_FORMAT", "TIME_FORMAT", "TIMESTAMP_FORMAT", ), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "AUTO"), ), Sequence("BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"), optional=True)), ), Sequence("FILE_EXTENSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( OneOf( "TRIM_SPACE", "ENABLE_OCTAL", "ALLOW_DUPLICATE", "STRIP_OUTER_ARRAY", "STRIP_NULL_VALUES", "REPLACE_INVALID_CHARACTERS", "IGNORE_UTF8_ERRORS", "SKIP_BYTE_ORDER_MARK", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ) class AvroFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for AVRO. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "avro_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'AVRO'", CodeSegment, type="file_type", ), StringParser( "AVRO", CodeSegment, type="file_type", ), ), ), Sequence("COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType")), Sequence("TRIM_SPACE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) class OrcFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for ORC. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "orc_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'ORC'", CodeSegment, type="file_type", ), StringParser( "ORC", CodeSegment, type="file_type", ), ), ), Sequence("TRIM_SPACE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) class ParquetFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for PARQUET. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "parquet_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'PARQUET'", CodeSegment, type="file_type", ), StringParser( "PARQUET", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "SNAPPY_COMPRESSION", "BINARY_AS_TEXT", "USE_LOGICAL_TYPE", "TRIM_SPACE", "USE_VECTORIZED_SCANNER", "REPLACE_INVALID_CHARACTERS", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) class XmlFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for XML. 
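e.g. (illustrative values): TYPE = XML COMPRESSION = AUTO PRESERVE_SPACE = FALSE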
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "xml_file_format_type_parameters" match_grammar = OptionallyDelimited( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'XML'", CodeSegment, type="file_type", ), StringParser( "XML", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "IGNORE_UTF8_ERRORS", "PRESERVE_SPACE", "STRIP_OUTER_ELEMENT", "DISABLE_SNOWFLAKE_DATA", "DISABLE_AUTO_CONVERT", "SKIP_BYTE_ORDER_MARK", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ) class AlterPipeSegment(BaseSegment): """A snowflake `Alter PIPE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-pipe.html """ type = "alter_pipe_segment" match_grammar = Sequence( "ALTER", "PIPE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", AnyNumberOf( Sequence( "PIPE_EXECUTION_PAUSED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "ERROR_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Ref("CommentEqualsClauseSegment"), ), ), Sequence( "UNSET", OneOf("PIPE_EXECUTION_PAUSED", "COMMENT"), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "UNSET", Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), Sequence( "REFRESH", Sequence( "PREFIX", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "MODIFIED_AFTER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), Ref("CommaSegment", optional=True), ) class FileFormatSegment(BaseSegment): """A Snowflake FILE_FORMAT Segment. https://docs.snowflake.com/en/sql-reference/sql/create-table.html https://docs.snowflake.com/en/sql-reference/sql/create-external-table.html https://docs.snowflake.com/en/sql-reference/sql/create-stage.html """ type = "file_format_segment" match_grammar = OneOf( OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), Bracketed( Sequence( OneOf( Sequence( "FORMAT_NAME", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), ), OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), ), Ref("FormatTypeOptions", optional=True), ), ), ) class FormatTypeOptions(BaseSegment): """A Snowflake formatTypeOptions. 
https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#format-type-options https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html#format-type-options This part specifically handles the format: `FILE_FORMAT = (FORMAT_NAME = myformatname)`. For the other case, `FILE_FORMAT = (TYPE = mytype)`, the fileFormatOptions are implemented in the type-specific `FormatTypeParameters` segments. """ type = "format_type_options" match_grammar = OneOf( # COPY INTO <location>, open for extension AnySetOf( Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( "RECORD_DELIMITER", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "FIELD_DELIMITER", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "ESCAPE", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "ESCAPE_UNENCLOSED_FIELD", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "DATE_FORMAT", Ref("EqualsSegment"), OneOf("AUTO", Ref("QuotedLiteralSegment")), ), Sequence( "TIME_FORMAT", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "TIMESTAMP_FORMAT", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8") ), Sequence( "FIELD_OPTIONALLY_ENCLOSED_BY", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "EMPTY_FIELD_AS_NULL", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "SNAPPY_COMPRESSION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), # COPY INTO <table>, open for extension AnySetOf(), ) class CreateExternalTableSegment(BaseSegment): """A snowflake `CREATE EXTERNAL TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-external-table.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "EXTERNAL", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns: Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), "AS", OptionallyBracketed( Sequence( Ref("ExpressionSegment"), Ref("InlineConstraintPropertiesSegment", optional=True), Sequence( Ref.keyword("NOT", optional=True), "NULL", optional=True ), ) ), ) ), optional=True, ), # The use of AnySetOf is not strictly correct here, because LOCATION and # FILE_FORMAT are required parameters. They can however be in arbitrary order # with the other parameters. AnySetOf( Sequence("INTEGRATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( "PARTITION", "BY", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ), Sequence( Sequence("WITH", optional=True), "LOCATION", Ref("EqualsSegment"), Ref("StagePath"), ), Sequence( "REFRESH_ON_CREATE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "PATTERN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), Sequence( "AWS_SNS_TOPIC", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "COPY", "GRANTS", ), Sequence( "PARTITION_TYPE", Ref("EqualsSegment"), "USER_SPECIFIED", ), Sequence( Sequence("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), ), Ref("TagBracketedEqualsSegment"), Ref("CommentEqualsClauseSegment"), ), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause.""" match_grammar = OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Ref("ValuesClauseSegment"), Sequence( Ref("StagePath"), Bracketed( Delimited( Sequence( "FILE_FORMAT", Ref("ParameterAssignerSegment"), Ref("FileFormatSegment"), ), Sequence( "PATTERN", Ref("ParameterAssignerSegment"), Ref("QuotedLiteralSegment"), ), ), optional=True, ), ), ) class PartitionBySegment(BaseSegment): """A `PARTITION BY` for `copy_into_location` functions.""" type = "partition_by_segment" match_grammar: Matchable = Sequence( "PARTITION", "BY", Indent, # Brackets are optional in a partition by statement OptionallyBracketed(Delimited(Ref("ExpressionSegment"))), Dedent, ) class CopyIntoLocationStatementSegment(BaseSegment): """A Snowflake `COPY INTO <location>` statement.
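e.g. (illustrative only, placeholder names): COPY INTO @my_stage/result/data_ FROM my_table FILE_FORMAT = (TYPE = CSV) OVERWRITE = TRUE;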
# https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html """ type = "copy_into_location_statement" match_grammar = Sequence( "COPY", "INTO", Ref("StorageLocation"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), Sequence( "FROM", OneOf( Ref("TableReferenceSegment"), Bracketed(Ref("SelectableGrammar")), ), optional=True, ), OneOf( Ref("S3ExternalStageParameters"), Ref("AzureBlobStorageExternalStageParameters"), optional=True, ), Ref("InternalStageParameters", optional=True), AnySetOf( Ref("PartitionBySegment"), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), # We explode the CopyOptionsSegments because the AnySetOf may appear in any # order for these other elements as well. *CopyOptionsSegment._copy_options_matchables, Sequence( "VALIDATION_MODE", Ref("EqualsSegment"), Ref("ValidationModeOptionSegment"), ), Sequence( "HEADER", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), ) class CopyIntoTableStatementSegment(BaseSegment): """A Snowflake `COPY INTO <table>` statement. # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html """ type = "copy_into_table_statement" match_grammar = Sequence( "COPY", "INTO", Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), Sequence( "FROM", OneOf( Ref("StorageLocation"), Bracketed(Ref("SelectStatementSegment")), ), optional=True, ), OneOf( Ref("S3ExternalStageParameters"), Ref("AzureBlobStorageExternalStageParameters"), optional=True, ), Ref("InternalStageParameters", optional=True), AnySetOf( Sequence( "FILES", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), ), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), # We explode the CopyOptionsSegments because the AnySetOf may appear in any # order for these other elements as well. *CopyOptionsSegment._copy_options_matchables, ), Sequence( "VALIDATION_MODE", Ref("EqualsSegment"), Ref("ValidationModeOptionSegment"), optional=True, ), ) class CopyFilesIntoLocationStatementSegment(BaseSegment): """A Snowflake `COPY FILES INTO <location> FROM <location>` statement. # https://docs.snowflake.com/en/sql-reference/sql/copy-files.html """ type = "copy_files_into_location_statement" match_grammar = Sequence( "COPY", "FILES", "INTO", Ref("StorageLocation"), "FROM", Ref("StorageLocation"), AnySetOf( Sequence( "FILES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "PATTERN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "DETAILED_OUTPUT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), optional=True, ), ) class StorageLocation(BaseSegment): """A Snowflake storage location. https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#syntax """ type = "storage_location" match_grammar = OneOf( Ref("StagePath"), Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) class InternalStageParameters(BaseSegment): """Parameters for an internal stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "internal_stage_parameters" type = "stage_parameters" match_grammar = Sequence( Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( "TYPE", Ref("EqualsSegment"), Ref("SnowflakeEncryptionOption"), ), optional=True, ), ) class S3ExternalStageParameters(BaseSegment): """Parameters for an S3 external stage in Snowflake.
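e.g. (illustrative only, placeholder names): STORAGE_INTEGRATION = my_storage_integration, or CREDENTIALS = (AWS_KEY_ID = 'my_key_id' AWS_SECRET_KEY = 'my_secret_key')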
https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "s3_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( OneOf( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "CREDENTIALS", Ref("EqualsSegment"), Bracketed( OneOf( Sequence( "AWS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), "AWS_SECRET_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), Sequence( "AWS_TOKEN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence( "AWS_ROLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ), ), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( OneOf( Sequence( Sequence( "TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption"), optional=True, ), "MASTER_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption")), Sequence( "TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption"), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence("TYPE", Ref("EqualsSegment"), "NONE"), ) ), optional=True, ), ) class GCSExternalStageParameters(BaseSegment): """Parameters for a GCS external stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "gcs_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( Sequence( Ref("GCSEncryptionOption"), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "NONE", ), ) ), optional=True, ), ) class AzureBlobStorageExternalStageParameters(BaseSegment): """Parameters for an Azure Blob Storage external stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "azure_blob_storage_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( OneOf( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("ReferencedVariableNameSegment") ), ), Sequence( "CREDENTIALS", Ref("EqualsSegment"), Bracketed( Sequence("AZURE_SAS_TOKEN"), Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), ), ), ), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( Sequence( Ref("AzureBlobStorageEncryptionOption"), Sequence( "MASTER_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "NONE", ), ) ), optional=True, ), ) class CreateStageSegment(BaseSegment): """A Snowflake CREATE STAGE statement. 
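e.g. (illustrative only, placeholder names): CREATE OR REPLACE STAGE my_int_stage FILE_FORMAT = my_csv_format;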
https://docs.snowflake.com/en/sql-reference/sql/create-stage.html """ type = "create_stage_statement" match_grammar = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), Ref.keyword("TEMPORARY", optional=True), "STAGE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Indent, OneOf( # Internal stages Sequence( Ref("InternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ) ), optional=True, ), ), OneOf( Sequence( "URL", Ref("EqualsSegment"), OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), Ref("ReferencedVariableNameSegment"), ), OneOf( # External S3 stage Sequence( Ref("S3ExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), optional=True, ), ), # External GCS stage Sequence( Ref("GCSExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), # External Azure Blob Storage stage Sequence( Ref( "AzureBlobStorageExternalStageParameters", optional=True ), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), optional=True, ), ), Sequence( OneOf( # External S3 stage Sequence( Ref("S3ExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), optional=True, ), ), # External GCS stage Sequence( Ref("GCSExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), # External Azure Blob Storage stage Sequence( Ref( "AzureBlobStorageExternalStageParameters", optional=True ), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), optional=True, ), "URL", Ref("EqualsSegment"), OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), Ref("ReferencedVariableNameSegment"), ), ), ), 
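                    # NOTE: the two Sequence branches above are deliberately
                    # symmetric. Snowflake accepts the cloud-provider stage
                    # parameters and the DIRECTORY table options either after
                    # the URL clause (first branch) or before it (second
                    # branch), so both orderings are spelled out explicitly.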
optional=True, ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True ), Sequence( "COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Ref("TagBracketedEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), Dedent, ) class AlterStageSegment(BaseSegment): """A Snowflake ALTER STAGE statement. https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ type = "alter_stage_statement" match_grammar = Sequence( "ALTER", "STAGE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", Indent, OneOf( Sequence( OneOf( Ref("InternalStageParameters"), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("S3Path"), optional=True, ), Ref( "S3ExternalStageParameters", optional=True, ), ), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("GCSPath"), optional=True, ), Ref( "GCSExternalStageParameters", optional=True, ), ), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("AzureBlobStoragePath"), optional=True, ), Ref( "AzureBlobStorageExternalStageParameters", optional=True, ), ), optional=True, ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True, ), Sequence( "COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), ), Ref("TagEqualsSegment"), ), Dedent, ), Sequence( "REFRESH", Sequence( "SUBPATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), ) class CreateStreamStatementSegment(BaseSegment): """A Snowflake `CREATE STREAM` statement. https://docs.snowflake.com/en/sql-reference/sql/create-stream.html """ type = "create_stream_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "STREAM", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence("COPY", "GRANTS", optional=True), "ON", OneOf( Sequence( OneOf("TABLE", "VIEW"), Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), Sequence( "APPEND_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "SHOW_INITIAL_ROWS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), Sequence( "EXTERNAL", "TABLE", Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), Sequence( "INSERT_ONLY", Ref("EqualsSegment"), Ref("TrueSegment"), optional=True, ), ), Sequence( "STAGE", Ref("ObjectReferenceSegment"), ), ), Ref("CommentEqualsClauseSegment", optional=True), ) class CreateStreamlitStatementSegment(BaseSegment): """A Snowflake `CREATE STREAMLIT` statement. 
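    A minimal illustrative sketch (object names are placeholders; the stage
    path follows this dialect's unquoted ``StagePath`` form):

    ```
    CREATE STREAMLIT my_app
      ROOT_LOCATION = @my_db.my_schema.my_stage
      MAIN_FILE = 'app.py'
      QUERY_WAREHOUSE = my_warehouse
      TITLE = 'My app';
    ```
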
https://docs.snowflake.com/en/sql-reference/sql/create-streamlit.html """ type = "create_streamlit_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "STREAMLIT", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence( "ROOT_LOCATION", Ref("EqualsSegment"), Ref("StagePath"), ), Sequence( "MAIN_FILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "QUERY_WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "TITLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ) class CreateCortexSearchServiceStatementSegment(BaseSegment): """A Snowflake `CREATE CORTEX SEARCH SERVICE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-cortex-search """ type = "create_cortex_search_service_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("CORTEX", "SEARCH", "SERVICE"), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence( "ON", Ref("ColumnReferenceSegment"), ), Sequence( "ATTRIBUTES", Delimited(Ref("ColumnReferenceSegment")), optional=True, ), Sequence( "WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "TARGET_LAG", Ref("EqualsSegment"), Ref("DynamicTableTargetLagSegment"), ), Sequence( "EMBEDDING_MODEL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), ) class AlterStreamStatementSegment(BaseSegment): """A Snowflake `ALTER STREAM` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-stream.html """ type = "alter_stream_statement" match_grammar = Sequence( "ALTER", "STREAM", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", Sequence( "APPEND_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "INSERT_ONLY", Ref("EqualsSegment"), Ref("TrueSegment"), optional=True, ), Ref("TagEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), "COMMENT", ), ), ), ) class AlterStreamlitStatementSegment(BaseSegment): """A Snowflake `ALTER STREAMLIT` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-streamlit.html """ type = "alter_streamlit_statement" match_grammar = Sequence( "ALTER", "STREAMLIT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", Sequence( "ROOT_LOCATION", Ref("EqualsSegment"), Ref("StagePath"), ), Sequence( "MAIN_FILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "QUERY_WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), ), Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), ), ) class AlterCortexSearchServiceStatementSegment(BaseSegment): """A Snowflake `ALTER CORTEX SEARCH SERVICE` statement. 
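    For example (service and warehouse names are illustrative):

    ```
    ALTER CORTEX SEARCH SERVICE my_service SUSPEND INDEXING;
    ALTER CORTEX SEARCH SERVICE IF EXISTS my_service
      SET TARGET_LAG = '20 minutes' WAREHOUSE = my_warehouse;
    ```
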
    https://docs.snowflake.com/en/sql-reference/sql/alter-cortex-search
    """

    type = "alter_cortex_search_service_statement"

    match_grammar = Sequence(
        "ALTER",
        Sequence("CORTEX", "SEARCH", "SERVICE"),
        Ref("IfExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
        OneOf(
            Sequence(
                OneOf("SUSPEND", "RESUME"),
                OneOf("INDEXING", "SERVING"),
            ),
            Sequence(
                "SET",
                AnySetOf(
                    Sequence(
                        "WAREHOUSE",
                        Ref("EqualsSegment"),
                        OneOf(
                            Ref("ObjectReferenceSegment"),
                            Ref("QuotedLiteralSegment"),
                        ),
                        optional=True,
                    ),
                    Sequence(
                        "TARGET_LAG",
                        Ref("EqualsSegment"),
                        Ref("DynamicTableTargetLagSegment"),
                        optional=True,
                    ),
                    Ref("CommentEqualsClauseSegment", optional=True),
                ),
            ),
            Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")),
        ),
    )


class ShowStatementSegment(BaseSegment):
    """A snowflake `SHOW` statement.

    https://docs.snowflake.com/en/sql-reference/sql/show.html
    """

    _object_types_plural = OneOf(
        "PARAMETERS",
        Sequence("GLOBAL", "ACCOUNTS"),
        "REGIONS",
        Sequence("REPLICATION", "ACCOUNTS"),
        Sequence("REPLICATION", "DATABASES"),
        "PARAMETERS",
        "VARIABLES",
        "TRANSACTIONS",
        "LOCKS",
        "PARAMETERS",
        "FUNCTIONS",
        Sequence("NETWORK", "POLICIES"),
        "SHARES",
        "ROLES",
        "GRANTS",
        "USERS",
        "WAREHOUSES",
        "DATABASES",
        Sequence(
            OneOf("API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True),
            "INTEGRATIONS",
        ),
        "SCHEMAS",
        "OBJECTS",
        "TABLES",
        Sequence("EXTERNAL", "TABLES"),
        "VIEWS",
        Sequence("MATERIALIZED", "VIEWS"),
        Sequence("MASKING", "POLICIES"),
        "COLUMNS",
        Sequence("FILE", "FORMATS"),
        "SEQUENCES",
        "STAGES",
        "PIPES",
        "STREAMS",
        "STREAMLITS",
        "TASKS",
        Sequence("USER", "FUNCTIONS"),
        Sequence("EXTERNAL", "FUNCTIONS"),
        "PROCEDURES",
        Sequence("FUTURE", "GRANTS"),
        Sequence("EXTERNAL", "VOLUMES"),
        Sequence("PASSWORD", "POLICIES"),
        Sequence("CORTEX", "SEARCH", "SERVICES"),
        Sequence("RESOURCE", "MONITORS"),
    )

    _object_scope_types = OneOf(
        "ACCOUNT",
        "SESSION",
        Sequence(
            OneOf(
                "DATABASE",
                "SCHEMA",
                "SHARE",
                "ROLE",
                "TABLE",
                "TASK",
                "USER",
                "WAREHOUSE",
                "VIEW",
            ),
            Ref("ObjectReferenceSegment", optional=True),
        ),
    )

    type = "show_statement"

    match_grammar = Sequence(
        "SHOW",
        OneOf("TERSE", optional=True),
        _object_types_plural,
        OneOf("HISTORY", optional=True),
        Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True),
        Sequence(
            OneOf("ON", "TO", "OF", "IN"),
            OneOf(
                Sequence(_object_scope_types),
                Ref("ObjectReferenceSegment"),
            ),
            optional=True,
        ),
        Sequence("STARTS", "WITH", Ref("QuotedLiteralSegment"), optional=True),
        Sequence("WITH", "PRIMARY", Ref("ObjectReferenceSegment"), optional=True),
        Sequence(
            Ref("LimitClauseSegment"),
            Sequence("FROM", Ref("QuotedLiteralSegment"), optional=True),
            optional=True,
        ),
    )


class AlterAccountStatementSegment(BaseSegment):
    """`ALTER ACCOUNT` statement.

    ALTER ACCOUNT SET { [ accountParams ] [ objectParams ] [ sessionParams ] }
    ALTER ACCOUNT UNSET <param_name> [ , ... ]
    ALTER ACCOUNT SET RESOURCE_MONITOR = <monitor_name>
    ALTER ACCOUNT SET { PASSWORD | SESSION } POLICY <policy_name>
    ALTER ACCOUNT UNSET { PASSWORD | SESSION } POLICY
    ALTER ACCOUNT SET TAG <tag_name> = '<tag_value>' [, <tag_name> = '<tag_value>' ...]
    ALTER ACCOUNT UNSET TAG <tag_name> [ , <tag_name> ...
] https://docs.snowflake.com/en/sql-reference/sql/alter-account All the account parameters can be found here https://docs.snowflake.com/en/sql-reference/parameters """ type = "alter_account_statement" match_grammar = Sequence( "ALTER", "ACCOUNT", OneOf( Sequence( "SET", "RESOURCE_MONITOR", Ref("EqualsSegment"), Ref("NakedIdentifierSegment"), ), Sequence( "SET", OneOf("PASSWORD", "SESSION"), "POLICY", Ref("TableReferenceSegment"), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "SET", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("NakedIdentifierSegment"), Ref("TableReferenceSegment"), ), ), ), ), Sequence( "UNSET", OneOf("PASSWORD", "SESSION"), "POLICY", ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), Delimited(Ref("NakedIdentifierSegment")), ), ), ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-user.html All user parameters can be found here https://docs.snowflake.com/en/sql-reference/parameters.html """ type = "alter_user_statement" match_grammar = Sequence( "ALTER", "USER", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence("RESET", "PASSWORD"), Sequence("ABORT", "ALL", "QUERIES"), Sequence( "ADD", "DELEGATED", "AUTHORIZATION", "OF", "ROLE", Ref("ObjectReferenceSegment"), "TO", "SECURITY", "INTEGRATION", Ref("ObjectReferenceSegment"), ), Sequence( "REMOVE", "DELEGATED", OneOf( Sequence( "AUTHORIZATION", "OF", "ROLE", Ref("ObjectReferenceSegment") ), "AUTHORIZATIONS", ), "FROM", "SECURITY", "INTEGRATION", Ref("ObjectReferenceSegment"), ), # Snowflake supports the SET command with space delimited parameters, but # it also supports using commas which is better supported by `Delimited`, so # we will just use that. Sequence( "SET", OptionallyDelimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf(Ref("LiteralGrammar"), Ref("ObjectReferenceSegment")), ), ), ), Sequence("UNSET", Delimited(Ref("ParameterNameSegment"))), ), ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. Redefined because it's much simpler than postgres. https://docs.snowflake.com/en/sql-reference/sql/create-role.html """ match_grammar = Sequence( "CREATE", Ref("AlterOrReplaceGrammar", optional=True), "ROLE", Ref("IfNotExistsGrammar", optional=True), Ref("RoleReferenceSegment"), Ref( "CommentEqualsClauseSegment", optional=True, ), ) class CreateDatabaseRoleStatementSegment(BaseSegment): """A `CREATE DATABASE ROLE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-database-role """ type = "create_database_role_statement" match_grammar = Sequence( "CREATE", Ref( "AlterOrReplaceGrammar", optional=True, ), "DATABASE", "ROLE", Ref( "IfNotExistsGrammar", optional=True, ), Ref("DatabaseRoleReferenceSegment"), Ref( "CommentEqualsClauseSegment", optional=True, ), ) class ResourceMonitorOptionsSegment(BaseSegment): """A `RESOURCE MONITOR` options statement. 
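    These options follow ``WITH`` in ``CREATE RESOURCE MONITOR`` and ``SET``
    in ``ALTER RESOURCE MONITOR``, for example (names are illustrative):

    ```
    CREATE RESOURCE MONITOR my_monitor WITH
      CREDIT_QUOTA = 100
      FREQUENCY = MONTHLY
      START_TIMESTAMP = IMMEDIATELY
      NOTIFY_USERS = (alice, bob)
      TRIGGERS ON 75 PERCENT DO NOTIFY
               ON 100 PERCENT DO SUSPEND;
    ```
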
https://docs.snowflake.com/en/sql-reference/sql/create-resource-monitor https://docs.snowflake.com/en/sql-reference/sql/alter-resource-monitor """ type = "resource_monitor_options" match_grammar = AnySetOf( Sequence( "CREDIT_QUOTA", Ref("EqualsSegment"), Ref("IntegerSegment"), optional=True, ), Sequence( "FREQUENCY", Ref("EqualsSegment"), OneOf("MONTHLY", "DAILY", "WEEKLY", "YEARLY", "NEVER"), optional=True, ), Sequence( "START_TIMESTAMP", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "IMMEDIATELY"), optional=True, ), Sequence( "END_TIMESTAMP", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "NOTIFY_USERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ObjectReferenceSegment"), ), ), optional=True, ), Sequence( "TRIGGERS", AnyNumberOf( Sequence( "ON", Ref("IntegerSegment"), "PERCENT", "DO", OneOf("SUSPEND", "SUSPEND_IMMEDIATE", "NOTIFY"), ), ), optional=True, ), ) class CreateResourceMonitorStatementSegment(BaseSegment): """A `CREATE RESOURCE MONITOR` statement. https://docs.snowflake.com/en/sql-reference/sql/create-resource-monitor """ type = "create_resource_monitor_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("IfNotExistsGrammar", optional=True), Sequence("RESOURCE", "MONITOR"), Ref("ObjectReferenceSegment"), "WITH", Ref("ResourceMonitorOptionsSegment"), ) class AlterResourceMonitorStatementSegment(BaseSegment): """An `ALTER RESOURCE MONITOR` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-resource-monitor """ type = "alter_resource_monitor_statement" match_grammar = Sequence( "ALTER", Sequence("RESOURCE", "MONITOR"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "SET", Ref("ResourceMonitorOptionsSegment"), ) class DropResourceMonitorStatementSegment(BaseSegment): """A `DROP RESOURCE MONITOR` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-resource-monitor """ type = "drop_resource_monitor_statement" match_grammar = Sequence( "DROP", Sequence("RESOURCE", "MONITOR"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. EXPLAIN [ USING { TABULAR | JSON | TEXT } ] https://docs.snowflake.com/en/sql-reference/sql/explain.html """ match_grammar = Sequence( "EXPLAIN", Sequence( "USING", OneOf("TABULAR", "JSON", "TEXT"), optional=True, ), ansi.ExplainStatementSegment.explainable_stmt, ) class AlterSessionStatementSegment(BaseSegment): """Snowflake's ALTER SESSION statement. ``` ALTER SESSION SET = ; ALTER SESSION UNSET , [ , , ... ]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_statement" match_grammar = Sequence( "ALTER", "SESSION", OneOf( Ref("AlterSessionSetClauseSegment"), Ref("AlterSessionUnsetClauseSegment"), ), ) class AlterSessionSetClauseSegment(BaseSegment): """Snowflake's ALTER SESSION SET clause. ``` [ALTER SESSION] SET = ; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_set_statement" match_grammar = Sequence( "SET", Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ) class AlterSessionUnsetClauseSegment(BaseSegment): """Snowflake's ALTER SESSION UNSET clause. ``` [ALTER SESSION] UNSET , [ , , ... 
]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_unset_clause" match_grammar = Sequence( "UNSET", Delimited(Ref("ParameterNameSegment")), ) class AlterTaskStatementSegment(BaseSegment): """Snowflake's ALTER TASK statement. ``` ALTER TASK [IF EXISTS] RESUME; ALTER TASK [IF EXISTS] SUSPEND; ALTER TASK [IF EXISTS] REMOVE AFTER ; ALTER TASK [IF EXISTS] ADD AFTER ; ALTER TASK [IF EXISTS] SET [WAREHOUSE = ] [SCHEDULE = ] [ALLOW_OVERLAPPING_EXECUTION = TRUE|FALSE]; ALTER TASK [IF EXISTS] SET = [ , = , ...]; ALTER TASK [IF EXISTS] UNSET [ , , ... ]; ALTER TASK [IF EXISTS] MODIFY AS ; ALTER TASK [IF EXISTS] MODIFY WHEN ; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_statement" match_grammar = Sequence( "ALTER", "TASK", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( "RESUME", "SUSPEND", Sequence("REMOVE", "AFTER", Delimited(Ref("ObjectReferenceSegment"))), Sequence("ADD", "AFTER", Delimited(Ref("ObjectReferenceSegment"))), Ref("AlterTaskSpecialSetClauseSegment"), Ref("AlterTaskSetClauseSegment"), Ref("AlterTaskUnsetClauseSegment"), Sequence( "MODIFY", "AS", ansi.ExplainStatementSegment.explainable_stmt, ), Sequence("MODIFY", "WHEN", Ref("BooleanLiteralGrammar")), ), ) class AlterTaskSpecialSetClauseSegment(BaseSegment): """Snowflake's ALTER TASK special SET clause. ``` [ALTER TASK ] SET [WAREHOUSE = ] [SCHEDULE = ] [ALLOW_OVERLAPPING_EXECUTION = TRUE|FALSE]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_special_set_clause" match_grammar = Sequence( "SET", AnySetOf( Sequence( "WAREHOUSE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "SCHEDULE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "ALLOW_OVERLAPPING_EXECUTION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), min_times=1, ), ) class AlterTaskSetClauseSegment(BaseSegment): """Snowflake's ALTER TASK SET clause. ``` [ALTER TASK ] SET = [ , = , ...]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_set_clause" match_grammar = Sequence( "SET", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), ) class AlterTaskUnsetClauseSegment(BaseSegment): """Snowflake's ALTER TASK UNSET clause. ``` [ALTER TASK ] UNSET [ , , ... ]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_unset_clause" match_grammar = Sequence( "UNSET", Delimited(Ref("ParameterNameSegment")), ) class ExecuteImmediateClauseSegment(BaseSegment): """Snowflake's EXECUTE IMMEDIATE clause. ``` EXECUTE IMMEDIATE '' [ USING ( [ , ... ] ) ] EXECUTE IMMEDIATE [ USING ( [ , ... ] ) ] EXECUTE IMMEDIATE $ [ USING ( [ , ... 
] ) ] EXECUTE IMMEDIATE FROM { absoluteFilePath | relativeFilePath } ``` https://docs.snowflake.com/en/sql-reference/sql/execute-immediate https://docs.snowflake.com/en/sql-reference/sql/execute-immediate-from """ type = "execute_immediate_clause" match_grammar = Sequence( "EXECUTE", "IMMEDIATE", Ref.keyword("FROM", optional=True), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), Ref("StorageLocation"), Sequence( Ref("ColonSegment"), Ref("LocalVariableNameSegment"), ), ), Sequence( "USING", Bracketed(Delimited(Ref("LocalVariableNameSegment"))), optional=True, ), ) class ExecuteTaskClauseSegment(BaseSegment): """Snowflake's EXECUTE TASK clause. ``` EXECUTE TASK ``` https://docs.snowflake.com/en/sql-reference/sql/execute-task """ type = "execute_task_clause" match_grammar = Sequence( "EXECUTE", "TASK", Ref("ObjectReferenceSegment"), ) ############################ # MERGE ############################ class MergeUpdateClauseSegment(ansi.MergeUpdateClauseSegment): """`UPDATE` clause within the `MERGE` statement.""" match_grammar = Sequence( "UPDATE", Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class MergeDeleteClauseSegment(ansi.MergeDeleteClauseSegment): """`DELETE` clause within the `MERGE` statement.""" match_grammar = Sequence( "DELETE", Ref("WhereClauseSegment", optional=True), ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement.""" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://docs.snowflake.com/en/sql-reference/sql/delete.html """ type = "delete_statement" match_grammar = Sequence( "DELETE", "FROM", Ref("TableReferenceSegment"), Ref("AliasExpressionSegment", optional=True), Sequence( "USING", Indent, Delimited( Sequence( Ref("TableExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, optional=True, ), Ref("WhereClauseSegment", optional=True), ) class DescribeStatementSegment(BaseSegment): """`DESCRIBE` statement grammar. 
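    For example (object names are illustrative):

    ```
    DESC TABLE my_table TYPE = COLUMNS;
    DESCRIBE FUNCTION my_function(VARCHAR, NUMBER);
    DESCRIBE CORTEX SEARCH SERVICE my_service;
    ```
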
https://docs.snowflake.com/en/sql-reference/sql/desc.html """ type = "describe_statement" match_grammar = Sequence( OneOf("DESCRIBE", "DESC"), OneOf( # https://docs.snowflake.com/en/sql-reference/sql/desc-result.html Sequence( "RESULT", OneOf( Ref("QuotedLiteralSegment"), Sequence("LAST_QUERY_ID", Bracketed()), ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-network-policy.html Sequence( "NETWORK", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-share.html Sequence( "SHARE", Ref("ObjectReferenceSegment"), Sequence( Ref("DotSegment"), Ref("ObjectReferenceSegment"), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-user.html Sequence( "USER", Ref("ObjectReferenceSegment"), ), Sequence( "WAREHOUSE", Ref("ObjectReferenceSegment"), ), Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-integration.html Sequence( OneOf("API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True), "INTEGRATION", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-session-policy.html Sequence( "SESSION", "POLICY", Ref("ObjectReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-table.html Sequence( "TABLE", Ref("TableReferenceSegment"), Sequence( "TYPE", Ref("EqualsSegment"), OneOf("COLUMNS", "STAGE"), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-external-table.html Sequence( "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Sequence( "TYPE", Ref("EqualsSegment"), OneOf("COLUMNS", "STAGE"), optional=True, ), ), Sequence( "EXTERNAL", "VOLUME", Ref("ExternalVolumeReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-view.html Sequence( "VIEW", Ref("TableReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-materialized-view.html Sequence( "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-sequence.html Sequence( "SEQUENCE", Ref("SequenceReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-masking-policy.html Sequence( "MASKING", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-row-access-policy.html Sequence( "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-file-format.html Sequence( "FILE", "FORMAT", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-stage.html Sequence( "STAGE", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-pipe.html Sequence( "PIPE", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-stream.html Sequence( "STREAM", Ref("ObjectReferenceSegment"), ), Sequence( "STREAMLIT", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-task.html Sequence( "TASK", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-function.html Sequence( "FUNCTION", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-procedure.html Sequence( "PROCEDURE", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), ), # 
https://docs.snowflake.com/en/sql-reference/sql/desc-password-policy Sequence( "PASSWORD", "POLICY", Ref("PasswordPolicyReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-cortex-search Sequence( "CORTEX", "SEARCH", "SERVICE", Ref("ObjectReferenceSegment"), ), ), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """`BEGIN`, `START TRANSACTION`, `COMMIT`, AND `ROLLBACK` statement grammar. Overwrites ANSI to match correct Snowflake grammar. https://docs.snowflake.com/en/sql-reference/sql/begin.html https://docs.snowflake.com/en/sql-reference/sql/commit.html https://docs.snowflake.com/en/sql-reference/sql/rollback.html NOTE: "END" is not currently a supported keyword here. """ match_grammar = OneOf( Sequence( "BEGIN", OneOf("WORK", "TRANSACTION", optional=True), Sequence("NAME", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( "START", "TRANSACTION", Sequence("NAME", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( "COMMIT", Sequence("WORK", optional=True), ), "ROLLBACK", ) class TruncateStatementSegment(ansi.TruncateStatementSegment): """`TRUNCATE TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/truncate-table.html """ match_grammar = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class UnsetStatementSegment(BaseSegment): """An `UNSET` statement. https://docs.snowflake.com/en/sql-reference/sql/unset.html """ type = "unset_statement" match_grammar = Sequence( "UNSET", OneOf( Ref("LocalVariableNameSegment"), Bracketed( Delimited( Ref("LocalVariableNameSegment"), ), ), ), ) class UndropStatementSegment(BaseSegment): """`UNDROP` statement. DATABASE: https://docs.snowflake.com/en/sql-reference/sql/undrop-database.html SCHEMA: https://docs.snowflake.com/en/sql-reference/sql/undrop-schema.html TABLE: https://docs.snowflake.com/en/sql-reference/sql/undrop-table.html """ type = "undrop_statement" match_grammar = Sequence( "UNDROP", OneOf( Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "EXTERNAL", "VOLUME", Ref("ExternalVolumeReferenceSegment"), ), ), ) class CommentStatementSegment(BaseSegment): """`COMMENT` statement grammar. https://docs.snowflake.com/en/sql-reference/sql/comment.html N.B. this applies to all objects, so there may be some I've missed here so add any others to the OneOf grammar below. """ type = "comment_statement" match_grammar = Sequence( "COMMENT", Ref("IfExistsGrammar", optional=True), "ON", OneOf( "COLUMN", "TABLE", "VIEW", "SCHEMA", "DATABASE", "WAREHOUSE", "USER", "STAGE", "FUNCTION", "PROCEDURE", "SEQUENCE", "SHARE", "PIPE", "STREAM", "STREAMLIT", "TASK", Sequence( "NETWORK", "POLICY", ), Sequence( OneOf( "API", "NOTIFICATION", "SECURITY", "STORAGE", ), "INTEGRATION", ), Sequence( "SESSION", "POLICY", ), Sequence( "EXTERNAL", "TABLE", ), Sequence( "MATERIALIZED", "VIEW", ), Sequence( "MASKING", "POLICY", ), Sequence( "ROW", "ACCESS", "POLICY", ), Sequence( "FILE", "FORMAT", ), ), Ref("ObjectReferenceSegment"), "IS", Ref("QuotedLiteralSegment"), ) class UseStatementSegment(ansi.UseStatementSegment): """A `USE` statement. 
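    For example (names are illustrative):

    ```
    USE ROLE my_role;
    USE WAREHOUSE my_warehouse;
    USE my_database;    -- the DATABASE keyword is optional
    USE SCHEMA my_db.my_schema;
    USE SECONDARY ROLES ALL;
    ```
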
https://docs.snowflake.com/en/sql-reference/sql/use.html """ match_grammar = Sequence( "USE", OneOf( Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("WAREHOUSE", Ref("ObjectReferenceSegment")), Sequence( Ref.keyword("DATABASE", optional=True), Ref("DatabaseReferenceSegment"), ), Sequence( Ref.keyword("SCHEMA", optional=True), Ref("SchemaReferenceSegment"), ), Sequence( "SECONDARY", "ROLES", OneOf( "ALL", "NONE", ), ), ), ) class CallStatementSegment(BaseSegment): """`CALL` statement. https://docs.snowflake.com/en/sql-reference/sql/call.html """ type = "call_statement" match_grammar = Sequence( "CALL", Sequence( Ref("FunctionNameSegment"), Ref("FunctionContentsSegment"), ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A `LIMIT` clause. https://docs.snowflake.com/en/sql-reference/constructs/limit.html """ match_grammar = OneOf( Sequence( "LIMIT", Indent, Ref("LimitLiteralGrammar"), Dedent, Sequence( "OFFSET", Indent, Ref("LimitLiteralGrammar"), Dedent, optional=True, ), ), Sequence( Sequence( "OFFSET", Indent, Ref("LimitLiteralGrammar"), OneOf( "ROW", "ROWS", optional=True, ), Dedent, optional=True, ), "FETCH", Indent, OneOf( "FIRST", "NEXT", optional=True, ), Ref("LimitLiteralGrammar"), OneOf( "ROW", "ROWS", optional=True, ), Ref.keyword("ONLY", optional=True), Dedent, ), ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( terminators=[Ref.keyword("FETCH"), Ref.keyword("OFFSET")], ) class OrderByClauseSegment(ansi.OrderByClauseSegment): """An `ORDER BY` clause. https://docs.snowflake.com/en/sql-reference/constructs/order-by.html """ match_grammar = Sequence( "ORDER", "BY", Indent, Delimited( Sequence( OneOf( Ref("BooleanLiteralGrammar"), Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=["LIMIT", "FETCH", "OFFSET", Ref("FrameClauseUnitGrammar")], ), Dedent, ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. https://docs.snowflake.com/en/sql-reference/functions-analytic.html#window-frame-syntax-and-usage """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Ref("ReferencedVariableNameSegment"), Sequence("INTERVAL", Ref("QuotedLiteralSegment")), "UNBOUNDED", ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class DropProcedureStatementSegment(BaseSegment): """A snowflake `DROP PROCEDURE ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-procedure.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), ) class DropExternalTableStatementSegment(BaseSegment): """A snowflake `DROP EXTERNAL TABLE ...` statement. 
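    For example (the table name is illustrative):

    ```
    DROP EXTERNAL TABLE IF EXISTS my_external_table CASCADE;
    ```
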
https://docs.snowflake.com/en/sql-reference/sql/drop-external-table.html """ type = "drop_external_table_statement" match_grammar = Sequence( "DROP", "EXTERNAL", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement.""" type = "drop_function_statement" match_grammar = Sequence( "DROP", Ref.keyword("EXTERNAL", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), ) class DropMaterializedViewStatementSegment(BaseSegment): """A snowflake `DROP MATERIALIZED VIEW ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-materialized-view.html """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class DropObjectStatementSegment(BaseSegment): """A snowflake `DROP ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop.html """ type = "drop_object_statement" match_grammar = Sequence( "DROP", OneOf( Sequence( OneOf( "CONNECTION", Sequence("CORTEX", "SEARCH", "SERVICE"), Sequence("FILE", "FORMAT"), Sequence( OneOf( "API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True ), "INTEGRATION", ), "PIPE", Sequence("ROW", "ACCESS", "POLICY"), "STAGE", "STREAM", "STREAMLIT", "TAG", "TASK", ), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( OneOf(Sequence("RESOURCE", "MONITOR"), "SHARE"), Ref("ObjectReferenceSegment"), ), Sequence( OneOf( Sequence("MANAGED", "ACCOUNT"), Sequence("MASKING", "POLICY"), ), Ref("SingleIdentifierGrammar"), ), Sequence( OneOf( Sequence("NETWORK", "POLICY"), ), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Sequence( OneOf("WAREHOUSE", Sequence("SESSION", "POLICY")), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Sequence( "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), ), ) class ListStatementSegment(BaseSegment): """A snowflake `LIST @ ...` statement. https://docs.snowflake.com/en/sql-reference/sql/list.html """ type = "list_statement" match_grammar = Sequence( OneOf("LIST", "LS"), Ref("StagePath"), Sequence( "PATTERN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True ), ) class GetStatementSegment(BaseSegment): """A snowflake `GET @ ...` statement. https://docs.snowflake.com/en/sql-reference/sql/get.html """ type = "get_statement" match_grammar = Sequence( "GET", Ref("StagePath"), OneOf( Ref("UnquotedFilePath"), Ref("QuotedLiteralSegment"), ), AnySetOf( Sequence( "PARALLEL", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment") ), ), ), ) class PutStatementSegment(BaseSegment): """A snowflake `PUT ...` statement. 
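    For example (the local file path and stage path are illustrative):

    ```
    PUT file:///tmp/data/my_data.csv @my_stage/data/
      PARALLEL = 4
      AUTO_COMPRESS = TRUE
      OVERWRITE = FALSE;
    ```
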
https://docs.snowflake.com/en/sql-reference/sql/put.html """ type = "put_statement" match_grammar = Sequence( "PUT", OneOf( Ref("UnquotedFilePath"), Ref("QuotedLiteralSegment"), ), Ref("StagePath"), AnySetOf( Sequence( "PARALLEL", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( "AUTO_COMPRESS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "SOURCE_COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType") ), Sequence( "OVERWRITE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), ) class RemoveStatementSegment(BaseSegment): """A snowflake `REMOVE @ ...` statement. https://docs.snowflake.com/en/sql-reference/sql/remove.html """ type = "remove_statement" match_grammar = Sequence( OneOf( "REMOVE", "RM", ), Ref("StagePath"), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment")), optional=True, ), ) class SetOperatorSegment(ansi.SetOperatorSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence( "UNION", OneOf("DISTINCT", "ALL", optional=True), Sequence( "BY", "NAME", optional=True, ), ), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), "MINUS", ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Ref("ArrayTypeSchemaSegment", optional=True), ) class ArrayTypeSchemaSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type_schema" match_grammar = Bracketed( Ref("DatatypeSegment"), Sequence("NOT", "NULL", optional=True), ) class ShorthandCastSegment(BaseSegment): """A casting operation using '::'.""" type = "cast_expression" match_grammar: Matchable = Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf( Sequence( Ref("CastOperatorSegment"), Ref("DatatypeSegment"), OneOf( Ref("TimeZoneGrammar"), AnyNumberOf( Ref("ArrayAccessorSegment"), ), AnyNumberOf( Ref("SemiStructuredAccessorSegment"), ), optional=True, ), ), min_times=1, ), ) class AlterDatabaseSegment(BaseSegment): """An `ALTER DATABASE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-database """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence("SWAP", "WITH", Ref("ObjectReferenceSegment")), Sequence( "SET", OneOf( Ref("TagEqualsSegment"), Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), ), ), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "UNSET", Delimited( AnySetOf( "DATA_RETENTION_TIME_IN_DAYS", "MAX_DATA_EXTENSION_TIME_IN_DAYS", "DEFAULT_DDL_COLLATION", "COMMENT", ), ), ), ), ) class AlterMaskingPolicySegment(BaseSegment): """An `ALTER MASKING POLICY` statement. 
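    For example (policy and column names are illustrative):

    ```
    ALTER MASKING POLICY my_policy SET BODY ->
      CASE WHEN CURRENT_ROLE() IN ('ANALYST') THEN val ELSE '*****' END;
    ALTER MASKING POLICY my_policy RENAME TO my_new_policy;
    ```
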
https://docs.snowflake.com/en/sql-reference/sql/alter-masking-policy """ type = "alter_masking_policy" match_grammar = Sequence( "ALTER", "MASKING", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", "BODY", Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "SET", "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence("UNSET", "COMMENT"), ), ) class ForInLoopSegment(BaseSegment): """FOR...IN...DO...END FOR statement. https://docs.snowflake.com/en/developer-guide/snowflake-scripting/loops#for-loop """ type = "for_in_statement" match_grammar = Sequence( Sequence( Sequence( "FOR", Ref("LocalVariableNameSegment"), "IN", Ref("LocalVariableNameSegment"), "DO", Indent, ), Delimited( Ref("StatementSegment"), delimiter=Ref("DelimiterGrammar"), ), parse_mode=ParseMode.GREEDY_ONCE_STARTED, reset_terminators=True, terminators=[Sequence(Ref("DelimiterGrammar"), "END", "FOR")], ), # There must be a trailing semicolon Ref("DelimiterGrammar"), Dedent, "END", "FOR", ) class BindVariableSegment(BaseSegment): """A :VARIABLE_NAME expression.""" type = "bind_variable" match_grammar = Sequence( Ref("ColonSegment"), Ref("LocalVariableNameSegment"), ) class ScriptingDeclareStatementSegment(BaseSegment): """A snowflake `Declare` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/declare https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables """ type = "scripting_declare_statement" match_grammar = Sequence( "DECLARE", Indent, Sequence( # Avoid BEGIN as a variable from the subsequent scripting block Ref("LocalVariableNameSegment", exclude=Ref.keyword("BEGIN")), OneOf( # Variable assignment OneOf( Sequence( Ref("DatatypeSegment"), OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), Sequence( OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), ), # Cursor assignment Sequence( "CURSOR", "FOR", OneOf(Ref("LocalVariableNameSegment"), Ref("SelectableGrammar")), ), # Resultset assignment Sequence( "RESULTSET", Sequence( OneOf( "DEFAULT", Ref("WalrusOperatorSegment"), ), Sequence("ASYNC", optional=True), Bracketed(Ref("SelectClauseSegment"), optional=True), optional=True, ), ), # Exception assignment Sequence( "EXCEPTION", Bracketed( Delimited( Ref("ExceptionCodeSegment"), Ref("QuotedLiteralSegment") ) ), ), ), ), AnyNumberOf( Sequence( Ref("DelimiterGrammar"), # Avoid BEGIN as a variable from the subsequent scripting block Ref("LocalVariableNameSegment", exclude=Ref.keyword("BEGIN")), OneOf( # Variable assignment OneOf( Sequence( Ref("DatatypeSegment"), OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), Sequence( OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), ), # Cursor assignment Sequence( "CURSOR", "FOR", OneOf( Ref("LocalVariableNameSegment"), Ref("SelectableGrammar") ), ), # Resultset assignment Sequence( "RESULTSET", Sequence( OneOf( "DEFAULT", Ref("WalrusOperatorSegment"), ), Sequence("ASYNC", optional=True), Bracketed(Ref("SelectClauseSegment"), optional=True), optional=True, ), ), # Exception assignment Sequence( "EXCEPTION", Bracketed( Delimited( Ref("ExceptionCodeSegment"), Ref("QuotedLiteralSegment") ) ), ), ), ), ), Dedent, Ref("ScriptingBlockStatementSegment", optional=True), ) class 
LambdaExpressionSegment(BaseSegment): """A lambda expression. https://docs.snowflake.com/en/user-guide/querying-semistructured#lambda-expressions """ type = "lambda_function" match_grammar = Sequence( OneOf( Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment", optional=True), ), Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment", optional=True), ) ) ), ), Ref("LambdaArrowSegment"), Ref("ExpressionSegment"), ) class PasswordPolicyReferenceSegment(ansi.ObjectReferenceSegment): """Password Policy Reference.""" type = "password_policy_reference" class PasswordPolicyOptionsSegment(BaseSegment): """Password Policy Options.""" type = "password_policy_options" match_grammar = AnySetOf( Sequence( "PASSWORD_MIN_LENGTH", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "PASSWORD_MAX_LENGTH", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "PASSWORD_MIN_UPPER_CASE_CHARS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "PASSWORD_MIN_LOWER_CASE_CHARS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "PASSWORD_MIN_NUMERIC_CHARS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "PASSWORD_MIN_SPECIAL_CHARS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "PASSWORD_MIN_AGE_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "PASSWORD_MAX_AGE_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "PASSWORD_MAX_RETRIES", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "PASSWORD_LOCKOUT_TIME_MINS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "PASSWORD_HISTORY", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Ref("CommentEqualsClauseSegment"), ) class CreatePasswordPolicyStatementSegment(BaseSegment): """Create Password Policy Statement. As per https://docs.snowflake.com/en/sql-reference/sql/create-packages-policy """ type = "create_password_policy_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "PASSWORD", "POLICY", Ref("IfNotExistsGrammar", optional=True), Ref("PasswordPolicyReferenceSegment"), Ref("PasswordPolicyOptionsSegment", optional=True), ) class AlterPasswordPolicyStatementSegment(BaseSegment): """Alter Password Policy Statement. As per https://docs.snowflake.com/en/sql-reference/sql/alter-password-policy """ type = "alter_password_policy_statement" match_grammar = Sequence( "ALTER", "PASSWORD", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("PasswordPolicyReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("PasswordPolicyReferenceSegment"), ), Sequence( "SET", Ref("PasswordPolicyOptionsSegment"), ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "UNSET", AnySetOf( "PASSWORD_MIN_LENGTH", "PASSWORD_MAX_LENGTH", "PASSWORD_MIN_UPPER_CASE_CHARS", "PASSWORD_MIN_LOWER_CASE_CHARS", "PASSWORD_MIN_NUMERIC_CHARS", "PASSWORD_MIN_SPECIAL_CHARS", "PASSWORD_MIN_AGE_DAYS", "PASSWORD_MAX_AGE_DAYS", "PASSWORD_MAX_RETRIES", "PASSWORD_LOCKOUT_TIME_MINS", "PASSWORD_HISTORY", "COMMENT", ), ), ), ) class DropPasswordPolicyStatementSegment(BaseSegment): """Drop Password Policy Statement. 
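    For example (the policy name is illustrative):

    ```
    DROP PASSWORD POLICY IF EXISTS my_password_policy;
    ```
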
As per https://docs.snowflake.com/en/sql-reference/sql/drop-password-policy """ type = "drop_password_policy_statement" match_grammar = Sequence( "DROP", "PASSWORD", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("PasswordPolicyReferenceSegment"), ) class CreateRowAccessPolicyStatementSegment(BaseSegment): """Create Row Access Policy. As per https://docs.snowflake.com/en/sql-reference/sql/create-row-access-policy """ type = "create_row_access_policy_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "ROW", "ACCESS", "POLICY", Ref("IfNotExistsGrammar", optional=True), OneOf(Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment")), "AS", Ref("FunctionParameterListGrammar"), "RETURNS", "BOOLEAN", Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), Ref( "CommentEqualsClauseSegment", optional=True, ), ) class AlterRowAccessPolicyStatmentSegment(BaseSegment): """Alter Row Access Policy Statement. As per https://docs.snowflake.com/en/sql-reference/sql/alter-row-access-policy """ type = "alter_row_access_policy_statement" match_grammar = Sequence( "ALTER", "ROW", "ACCESS", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", "BODY", Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "SET", "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence("UNSET", "COMMENT"), ), ) class AlterTagStatementSegment(BaseSegment): """A Snowflake Alter Tag Statement. As per https://docs.snowflake.com/en/sql-reference/sql/alter-tag """ type = "alter_tag_statement" match_grammar = Sequence( "ALTER", "TAG", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( OneOf( "SET", "UNSET", ), Delimited( Sequence( "MASKING", "POLICY", Ref("ParameterNameSegment"), ), ), ), Sequence( "SET", Ref("CommentEqualsClauseSegment"), ), Sequence("UNSET", "COMMENT"), Sequence( OneOf( "ADD", "DROP", ), "ALLOWED_VALUES", Delimited( Ref("QuotedLiteralSegment"), ), ), Sequence("UNSET", "ALLOWED_VALUES"), ), ) class ExceptionBlockStatementSegment(BaseSegment): """A snowflake `BEGIN ... END` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/begin """ type = "exception_block_statement" match_grammar = Sequence( Sequence( "EXCEPTION", Indent, OneOf( Sequence( "WHEN", Ref("ObjectReferenceSegment"), AnyNumberOf( Sequence( "OR", Ref("ObjectReferenceSegment"), ), ), "THEN", ), Sequence( "WHEN", "OTHER", "THEN", ), ), Ref("StatementSegment"), ), AnyNumberOf( Sequence( Ref("DelimiterGrammar"), OneOf( Sequence( "WHEN", Ref("ObjectReferenceSegment"), AnyNumberOf( Sequence( "OR", Ref("ObjectReferenceSegment"), ), ), "THEN", ), Sequence( "WHEN", "OTHER", "THEN", ), ), Ref("StatementSegment"), ), ), ) class DropIcebergTableStatementSegment(BaseSegment): """`DROP ICEBERG TABLE` statement. 
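    For example (the table name is illustrative):

    ```
    DROP ICEBERG TABLE IF EXISTS my_iceberg_table;
    ```
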
Snowflake syntax reference: https://docs.snowflake.com/en/sql-reference/sql/drop-table.html """ type = "drop_iceberg_table_statement" match_grammar = Sequence( "DROP", "ICEBERG", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class DropDynamicTableSegment(BaseSegment): """Drop dynamic table segment.""" type = "drop_dynamic_table_segment" match_grammar = Sequence( "DROP", "DYNAMIC", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class CreateAuthenticationPolicySegment(BaseSegment): """A Snowflake Create Authentication Policy Segment.""" type = "create_authentication_policy_segment" match_grammar = Sequence( "Create", Ref("OrReplaceGrammar", optional=True), "AUTHENTICATION", "POLICY", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( "AUTHENTICATION_METHODS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), optional=True, ), Sequence( "MFA_AUTHENTICATION_METHODS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), optional=True, ), Sequence( "MFA_ENROLLMENT", Ref("EqualsSegment"), OneOf( "REQUIRED", "OPTIONAL", ), optional=True, ), Sequence( "CLIENT_TYPES", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), optional=True, ), Sequence( "SECURITY_INTEGRATIONS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), optional=True, ), Ref( "CommentEqualsClauseSegment", optional=True, ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_snowflake_keywords.py000066400000000000000000000204761503426445100255370ustar00rootroot00000000000000"""A list of all Snowflake SQL key words. https://docs.snowflake.com/en/sql-reference/reserved-keywords.html """ snowflake_reserved_keywords = """ALL ALTER AND ANY AS ASOF BEARER BEARER_TOKEN BETWEEN BY CAST CHECK CONNECT CONNECTION CONSTRAINT CREATE CURRENT CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP DELETE DELTA DECLARE DISTINCT DROP ELSE EXISTS FOLLOWING FOR FROM FULL GRANT GROUP GSCLUSTER HAVING HYBRID ILIKE IN INCREMENT INNER INSERT INSERT_ONLY INTERSECT INTO IS JOIN LATERAL LEFT LIKE LOCALTIME LOCALTIMESTAMP MATCH_CONDITION MATCH_RECOGNIZE MINUS NATURAL NOT NULL NULL_IF OF ON OR ORDER QUALIFY REGEXP REVOKE RIGHT RLIKE ROW ROWS SAMPLE SELECT SET SOME START STRICT TABLE TABLESAMPLE THEN TO TRIGGER TRY_CAST UNION UNIQUE UNPIVOT UPDATE USING VALUES WHEN WHENEVER WHERE WITH """ snowflake_unreserved_keywords = """ ABORT ABORT_STATEMENT ACCESS ACCOUNT ACCOUNTS ACTION ADD ADMIN AFTER AGGREGATE AGGREGATION ALERT ALLOW_DUPLICATE ALLOW_OVERLAPPING_EXECUTION ALLOW_WRITES ALLOWED_AUTHENTICATION_SECRETS ALLOWED_IP_LIST ALLOWED_NETWORK_RULE_LIST ALLOWED_VALUES ALWAYS API API_ALLOWED_PREFIXES API_AWS_ROLE_ARN API_BLOCKED_PREFIXES API_INTEGRATION API_KEY API_PROVIDER APPEND_ONLY APPLICATION APPLY ARRAY ASC ASYNC AT ATTACH ATTRIBUTES AUTHENTICATION AUTHENTICATION_METHODS AUTHORIZATION AUTHORIZATIONS AUTO AUTO_COMPRESS AUTO_INCREMENT AUTO_INGEST AUTO_REFRESH AUTO_RESUME AUTO_SUSPEND AUTOINCREMENT AVRO AWS_API_GATEWAY AWS_GLUE AWS_PRIVATE_API_GATEWAY AWS_GOV_API_GATEWAY AWS_GOV_PRIVATE_API_GATEWAY AWS_KEY_ID AWS_PRIVATE_API_GATEWAY AWS_ROLE AWS_SECRET_KEY AWS_SNS AWS_SNS_ROLE_ARN AWS_SNS_TOPIC AWS_SNS_TOPIC_ARN AWS_TOKEN AZURE AZURE_API_MANAGEMENT AZURE_AD_APPLICATION_ID AZURE_EVENT_GRID AZURE_EVENT_GRID_TOPIC_ENDPOINT AZURE_SAS_TOKEN AZURE_STORAGE_QUEUE AZURE_STORAGE_QUEUE_PRIMARY_URI AZURE_TENANT_ID BASE_LOCATION BASE64 BEFORE BEGIN BERNOULLI BINARY BINARY_AS_TEXT 
BINARY_FORMAT BINDING BLOCK BLOCKED_IP_LIST BLOCKED_NETWORK_RULE_LIST BODY BOOLEAN BROTLI BZ2 CACHE CALL CALLED CALLER CASCADE CASE CASE_INSENSITIVE CASE_SENSITIVE CASES CATALOG CATALOG_API_TYPE CATALOG_NAME CATALOG_NAMESPACE CATALOG_SOURCE CATALOG_TABLE_NAME CATALOG_URI CHAIN CHANGE_TRACKING CHANGES CHARACTER CLIENT_TYPES CLONE CLUSTER CLUSTERING COLLATE COLUMN COLUMNS COMMENT COMMIT COMPRESSION CONCURRENTLY CONNECT_BY_ROOT CONTEXT_HEADERS CONTINUE COPY COPY_OPTIONS CORTEX CREDENTIALS CREDIT_QUOTA CROSS CSV CUBE CURRENT_ACCOUNT CURRENT_CLIENT CURRENT_DATABASE CURRENT_IP_ADDRESS CURRENT_REGION CURRENT_ROLE CURRENT_SCHEMA CURRENT_SCHEMAS CURRENT_SESSION CURRENT_STATEMENT CURRENT_TRANSACTION CURRENT_USER CURRENT_VERSION CURRENT_WAREHOUSE CURSOR CYCLE DAILY DATA DATA_FORMAT DATA_RETENTION_TIME_IN_DAYS DATABASE DATABASES DATE DATE_FORMAT DAYS_TO_EXPIRY DEBUG DEFAULT DEFAULT_DDL_COLLATION DEFAULT_NAMESPACE DEFAULT_ROLE DEFAULT_SECONDARY_ROLES DEFAULT_WAREHOUSE DEFERRABLE DEFERRED DEFINE DEFLATE DELEGATED DESC DESCRIBE DETAILED_OUTPUT DIRECTION DIRECTORY DISABLE DISABLE_AUTO_CONVERT DISABLE_SNOWFLAKE_DATA DISABLED DISPLAY_NAME DO DOMAIN DOUBLE DOWNSTREAM DYNAMIC ECONOMY EMAIL EMBEDDING_MODEL EMPTY EMPTY_FIELD_AS_NULL ENABLE ENABLE_OCTAL ENABLE_QUERY_ACCELERATION ENABLED ENCODING ENCRYPTION END END_TIMESTAMP ENFORCE_LENGTH ENFORCED ENTITY ENUM ERROR ERROR_INTEGRATION ERROR_ON_COLUMN_COUNT_MISMATCH ESCAPE ESCAPE_UNENCLOSED_FIELD EVENT EXCEPT EXCEPTION EXCHANGE EXCLUDE EXECUTE EXECUTION EXPLAIN EXTENSION EXTERNAL EXTERNAL_ACCESS_INTEGRATIONS EXTERNAL_STAGE EXTERNAL_VOLUME FATAL FETCH FIELD_DELIMITER FIELD_OPTIONALITY_ENCLOSED_BY FIELD_OPTIONALLY_ENCLOSED_BY FILE FILE_EXTENSION FILE_FORMAT FILES FILTER FINAL FIRST FIRST_NAME FOR FORCE FOREIGN FORMAT FORMAT_NAME FORMATS FREQUENCY FUNCTION FUNCTIONS FUTURE GCP_PUBSUB GCP_PUBSUB_SUBSCRIPTION_NAME GCP_PUBSUB_TOPIC_NAME GCS GET GIT_HTTPS_API GLOBAL GLUE GLUE_AWS_ROLE_ARN GLUE_CATALOG_ID GLUE_REGION GOOGLE_AUDIENCE GOOGLE_API_GATEWAY GRANTED GRANTS GROUPING GZIP HANDLER HEADER HEADERS HEX HISTORY ICEBERG ICEBERG_REST IDENTIFIER IDENTITY IF IGNORE IGNORE_UTF8_ERRORS IMMEDIATE IMMEDIATELY IMMUTABLE IMPORT IMPORTED IMPORTS INCLUDE INCLUDE_METADATA INCLUDE_QUERY_ID INDEX INDEXING INFO INFORMATION INITIALIZE INITIALLY INITIALLY_SUSPENDED INPUT INTEGRATION INTEGRATIONS INTERVAL ISSUE JAVA JAVASCRIPT JSON KEY KMS_KEY_ID LANGUAGE LARGE LAST LAST_NAME LAST_QUERY_ID LAST_TRANSACTION LET LIMIT LIST LISTING LOAD_UNCERTAIN_FILES LOCAL LOCATION LOCKS LOG_LEVEL LOGIN_NAME LS LZO M MAIN_FILE MANAGE MANAGED MASKING MASTER_KEY MATCH MATCH_BY_COLUMN_NAME MATCHED MATCHES MATERIALIZED MAX_BATCH_ROWS MAX_CLUSTER_COUNT MAX_CONCURRENCY_LEVEL MAX_DATA_EXTENSION_TIME_IN_DAYS MAX_FILE_SIZE MAX_SIZE MAXVALUE MEASURES MERGE METADATA_FILE_PATH METADATA$FILENAME METADATA$FILE_ROW_NUMBER METADATA$FILE_CONTENT_KEY METADATA$FILE_LAST_MODIFIED METADATA$START_SCAN_TIME MFA_AUTHENTICATION_METHODS MFA_ENROLLMENT MIDDLE_NAME MIN_CLUSTER_COUNT MINS_TO_BYPASS_MFA MINS_TO_UNLOCK MINVALUE ML MODEL MODELS MODIFIED_AFTER MODIFY MONITOR MONITORS MONTHLY MUST_CHANGE_PASSWORD NAME NAN NETWORK NEVER NEXT NEXTVAL NO NOCACHE NOCYCLE NONE NOORDER NORELY NOTEBOOK NOTEBOOKS NOTIFICATION NOTIFICATION_INTEGRATION NOTIFICATION_PROVIDER NOTIFY NOTIFY_USERS NOVALIDATE NULL_IF NULLS OAUTH OAUTH_ALLOWED_SCOPES OAUTH_CLIENT_ID OAUTH_CLIENT_SECRET OAUTH_TOKEN_URI OBJECT OBJECT_STORE OBJECTS OFF OFFSET OMIT ON_ERROR ON_EVENT ONE ONLY OPERATE OPTIMIZATION OPTION OPTIONAL OPTIONS ORC ORGANIZATION OTHER OUTBOUND OUTER 
OVER OVERLAPS OVERRIDE OVERWRITE OWNER OWNERSHIP PACKAGES PARALLEL PARAMETERS PARQUET PARSE_HEADER PARTIAL PARTITION PARTITION_TYPE PASSWORD PASSWORD_HISTORY PASSWORD_LOCKOUT_TIME_MINS PASSWORD_MAX_AGE_DAYS PASSWORD_MAX_LENGTH PASSWORD_MAX_RETRIES PASSWORD_MIN_AGE_DAYS PASSWORD_MIN_LOWER_CASE_CHARS PASSWORD_MIN_LENGTH PASSWORD_MIN_NUMERIC_CHARS PASSWORD_MIN_SPECIAL_CHARS PASSWORD_MIN_UPPER_CASE_CHARS PAST PATTERN PER PERCENT PERMUTE PIPE PIPE_EXECUTION_PAUSED PIPES PIVOT POLARIS POLICIES POLICY PRECEDING PRECISION PREFIX PRESERVE_SPACE PRIMARY PRIOR PRIVILEGES PROCEDURE PROCEDURES PUBLIC PURGE PUT PYTHON QUERIES QUERY_ACCELERATION_MAX_SCALE_FACTOR QUERY_WAREHOUSE QUEUE RANGE RAW_DEFLATE READ RECLUSTER RECORD_DELIMITER RECURSIVE REFERENCE_USAGE REFERENCES REFRESH REFRESH_INTERVAL_SECONDS REFRESH_MODE REFRESH_ON_CREATE REGIONS RELY REMOVE RENAME REPEATABLE REPLACE REPLACE_INVALID_CHARACTERS REPLICATION REQUEST_TRANSLATOR REQUIRE REQUIRED RESET RESOURCE RESOURCE_MONITOR RESPECT RESPONSE_TRANSLATOR RESTRICT RESTRICTIONS REST_AUTHENTICATION REST_CONFIG RESULT RESULTSET RESUME RETURN RETURN_ALL_ERRORS RETURN_ERRORS RETURN_FAILED_ONLY RETURNS RM ROLE ROLES ROLLBACK ROLLUP ROOT_LOCATION ROUTINE ROUTINES ROW RSA_PUBLIC_KEY RSA_PUBLIC_KEY_2 RUNNING RUNTIME_VERSION S3 SCALA SCALING_POLICY SCHEDULE SCHEMA SCHEMAS SEARCH SECONDARY SECRETS SECURE SECURITY SECURITY_INTEGRATIONS SEED SEPARATOR SEQUENCE SEQUENCES SERVER SERVICE SERVICES SERVING SESSION SESSION_USER SETS SHARE SHARE_RESTRICTIONS SHARES SHOW SHOW_INITIAL_ROWS SIGV4 SIGV4_EXTERNAL_ID SIGV4_IAM_ROLE SIGV4_SIGNING_REGION SIMPLE SINGLE SIZE_LIMIT SKIP SKIP_BLANK_LINES SKIP_BYTE_ORDER_MARK SKIP_FILE SKIP_HEADER SNAPPY SNAPPY_COMPRESSION SNOWFLAKE_FULL SNOWFLAKE_SSE SOURCE_COMPRESSION SQL STAGE STAGE_COPY_OPTIONS STAGE_FILE_FORMAT STAGES STANDARD START_TIMESTAMP STARTS STATEMENT STATEMENT_QUEUED_TIMEOUT_IN_SECONDS STATEMENT_TIMEOUT_IN_SECONDS STORAGE STORAGE_ALLOWED_LOCATIONS STORAGE_AWS_EXTERNAL_ID STORAGE_AWS_OBJECT_ACL STORAGE_AWS_ROLE_ARN STORAGE_BASE_URL STORAGE_BLOCKED_LOCATIONS STORAGE_INTEGRATION STORAGE_LOCATION STORAGE_LOCATIONS STORAGE_PROVIDER STREAM STREAMLIT STREAMLITS STREAMS STRIP_NULL_VALUES STRIP_OUTER_ARRAY STRIP_OUTER_ELEMENT SUBPATH SUPPORT SUSPEND SUSPEND_IMMEDIATE SUSPENDED SWAP SYSDATE SYSTEM TABLES TABLESPACE TABLE_FORMAT TABULAR TAG TARGET_LAG TARGET_PATH TASK TASKS TEMP TEMPLATE TEMPORARY TERSE TEXT TIME TIME_FORMAT TIMESTAMP TIMESTAMP_FORMAT TITLE TOP TRACE TRACE_LEVEL TRANSACTION TRANSACTIONS TRANSIENT TRIGGERS TRIM_SPACE TRUNCATE TRUNCATECOLUMNS TYPE UNBOUNDED UNDROP UNMATCHED UNSET UNSIGNED URL US USAGE USE USE_ANY_ROLE USE_LOGICAL_TYPE USE_PRIVATELINK_ENDPOINT USE_VECTORIZED_SCANNER USER USER_SPECIFIED USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE USER_TASK_TIMEOUT_MS USERS UTF8 VALIDATE VALIDATE_UTF8 VALIDATION_MODE VALUE VARIABLES VARIANT VARYING VERSION VIEW VIEWS VOLATILE VOLUME VOLUMES WAIT_FOR_COMPLETION WAREHOUSE WAREHOUSE_SIZE WAREHOUSE_TYPE WAREHOUSES WARN WEEKLY WINDOW WITH WITHIN WITHOUT WORK WOY WRAPPER WRITE XML YEARLY ZONE ZSTD """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_soql.py000066400000000000000000000067761503426445100226040ustar00rootroot00000000000000"""The SOQL dialect. 
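A minimal, illustrative example of the kind of query this dialect parses (an orienting sketch, not taken from the Salesforce docs): SELECT Id, Name FROM Account WHERE CreatedDate = LAST_N_DAYS:30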
https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, CodeSegment, LiteralSegment, OneOf, Ref, RegexLexer, Sequence, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") soql_dialect = ansi_dialect.copy_as( "soql", formatted_name="Salesforce Object Query Language (SOQL)", docstring=( "The dialect for `SOQL " "<https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm>`_ " "(Salesforce Object Query Language)." ), ) soql_dialect.insert_lexer_matchers( [ # Date and datetime literals as per: # https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_dateformats.htm RegexLexer( "datetime_literal", r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|(\+|\-)[0-9]{2}:[0-9]{2})", # noqa E501 CodeSegment, ), RegexLexer( "date_literal", r"[0-9]{4}-[0-9]{2}-[0-9]{2}", CodeSegment, ), ], before="numeric_literal", ) date_literals = { "YESTERDAY", "TODAY", "TOMORROW", "LAST_WEEK", "THIS_WEEK", "NEXT_WEEK", "LAST_MONTH", "THIS_MONTH", "NEXT_MONTH", "LAST_90_DAYS", "NEXT_90_DAYS", "THIS_QUARTER", "LAST_QUARTER", "NEXT_QUARTER", "THIS_YEAR", "LAST_YEAR", "NEXT_YEAR", "THIS_FISCAL_QUARTER", "LAST_FISCAL_QUARTER", "NEXT_FISCAL_QUARTER", "THIS_FISCAL_YEAR", "LAST_FISCAL_YEAR", "NEXT_FISCAL_YEAR", } date_n_literals = { "LAST_N_DAYS", "NEXT_N_DAYS", "LAST_N_WEEKS", "NEXT_N_WEEKS", "LAST_N_MONTHS", "NEXT_N_MONTHS", "LAST_N_QUARTERS", "NEXT_N_QUARTERS", "LAST_N_YEARS", "NEXT_N_YEARS", "LAST_N_FISCAL_QUARTERS", "NEXT_N_FISCAL_QUARTERS", "LAST_N_FISCAL_YEARS", "NEXT_N_FISCAL_YEARS", } soql_dialect.sets("reserved_keywords").update(date_literals | date_n_literals) soql_dialect.sets("bare_functions").update(date_literals) class DateLiteralNSegment(BaseSegment): """A Date literal keyword that takes the :n integer suffix. https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_dateformats.htm """ type = "date_n_literal" match_grammar = Sequence( OneOf(*date_n_literals), Ref("ColonSegment"), Ref("NumericLiteralSegment"), allow_gaps=False, ) soql_dialect.replace( Expression_C_Grammar=ansi_dialect.get_grammar("Expression_C_Grammar").copy( insert=[ Ref("DateLiteralNSegment"), ] ), DateTimeLiteralGrammar=OneOf( TypedParser("date_literal", LiteralSegment, type="date_literal"), TypedParser("datetime_literal", LiteralSegment, type="datetime_literal"), Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal" ), ), ), ) class StatementSegment(ansi.StatementSegment): """SOQL seems to only support SELECT statements. https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm """ match_grammar = Ref("SelectableGrammar") sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_sparksql.py000066400000000000000000003254671503426445100234551ustar00rootroot00000000000000"""The ANSI Compliant SparkSQL dialect. Inherits from ANSI. Spark SQL ANSI Mode is more restrictive regarding keywords than the Default Mode, and still shares some syntax with hive.
Based on: https://spark.apache.org/docs/latest/sql-ref.html https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralSegment, Matchable, MultiStringParser, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_hive as hive from sqlfluff.dialects.dialect_sparksql_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") hive_dialect = load_raw_dialect("hive") sparksql_dialect = ansi_dialect.copy_as( "sparksql", formatted_name="Apache Spark SQL", docstring="""**Default Casing**: SparkSQL is case insensitive with both quoted and unquoted identifiers (_"delimited"_ identifiers in Spark terminology). See the `Spark Identifiers`_ docs. **Quotes**: String Literals: ``''`` or ``""``, Identifiers: |back_quotes|. The dialect for Apache `Spark SQL`_. This includes relevant syntax from :ref:`hive_dialect_ref` for commands that permit Hive Format. Spark SQL extensions provided by the `Delta Lake`_ project are also implemented in this dialect. This implementation focuses on the `Ansi Compliant Mode`_ introduced in Spark3, instead of being Hive Compliant. The introduction of ANSI Compliance provides better data quality and easier migration from traditional DBMS. Versions of Spark prior to 3.x will only support the Hive dialect. .. _`Spark SQL`: https://spark.apache.org/docs/latest/sql-ref.html .. _`Delta Lake`: https://docs.delta.io/latest/quick-start.html#set-up-apache-spark-with-delta-lake .. _`Ansi Compliant Mode`: https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html .. _`Spark Identifiers`: https://spark.apache.org/docs/latest/sql-ref-identifier.html""", # noqa: E501 ) sparksql_dialect.patch_lexer_matchers( [ # Spark SQL, only -- is used for single-line comment RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": "--"}, ), # == and <=> are valid equal operations # <=> is a non-null equals in Spark SQL # https://spark.apache.org/docs/latest/api/sql/index.html#_10 RegexLexer("equals", r"==|<=>|=", CodeSegment), # identifiers are delimited with ` # within a delimited identifier, ` is used to escape special characters, # including ` # Ex: select `delimited `` with escaped` from `just delimited` # https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier RegexLexer( "back_quote", r"`([^`]|``)*`", CodeSegment, segment_kwargs={ "quoted_value": (r"`((?:[^`]|``)*)`", 1), "escape_replacements": [(r"``", "`")], }, ), # Numeric literal matches integers, decimals, and exponential formats. # https://spark.apache.org/docs/latest/sql-ref-literals.html#numeric-literal # Pattern breakdown: # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # 3 distinct groups here: # 1. Obvious fractional types # (can optionally be exponential). # 2. Integer followed by exponential. # These must be fractional types. # 3. Integer only. 
# These can either be integral or # fractional types. # # (?> 1. # \d+\.\d+ e.g. 123.456 # |\d+\. e.g. 123. # |\.\d+ e.g. .123 # ) # ([eE][+-]?\d+)? Optional exponential. # ([dDfF]|BD|bd)? Fractional data types. # |\d+[eE][+-]?\d+([dDfF]|BD|bd)? 2. Integer + exponential with # fractional data types. # |\d+([dDfFlLsSyY]|BD|bd)? 3. Integer only with integral or # fractional data types. # ) # ( # (?<=\.) If matched character ends with . # (e.g. 123.) then don't worry about # word boundary check. # |(?=\b) Check that we are at word boundary to # avoid matching valid naked identifiers # (e.g. 123column). # ) RegexLexer( "numeric_literal", ( r"(?>(?>\d+\.\d+|\d+\.|\.\d+)([eE][+-]?\d+)?([dDfF]|BD|bd)?" r"|\d+[eE][+-]?\d+([dDfF]|BD|bd)?" r"|\d+([dDfFlLsSyY]|BD|bd)?)" r"((?<=\.)|(?=\b))" ), CodeSegment, ), ] ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "raw_single_quote", r"[rR]'([^'\\]|\\.)*'", CodeSegment, ), RegexLexer( "raw_double_quote", r'[rR]"([^"\\]|\\.)*"', CodeSegment, ), RegexLexer( "bytes_single_quote", r"X'([^'\\]|\\.)*'", CodeSegment, ), RegexLexer( "bytes_double_quote", r'X"([^"\\]|\\.)*"', CodeSegment, ), ], before="single_quote", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "at_sign_literal", r"@\w*", CodeSegment, ), ], before="word", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "file_literal", ( r"[a-zA-Z0-9]+:([a-zA-Z0-9\-_\.]*(\/|\\)){2,}" r"((([a-zA-Z0-9\-_\.]*(:|\?|=|&)[a-zA-Z0-9\-_\.]*)+)" r"|([a-zA-Z0-9\-_\.]*\.[a-z]+))" ), CodeSegment, ), ], before="newline", ) # Set the bare functions sparksql_dialect.sets("bare_functions").clear() sparksql_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURRENT_USER", ] ) # Set the date part functions sparksql_dialect.sets("date_part_function_name").clear() sparksql_dialect.sets("date_part_function_name").update( [ "DATE_ADD", "DATE_DIFF", "DATEADD", "DATEDIFF", "TIMESTAMPADD", "TIMESTAMPDIFF", ] ) # Set the datetime units sparksql_dialect.sets("datetime_units").clear() sparksql_dialect.sets("datetime_units").update( [ "YEAR", "YEARS", "YYYY", "YY", "QUARTER", "QUARTERS", "MONTH", "MONTHS", "MON", "MM", "WEEK", "WEEKS", "DAY", "DAYS", "DD", "DAYOFYEAR", "HOUR", "HOURS", "MINUTE", "MINUTES", "SECOND", "SECONDS", "MILLISECOND", "MILLISECONDS", "MICROSECOND", "MICROSECONDS", ] ) # Set Keywords sparksql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) sparksql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) # Set Angle Bracket Pairs sparksql_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Real Segments sparksql_dialect.replace( DateTimeLiteralGrammar=Sequence( OneOf( "DATE", "TIME", "TIMESTAMP", "INTERVAL", "TIMESTAMP_LTZ", "TIMESTAMP_NTZ" ), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("EqualsSegment_a"), Ref("EqualsSegment_b"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), ), SelectClauseTerminatorGrammar=ansi_dialect.get_grammar( "SelectClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), Ref.keyword("QUALIFY"), ] ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), 
Sequence("ORDER", "BY"), Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), "HAVING", "QUALIFY", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "KEYS", ), TemporaryGrammar=Sequence( Sequence("GLOBAL", optional=True), OneOf("TEMP", "TEMPORARY"), ), QuotedLiteralSegment=OneOf( TypedParser("single_quote", LiteralSegment, type="quoted_literal"), TypedParser("double_quote", LiteralSegment, type="quoted_literal"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("RawQuotedLiteralSegment"), Ref("BytesQuotedLiteralSegment"), ] ), NaturalJoinKeywordsGrammar=Sequence( "NATURAL", Ref("JoinTypeKeywords", optional=True), ), JoinLikeClauseGrammar=Sequence( OneOf( Ref("PivotClauseSegment"), Ref("UnpivotClauseSegment"), Ref("LateralViewClauseSegment"), ), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("JoinLikeClauseGrammar"), ), optional=True, ), ), LikeGrammar=OneOf( # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-like.html # ilike: https://github.com/apache/spark/pull/33966/files Sequence( OneOf("LIKE", "ILIKE"), OneOf( "ALL", "ANY", # `SOME` is equivalent to `ANY` "SOME", optional=True, ), ), "RLIKE", "REGEXP", ), NotOperatorGrammar=OneOf( StringParser("NOT", KeywordSegment, type="keyword"), StringParser("!", CodeSegment, type="not_operator"), ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence( OneOf( "CLUSTER", "DISTRIBUTE", "GROUP", "ORDER", "SORT", ), "BY", ), Sequence("ORDER", "BY"), Sequence("DISTRIBUTE", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "APPLY", ), GroupByClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "DISTRIBUTE", "CLUSTER", "SORT", ), "BY", ), "LIMIT", "HAVING", "WINDOW", ), HavingClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "CLUSTER", "DISTRIBUTE", "SORT", ), "BY", ), "LIMIT", "QUALIFY", "WINDOW", ), ArithmeticBinaryOperatorGrammar=OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), Ref("BitwiseLShiftSegment"), Ref("BitwiseRShiftSegment"), Ref("DivBinaryOperatorSegment"), ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add arrow operators for lambdas (e.g. aggregate) Ref("RightArrowOperator"), ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), ObjectReferenceTerminatorGrammar=OneOf( "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("StarSegment"), ), NonWithNonSelectableGrammar=ansi_dialect.get_grammar( "NonWithNonSelectableGrammar" ).copy(insert=[Ref("InsertOverwriteDirectorySegment")]), ) sparksql_dialect.add( BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), # match ANSI's naked identifier casefold, sparksql is case-insensitive. 
casefold=str.upper, ), NakedSemiStructuredElementSegment=RegexParser( r"[A-Z0-9_]*", CodeSegment, type="semi_structured_element", ), QuotedSemiStructuredElementSegment=TypedParser( "single_quote", CodeSegment, type="semi_structured_element", ), RightArrowOperator=StringParser("->", SymbolSegment, type="binary_operator"), BinaryfileKeywordSegment=StringParser( "BINARYFILE", KeywordSegment, type="file_format", ), JsonfileKeywordSegment=StringParser( "JSONFILE", KeywordSegment, type="file_format", ), RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), EqualsSegment_a=StringParser("==", ComparisonOperatorSegment), EqualsSegment_b=StringParser("<=>", ComparisonOperatorSegment), FileKeywordSegment=MultiStringParser( ["FILE", "FILES"], KeywordSegment, type="file_keyword" ), JarKeywordSegment=MultiStringParser( ["JAR", "JARS"], KeywordSegment, type="file_keyword" ), NoscanKeywordSegment=StringParser("NOSCAN", KeywordSegment, type="keyword"), WhlKeywordSegment=StringParser("WHL", KeywordSegment, type="file_keyword"), # Add relevant Hive Grammar CommentGrammar=hive_dialect.get_grammar("CommentGrammar"), LocationGrammar=hive_dialect.get_grammar("LocationGrammar"), SerdePropertiesGrammar=hive_dialect.get_grammar("SerdePropertiesGrammar"), StoredAsGrammar=hive_dialect.get_grammar("StoredAsGrammar"), StoredByGrammar=hive_dialect.get_grammar("StoredByGrammar"), StorageFormatGrammar=hive_dialect.get_grammar("StorageFormatGrammar"), TerminatedByGrammar=hive_dialect.get_grammar("TerminatedByGrammar"), # Add Spark Grammar PropertyGrammar=Sequence( Ref("PropertyNameSegment"), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), # when property value is Java Class Name Delimited( Ref("PropertiesNakedIdentifierSegment"), delimiter=Ref("DotSegment"), ), ), ), PropertyNameListGrammar=Delimited(Ref("PropertyNameSegment")), BracketedPropertyNameListGrammar=Bracketed(Ref("PropertyNameListGrammar")), PropertyListGrammar=Delimited(Ref("PropertyGrammar")), BracketedPropertyListGrammar=Bracketed(Ref("PropertyListGrammar")), OptionsGrammar=Sequence("OPTIONS", Ref("BracketedPropertyListGrammar")), BucketSpecGrammar=Sequence( Ref("ClusteredBySpecGrammar"), Ref("SortedBySpecGrammar", optional=True), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", ), ClusteredBySpecGrammar=Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), ), DatabasePropertiesGrammar=Sequence( "DBPROPERTIES", Ref("BracketedPropertyListGrammar") ), DataSourcesV2FileTypeGrammar=OneOf( # https://github.com/apache/spark/tree/master/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2 # noqa: E501 # Separated here because these allow for additional # commands such as Select From File # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # Spark Core Data Sources # https://spark.apache.org/docs/latest/sql-data-sources.html "AVRO", "CSV", "JSON", "PARQUET", "ORC", # Separated here because these allow for additional commands # Similar to DataSourcesV2 "DELTA", # https://github.com/delta-io/delta "CSV", "ICEBERG", "TEXT", "BINARYFILE", ), DescribeObjectGrammar=OneOf( Sequence( OneOf("DATABASE", "SCHEMA"), 
Ref.keyword("EXTENDED", optional=True), Ref("DatabaseReferenceSegment"), ), Sequence( "FUNCTION", Ref.keyword("EXTENDED", optional=True), Ref("FunctionNameSegment"), ), Sequence( Ref.keyword("TABLE", optional=True), Ref.keyword("EXTENDED", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), # can be fully qualified column after table is listed # [database.][table.][column] Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=False, ), max_times=2, allow_gaps=False, ), optional=True, allow_gaps=False, ), ), Sequence( Ref.keyword("QUERY", optional=True), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), Ref("StatementSegment"), ), ), exclude=OneOf( Ref.keyword("HISTORY"), Ref.keyword("DETAIL"), ), ), FileFormatGrammar=OneOf( Ref("DataSourcesV2FileTypeGrammar"), "SEQUENCEFILE", "TEXTFILE", "RCFILE", "JSONFILE", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), TimestampAsOfGrammar=Sequence( "TIMESTAMP", "AS", "OF", OneOf( Ref("QuotedLiteralSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ), ), VersionAsOfGrammar=Sequence( "VERSION", "AS", "OF", Ref("NumericLiteralSegment"), ), # Adding Hint related segments so they are not treated as generic comments # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html StartHintSegment=StringParser("/*+", SymbolSegment, type="start_hint"), EndHintSegment=StringParser("*/", SymbolSegment, type="end_hint"), PartitionSpecGrammar=Sequence( OneOf( "PARTITION", Sequence("PARTITIONED", "BY"), ), Bracketed( Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), ), ), PartitionFieldGrammar=Sequence( "PARTITION", "FIELD", Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), Sequence( Ref.keyword("WITH", optional=True), Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), optional=True, ), Sequence("AS", Ref("NakedIdentifierSegment"), optional=True), ), # NB: Redefined from `NakedIdentifierSegment` which uses an anti-template to # not match keywords; however, SparkSQL allows keywords to be used in table # and runtime properties. 
PropertiesNakedIdentifierSegment=RegexParser( r"[A-Z0-9]*[A-Z][A-Z0-9]*", IdentifierSegment, type="properties_naked_identifier", ), ResourceFileGrammar=OneOf( Ref("JarKeywordSegment"), Ref("WhlKeywordSegment"), Ref("FileKeywordSegment"), ), ResourceLocationGrammar=Sequence( "USING", Ref("ResourceFileGrammar"), Ref("QuotedLiteralSegment"), ), SortedBySpecGrammar=Sequence( "SORTED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ) ) ), optional=True, ), UnsetTablePropertiesGrammar=Sequence( "UNSET", "TBLPROPERTIES", Ref("IfExistsGrammar", optional=True), Ref("BracketedPropertyNameListGrammar"), ), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), RawQuotedLiteralSegment=OneOf( TypedParser( "raw_single_quote", LiteralSegment, type="raw_quoted_literal", ), TypedParser( "raw_double_quote", LiteralSegment, type="raw_quoted_literal", ), ), BytesQuotedLiteralSegment=OneOf( TypedParser( "bytes_single_quote", LiteralSegment, type="bytes_quoted_literal", ), TypedParser( "bytes_double_quote", LiteralSegment, type="bytes_quoted_literal", ), ), JoinTypeKeywords=OneOf( "CROSS", "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), Sequence( Ref.keyword("LEFT", optional=True), "SEMI", ), Sequence( Ref.keyword("LEFT", optional=True), "ANTI", ), ), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", trim_chars=("@",), ), # This is the same as QuotedLiteralSegment but # is given a different `name` to stop LT01 flagging # TODO: Work out how the LT01 change influence this. SignedQuotedLiteralSegment=OneOf( TypedParser( "single_quote", LiteralSegment, type="signed_quoted_literal", ), TypedParser( "double_quote", LiteralSegment, type="signed_quoted_literal", ), ), # Delta Live Tables CREATE TABLE and VIEW statements OrRefreshGrammar=Sequence("OR", "REFRESH"), # Databricks widget WidgetNameIdentifierSegment=RegexParser( r"[A-Z][A-Z0-9_]*", CodeSegment, type="widget_name_identifier", ), WidgetDefaultGrammar=Sequence( "DEFAULT", Ref("QuotedLiteralSegment"), ), TableDefinitionSegment=Sequence( OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("EXTERNAL", optional=True), Ref.keyword("STREAMING", optional=True), Ref.keyword("LIVE", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( # Columns and comment syntax: Bracketed( Delimited( Sequence( OneOf( Ref("ColumnFieldDefinitionSegment"), Ref("GeneratedColumnDefinitionSegment"), Ref("TableConstraintSegment", optional=True), ), Ref("CommentGrammar", optional=True), ), Ref("ConstraintStatementSegment", optional=True), ), ), # Like Syntax Sequence( "LIKE", OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ), optional=True, ), Ref("UsingClauseSegment", optional=True), AnySetOf( Ref("RowFormatClauseSegment"), Ref("StoredAsGrammar"), Ref("CommentGrammar"), Ref("OptionsGrammar"), Ref("PartitionSpecGrammar"), Ref("BucketSpecGrammar"), Ref("LocationGrammar"), Ref("CommentGrammar"), Ref("TablePropertiesGrammar"), Sequence("CLUSTER", "BY", Ref("BracketedColumnReferenceListGrammar")), optional=True, ), # Create AS syntax: Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ), FirstOrAfterGrammar=Sequence( OneOf( "FIRST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), 
), ), ), ShowDatabasesSchemasGrammar=Sequence( # SHOW { DATABASES | SCHEMAS } OneOf("DATABASES", "SCHEMAS"), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ShowFunctionsGrammar=Sequence( # SHOW FUNCTIONS OneOf("USER", "SYSTEM", "ALL", optional=True), "FUNCTIONS", OneOf( # qualified function from a database Sequence( Ref("DatabaseReferenceSegment"), Ref("DotSegment"), Ref("FunctionNameSegment"), allow_gaps=False, optional=True, ), # non-qualified function Ref("FunctionNameSegment", optional=True), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ), ShowTablesGrammar=Sequence( # SHOW TABLES "TABLES", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ShowViewsGrammar=Sequence( # SHOW VIEWS "VIEWS", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ShowObjectGrammar=OneOf( # SHOW CREATE TABLE Sequence( "CREATE", "TABLE", Ref("TableExpressionSegment"), Sequence( "AS", "SERDE", optional=True, ), ), # SHOW COLUMNS Sequence( "COLUMNS", "IN", Ref("TableExpressionSegment"), Sequence( "IN", Ref("DatabaseReferenceSegment"), optional=True, ), ), # SHOW PARTITIONS Sequence( "PARTITIONS", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TABLE EXTENDED Sequence( "TABLE", "EXTENDED", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), "LIKE", Ref("QuotedLiteralSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TBLPROPERTIES Sequence( "TBLPROPERTIES", Ref("TableReferenceSegment"), Ref("BracketedPropertyNameListGrammar", optional=True), ), Ref("ShowDatabasesSchemasGrammar"), Ref("ShowFunctionsGrammar"), Ref("ShowTablesGrammar"), Ref("ShowViewsGrammar"), ), ) # Adding Hint related grammar before comment `block_comment` and # `single_quote` so they are applied before comment lexer so # hints are treated as such instead of comments when parsing. 
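# e.g. (illustrative): in SELECT /*+ REPARTITION(3) */ * FROM t the leading /*+ is lexed as a hint marker rather than the start of a block comment.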
# https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html sparksql_dialect.insert_lexer_matchers( [ StringLexer("start_hint", "/*+", CodeSegment), ], before="block_comment", ) sparksql_dialect.insert_lexer_matchers( [ StringLexer("end_hint", "*/", CodeSegment), ], before="single_quote", ) sparksql_dialect.insert_lexer_matchers( # Lambda expressions: # https://github.com/apache/spark/blob/b4c019627b676edf850c00bb070377896b66fad2/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4#L396 # https://github.com/apache/spark/blob/b4c019627b676edf850c00bb070377896b66fad2/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4#L837-L838 [ StringLexer("right_arrow", "->", CodeSegment), ], before="like_operator", ) class SQLConfPropertiesSegment(BaseSegment): """A SQL Config Option.""" type = "sql_conf_option" match_grammar = Sequence( StringParser("-", SymbolSegment, type="dash"), StringParser("v", SymbolSegment, type="sql_conf_option"), allow_gaps=False, ) class DivBinaryOperatorSegment(BaseSegment): """DIV type binary_operator.""" type = "binary_operator" match_grammar = Ref.keyword("DIV") class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) # Hive Segments class RowFormatClauseSegment(hive.RowFormatClauseSegment): """`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement.""" pass class SkewedByClauseSegment(hive.SkewedByClauseSegment): """`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement.""" pass # Primitive Data Types class PrimitiveTypeSegment(BaseSegment): """Spark SQL Primitive data types. https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "primitive_type" match_grammar = OneOf( "BOOLEAN", # TODO : not currently supported; add segment - see NumericLiteralSegment # "BYTE", "TINYINT", # TODO : not currently supported; add segment - see NumericLiteralSegment # "SHORT", "LONG", "SMALLINT", "INT", "INTEGER", "BIGINT", "FLOAT", "REAL", "DOUBLE", "DATE", "TIMESTAMP", "TIMESTAMP_LTZ", "TIMESTAMP_NTZ", "STRING", Sequence( OneOf("CHAR", "CHARACTER", "VARCHAR", "DECIMAL", "DEC", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "BINARY", "INTERVAL", "VARIANT", ) class ArrayTypeSegment(hive.ArrayTypeSegment): """ARRAY type as per hive.""" pass class StructTypeSegment(hive.StructTypeSegment): """STRUCT type as per hive.""" pass class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment", optional=True), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), bracket_pairs_set="angle_bracket_pairs", ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment. https://docs.databricks.com/en/sql/language-manual/functions/colonsign.html """ type = "semi_structured_expression" match_grammar = Sequence( OneOf( # If a field is already a VARIANT, this could # be initiated by a colon or a dot. This is particularly # useful when a field is an ARRAY of objects. 
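# e.g. (illustrative): raw:store.bicycle.price or raw:items[0].name on a VARIANT column.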
Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Bracketed(Ref("QuotedSemiStructuredElementSegment"), bracket_type="square"), ), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( OneOf( # Can be delimited by dots or colons Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Bracketed( Ref("QuotedSemiStructuredElementSegment"), bracket_type="square" ), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class DatatypeSegment(BaseSegment): """Spark SQL Data types. https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("ArrayTypeSegment"), Sequence( "MAP", Bracketed( Sequence( Ref("DatatypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Ref("StructTypeSegment"), ) # Data Definition Statements # http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE/SCHEMA` statement. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), "SET", OneOf( Ref("DatabasePropertiesGrammar"), Ref("LocationGrammar"), ), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement to change the table schema or properties. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html https://docs.delta.io/latest/delta-constraints.html#constraints """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Indent, OneOf( # ALTER TABLE - RENAME TO `table_identifier` Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # ALTER TABLE - RENAME `partition_spec` Sequence( Ref("PartitionSpecGrammar"), "RENAME", "TO", Ref("PartitionSpecGrammar"), ), # ALTER TABLE - RENAME TO 'column_identifier' Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), # ALTER TABLE - ADD COLUMNS Sequence( "ADD", OneOf("COLUMNS", "COLUMN"), Indent, OptionallyBracketed( Delimited( Sequence( Ref("ColumnFieldDefinitionSegment"), Ref("FirstOrAfterGrammar", optional=True), ), ), ), Dedent, ), # ALTER TABLE - ALTER OR CHANGE COLUMN Sequence( OneOf("ALTER", "CHANGE"), Ref.keyword("COLUMN", optional=True), Indent, AnyNumberOf( Ref( "ColumnReferenceSegment", exclude=OneOf( "COMMENT", "TYPE", Ref("DatatypeSegment"), "FIRST", "AFTER", "SET", "DROP", ), ), max_times=2, ), Ref.keyword("TYPE", optional=True), Ref("DatatypeSegment", optional=True), Ref("CommentGrammar", optional=True), Ref("FirstOrAfterGrammar", optional=True), Sequence(OneOf("SET", "DROP"), "NOT", "NULL", optional=True), Dedent, ), # ALTER TABLE - REPLACE COLUMNS Sequence( "REPLACE", "COLUMNS", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), ), # ALTER TABLE - DROP COLUMN # https://docs.delta.io/2.0.0/delta-batch.html#drop-columns Sequence( "DROP", OneOf( Sequence( "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), ), Sequence( "COLUMNS", Ref("IfExistsGrammar", optional=True), Bracketed( Delimited(AnyNumberOf(Ref("ColumnReferenceSegment"))), ), ), ), ), # ALTER TABLE - ADD PARTITION Sequence( "ADD", 
Ref("IfNotExistsGrammar", optional=True), AnyNumberOf( Ref("PartitionSpecGrammar"), Ref("PartitionFieldGrammar"), min_times=1, ), ), # ALTER TABLE - DROP PARTITION Sequence( "DROP", Ref("IfExistsGrammar", optional=True), OneOf( Ref("PartitionSpecGrammar"), Ref("PartitionFieldGrammar"), ), Sequence("PURGE", optional=True), ), Sequence( "Replace", Ref("PartitionFieldGrammar"), ), # ALTER TABLE - REPAIR PARTITION Sequence("RECOVER", "PARTITIONS"), # ALTER TABLE - SET PROPERTIES Sequence("SET", Ref("TablePropertiesGrammar")), # ALTER TABLE - UNSET PROPERTIES Ref("UnsetTablePropertiesGrammar"), # ALTER TABLE - SET SERDE Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", OneOf( Sequence( "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar"), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ), # ALTER TABLE - SET FILE FORMAT Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", "FILEFORMAT", Ref("DataSourceFormatSegment"), ), # ALTER TABLE - CHANGE FILE LOCATION Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", Ref("LocationGrammar"), ), # ALTER TABLE - ADD/DROP CONSTRAINTS (DELTA) Sequence( Indent, OneOf("ADD", "DROP"), "CONSTRAINT", Ref( "ColumnReferenceSegment", exclude=Ref.keyword("CHECK"), ), Ref.keyword("CHECK", optional=True), Bracketed(Ref("ExpressionSegment"), optional=True), Dedent, ), # ALTER TABLE - ICEBERG WRITE ORDER / DISTRIBUTION # https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table--write-ordered-by Sequence( "WRITE", AnyNumberOf( Sequence("DISTRIBUTED", "BY", "PARTITION", optional=True), Sequence( Ref.keyword("LOCALLY", optional=True), "ORDERED", "BY", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), # NB: This isn't really ANSI, and isn't supported # in Mysql,but is supported in enough other dialects # for it to make sense here for now. Sequence( "NULLS", OneOf("FIRST", "LAST"), optional=True ), ), optional=True, ), Dedent, optional=True, ), min_times=1, max_times_per_element=1, ), ), # ALTER TABLE - ICEBERG SET IDENTIFIER FIELDS Sequence( "SET", "IDENTIFIER", "FIELDS", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), ), ), Dedent, ), # ALTER TABLE - ICEBERG DROP IDENTIFIER FIELDS Sequence( "DROP", "IDENTIFIER", "FIELDS", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), ), ), Dedent, ), ), Dedent, ) class ColumnFieldDefinitionSegment(ansi.ColumnDefinitionSegment): """A column field definition, e.g. for CREATE TABLE or ALTER TABLE. This supports the iceberg syntax and allows for iceberg syntax such as ADD COLUMN a.b. """ match_grammar: Matchable = Sequence( Ref("ColumnReferenceSegment"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement to change the view schema or properties. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence("SET", Ref("TablePropertiesGrammar")), Ref("UnsetTablePropertiesGrammar"), Sequence( "AS", OptionallyBracketed(Ref("SelectStatementSegment")), ), ), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. 
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html """ match_grammar = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("CommentGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence( "WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True ), ) class FunctionParameterListGrammarWithComments(BaseSegment): """The parameters for a function ie. `(column type COMMENT 'comment')`.""" type = "function_parameter_list_with_comments" match_grammar: Matchable = Bracketed( Delimited( Sequence( Ref("FunctionParameterGrammar"), AnyNumberOf( Sequence("DEFAULT", Ref("LiteralGrammar"), optional=True), Ref("CommentClauseSegment", optional=True), ), ), optional=True, ), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html """ type = "create_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameIdentifierSegment"), "AS", Ref("QuotedLiteralSegment"), Ref("ResourceLocationGrammar", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement using a Data Source or Like. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html https://docs.delta.io/latest/delta-batch.html#create-a-table """ match_grammar = Sequence("CREATE", Ref("TableDefinitionSegment")) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax """ match_grammar = Sequence( "CREATE", OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("STREAMING", optional=True), Ref.keyword("LIVE", optional=True), Ref.keyword("MATERIALIZED", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment", optional=True), Ref("CommentGrammar", optional=True), ), Ref("ConstraintStatementSegment", optional=True), ), ), optional=True, ), Sequence("USING", Ref("DataSourceFormatSegment"), optional=True), Ref("OptionsGrammar", optional=True), Ref("CommentGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Sequence("AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class CreateWidgetStatementSegment(BaseSegment): """A `CREATE WIDGET` STATEMENT. https://docs.databricks.com/notebooks/widgets.html#databricks-widget-api """ type = "create_widget_statement" match_grammar = Sequence( "CREATE", "WIDGET", OneOf( Sequence( "DROPDOWN", Ref("WidgetNameIdentifierSegment"), Ref("WidgetDefaultGrammar"), Sequence("CHOICES", Ref("SelectStatementSegment")), ), Sequence( "TEXT", Ref("WidgetNameIdentifierSegment"), Ref("WidgetDefaultGrammar") ), ), ) class ReplaceTableStatementSegment(BaseSegment): """A `REPLACE TABLE` statement using the iceberg table format. 
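e.g. (an illustrative example): ``REPLACE TABLE prod.db.sample USING iceberg AS SELECT * FROM staging.db.sample``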
https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select """ type = "replace_table_statement" match_grammar = Sequence("REPLACE", Ref("TableDefinitionSegment")) class RemoveWidgetStatementSegment(BaseSegment): """A `REMOVE WIDGET` STATEMENT. https://docs.databricks.com/notebooks/widgets.html#databricks-widget-api """ type = "remove_widget_statement" match_grammar = Sequence( "REMOVE", "WIDGET", Ref("WidgetNameIdentifierSegment"), ) class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment): """A `DROP DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-database.html """ type = "drop_database_statement" match_grammar: Matchable = Sequence( "DROP", OneOf("DATABASE", "SCHEMA"), Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` STATEMENT. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html """ type = "drop_function_statement" match_grammar = Sequence( "DROP", Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class MsckRepairTableStatementSegment(hive.MsckRepairTableStatementSegment): """A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format. This class inherits from Hive since Spark leverages Hive format for this command and is dependent on the Hive metastore. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html """ pass class TruncateStatementSegment(ansi.TruncateStatementSegment): """A `TRUNCATE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-truncate-table.html """ match_grammar = Sequence( "TRUNCATE", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) class UseDatabaseStatementSegment(BaseSegment): """A `USE DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-usedb.html """ type = "use_database_statement" match_grammar = Sequence( "USE", Ref("DatabaseReferenceSegment"), ) # Data Manipulation Statements class InsertStatementSegment(BaseSegment): """A `INSERT [TABLE]` statement to insert or overwrite new rows into a table. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-into.html https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-table.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf("INTO", "OVERWRITE"), Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), ), ) class InsertOverwriteDirectorySegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement. 
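e.g. (an illustrative example): ``INSERT OVERWRITE DIRECTORY '/tmp/destination' USING parquet OPTIONS (col1 '1') SELECT * FROM test_table``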
https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory.html """ type = "insert_overwrite_directory_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment", optional=True), "USING", Ref("DataSourceFormatSegment"), Ref("OptionsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class InsertOverwriteDirectoryHiveFmtSegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement in Hive format. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory-hive.html """ type = "insert_overwrite_directory_hive_fmt_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment"), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class LoadDataSegment(BaseSegment): """A `LOAD DATA` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-load.html """ type = "load_data_statement" match_grammar = Sequence( "LOAD", "DATA", Ref.keyword("LOCAL", optional=True), "INPATH", Ref("QuotedLiteralSegment"), Ref.keyword("OVERWRITE", optional=True), "INTO", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) # Data Retrieval Statements class ClusterByClauseSegment(BaseSegment): """A `CLUSTER BY` clause from a `SELECT` statement. Equivalent to `DISTRIBUTE BY` and `SORT BY` in tandem. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-clusterby.html """ type = "cluster_by_clause" match_grammar = Sequence( "CLUSTER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `CLUSTER BY 1` Ref("NumericLiteralSegment"), # Can cluster by an expression Ref("ExpressionSegment"), ), ), terminators=[ "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class DistributeByClauseSegment(BaseSegment): """A `DISTRIBUTE BY` clause from a `SELECT` statement. This clause is mutually exclusive with ORDER BY and CLUSTER BY, but can be combined with SORT BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-distribute-by.html """ type = "distribute_by_clause" match_grammar = Sequence( "DISTRIBUTE", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `DISTRIBUTE BY 1` Ref("NumericLiteralSegment"), # Can distribute by an expression Ref("ExpressionSegment"), ), ), terminators=[ "SORT", "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class HintFunctionSegment(BaseSegment): """A Function within a SparkSQL Hint. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "hint_function" match_grammar = Sequence( Ref("FunctionNameSegment"), Ref("FunctionContentsSegment", optional=True), ) class SelectHintSegment(BaseSegment): """Spark Select Hints.
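e.g. (an illustrative example): ``SELECT /*+ BROADCAST(t1), MERGE(t2) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key``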
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "select_hint" match_grammar = Sequence( Sequence( Ref("StartHintSegment"), Delimited( AnyNumberOf( Ref("HintFunctionSegment"), # At least function should be supplied min_times=1, ), terminators=[Ref("EndHintSegment")], ), Ref("EndHintSegment"), ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A `LIMIT` clause like in `SELECT`. Enhanced from ANSI dialect. :: Spark does not allow explicit or implicit `OFFSET` (implicit being 1000, 20 for example) :: Spark allows an `ALL` quantifier or a function expression as an input to `LIMIT` https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-limit.html """ match_grammar = Sequence( "LIMIT", Indent, OneOf( Ref("NumericLiteralSegment"), "ALL", Ref("FunctionSegment"), ), Dedent, ) class SetOperatorSegment(ansi.SetOperatorSegment): """A set operator such as Union, Minus, Except or Intersect. Enhanced from ANSI dialect. :: Spark allows the `ALL` keyword to follow Except and Minus. :: Distinct allows the `DISTINCT` and `ALL` keywords. # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-setops.html """ match_grammar = OneOf( Sequence( OneOf("EXCEPT", "MINUS"), Ref.keyword("ALL", optional=True), ), Sequence( OneOf("UNION", "INTERSECT"), OneOf("DISTINCT", "ALL", optional=True), ), exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns. Enhance `SelectClauseModifierSegment` from Ansi to allow SparkSQL Hints https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ match_grammar = Sequence( # TODO New Rule warning of Join Hints priority if multiple specified # When different join strategy hints are specified on # both sides of a join, Spark prioritizes the BROADCAST # hint over the MERGE hint over the SHUFFLE_HASH hint # over the SHUFFLE_REPLICATE_NL hint. # # Spark will issue Warning in the following example: # # SELECT # /*+ BROADCAST(t1), MERGE(t1, t2) */ # t1.a, # t1.b, # t2.c # FROM t1 INNER JOIN t2 ON t1.key = t2.key; # # Hints should be listed in order of priority in Select Ref("SelectHintSegment", optional=True), OneOf("DISTINCT", "ALL", optional=True), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance unordered `SELECT` statement for valid SparkSQL clauses. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. 
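For example (illustrative), each side of ``SELECT a FROM t1 UNION SELECT a FROM t2`` is matched by this segment, leaving any trailing ORDER BY or LIMIT to bind to the set expression as a whole.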
""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("QualifyClauseSegment", optional=True), Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], # Removing non-valid clauses that exist in ANSI dialect remove=[Ref("OverlapsClauseSegment", optional=True)], ) class SelectStatementSegment(ansi.SelectStatementSegment): """Enhance `SELECT` statement for valid SparkSQL clauses.""" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( # TODO New Rule: Warn of mutual exclusion of following clauses # DISTRIBUTE, SORT, CLUSTER and ORDER BY if multiple specified insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ).copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """Enhance `GROUP BY` clause like in `SELECT` for `CUBE` and `ROLLUP`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ match_grammar = Sequence( "GROUP", "BY", Indent, OneOf( Delimited( Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), Sequence( Delimited( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), OneOf( Ref("WithCubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), ), ), ), Dedent, ) class WithCubeRollupClauseSegment(BaseSegment): """A `[WITH CUBE | WITH ROLLUP]` clause after the `GROUP BY` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ type = "with_cube_rollup_clause" match_grammar = Sequence( "WITH", OneOf("CUBE", "ROLLUP"), ) class SortByClauseSegment(BaseSegment): """A `SORT BY` clause like in `SELECT`. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sortby.html """ type = "sort_by_clause" match_grammar = Sequence( "SORT", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), # NB: This isn't really ANSI, and isn't supported in Mysql, # but is supported in enough other dialects for it to make # sense here for now. Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=[ "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A `TABLESAMPLE` clause following a table identifier. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sampling.html """ match_grammar = Sequence( "TABLESAMPLE", OneOf( Bracketed( Ref("NumericLiteralSegment"), OneOf( "PERCENT", "ROWS", ), ), Bracketed( "BUCKET", Ref("NumericLiteralSegment"), "OUT", "OF", Ref("NumericLiteralSegment"), ), ), ) class LateralViewClauseSegment(BaseSegment): """A `LATERAL VIEW` like in a `FROM` clause. 
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-lateral-view.html """ type = "lateral_view_clause" match_grammar = Sequence( Ref("CommaSegment", optional=True), Indent, "LATERAL", Ref.keyword("VIEW", optional=True), Ref.keyword("OUTER", optional=True), Ref("FunctionSegment"), OneOf( Sequence( Ref("SingleIdentifierGrammar"), Sequence( Ref.keyword("AS", optional=True), Delimited(Ref("SingleIdentifierGrammar")), optional=True, ), ), Sequence( Ref.keyword("AS", optional=True), Delimited(Ref("SingleIdentifierGrammar")), ), ), Dedent, ) class PivotClauseSegment(BaseSegment): """A `PIVOT` clause as using in FROM clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-pivot.html """ type = "pivot_clause" match_grammar = Sequence( Indent, "PIVOT", Bracketed( Indent, Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), "FOR", OptionallyBracketed( OneOf( Ref("SingleIdentifierGrammar"), Delimited( Ref("SingleIdentifierGrammar"), ), ), ), "IN", Bracketed( Delimited( Sequence( OneOf( Bracketed( Delimited( Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), Delimited( Ref("ExpressionSegment"), ), ), Ref("AliasExpressionSegment", optional=True), ), ), ), Dedent, ), Dedent, ) class UnpivotClauseSegment(BaseSegment): """An UNPIVOT expression. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-unpivot.html """ type = "unpivot_clause" match_grammar = Sequence( Indent, "UNPIVOT", Sequence(OneOf("INCLUDE", "EXCLUDE"), "NULLS", optional=True), Indent, Bracketed( OneOf( Ref("SingleValueColumnUnpivotSegment"), Ref("MultiValueColumnUnpivotSegment"), ), ), Dedent, ) class SingleValueColumnUnpivotSegment(BaseSegment): """An UNPIVOT single column syntax fragment.""" type = "unpivot_single_column" match_grammar = Sequence( Ref("SingleIdentifierGrammar"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed( Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("AliasExpressionSegment", optional=True), ), ), parse_mode=ParseMode.GREEDY, ), Dedent, ) class MultiValueColumnUnpivotSegment(BaseSegment): """An UNPIVOT multiple column syntax fragment.""" type = "unpivot_multi_column" match_grammar = Sequence( Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), Indent, "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed( Indent, Delimited( Sequence( Bracketed(Indent, Delimited(Ref("ColumnReferenceSegment"))), Ref("AliasExpressionSegment", optional=True), ), ), parse_mode=ParseMode.GREEDY, ), Dedent, ) class TransformClauseSegment(BaseSegment): """A `TRANSFORM` clause like used in `SELECT`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-transform.html """ type = "transform_clause" match_grammar = Sequence( "TRANSFORM", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), parse_mode=ParseMode.GREEDY, ), Indent, Ref("RowFormatClauseSegment", optional=True), "USING", Ref("QuotedLiteralSegment"), Sequence( "AS", Bracketed( Delimited( AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), ), ), optional=True, ), Ref("RowFormatClauseSegment", optional=True), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. Enhanced from ANSI dialect to allow for additional parameters. 
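e.g. (an illustrative example): ``EXPLAIN FORMATTED SELECT * FROM t WHERE k = 1``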
EXPLAIN [ EXTENDED | CODEGEN | COST | FORMATTED ] explainable_stmt https://spark.apache.org/docs/latest/sql-ref-syntax-qry-explain.html """ explainable_stmt = Ref("StatementSegment") match_grammar = Sequence( "EXPLAIN", OneOf( "EXTENDED", "CODEGEN", "COST", "FORMATTED", optional=True, ), explainable_stmt, ) # Auxiliary Statements class AddFileSegment(BaseSegment): """A `ADD {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-file.html """ type = "add_file_statement" match_grammar = Sequence( "ADD", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment"), Ref("FileLiteralSegment")), ) class FileLiteralSegment(BaseSegment): """A path literal that isn't quoted. The regular expression will pickup any paths with a leading protocol, however to prevent some division operators that may look like paths, we only parse them here **after** lexing. """ type = "file_literal" match_grammar: Matchable = OneOf( TypedParser("file_literal", LiteralSegment), Sequence( Ref("SlashSegment", optional=True), Delimited( Delimited( TypedParser("word", WordSegment, type="path_segment"), delimiter=Ref("DotSegment"), ), delimiter=Ref("SlashSegment"), allow_gaps=False, ), ), ) class AddJarSegment(BaseSegment): """A `ADD {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html """ type = "add_jar_statement" match_grammar = Sequence( "ADD", Ref("JarKeywordSegment"), AnyNumberOf( Ref("QuotedLiteralSegment"), Ref("FileLiteralSegment"), ), ) class AnalyzeTableSegment(BaseSegment): """An `ANALYZE {TABLE | TABLES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-analyze-table.html """ type = "analyze_table_statement" match_grammar = Sequence( "ANALYZE", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref( "PartitionSpecGrammar", optional=True, ), "COMPUTE", "STATISTICS", OneOf( "NOSCAN", Sequence( "FOR", "COLUMNS", OptionallyBracketed( Delimited( Ref( "ColumnReferenceSegment", ), ), ), ), optional=True, ), ), Sequence( "TABLES", Sequence( OneOf( "FROM", "IN", ), Ref( "DatabaseReferenceSegment", ), optional=True, ), "COMPUTE", "STATISTICS", Ref.keyword( "NOSCAN", optional=True, ), ), ), ) class CacheTableSegment(BaseSegment): """A `CACHE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-cache-table.html """ type = "cache_table" match_grammar = Sequence( "CACHE", Ref.keyword("LAZY", optional=True), "TABLE", Ref("TableReferenceSegment"), Ref("OptionsGrammar", optional=True), Sequence( Ref.keyword("AS", optional=True), Ref("SelectableGrammar"), optional=True ), ) class ClearCacheSegment(BaseSegment): """A `CLEAR CACHE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-clear-cache.html """ type = "clear_cache" match_grammar = Sequence( "CLEAR", "CACHE", ) class DescribeStatementSegment(BaseSegment): """A `DESCRIBE` statement. This class provides coverage for databases, tables, functions, and queries. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one describe vs another, but they could be broken out to one class per describe statement type. 
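    For example, all of the following should parse as describe statements
    (exact coverage is defined by ``DescribeObjectGrammar``):

    .. code-block:: sql

        DESCRIBE DATABASE my_db;
        DESC TABLE EXTENDED my_table;
        DESCRIBE QUERY SELECT 1;
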
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-database.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-function.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-query.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-table.html """ type = "describe_statement" match_grammar = Sequence( OneOf("DESCRIBE", "DESC"), Ref("DescribeObjectGrammar"), ) class ListFileSegment(BaseSegment): """A `LIST {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-list-file.html """ type = "list_file_statement" match_grammar = Sequence( "LIST", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment"), Ref("FileLiteralSegment")), ) class ListJarSegment(BaseSegment): """A `ADD {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html """ type = "list_jar_statement" match_grammar = Sequence( "LIST", Ref("JarKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment"), Ref("FileLiteralSegment")), ) class RefreshStatementSegment(BaseSegment): """A `REFRESH` statement for given data source path. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one refresh vs another, but they could be broken out to one class per refresh statement type. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-function.html """ type = "refresh_statement" match_grammar = Sequence( "REFRESH", OneOf( Ref("QuotedLiteralSegment"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), ), ), ) class ResetStatementSegment(BaseSegment): """A `RESET` statement used to reset runtime configurations. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-reset.html """ type = "reset_statement" match_grammar = Sequence( "RESET", Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("DotSegment"), optional=True, ), ) class SetStatementSegment(BaseSegment): """A `SET` statement used to set runtime properties. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-set.html """ type = "set_statement" match_grammar = Sequence( "SET", Ref("SQLConfPropertiesSegment", optional=True), OneOf( Ref("PropertyListGrammar"), Ref("PropertyNameSegment"), optional=True, ), ) class ShowStatement(BaseSegment): """Common class for `SHOW` statements. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one show vs another, but they could be broken out to one class per show statement type. 
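    For example, all of the following should parse as show statements
    (exact coverage is defined by ``ShowObjectGrammar``):

    .. code-block:: sql

        SHOW TABLES IN my_db;
        SHOW CREATE TABLE my_table;
        SHOW PARTITIONS my_table;
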
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-columns.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-create-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-databases.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-functions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-partitions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tables.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tblproperties.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-views.html """ type = "show_statement" match_grammar = Sequence( "SHOW", Ref("ShowObjectGrammar"), ) class UncacheTableSegment(BaseSegment): """AN `UNCACHE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-uncache-table.html """ type = "uncache_table" match_grammar = Sequence( "UNCACHE", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( # Segments defined in Spark3 dialect insert=[ # Data Definition Statements Ref("AlterDatabaseStatementSegment"), Ref("AlterTableStatementSegment"), Ref("AlterViewStatementSegment"), Ref("CreateTableStatementSegment"), Ref("MsckRepairTableStatementSegment"), Ref("UseDatabaseStatementSegment"), # Auxiliary Statements Ref("AddFileSegment"), Ref("AddJarSegment"), Ref("AnalyzeTableSegment"), Ref("CacheTableSegment"), Ref("ClearCacheSegment"), Ref("ListFileSegment"), Ref("ListJarSegment"), Ref("RefreshStatementSegment"), Ref("ResetStatementSegment"), Ref("SetStatementSegment"), Ref("ShowStatement"), Ref("UncacheTableSegment"), # Data Manipulation Statements Ref("InsertOverwriteDirectorySegment"), Ref("InsertOverwriteDirectoryHiveFmtSegment"), Ref("LoadDataSegment"), # Data Retrieval Statements Ref("ClusterByClauseSegment"), Ref("DistributeByClauseSegment"), # Delta Lake Ref("VacuumStatementSegment"), Ref("DescribeHistoryStatementSegment"), Ref("DescribeDetailStatementSegment"), Ref("GenerateManifestFileStatementSegment"), Ref("ConvertToDeltaStatementSegment"), Ref("RestoreTableStatementSegment"), # Databricks - Delta Live Tables Ref("ConstraintStatementSegment"), Ref("ApplyChangesIntoStatementSegment"), # Databricks - widgets Ref("CreateWidgetStatementSegment"), Ref("RemoveWidgetStatementSegment"), Ref("ReplaceTableStatementSegment"), Ref("SetVariableStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class JoinClauseSegment(ansi.JoinClauseSegment): """Any number of join clauses, including the `JOIN` keyword. 
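    For example, in addition to ANSI-style joins, this is intended to cover
    Spark's ``LEFT SEMI`` and ``LEFT ANTI`` join types (via
    ``JoinTypeKeywords``) as well as ``USING`` joins:

    .. code-block:: sql

        SELECT a.id FROM a LEFT SEMI JOIN b ON a.id = b.id;

        SELECT * FROM a JOIN b USING (id);
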
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-join.html """ match_grammar = OneOf( # NB These qualifiers are optional # TODO: Allow nested joins like: # ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON # tab1.col1 = tab2.col1 Sequence( Ref("JoinTypeKeywords", optional=True), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, Conditional(Indent, indented_using_on=True), # NB: this is optional OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Sequence( "USING", Conditional(Indent, indented_using_on=False), Bracketed( # NB: We don't use BracketedColumnReferenceListGrammar # here because we're just using SingleIdentifierGrammar, # rather than ObjectReferenceSegment or # ColumnReferenceSegment. This is a) so that we don't # lint it as a reference and b) because the column will # probably be returned anyway during parsing. Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Conditional(Dedent, indented_using_on=False), ), # Unqualified joins *are* allowed. They just might not # be a good idea. optional=True, ), Conditional(Dedent, indented_using_on=True), ), # Note NATURAL joins do not support Join conditions Sequence( Ref("NaturalJoinKeywordsGrammar"), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. Note also that it's possible to specify just column aliases without aliasing the table as well: .. code-block:: sql SELECT * FROM VALUES (1,2) as t (a, b); SELECT * FROM VALUES (1,2) as (a, b); SELECT * FROM VALUES (1,2) as t; Note that in Spark SQL, identifiers are quoted using backticks (`my_table`) rather than double quotes ("my_table"). Quoted identifiers are allowed in aliases, but unlike ANSI which allows single quoted identifiers ('my_table') in aliases, this is not allowed in Spark and so the definition of this segment must depart from ANSI. """ match_grammar = Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( # maybe table alias and column aliases Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed(Ref("SingleIdentifierListSegment")), ), # just a table alias Ref("SingleIdentifierGrammar"), exclude=OneOf( "LATERAL", Ref("JoinTypeKeywords"), "WINDOW", "PIVOT", "KEYS", "FROM", ), ), Dedent, ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause, as typically used with `INSERT` or `SELECT`. The Spark SQL reference does not mention `VALUES` clauses except in the context of `INSERT` statements. However, they appear to behave much the same as in `postgres `. In short, they can appear anywhere a `SELECT` can, and also as bare `VALUES` statements. Here are some examples: .. code-block:: sql VALUES 1,2 LIMIT 1; SELECT * FROM VALUES (1,2) as t (a,b); SELECT * FROM (VALUES (1,2) as t (a,b)); WITH a AS (VALUES 1,2) SELECT * FROM a; """ match_grammar = Sequence( "VALUES", Delimited( OneOf( Bracketed( Delimited( # NULL keyword used in # INSERT INTO statement. "NULL", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), "NULL", Ref("ExpressionSegment"), exclude=OneOf("VALUES"), ), ), # LIMIT/ORDER are unreserved in sparksql. 
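        # Exclude them from the optional alias below so that a bare
        # `VALUES 1, 2 LIMIT 1` parses LIMIT as a clause rather than as
        # an implicit alias.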
Ref( "AliasExpressionSegment", exclude=OneOf("LIMIT", "ORDER"), optional=True, ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Enhance to allow for additional clauses allowed in Spark and Delta Lake. """ match_grammar = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Sequence( OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( Ref("AtSignLiteralSegment"), Sequence( Indent, OneOf( Ref("TimestampAsOfGrammar"), Ref("VersionAsOfGrammar"), ), Dedent, ), optional=True, ), ), # Nested Selects Bracketed(Ref("SelectableGrammar")), ) class FileReferenceSegment(BaseSegment): """A reference to a file for direct query. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html """ type = "file_reference" match_grammar = Sequence( Ref("DataSourcesV2FileTypeGrammar"), Ref("DotSegment"), # NB: Using `QuotedLiteralSegment` here causes `FileReferenceSegment` # to match as a `TableReferenceSegment` Ref("BackQuotedIdentifierSegment"), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. Enhanced from ANSI to allow for `LATERAL VIEW` clause """ match_grammar = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref("SamplingExpressionSegment", optional=True), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("JoinLikeClauseGrammar"), ), optional=True, ), Ref("PostTableExpressionGrammar", optional=True), ) class PropertyNameSegment(BaseSegment): """A segment for a property name to set and retrieve table and runtime properties. https://spark.apache.org/docs/latest/configuration.html#application-properties """ type = "property_name_identifier" match_grammar = Sequence( OneOf( Delimited( Ref("PropertiesNakedIdentifierSegment"), delimiter=Ref("DotSegment"), allow_gaps=False, ), Ref("SingleIdentifierGrammar"), ), ) class GeneratedColumnDefinitionSegment(BaseSegment): """A generated column definition, e.g. for CREATE TABLE or ALTER TABLE. https://docs.delta.io/latest/delta-batch.html#use-generated-columns """ type = "generated_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) Sequence( "GENERATED", "ALWAYS", "AS", Bracketed( OneOf( Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ), ), AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class MergeUpdateClauseSegment(ansi.MergeUpdateClauseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", OneOf( Sequence("SET", Ref("WildcardIdentifierSegment")), Sequence( Indent, Ref("SetClauseListSegment"), Dedent, ), ), ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar: Matchable = Sequence( "INSERT", OneOf( Ref("WildcardIdentifierSegment"), Sequence( Indent, Ref("BracketedColumnReferenceListGrammar"), Dedent, Ref("ValuesClauseSegment"), ), ), ) class UpdateStatementSegment(ansi.UpdateStatementSegment): """An `Update` statement. Enhancing from ANSI dialect to be SparkSQL & Delta Lake specific. 
https://docs.delta.io/latest/delta-update.html#update-a-table """ match_grammar: Matchable = Sequence( "UPDATE", Indent, OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), # SET is not a reserved word in all dialects (e.g. RedShift) # So specifically exclude as an allowed implicit alias to avoid parsing errors Ref( "AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True, ), Dedent, Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class IntervalLiteralSegment(BaseSegment): """An interval literal segment. https://spark.apache.org/docs/latest/sql-ref-literals.html#interval-literal """ type = "interval_literal" match_grammar: Matchable = Sequence( Ref("SignedSegmentGrammar", optional=True), OneOf( Ref("NumericLiteralSegment"), Ref("SignedQuotedLiteralSegment"), ), Ref("DatetimeUnitSegment"), Ref.keyword("TO", optional=True), Ref("DatetimeUnitSegment", optional=True), ) class IntervalExpressionSegment(ansi.IntervalExpressionSegment): """An interval expression segment. Redefining from ANSI dialect to allow for additional syntax. https://spark.apache.org/docs/latest/sql-ref-literals.html#interval-literal """ match_grammar: Matchable = Sequence( "INTERVAL", OneOf( AnyNumberOf( Ref("IntervalLiteralSegment"), ), Ref("QuotedLiteralSegment"), ), ) class VacuumStatementSegment(BaseSegment): """A `VACUUM` statement segment. https://docs.delta.io/latest/delta-utility.html#remove-files-no-longer-referenced-by-a-delta-table """ type = "vacuum_statement" match_grammar: Matchable = Sequence( "VACUUM", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( Sequence( "RETAIN", Ref("NumericLiteralSegment"), Ref("DatetimeUnitSegment"), ), Sequence( "DRY", "RUN", ), optional=True, ), ) class DescribeHistoryStatementSegment(BaseSegment): """A `DESCRIBE HISTORY` statement segment. https://docs.delta.io/latest/delta-utility.html#retrieve-delta-table-history """ type = "describe_history_statement" match_grammar: Matchable = Sequence( "DESCRIBE", "HISTORY", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), Ref("LimitClauseSegment", optional=True), ) class DescribeDetailStatementSegment(BaseSegment): """A `DESCRIBE DETAIL` statement segment. https://docs.delta.io/latest/delta-utility.html#retrieve-delta-table-details """ type = "describe_detail_statement" match_grammar: Matchable = Sequence( "DESCRIBE", "DETAIL", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ) class GenerateManifestFileStatementSegment(BaseSegment): """A statement to `GENERATE` manifest files for a Delta Table. https://docs.delta.io/latest/delta-utility.html#generate-a-manifest-file """ type = "generate_manifest_file_statement" match_grammar: Matchable = Sequence( "GENERATE", StringParser( "symlink_format_manifest", CodeSegment, type="symlink_format_manifest", ), "FOR", "TABLE", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ) class ConvertToDeltaStatementSegment(BaseSegment): """A statement to convert other file formats to Delta. 
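    For example (the paths here are illustrative):

    .. code-block:: sql

        CONVERT TO DELTA parquet.`/data/events`;
        CONVERT TO DELTA parquet.`/data/events` NO STATISTICS;
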
https://docs.delta.io/latest/delta-utility.html#convert-a-parquet-table-to-a-delta-table https://docs.databricks.com/delta/delta-utility.html#convert-an-iceberg-table-to-a-delta-table """ type = "convert_to_delta_statement" match_grammar: Matchable = Sequence( "CONVERT", "TO", "DELTA", Ref("FileReferenceSegment"), Sequence("NO", "STATISTICS", optional=True), Ref("PartitionSpecGrammar", optional=True), ) class RestoreTableStatementSegment(BaseSegment): """A statement to `RESTORE` a Delta Table to a previous version. https://docs.delta.io/latest/delta-utility.html#restore-a-delta-table-to-an-earlier-state """ type = "restore_table_statement" match_grammar: Matchable = Sequence( "RESTORE", "TABLE", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), "TO", OneOf( Ref("TimestampAsOfGrammar"), Ref("VersionAsOfGrammar"), ), ) class ConstraintStatementSegment(BaseSegment): """A `CONSTRAINT` statement to to define data quality on data contents. https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-expectations.html#manage-data-quality-with-delta-live-tables """ type = "constraint_statement" match_grammar: Matchable = Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), "EXPECT", Bracketed(Ref("ExpressionSegment")), Sequence("ON", "VIOLATION", optional=True), OneOf( Sequence("FAIL", "UPDATE"), Sequence("DROP", "ROW"), optional=True, ), ) class ApplyChangesIntoStatementSegment(BaseSegment): """A statement ingest CDC data a target table. https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-cdc.html#sql """ type = "apply_changes_into_statement" match_grammar = Sequence( Sequence( "APPLY", "CHANGES", "INTO", ), Indent, Ref("TableExpressionSegment"), Dedent, Ref("FromClauseSegment"), Sequence( "KEYS", Indent, Ref("BracketedColumnReferenceListGrammar"), Dedent, ), Sequence("IGNORE", "NULL", "UPDATES", optional=True), Ref("WhereClauseSegment", optional=True), AnyNumberOf( Sequence( "APPLY", "AS", OneOf("DELETE", "TRUNCATE"), "WHEN", Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # NB: Setting max_times to allow for one instance # of DELETE and TRUNCATE at most max_times=2, ), Sequence( "SEQUENCE", "BY", Ref("ColumnReferenceSegment"), ), Sequence( "COLUMNS", OneOf( Delimited( Ref("ColumnReferenceSegment"), ), Sequence( Ref("StarSegment"), "EXCEPT", Ref("BracketedColumnReferenceListGrammar"), ), ), optional=True, ), Sequence( "STORED", "AS", "SCD", "TYPE", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "TRACK", "HISTORY", "ON", OneOf( Delimited( Ref("ColumnReferenceSegment"), ), Sequence( Ref("StarSegment"), "EXCEPT", Ref("BracketedColumnReferenceListGrammar"), ), ), optional=True, ), ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Databricks.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional EXCEPT clause # https://docs.databricks.com/release-notes/runtime/9.0.html#exclude-columns-in-select--public-preview Ref("ExceptClauseSegment", optional=True), ] ) class ExceptClauseSegment(BaseSegment): """SELECT * EXCEPT clause.""" type = "select_except_clause" match_grammar = Sequence( "EXCEPT", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement. 
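    As well as ordinary select targets, this is intended to match Spark's
    ``TRANSFORM`` syntax, e.g.
    ``SELECT TRANSFORM (col) USING 'cat' AS (out STRING) FROM t``.
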
It's very similar to `SelectClauseSegment` from `dialect_ansi` except does not have set `SetOperatorSegment` as possible terminator - this is to avoid issues with wrongly recognized `EXCEPT`. """ type = "select_clause" match_grammar = Sequence( "SELECT", OneOf( Ref("TransformClauseSegment"), Sequence( Ref( "SelectClauseModifierSegment", optional=True, ), Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), ), ), Dedent, terminators=[Ref("SelectClauseTerminatorGrammar")], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class UsingClauseSegment(BaseSegment): """`USING` clause segment.""" type = "using_clause" match_grammar = Sequence("USING", Ref("DataSourceFormatSegment")) class DataSourceFormatSegment(BaseSegment): """Data source format segment.""" type = "data_source_format" match_grammar = OneOf( Ref("FileFormatGrammar"), # NB: JDBC is part of DataSourceV2 but not included # there since there are no significant syntax changes "JDBC", Ref( "ObjectReferenceSegment" ), # This allows for formats such as org.apache.spark.sql.jdbc ) class IcebergTransformationSegment(BaseSegment): """A Transformation expressions used in PARTITIONED BY. This segment is to be used in creating hidden partitions in the iceberg table format. https://iceberg.apache.org/docs/latest/spark-ddl/#partitioned-by """ type = "iceberg_transformation" match_grammar = OneOf( Sequence( OneOf( "YEARS", "MONTHS", "DAYS", "DATE", "HOURS", "DATE_HOUR", ), Bracketed(Ref("ColumnReferenceSegment")), ), Sequence( OneOf("BUCKET", "TRUNCATE"), Bracketed( Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("ColumnReferenceSegment"), ) ), ), ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. This overrides the ansi dialect frame clause segment as the sparksql frame clause allows for a more expressive frame syntax. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-window.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), "UNBOUNDED", Ref("IntervalExpressionSegment"), ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class SetVariableStatementSegment(BaseSegment): """A `SET VARIABLE` statement used to set session variables. https://spark.apache.org/docs/4.0.0-preview2/sql-ref-syntax-aux-set-var.html """ type = "set_variable_statement" match_grammar = Sequence( "SET", OneOf( "VAR", "VARIABLE", ), OptionallyBracketed(Delimited(Ref("SingleIdentifierGrammar"))), Ref("EqualsSegment"), OneOf( "DEFAULT", OptionallyBracketed(Ref("ExpressionSegment")), ), allow_gaps=True, ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_sparksql_keywords.py000066400000000000000000000114611503426445100254000ustar00rootroot00000000000000"""A List of Spark SQL keywords. 
https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html#sql-keywords """ RESERVED_KEYWORDS = [ "ALL", "AND", "ANY", "AS", "AUTHORIZATION", "BOTH", "CASE", "CAST", "CHECK", "COLLATE", "COLUMN", "CONSTRAINT", "CREATE", "CROSS", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "ELSE", "END", "ESCAPE", "EXCEPT", "FALSE", "FETCH", "FILTER", "FOR", "FOREIGN", "FROM", "FULL", "GRANT", "GROUP", "HAVING", "IN", "INNER", "INTERSECT", "INTO", "IS", "JOIN", "LEADING", "LEFT", "NATURAL", "NOT", "NULL", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERLAPS", "PRIMARY", "REFERENCES", "RIGHT", "SELECT", "SESSION_USER", "SOME", "TABLE", "THEN", "TO", "TRAILING", "UNION", "UNIQUE", "UNKNOWN", "USER", "USING", "WHEN", "WHERE", "WITH", ] UNRESERVED_KEYWORDS = [ "ADD", "AFTER", "ALTER", "ANALYZE", "ANTI", "ARCHIVE", "ARRAY", "ASC", "AT", "BERNOULLI", "BETWEEN", "BUCKET", "BUCKETS", "BY", "CACHE", "CASCADE", "CHANGE", "CLEAR", "CLUSTER", "CLUSTERED", "CODEGEN", "COLLECTION", "COLUMNS", "COMMENT", "COMMIT", "COMPACT", "COMPACTIONS", "COMPUTE", "CONCATENATE", "COST", "CUBE", "CURRENT", "DATA", "DATE", "DATE_HOUR", "DATABASE", "DATABASES", "DAY", "DAYS", "DBPROPERTIES", "DEFINED", "DELETE", "DELIMITED", "DESC", "DESCRIBE", "DFS", "DIRECTORIES", "DIRECTORY", "DISTINCT", "DISTRIBUTE", "DISTRIBUTED", "DIV", "DROP", "ESCAPED", "EXCHANGE", "EXISTS", "EXPLAIN", "EXPORT", "EXTENDED", "EXTERNAL", "EXTRACT", "FIELD", "FIELDS", "FILEFORMAT", "FIRST", "FOLLOWING", "FORMAT", "FORMATTED", "FUNCTION", "FUNCTIONS", "GLOBAL", "GROUPING", "HOUR", "HOURS", "IDENTIFIER", "IF", "IGNORE", "ILIKE", "IMPORT", "INDEX", "INDEXES", "INPATH", "INPUTFORMAT", "INSERT", "INTERVAL", "ITEMS", "KEYS", "LAST", "LAZY", "LIKE", "LIMIT", "LINES", "LIST", "LOAD", "LOCAL", "LOCALLY", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "MACRO", "MAP", "MATCHED", "MERGE", "MINUTE", "MONTH", "MONTHS", "MSCK", "NAMESPACE", "NAMESPACES", "NO", "NULLS", "OF", "OPTION", "OPTIONS", "ORDERED", "OUT", "OUTPUTFORMAT", "OVER", "OVERLAY", "OVERWRITE", "PARTITION", "PARTITIONED", "PARTITIONS", "PERCENTLIT", "PIVOT", "PLACING", "POSITION", "PRECEDING", "PRINCIPALS", "PROPERTIES", "PURGE", "QUALIFY", "QUERY", "RANGE", "RECORDREADER", "RECORDWRITER", "RECOVER", "REDUCE", "REFRESH", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "RESET", "RESPECT", "RESTRICT", "REVOKE", "RLIKE", "ROLE", "ROLES", "ROLLBACK", "ROLLUP", "ROW", "ROWS", "SCHEMA", "SECOND", "SEMI", "SEPARATED", "SERDE", "SERDEPROPERTIES", "SET", "SETMINUS", "SETS", "SHOW", "SKEWED", "SORT", "SORTED", "START", "STATISTICS", "STORED", "STRATIFY", "STRUCT", "SUBSTR", "SUBSTRING", "SYNC", "SYSTEM", "TABLES", "TABLESAMPLE", "TBLPROPERTIES", "TEMP", "TEMPORARY", "TERMINATED", "TIME", "TIMESTAMP_LTZ", "TIMESTAMP_NTZ", "TOUCH", "TRANSACTION", "TRANSACTIONS", "TRANSFORM", "TRIM", "TRUE", "TRUNCATE", "TRY_CAST", "TYPE", "UNARCHIVE", "UNBOUNDED", "UNCACHE", "UNLOCK", "UNPIVOT", "UNSET", "UPDATE", "USE", "VALUES", "VAR", "VARIABLE", "VARIANT", "VIEW", "VIEWS", "WRITE", "WINDOW", "YEAR", "YEARS", "ZONE", # Spark Core Data Sources # https://spark.apache.org/docs/latest/sql-data-sources.html "AVRO", "CSV", "JSON", "PARQUET", "ORC", "JDBC", # Community Contributed Data Sources "DELTA", # https://github.com/delta-io/delta "XML", # https://github.com/databricks/spark-xml "ICEBERG", # Delta Lake "DETAIL", "DRY", "GENERATE", "HISTORY", "RETAIN", "RUN", # Databricks - Delta Live Tables "CHANGES", "DELETES", "EXPECT", "FAIL", "LIVE", "SCD", "STREAMING", "UPDATES", "VIOLATION", "TRACK", "HISTORY", # Databricks widget 
"WIDGET", "DROPDOWN", "TEXT", "CHOICES", "REMOVE", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_sqlite.py000066400000000000000000001007001503426445100231050ustar00rootroot00000000000000"""The sqlite dialect. https://www.sqlite.org/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, LiteralSegment, Matchable, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WhitespaceSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_sqlite_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") sqlite_dialect = ansi_dialect.copy_as( "sqlite", formatted_name="SQLite", docstring="""**Default Casing**: Not specified in the docs, but through testing it appears that SQLite *stores* column names in whatever case they were defined, but is always *case-insensitive* when resolving those names. **Quotes**: String Literals: ``''`` (or ``""`` if not otherwise resolved to an identifier), Identifiers: ``""``, ``[]`` or |back_quotes|. See the `SQLite Keywords Docs`_ for more details. The dialect for `SQLite `_. .. _`SQLite Keywords Docs`: https://sqlite.org/lang_keywords.html """, ) sqlite_dialect.sets("reserved_keywords").clear() sqlite_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) sqlite_dialect.sets("unreserved_keywords").clear() sqlite_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) sqlite_dialect.patch_lexer_matchers( [ # SQLite allows block comments to be terminated by end of input RegexLexer( "block_comment", r"\/\*([^\*]|\*(?!\/))*(\*\/|\Z)", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"'((?:[^']|'')*)'", 1), "escape_replacements": [(r"''", "'")], }, ), RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), RegexLexer( "back_quote", r"`([^`]|``)*`", CodeSegment, segment_kwargs={ "quoted_value": (r"`((?:[^`]|``)*)`", 1), "escape_replacements": [(r"``", "`")], }, ), ] ) sqlite_dialect.insert_lexer_matchers( [ RegexLexer( "at_sign_literal", r"@[a-zA-Z0-9_]+", LiteralSegment, segment_kwargs={"type": "at_sign_literal"}, ), RegexLexer( "colon_literal", r":[a-zA-Z0-9_]+", LiteralSegment, segment_kwargs={"type": "colon_literal"}, ), RegexLexer( "question_literal", r"\?[0-9]+", LiteralSegment, segment_kwargs={"type": "question_literal"}, ), RegexLexer( "dollar_literal", r"\$[a-zA-Z0-9_]+", LiteralSegment, segment_kwargs={"type": "dollar_literal"}, ), ], before="question", ) sqlite_dialect.insert_lexer_matchers( [ StringLexer("inline_path_operator", "->>", CodeSegment), StringLexer("column_path_operator", "->", CodeSegment), ], before="greater_than", ) sqlite_dialect.add( BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", # match ANSI's naked identifier casefold, sqlite is case-insensitive. 
casefold=str.upper, ), ColumnPathOperatorSegment=StringParser( "->", SymbolSegment, type="column_path_operator" ), InlinePathOperatorSegment=StringParser( "->>", SymbolSegment, type="column_path_operator" ), QuestionMarkSegment=StringParser("?", SymbolSegment, type="question_mark"), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", ), ColonLiteralSegment=TypedParser( "colon_literal", LiteralSegment, type="colon_literal", ), QuestionLiteralSegment=TypedParser( "question_literal", LiteralSegment, type="question_literal", ), DollarLiteralSegment=TypedParser( "dollar_literal", LiteralSegment, type="dollar_literal", ), ) sqlite_dialect.replace( PrimaryKeyGrammar=Sequence( "PRIMARY", "KEY", OneOf("ASC", "DESC", optional=True), Ref("ConflictClauseSegment", optional=True), Sequence("AUTOINCREMENT", optional=True), ), NumericLiteralSegment=OneOf( TypedParser("numeric_literal", LiteralSegment, type="numeric_literal"), Ref("ParameterizedSegment"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[Ref("ParameterizedSegment")] ), TemporaryTransientGrammar=Ref("TemporaryGrammar"), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "DATETIME"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), BaseExpressionElementGrammar=OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Sequence( Ref("DatatypeSegment"), Ref("LiteralGrammar"), ), terminators=[ Ref("CommaSegment"), Ref.keyword("AS"), ], ), AlterTableOptionsGrammar=OneOf( Sequence("RENAME", "TO", Ref("SingleIdentifierGrammar")), Sequence( "RENAME", Sequence("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("SingleIdentifierGrammar"), ), Sequence( "ADD", Sequence("COLUMN", optional=True), Ref("ColumnDefinitionSegment") ), Sequence( "DROP", Sequence("COLUMN", optional=True), Ref("ColumnReferenceSegment") ), ), AutoIncrementGrammar=Nothing(), CommentClauseSegment=Nothing(), IntervalExpressionSegment=Nothing(), TimeZoneGrammar=Nothing(), FetchClauseSegment=Nothing(), TrimParametersGrammar=Nothing(), LikeGrammar=Sequence("LIKE"), OverlapsClauseSegment=Nothing(), MLTableExpressionSegment=Nothing(), MergeIntoLiteralGrammar=Nothing(), SamplingExpressionSegment=Nothing(), BinaryOperatorGrammar=ansi_dialect.get_grammar("BinaryOperatorGrammar").copy( insert=[ Ref("ColumnPathOperatorSegment"), Ref("InlinePathOperatorSegment"), ] ), OrderByClauseTerminators=OneOf( "LIMIT", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "WINDOW", ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "WINDOW", ), PostFunctionGrammar=Sequence( Ref("FilterClauseGrammar", optional=True), Ref("OverClauseSegment", optional=True), ), IgnoreRespectNullsGrammar=Nothing(), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), ), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, 
exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "OrderByClauseSegment" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Ref("IndexColumnDefinitionSegment"), # Raise Function contents OneOf( "IGNORE", Sequence( OneOf( "ABORT", "FAIL", "ROLLBACK", ), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), ), ), # NOTE: This block was copy/pasted from dialect_ansi.py with these changes made: # - "PRIOR" keyword removed from Expression_A_Unary_Operator_Grammar Expression_A_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), ), IsDistinctFromGrammar=Sequence( "IS", Ref.keyword("NOT", optional=True), Sequence("DISTINCT", "FROM", optional=True), ), NanLiteralSegment=Nothing(), PatternMatchingGrammar=Sequence( Ref.keyword("NOT", optional=True), OneOf("GLOB", "REGEXP", "MATCH"), ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), terminators=[Ref("DotSegment")], ), # match ANSI's naked identifier casefold, sqlite is case-insensitive. QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper ), SingleQuotedIdentifierSegment=TypedParser( "single_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper ), ColumnConstraintDefaultGrammar=Ref("ExpressionSegment"), FrameClauseUnitGrammar=OneOf("ROWS", "RANGE", "GROUPS"), ) class FrameClauseSegment(BaseSegment): """A frame clause for window functions. https://www.sqlite.org/syntax/frame-spec.html """ type = "frame_clause" match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf( Sequence("UNBOUNDED", "PRECEDING"), Sequence("CURRENT", "ROW"), Sequence(Ref("ExpressionSegment"), "PRECEDING"), Sequence( "BETWEEN", OneOf( Sequence("UNBOUNDED", "PRECEDING"), Sequence("CURRENT", "ROW"), Sequence(Ref("ExpressionSegment"), "FOLLOWING"), Sequence(Ref("ExpressionSegment"), "PRECEDING"), ), "AND", OneOf( Sequence("UNBOUNDED", "FOLLOWING"), Sequence("CURRENT", "ROW"), Sequence(Ref("ExpressionSegment"), "FOLLOWING"), Sequence(Ref("ExpressionSegment"), "PRECEDING"), ), ), ), Sequence( "EXCLUDE", OneOf( Sequence("NO", "OTHERS"), Sequence("CURRENT", "ROW"), "TIES", "GROUP" ), optional=True, ), ) class ParameterizedSegment(BaseSegment): """Sqlite allows named and argument based parameters to prevent SQL Injection. 
https://www.sqlite.org/c3ref/bind_blob.html """ type = "parameterized_expression" match_grammar = OneOf( Ref("AtSignLiteralSegment"), Ref("QuestionMarkSegment"), Ref("ColonLiteralSegment"), Ref("QuestionLiteralSegment"), Ref("DollarLiteralSegment"), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class ColumnReferenceSegment(ansi.ColumnReferenceSegment): """A reference to column, field or alias. Also allows `column->path` and `column->>path` for JSON values. https://www.sqlite.org/json1.html#jptr """ match_grammar = ansi.ColumnReferenceSegment.match_grammar.copy( insert=[ Sequence( OneOf( ansi.ColumnReferenceSegment.match_grammar.copy(), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("LiteralGrammar"), ), ), ] ) class TableReferenceSegment(ansi.TableReferenceSegment): """A reference to a table. Also allows `table->path` and `table->>path` for JSON values. https://www.sqlite.org/json1.html#jptr """ match_grammar = ansi.TableReferenceSegment.match_grammar.copy( insert=[ Sequence( ansi.TableReferenceSegment.match_grammar.copy(), OneOf( Ref("ColumnPathOperatorSegment"), Ref("InlinePathOperatorSegment"), ), OneOf( Ref("LiteralGrammar"), ), ), ] ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. """ type = "data_type" match_grammar: Matchable = OneOf( Sequence( "DOUBLE", "PRECISION", ), Sequence("UNSIGNED", "BIG", "INT"), Sequence( OneOf( Sequence( OneOf("VARYING", "NATIVE"), OneOf("CHARACTER"), ), Sequence( OneOf("CHARACTER"), OneOf("VARYING", "NATIVE"), ), Ref("DatatypeIdentifierSegment"), ), Ref("BracketedArguments", optional=True), OneOf("UNSIGNED", optional=True), ), ) class TableEndClauseSegment(BaseSegment): """Support Table Options at end of tables. https://www.sqlite.org/syntax/table-options.html """ type = "table_end_clause_segment" match_grammar: Matchable = Delimited(Sequence("WITHOUT", "ROWID"), "STRICT") class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause like in `INSERT`.""" type = "values_clause" match_grammar: Matchable = Sequence( "VALUES", Delimited( Sequence( Bracketed( Delimited( "DEFAULT", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), ), ), ) class IndexColumnDefinitionSegment(BaseSegment): """A column definition for CREATE INDEX. Overridden from ANSI to allow expressions https://www.sqlite.org/expridx.html. """ type = "index_column_definition" match_grammar: Matchable = Sequence( OneOf( Ref("SingleIdentifierGrammar"), # Column name Ref("ExpressionSegment"), # Expression for simple functions ), OneOf("ASC", "DESC", optional=True), ) class ReturningClauseSegment(BaseSegment): """A returning clause. Per docs https://www.sqlite.org/lang_returning.html """ type = "returning_clause" match_grammar = Sequence( "RETURNING", Indent, Delimited( Ref("WildcardExpressionSegment"), Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, ) class ConflictTargetSegment(BaseSegment): """An upsert conflict target. 
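    For example, the ``(name) WHERE name IS NOT NULL`` part of:

    .. code-block:: sql

        INSERT INTO t (id, name) VALUES (1, 'x')
        ON CONFLICT (name) WHERE name IS NOT NULL DO NOTHING;
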
https://www.sqlite.org/lang_upsert.html """ type = "conflict_target" match_grammar = Sequence( Delimited(Ref("IndexColumnDefinitionSegment")), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ) class UpsertClauseSegment(BaseSegment): """An upsert clause. https://www.sqlite.org/lang_upsert.html """ type = "upsert_clause" match_grammar = Sequence( "ON", "CONFLICT", Ref("ConflictTargetSegment", optional=True), "DO", OneOf( "NOTHING", Sequence( "UPDATE", "SET", Delimited( Sequence( OneOf( Ref("SingleIdentifierGrammar"), Ref("BracketedColumnReferenceListGrammar"), ), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ), Sequence( "WHERE", Ref("ExpressionSegment"), optional=True, ), ), ), ) class InsertStatementSegment(BaseSegment): """An`INSERT` statement. https://www.sqlite.org/lang_insert.html """ type = "insert_statement" match_grammar = Sequence( OneOf( Sequence( "INSERT", Sequence( "OR", OneOf( "ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", ), optional=True, ), ), # REPLACE is just an alias for INSERT OR REPLACE "REPLACE", ), "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Sequence( Ref("ValuesClauseSegment"), Ref("UpsertClauseSegment", optional=True), ), Sequence( OptionallyBracketed(Ref("SelectableGrammar")), Ref("UpsertClauseSegment", optional=True), ), Ref("DefaultValuesGrammar"), ), Ref("ReturningClauseSegment", optional=True), ) class ConflictClauseSegment(BaseSegment): """A conflict clause. https://www.sqlite.org/lang_conflict.html """ type = "conflict_clause" match_grammar = Sequence( "ON", "CONFLICT", OneOf( "ROLLBACK", "ABORT", "FAIL", "IGNORE", "REPLACE", ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. Overriding ColumnConstraintSegment to allow for additional segment parsing and to support on conflict clauses. """ match_grammar: Matchable = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( Sequence( Ref.keyword("NOT", optional=True), "NULL", Ref("ConflictClauseSegment", optional=True), ), # NOT NULL or NULL Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # DEFAULT "DEFAULT", Ref("ColumnConstraintDefaultGrammar"), ), Ref("PrimaryKeyGrammar"), Sequence( Ref("UniqueKeyGrammar"), Ref("ConflictClauseSegment", optional=True) ), # UNIQUE Ref("AutoIncrementGrammar"), Ref("ReferenceDefinitionGrammar"), # REFERENCES reftable [ ( refcolumn) ]x Ref("CommentClauseSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment") ), # https://www.sqlite.org/datatype3.html#collation Sequence( Sequence("GENERATED", "ALWAYS", optional=True), "AS", Bracketed(Ref("ExpressionSegment")), OneOf("STORED", "VIRTUAL", optional=True), ), # https://www.sqlite.org/gencol.html ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """Overriding TableConstraintSegment to allow for additional segment parsing.""" match_grammar: Matchable = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( # CHECK ( ) Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? 
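                # SQLite allows an ON CONFLICT clause on UNIQUE table
                # constraints, e.g. `UNIQUE (a, b) ON CONFLICT IGNORE`.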
Ref("ConflictClauseSegment", optional=True), ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? Ref("ConflictClauseSegment", optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement. As per https://www.sqlite.org/lang_transaction.html """ type = "transaction_statement" match_grammar: Matchable = Sequence( OneOf("BEGIN", "COMMIT", "ROLLBACK", "END"), OneOf("TRANSACTION", optional=True), Sequence("TO", "SAVEPOINT", Ref("ObjectReferenceSegment"), optional=True), ) class PragmaReferenceSegment(ansi.ObjectReferenceSegment): """A Pragma object.""" type = "pragma_reference" class PragmaStatementSegment(BaseSegment): """A Pragma Statement. As per https://www.sqlite.org/pragma.html """ type = "pragma_statement" _pragma_value = OneOf( Ref("LiteralGrammar"), Ref("BooleanLiteralGrammar"), "YES", "NO", "ON", "OFF", "NONE", "FULL", "INCREMENTAL", "DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "NORMAL", "EXCLUSIVE", "FAST", "EXTRA", "DEFAULT", "FILE", "PASSIVE", "RESTART", "RESET", ) match_grammar = Sequence( "PRAGMA", Ref("PragmaReferenceSegment"), Bracketed(_pragma_value, optional=True), Sequence( Ref("EqualsSegment"), OptionallyBracketed(_pragma_value), optional=True ), ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. https://www.sqlite.org/lang_createtrigger.html """ type = "create_trigger" match_grammar: Matchable = Sequence( "CREATE", Ref("TemporaryGrammar", optional=True), "TRIGGER", Ref("IfNotExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF"), optional=True), OneOf( "DELETE", "INSERT", Sequence( "UPDATE", Sequence( "OF", Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), ), ), "ON", Ref("TableReferenceSegment"), Sequence("FOR", "EACH", "ROW", optional=True), Sequence("WHEN", OptionallyBracketed(Ref("ExpressionSegment")), optional=True), "BEGIN", Delimited( Ref("UpdateStatementSegment"), Ref("InsertStatementSegment"), Ref("DeleteStatementSegment"), Ref("SelectableGrammar"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ), "END", ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://www.sqlite.org/lang_createview.html match_grammar: Matchable = Sequence( "CREATE", Ref("TemporaryGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class UnorderedSelectStatementSegment(BaseSegment): """A `SELECT` statement without any ORDER clauses or later. 
    Replaces (without overriding) ANSI to remove Eager Matcher
    """

    type = "select_statement"

    match_grammar = Sequence(
        Ref("SelectClauseSegment"),
        Ref("FromClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
        Ref("GroupByClauseSegment", optional=True),
        Ref("HavingClauseSegment", optional=True),
        Ref("OverlapsClauseSegment", optional=True),
        Ref("NamedWindowSegment", optional=True),
    )


class DeleteStatementSegment(ansi.DeleteStatementSegment):
    """A `DELETE` statement.

    DELETE FROM <table name> [ WHERE <search condition> ]
    """

    type = "delete_statement"

    # match grammar. This one makes sense in the context of knowing that it's
    # definitely a statement, we just don't know what type yet.
    match_grammar: Matchable = Sequence(
        "DELETE",
        Ref("FromClauseSegment"),
        Ref("WhereClauseSegment", optional=True),
        Ref("ReturningClauseSegment", optional=True),
    )


class UpdateStatementSegment(ansi.UpdateStatementSegment):
    """An `Update` statement.

    UPDATE <table name>
SET [ WHERE ] """ type = "update_statement" match_grammar: Matchable = Sequence( "UPDATE", Sequence( "OR", OneOf( "ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", ), optional=True, ), Indent, Ref("TableReferenceSegment"), Ref("AliasExpressionSegment", optional=True), Dedent, Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("ReturningClauseSegment", optional=True), ) class SetClauseSegment(ansi.SetClauseSegment): """A set clause.""" match_grammar = Sequence( OneOf( Ref("SingleIdentifierGrammar"), Ref("BracketedColumnReferenceListGrammar"), ), Ref("EqualsSegment"), Ref("ExpressionSegment"), ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement. Replaces (without overriding) ANSI to remove Eager Matcher """ type = "select_statement" # Remove the Limit and Window statements from ANSI match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ] ) class GroupingSetsClauseSegment(ansi.GroupingSetsClauseSegment): """`GROUPING SETS` clause within the `GROUP BY` clause. This is `Nothing` for SQLite. """ match_grammar = Nothing() class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. As per https://www.sqlite.org/lang_createindex.html """ type = "create_index_statement" match_grammar: Matchable = Sequence( "CREATE", Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), ) ), Ref("WhereClauseSegment", optional=True), ) class CreateVirtualTableStatementSegment(BaseSegment): """A `CREATE VIRTUAL TABLE` statement. As per https://www.sqlite.org/lang_createvtab.html """ type = "create_virtual_table_statement" match_grammar: Matchable = Sequence( "CREATE", "VIRTUAL", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), "USING", Ref("SingleIdentifierGrammar"), Bracketed( Delimited( OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("SingleIdentifierGrammar"), ), ), optional=True, ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = OneOf( Ref("AlterTableStatementSegment"), Ref("CreateIndexStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateVirtualTableStatementSegment"), Ref("CreateTriggerStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DeleteStatementSegment"), Ref("DropIndexStatementSegment"), Ref("DropTableStatementSegment"), Ref("DropTriggerStatementSegment"), Ref("DropViewStatementSegment"), Ref("ExplainStatementSegment"), Ref("InsertStatementSegment"), Ref("PragmaStatementSegment"), Ref("SelectableGrammar"), Ref("TransactionStatementSegment"), Ref("UpdateStatementSegment"), Bracketed(Ref("StatementSegment")), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_sqlite_keywords.py000066400000000000000000000056131503426445100250430ustar00rootroot00000000000000"""A List of SQLite keywords. https://www.sqlite.org/lang_keywords.html Augmented with data types, and a couple of omitted keywords. 
""" RESERVED_KEYWORDS = [ "ABORT", "ADD", "AFTER", "ALL", "ALTER", "ALWAYS", "ANALYZE", "AND", "AS", "ASC", "ATTACH", "AUTOINCREMENT", "BEFORE", "BEGIN", "BETWEEN", "BY", "CASCADE", "CASE", "CAST", "CHECK", "COLLATE", "COLUMN", "COMMIT", "CONFLICT", "CONSTRAINT", "CREATE", "CROSS", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DATABASE", "DEFAULT", "DEFERRABLE", "DEFERRED", "DELETE", "DESC", "DETACH", "DISTINCT", "DO", "DROP", "EACH", "ELSE", "END", "ESCAPE", "EXCEPT", "EXCLUDE", "EXCLUSIVE", "EXISTS", "EXPLAIN", "FAIL", "FILTER", "FIRST", "FOLLOWING", "FOR", "FOREIGN", "FROM", "FULL", "GENERATED", "GLOB", "GROUP", "GROUPS", "HAVING", "IF", "IGNORE", "IMMEDIATE", "IN", "INDEX", "INDEXED", "INITIALLY", "INNER", "INSERT", "INSTEAD", "INTERSECT", "INTO", "IS", "ISNULL", "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", "MATCH", "MATERIALIZED", "NATURAL", "NO", "NOT", "NOTHING", "NOTNULL", "NULL", "NULLS", "OF", "OFFSET", "ON", "OR", "ORDER", "OTHERS", "OUTER", "OVER", "PARTITION", "PLAN", "PRAGMA", "PRECEDING", "PRIMARY", "QUERY", "RAISE", "RANGE", "RECURSIVE", "REFERENCES", "REGEXP", "REINDEX", "RELEASE", "RENAME", "REPLACE", "RESTRICT", "RETURNING", "RIGHT", "ROLLBACK", "ROW", "ROWS", "SAVEPOINT", "SELECT", "SET", "TABLE", "TEMP", "TEMPORARY", "THEN", "TO", "TRANSACTION", "TRIGGER", "UNBOUNDED", "UNION", "UNIQUE", "UPDATE", "USING", "VACUUM", "VALUES", "VIEW", "VIRTUAL", "WHEN", "WHERE", "WINDOW", "WITH", "WITHOUT", ] UNRESERVED_KEYWORDS = [ "KEY", "INT", "INTEGER", "TINYINT", "SMALLINT", "MEDIUMINT", "BIGINT", "UNSIGNED", "INT2", "INT8", "CHARACTER", "VARCHAR", "VARYING", "NCHAR", "NATIVE", "NVARCHAR", "TIES", "TEXT", "CLOB", "BLOB", "REAL", "BIG", "DOUBLE", "PRECISION", "FLOAT", "NUMERIC", "DECIMAL", "BOOLEAN", "DATE", "DATETIME", "ROWID", "YES", "OFF", "NONE", "INCREMENTAL", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "NORMAL", "FAST", "EXTRA", "FILE", "PASSIVE", "RESTART", "RESET", "STRICT", "BINARY", "NOCASE", "RTRIM", "STORED", # https://sqlite.org/forum/forumpost/91127ba3db "ACTION", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_starrocks.py000066400000000000000000000311401503426445100236200ustar00rootroot00000000000000"""The StarRocks dialect. This dialect extends MySQL grammar with specific StarRocks syntax features. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, Bracketed, CodeSegment, Delimited, Matchable, MultiStringParser, OneOf, OptionallyBracketed, Ref, SegmentGenerator, Sequence, ) from sqlfluff.dialects import dialect_mysql as mysql from sqlfluff.dialects.dialect_starrocks_keywords import ( starrocks_reserved_keywords, starrocks_unreserved_keywords, ) mysql_dialect = load_raw_dialect("mysql") starrocks_dialect = mysql_dialect.copy_as( "starrocks", formatted_name="StarRocks", docstring="""**Default Casing**: ``lowercase`` **Quotes**: String Literals: ``''``, ``"``, Identifiers: |back_quotes|. 
The dialect for `StarRocks `_.""", ) starrocks_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", starrocks_unreserved_keywords ) starrocks_dialect.sets("reserved_keywords").clear() starrocks_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", starrocks_reserved_keywords ) # Add the engine types set starrocks_dialect.sets("engine_types").update( ["olap", "mysql", "elasticsearch", "hive", "hudi", "iceberg", "jdbc"] ) starrocks_dialect.add( EngineTypeSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("engine_types"), CodeSegment, type="engine_type", ) ), ) class CreateTableStatementSegment(mysql.CreateTableStatementSegment): """A `CREATE TABLE` statement. StarRocks-specific version that handles: - Different ENGINE types - Storage and bucketing options - Specific partition syntax - Specific table properties """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("EXTERNAL", optional=True), Ref.keyword("TEMPORARY", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Standard column definitions Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("IndexDefinitionSegment"), ), ) ), # StarRocks specific Sequence( "ENGINE", Ref("EqualsSegment"), Ref("EngineTypeSegment"), optional=True, ), # Key type Sequence( OneOf( Sequence("AGGREGATE", "KEY"), Sequence("UNIQUE", "KEY"), Sequence("PRIMARY", "KEY"), Sequence("DUPLICATE", "KEY"), ), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Ref("CommentClauseSegment", optional=True), # Partitioning Ref("PartitionSegment", optional=True), # Distribution Ref("DistributionSegment", optional=True), # Order by Sequence( "ORDER", "BY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Sequence( "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ) ), optional=True, ), ), # Create table like Sequence("LIKE", Ref("TableReferenceSegment")), # Create table as Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), ), ), ) class ColumnConstraintSegment(mysql.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar: Matchable = OneOf( mysql.ColumnConstraintSegment.match_grammar, Sequence("AS", Ref("ExpressionSegment")), ) class PartitionSegment(BaseSegment): """A partition segment supporting StarRocks specific syntax. Supports three types of partitioning: 1. Range partitioning (PARTITION BY RANGE) 2. Expression partitioning using time functions (date_trunc/time_slice) 3. 
Expression partitioning using column expressions """ type = "partition_segment" match_grammar = Sequence( "PARTITION", "BY", OneOf( # Range partitioning Sequence( "RANGE", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), OneOf( # Fixed partitions Bracketed( Delimited( Sequence( "PARTITION", Ref("ObjectReferenceSegment"), "VALUES", OneOf( # LESS THAN syntax Sequence( "LESS", "THAN", OneOf( "MAXVALUE", Bracketed(Delimited(Ref("LiteralGrammar"))), ), ), # Fixed range syntax Sequence( Bracketed( Bracketed(Delimited(Ref("LiteralGrammar"))), ",", Bracketed(Delimited(Ref("LiteralGrammar"))), ) ), ), ) ) ), # Dynamic partitions Bracketed( Sequence( "START", Bracketed(Ref("QuotedLiteralSegment")), "END", Bracketed(Ref("QuotedLiteralSegment")), "EVERY", Bracketed( OneOf( Ref("QuotedLiteralSegment"), Sequence( "INTERVAL", Ref("NumericLiteralSegment"), OneOf("YEAR", "MONTH", "DAY", "HOUR"), ), ) ), ) ), ), ), # Expression partitioning - time function expressions Ref("FunctionSegment"), # Expression partitioning - column expressions Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ) class DistributionSegment(BaseSegment): """A distribution segment supporting both hash and random distribution.""" type = "distribution_segment" match_grammar = Sequence( "DISTRIBUTED", "BY", OneOf( Sequence( "HASH", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence("BUCKETS", Ref("NumericLiteralSegment"), optional=True), ), Sequence( "RANDOM", Sequence("BUCKETS", Ref("NumericLiteralSegment"), optional=True), ), ), ) class IndexDefinitionSegment(BaseSegment): """Bitmap index definition specific to StarRocks.""" type = "index_definition" match_grammar = Sequence( "INDEX", Ref("IndexReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Sequence("USING", "BITMAP", optional=True), Sequence("COMMENT", Ref("QuotedLiteralSegment"), optional=True), ) class CreateRoutineLoadStatementSegment(BaseSegment): """A `CREATE ROUTINE LOAD` statement for StarRocks.""" type = "create_routine_load_statement" match_grammar = Sequence( "CREATE", "ROUTINE", "LOAD", Ref("ObjectReferenceSegment"), # job_name "ON", Ref("TableReferenceSegment"), # table_name # Column definitions Sequence( "COLUMNS", Bracketed( Delimited( OneOf( Ref("QuotedIdentifierSegment"), Ref("NakedIdentifierSegment"), Sequence( OneOf( Ref("QuotedIdentifierSegment"), Ref("NakedIdentifierSegment"), ), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ) ) ), optional=True, ), # Properties section using dedicated properties segment Sequence( "PROPERTIES", Bracketed(Delimited(Ref("CreateRoutineLoadPropertiesSegment"))), optional=True, ), # Data Source section using dedicated data source properties segment "FROM", "KAFKA", Bracketed(Delimited(Ref("CreateRoutineLoadDataSourcePropertiesSegment"))), ) class CreateRoutineLoadPropertiesSegment(BaseSegment): """Properties segment for CREATE ROUTINE LOAD statement.""" type = "routine_load_properties" match_grammar = Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ) class CreateRoutineLoadDataSourcePropertiesSegment(BaseSegment): """Data source properties segment for CREATE ROUTINE LOAD statement.""" type = "routine_load_data_source_properties" match_grammar = Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ) """Grammar for STOP ROUTINE LOAD statement in StarRocks.""" class StopRoutineLoadStatementSegment(BaseSegment): """A `STOP ROUTINE LOAD` statement. Stops a running routine load job. STOP ROUTINE LOAD FOR [db_name.] 
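    <job_name>

    For example (the database and job names here are hypothetical):

        STOP ROUTINE LOAD FOR example_db.example_job;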
""" type = "stop_routine_load_statement" match_grammar = Sequence( "STOP", "ROUTINE", "LOAD", "FOR", OneOf( # db_name.job_name format Sequence( Ref("DatabaseReferenceSegment"), Ref("DotSegment"), Ref("ObjectReferenceSegment"), ), # job_name only format Ref("ObjectReferenceSegment"), ), ) class PauseRoutineLoadStatementSegment(BaseSegment): """A `PAUSE ROUTINE LOAD` statement. Pauses a running routine load job. """ type = "pause_routine_load_statement" match_grammar = Sequence( "PAUSE", "ROUTINE", "LOAD", "FOR", Ref("ObjectReferenceSegment"), ) class ResumeRoutineLoadStatementSegment(BaseSegment): """A `RESUME ROUTINE LOAD` statement. Resumes a paused routine load job. """ type = "resume_routine_load_statement" match_grammar = Sequence( "RESUME", "ROUTINE", "LOAD", "FOR", Ref("ObjectReferenceSegment"), ) class StatementSegment(mysql.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = mysql.StatementSegment.match_grammar.copy( insert=[ Ref("CreateRoutineLoadStatementSegment"), Ref("StopRoutineLoadStatementSegment"), Ref("PauseRoutineLoadStatementSegment"), Ref("ResumeRoutineLoadStatementSegment"), ] ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_starrocks_keywords.py000066400000000000000000000147301503426445100255550ustar00rootroot00000000000000"""A List of MariaDB SQL keywords. The full list can be queried from MariaDB directly https://mariadb.com/kb/en/information-schema-keywords-table/ The reserved keywords are listed on https://mariadb.com/kb/en/reserved-words/ """ starrocks_reserved_keywords = """ ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE BEFORE BETWEEN BIGINT BINARY BLOB BOTH BY CALL CASCADE CASE CHANGE CHANGED_PAGE_BITMAPS CHAR CHARACTER CHECK CLIENT_STATISTICS COLLATE COLUMN CONDITION CONSTRAINT CONTINUE CONVERT CREATE CROSS CURRENT_DATE CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR DATABASE DATABASES DATE_TRUNC DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC DECIMAL DECLARE DEFAULT DELAYED DELETE DELETE_DOMAIN_ID DESC DESCRIBE DETERMINISTIC DISTINCT DISTINCTROW DIV DO_DOMAIN_IDS DOUBLE DROP DUAL EACH ELSE ELSEIF ENCLOSED ESCAPED EXCEPT EXISTS EXIT EXPLAIN FALSE FETCH FLOAT FLOAT4 FLOAT8 FOR FORCE FOREIGN FROM FULLTEXT GENERAL GRANT GROUP HAVING HIGH_PRIORITY HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IF IGNORE IGNORE_DOMAIN_IDS IGNORE_SERVER_IDS IN INDEX INDEX_STATISTICS INFILE INNER INOUT INSENSITIVE INSERT INT INT1 INT2 INT3 INT4 INT8 INTEGER INTERSECT INTERVAL INTO IS ITERATE JOIN KEY KEYS KILL LEADING LEAVE LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME LOCALTIMESTAMP LOCK LONG LONGBLOB LONGTEXT LOOP LOW_PRIORITY MASTER_HEARTBEAT_PERIOD MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND MINUTE_SECOND MOD MODIFIES NATURAL NOT NO_WRITE_TO_BINLOG NULL NUMERIC OFFSET OLAP ON OPTIMIZE OPTION OPTIONALLY OR ORDER OUT OUTER OUTFILE OVER PAGE_CHECKSUM PARSE_VCOL_EXPR PARTITION PRECISION PRIMARY PROCEDURE PROPERTIES PURGE RANGE READ READS READ_WRITE REAL RECURSIVE REF_SYSTEM_ID REFERENCES REGEXP RELEASE RENAME REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN RETURNING REVOKE RIGHT RLIKE ROW_NUMBER ROWS SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET SHOW SIGNAL SLOW SMALLINT SPATIAL SPECIFIC SQL SQLEXCEPTION SQLSTATE SQLWARNING SQL_BIG_RESULT SQL_CALC_FOUND_ROWS SQL_SMALL_RESULT SSL STARTING STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STRAIGHT_JOIN TABLE TERMINATED THEN TIME_SLICE TINYBLOB TINYINT TINYTEXT TO TRAILING 
TRIGGER TRUE UNDO UNION UNIQUE UNLOCK UNSIGNED UPDATE USAGE USE USING UTC_DATE UTC_TIME UTC_TIMESTAMP VALUES VARBINARY VARCHAR VARCHARACTER VARYING WHEN WHERE WHILE WINDOW WITH WRITE XOR YEAR_MONTH ZEROFILL """ starrocks_unreserved_keywords = """ ACCOUNT ACTION ADMIN AFTER AGAINST AGGREGATE ALGORITHM ALWAYS ANY ASCII AT ATOMIC AUTHORS AUTO AUTO_INCREMENT AUTOEXTEND_SIZE AVG AVG_ROW_LENGTH BACKUP BEGIN BINLOG BIT BLOCK BODY BOOL BOOLEAN BTREE BYTE CACHE CASCADED CATALOG_NAME CHAIN CHANGED CHANNEL CHARSET CHECKPOINT CHECKSUM CIPHER CLASS_ORIGIN CLIENT CLOB CLOSE COALESCE CODE COLLATION COLUMN_NAME COLUMNS COLUMN_ADD COLUMN_CHECK COLUMN_CREATE COLUMN_DELETE COLUMN_GET COMMENT COMMIT COMMITTED COMPACT COMPLETION COMPRESSED CONCURRENT CONNECTION CONSISTENT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONTAINS CONTEXT CONTRIBUTORS CPU CUBE CURRENT CURRENT_POS CURSOR_NAME CYCLE DATA DATAFILE DATE DATETIME DAY DEALLOCATE DEFINER DELAY_KEY_WRITE DES_KEY_FILE DIAGNOSTICS DIRECTORY DISABLE DISCARD DISK DO DUMPFILE DUPLICATE DYNAMIC ELSIF EMPTY ENABLE END ENDS ENGINE ENGINES ENUM ERROR ERRORS ESCAPE EVENT EVENTS EVERY EXAMINED EXCHANGE EXCLUDE EXECUTE EXCEPTION EXPANSION EXPIRE EXPORT EXTENDED EXTENT_SIZE FAST FAULTS FEDERATED FIELDS FILE FIRST FIXED FLUSH FOLLOWING FOLLOWS FORMAT FOUND FULL FUNCTION GENERATED GET_FORMAT GET GLOBAL GOTO GRANTS HANDLER HARD HASH HELP HISTORY HOST HOSTS HOUR ID IDENTIFIED IGNORED IMMEDIATE IMPORT INCREMENT INDEXES INITIAL_SIZE INSERT_METHOD INSTALL INVISIBLE IO IO_THREAD IPC ISOLATION ISOPEN ISSUER INVOKER JSON JSON_TABLE KAFKA KEY_BLOCK_SIZE LANGUAGE LAST LAST_VALUE LASTVAL LEAVES LESS LEVEL LIST LOCAL LOCKED LOCKS LOGFILE LOGS MASTER MASTER_CONNECT_RETRY MASTER_DELAY MASTER_GTID_POS MASTER_HOST MASTER_LOG_FILE MASTER_LOG_POS MASTER_PASSWORD MASTER_PORT MASTER_SERVER_ID MASTER_SSL MASTER_SSL_CA MASTER_SSL_CAPATH MASTER_SSL_CERT MASTER_SSL_CIPHER MASTER_SSL_CRL MASTER_SSL_CRLPATH MASTER_SSL_KEY MASTER_USER MASTER_USE_GTID MASTER_DEMOTE_TO_SLAVE MAX_CONNECTIONS_PER_HOUR MAX_QUERIES_PER_HOUR MAX_ROWS MAX_SIZE MAX_STATEMENT_TIME MAX_UPDATES_PER_HOUR MAX_USER_CONNECTIONS MEDIUM MEMORY MERGE MESSAGE_TEXT MICROSECOND MIGRATE MINUS MINUTE MINVALUE MIN_ROWS MODE MODIFY MONITOR MONTH MUTEX MYSQL MYSQL_ERRNO NAME NAMES NATIONAL NCHAR NESTED NEVER NEXT NEXTVAL NO NOMAXVALUE NOMINVALUE NOCACHE NOCYCLE NO_WAIT NOWAIT NODEGROUP NONE NOTFOUND NUMBER NVARCHAR OF OLD_PASSWORD ONE ONLINE ONLY OPEN OPTIONS ORDINALITY OTHERS OVERLAPS OWNER PACKAGE PACK_KEYS PAGE PARSER PATH PERIOD PAUSE PARTIAL PARTITIONING PARTITIONS PASSWORD PERSISTENT PHASE PLUGIN PLUGINS PORT PORTION PRECEDES PRECEDING PREPARE PRESERVE PREV PREVIOUS PRIVILEGES PROCESS PROCESSLIST PROFILE PROFILES PROXY QUARTER QUERY QUERY_RESPONSE_TIME QUICK RAISE RAW READ_ONLY REBUILD RECOVER REDO_BUFFER_SIZE REDOFILE REDUNDANT RELAY RELAYLOG RELAY_LOG_FILE RELAY_LOG_POS RELAY_THREAD RELOAD REMOVE REORGANIZE REPAIR REPEATABLE REPLAY REPLICA REPLICAS REPLICA_POS REPLICATION RESET RESTART RESTORE RESUME RETURNED_SQLSTATE RETURNS REUSE REVERSE ROLE ROLLBACK ROLLUP ROUTINE ROW ROWCOUNT ROWNUM ROWTYPE ROW_COUNT ROW_FORMAT RTREE SAVEPOINT SCHEDULE SCHEMA_NAME SECOND SECURITY SEQUENCE SERIAL SERIALIZABLE SESSION SERVER SETVAL SHARE SHUTDOWN SIGNED SIMPLE SKIP SLAVE SLAVES SLAVE_POS SNAPSHOT SOCKET SOFT SOME SONAME SOUNDS SOURCE STAGE STORED SQL_AFTER_GTIDS SQL_BEFORE_GTIDS SQL_BUFFER_RESULT SQL_CACHE SQL_NO_CACHE SQL_THREAD SQL_TSI_SECOND SQL_TSI_MINUTE SQL_TSI_HOUR SQL_TSI_DAY SQL_TSI_WEEK SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_YEAR 
START STARTS STATEMENT STATUS STOP STORAGE STRING SUBCLASS_ORIGIN SUBJECT SUBPARTITION SUBPARTITIONS SUPER SUSPEND SWAPS SWITCHES SYSDATE SYSTEM SYSTEM_TIME TABLE_NAME TABLES TABLESPACE TABLE_CHECKSUM TABLE_STATISTICS TEMPORARY TEMPTABLE TEXT THAN TIES TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TRANSACTION TRANSACTIONAL THREADS TRIGGERS TRUNCATE TYPE UNBOUNDED UNCOMMITTED UNDEFINED UNDO_BUFFER_SIZE UNDOFILE UNICODE UNKNOWN UNINSTALL UNTIL UPGRADE USER USER_RESOURCES USER_STATISTICS USER_VARIABLES USE_FRM VALIDATION VALUE VARCHAR2 VARIABLES VIA VIEW VIRTUAL VISIBLE VERSIONING WAIT WARNINGS WEEK WEIGHT_STRING WITHIN WITHOUT WORK WRAPPER X509 XA XML YEAR """ # These are not MariaDB keywords, but needed to parse well. # Taken from the mysql dialect starrocks_unreserved_keywords += """ NOW SHARED INPLACE NOCOPY INSTANT """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_teradata.py000066400000000000000000000750221503426445100234010ustar00rootroot00000000000000"""The Teradata dialect. This inherits from the ansi dialect, with changes as specified by Teradata Database SQL Data Definition Language Syntax and Examples Release Number 15.10 Release Date December 2015 """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, CodeSegment, ComparisonOperatorSegment, CompositeComparisonOperatorSegment, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, Matchable, OneOf, OptionallyBracketed, Ref, RegexLexer, Sequence, StringParser, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") teradata_dialect = ansi_dialect.copy_as( "teradata", formatted_name="Teradata", docstring="""The dialect for `Teradata `_.""", ) teradata_dialect.patch_lexer_matchers( [ # so it also matches 1. RegexLexer( "numeric_literal", r"([0-9]+(\.[0-9]*)?)", CodeSegment, ), ] ) # Remove unused keywords from the dialect. teradata_dialect.sets("unreserved_keywords").difference_update( [ # 'auto_increment', # The following are moved to being reserved keywords "UNION", "TIMESTAMP", ] ) teradata_dialect.sets("unreserved_keywords").update( [ "AUTOINCREMENT", "ACTIVITYCOUNT", "CASESPECIFIC", "CS", "DAYS", "DEL", "DUAL", "ERRORCODE", "EXCL", "EXPORT", "FALLBACK", "FORMAT", "HASH", "IMPORT", "JOURNAL", "LABEL", "LOGON", "LOGOFF", "MACRO", "MAXINTERVALS", "MAXVALUELENGTH", "MEETS", "MERGEBLOCKRATIO", "NONE", "OVERRIDE", "PERCENT", "PROFILE", "PROTECTION", "QUERY_BAND", "QUIT", "RUN", "SAMPLE", "SEL", "SS", "STAT", "STATS", "STATISTICS", "SUMMARY", "THRESHOLD", "UC", "UPPERCASE", ] ) teradata_dialect.sets("reserved_keywords").update( ["LOCKING", "UNION", "REPLACE", "TIMESTAMP"] ) teradata_dialect.sets("bare_functions").update(["DATE"]) teradata_dialect.replace( # ANSI standard comparison operators plus Teradata extensions ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("EqualsSegment_a"), Ref("GreaterThanSegment"), Ref("GreaterThanSegment_a"), Ref("LessThanSegment"), Ref("LessThanSegment_a"), Ref("GreaterThanOrEqualToSegment"), Ref("GreaterThanOrEqualToSegment_a"), Ref("LessThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment_a"), Ref("NotEqualToSegment"), Ref("NotEqualToSegment_a"), Ref("NotEqualToSegment_b"), Ref("NotEqualToSegment_c"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), ), # match ANSI's naked identifier casefold, teradata is case-insensitive. 
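    # e.g. "my_tab", MY_TAB and my_tab all refer to the same object, so the
    # quoted value is folded to upper case (str.upper) for comparison.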
QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper ), ) teradata_dialect.add( # Add Teradata comparison operator extensions EqualsSegment_a=StringParser("EQ", ComparisonOperatorSegment), GreaterThanSegment_a=StringParser("GT", ComparisonOperatorSegment), LessThanSegment_a=StringParser("LT", ComparisonOperatorSegment), GreaterThanOrEqualToSegment_a=StringParser("GE", ComparisonOperatorSegment), LessThanOrEqualToSegment_a=StringParser("LE", ComparisonOperatorSegment), NotEqualToSegment_a=StringParser("NE", ComparisonOperatorSegment), NotEqualToSegment_b=StringParser("NOT=", ComparisonOperatorSegment), NotEqualToSegment_c=StringParser("^=", ComparisonOperatorSegment), ) # BTEQ statement class BteqKeyWordSegment(BaseSegment): """Bteq Keywords. Often a string with a dot, sometimes followed by a Literal LOGON - Used to log into Teradata system. ACTIVITYCOUNT - Returns the number of rows affected by the previous query. ERRORCODE - Returns the status code of the previous query. DATABASE - Sets the default database. LABEL - Assigns a label to a set of SQL commands. RUN FILE - Executes the query contained in a file. GOTO - Transfers control to a label. LOGOFF - Logs off from database and terminates all sessions. IMPORT - Specifies the input file path. EXPORT - Specifies the output file path and initiates the export. """ type = "bteq_key_word_segment" match_grammar = Sequence( Ref("DotSegment", optional=True), OneOf( "IF", "THEN", "LOGON", "ACTIVITYCOUNT", "ERRORCODE", "DATABASE", "LABEL", "GOTO", "LOGOFF", "IMPORT", "EXPORT", "RUN", "QUIT", "ACTIVITYCOUNT", ), Ref("LiteralGrammar", optional=True), ) class BteqStatementSegment(BaseSegment): """Bteq statements start with a dot, followed by a Keyword. Non exhaustive and maybe catching too many statements? # BTEQ commands .if errorcode > 0 then .quit 2 .IF ACTIVITYCOUNT = 0 THEN .QUIT """ type = "bteq_statement" match_grammar = Sequence( Ref("DotSegment"), Ref("BteqKeyWordSegment"), AnyNumberOf( Ref("BteqKeyWordSegment"), # if ... then: the ... Sequence( Ref("ComparisonOperatorGrammar"), Ref("LiteralGrammar"), optional=True ), optional=True, ), ) class TdCollectStatUsingOptionClauseSegment(BaseSegment): """'using_option' for COLLECT STAT clause.""" type = "collect_stat_using_option_clause" match_grammar = Sequence( OneOf( Sequence("SAMPLE", Ref("NumericLiteralSegment"), "PERCENT"), Sequence("SYSTEM", "THRESHOLD", OneOf("PERCENT", "DAYS", optional=True)), Sequence("SYSTEM", "SAMPLE"), Sequence( "THRESHOLD", Ref("NumericLiteralSegment"), OneOf("PERCENT", "DAYS"), ), Sequence("NO", "THRESHOLD", OneOf("PERCENT", "DAYS", optional=True)), Sequence("NO", "SAMPLE"), Sequence("MAXINTERVALS", Ref("NumericLiteralSegment")), Sequence("SYSTEM", "MAXINTERVALS"), Sequence("MAXVALUELENGTH", Ref("NumericLiteralSegment")), Sequence("SYSTEM", "MAXVALUELENGTH"), "SAMPLE", ), Sequence("FOR", "CURRENT", optional=True), ) class TdOrderByStatClauseSegment(BaseSegment): """An `ORDER BY (VALUES|HASH) (column_name)` clause in COLLECT STATS.""" type = "stat_orderby_clause" match_grammar = Sequence( "ORDER", "BY", OneOf("VALUES", "HASH"), Bracketed(Ref("ColumnReferenceSegment")) ) # Collect Statistics statement class TdCollectStatisticsStatementSegment(BaseSegment): """A `COLLECT STATISTICS (Optimizer Form)` statement. 
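    For example (the database, table and column names are hypothetical):

        COLLECT STATISTICS COLUMN (order_date) ON sales_db.orders;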
# TODO: add expression COLLECT [SUMMARY] (STATISTICS|STAT) [[COLUMN| [UNIQUE] INDEX] (expression (, expression ...)] ON TABLENAME [[COLUMN] (expression, ...)] """ type = "collect_statistics_statement" match_grammar = Sequence( "COLLECT", Ref.keyword("SUMMARY", optional=True), OneOf("STAT", "STATS", "STATISTICS"), Sequence( "USING", Delimited( Ref("TdCollectStatUsingOptionClauseSegment"), delimiter="AND", ), optional=True, ), Delimited( OneOf( # UNIQUE INDEX index_name ALL (column_name, ...) ORDER BY VALUES|HASH # (column_name) Sequence( Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IndexReferenceSegment", optional=True), Ref.keyword("ALL", optional=True), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("TdOrderByStatClauseSegment", optional=True), ), # UNIQUE INDEX index_name Sequence( Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IndexReferenceSegment"), ), # COLUMN ... Sequence( "COLUMN", OptionallyBracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref.keyword("PARTITION"), # TODO: expression ), ), ), Sequence( Ref.keyword("AS", optional=True), Ref("ObjectReferenceSegment"), # statistics_name optional=True, ), ), ), optional=True, ), "ON", Ref.keyword("TEMPORARY", optional=True), Ref("TableReferenceSegment"), Sequence( "COLUMN", OptionallyBracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), "PARTITION", # TODO: expression ), ), ), Sequence( Ref.keyword("AS", optional=True), Ref("ObjectReferenceSegment"), # statistics_name optional=True, ), optional=True, ), ) class TdCommentStatementSegment(BaseSegment): """A `COMMENT` statement. COMMENT [ON] (object_kind_1|object_kind_2) name [[AS|IS] comment] object_kind_1: (COLUMN|FUNCTION|GLOP SET|MACRO|MAP|METHOD|PROCEDURE|PROFILE|ROLE| TRIGGER|TYPE|VIEW) object_kind_2: (DATABASE|FILE|TABLE|USER) """ type = "comment_clause" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "COMMENT", OneOf("ON", optional=True), OneOf( Sequence("COLUMN", Ref("ColumnReferenceSegment")), Sequence("FUNCTION", Ref("ObjectReferenceSegment")), Sequence("MACRO", Ref("ObjectReferenceSegment")), Sequence("MAP", Ref("ObjectReferenceSegment")), Sequence("METHOD", Ref("ObjectReferenceSegment")), Sequence("PROCEDURE", Ref("ObjectReferenceSegment")), Sequence("PROFILE", Ref("ObjectReferenceSegment")), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("TRIGGER", Ref("ObjectReferenceSegment")), Sequence("TYPE", Ref("ObjectReferenceSegment")), Sequence("VIEW", Ref("TableReferenceSegment")), Sequence("DATABASE", Ref("DatabaseReferenceSegment")), Sequence("FILE", Ref("ObjectReferenceSegment")), Sequence("TABLE", Ref("TableReferenceSegment")), Sequence("USER", Ref("ObjectReferenceSegment")), ), Sequence( OneOf("AS", "IS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ) # Rename table statement class TdRenameStatementSegment(BaseSegment): """A `RENAME TABLE` statement. https://docs.teradata.com/reader/eWpPpcMoLGQcZEoyt5AjEg/Kl~F4lxPauOELYJVuFLjag RENAME TABLE OLD_TABLENAME (TO|AS) NEW_TABLENAME """ type = "rename_table_statement" match_grammar = Sequence( "RENAME", "TABLE", Ref("TableReferenceSegment"), OneOf( "TO", "AS", ), Ref("TableReferenceSegment"), ) # Adding Teradata specific DATE FORMAT 'YYYYMM' class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. 
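    Extends the ANSI data type with Teradata's optional FORMAT phrase, e.g.: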
DATE FORMAT 'YYYY-MM-DD' """ match_grammar = Sequence( Ref("DatatypeIdentifierSegment"), Ref("BracketedArguments", optional=True), Bracketed( OneOf( Delimited(Ref("ExpressionSegment")), # The brackets might be empty for some cases... optional=True, ), # There may be no brackets for some data types optional=True, ), Sequence( # FORMAT 'YYYY-MM-DD', "FORMAT", Ref("QuotedLiteralSegment"), optional=True ), ) class TeradataCastSegment(BaseSegment): """A casting operation using Teradata conversion syntax. https://docs.teradata.com/reader/kmuOwjp1zEYg98JsB8fu_A/ypGGhd87xi3E2E7SlNS1Xg # Teradata Conversion Syntax in Explicit Data Type Conversions expression ([data_attribute,] data_type [, data_attribute]) with data_type := a data type declaration such as INTEGER or DATE data_attribute := a data attribute such as FORMAT, NAMED or TITLE e.g. '9999-12-31' (DATE), '9999-12-31' (DATE FORMAT 'YYYY-MM-DD') '100000' (SMALLINT) DATE FORMAT 'E4,BM4BDD,BY4' DATE '2007-01-01' """ type = "cast_expression" match_grammar = Bracketed(Ref("DatatypeSegment")) class ExpressionSegment(BaseSegment): """A expression, either arithmetic or boolean. We extend the expression segment in teradata to enable casting. """ type = "expression" match_grammar = Sequence( Ref("Expression_A_Grammar"), Ref("TeradataCastSegment", optional=True), ) # Adding Teradata specific column definitions class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar = Sequence( Ref("ColumnReferenceSegment"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), # Adding Teradata specific column definitions Ref("TdColumnConstraintSegment", optional=True), ), ) class TdColumnConstraintSegment(BaseSegment): """Teradata specific column attributes. e.g. CHARACTER SET LATIN | [NOT] (CASESPECIFIC|CS) | (UPPERCASE|UC) """ type = "td_column_attribute_constraint" match_grammar = Sequence( OneOf( Sequence( # CHARACTER SET LATIN "CHARACTER", "SET", Ref("SingleIdentifierGrammar") ), Sequence( # [NOT] CASESPECIFIC Ref.keyword("NOT", optional=True), OneOf("CASESPECIFIC", "CS"), ), OneOf("UPPERCASE", "UC"), Sequence( # COMPRESS [(1.,3.) | 3. | NULL], "COMPRESS", OneOf( Bracketed(Delimited(Ref("LiteralGrammar"))), Ref("LiteralGrammar"), "NULL", optional=True, ), ), ), ) # Create Teradata Create Table Statement class TdCreateTableOptions(BaseSegment): """CreateTableOptions. 
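    The comma-led list of table options that may follow the table name, e.g.: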
, NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL, CHECKSUM = DEFAULT , DEFAULT MERGEBLOCKRATIO , MAP = TD_MAP1 """ type = "create_table_options_statement" match_grammar = Sequence( Ref("CommaSegment"), Delimited( OneOf( # [ NO ] FALLBACK [ PROTECTION ] Sequence( Ref.keyword("NO", optional=True), "FALLBACK", Ref.keyword("PROTECTION", optional=True), ), # [NO | DUAL | LOCAL |NOT LOCAL] [AFTER | BEFORE] JOURNAL Sequence( OneOf( "NO", "DUAL", "LOCAL", Sequence("NOT", "LOCAL"), optional=True ), OneOf("BEFORE", "AFTER", optional=True), "JOURNAL", ), # CHECKSUM = (ON|OFF|DEFAULT) Sequence( "CHECKSUM", Ref("EqualsSegment"), OneOf( "ON", "OFF", "DEFAULT", ), ), # (NO|Default) MergeBlockRatio Sequence( OneOf( "DEFAULT", "NO", ), "MERGEBLOCKRATIO", ), # MergeBlockRatio = integer [PERCENT] Sequence( "MERGEBLOCKRATIO", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("PERCENT", optional=True), ), # MAP = mapname Sequence("MAP", Ref("EqualsSegment"), Ref("ObjectReferenceSegment")), ), ), ) class TdTablePartitioningLevel(BaseSegment): """Partitioning Level. https://docs.teradata.com/reader/eWpPpcMoLGQcZEoyt5AjEg/e0GX8Iw16u1SCwYvc5qXzg partition_expression or COLUMN [[NO] AUTO COMPRESS] [[ALL BUT] column_partition] [ADD constant] column_partition := ([COLUMN|ROW] column_name (, column_name2, ...) NO AUTOCOMPRESS partition_expression := CASE_N, RANGE_N, EXTRACT, expression and in case of multi-level in parenthesis """ type = "td_partitioning_level" match_grammar = OneOf( Sequence( Ref("FunctionNameSegment"), Bracketed(Anything(optional=True)), ), Bracketed( Delimited( Sequence( Ref("FunctionNameSegment"), Bracketed(Anything(optional=True)), ), ), ), ) class TdTableConstraints(BaseSegment): """Teradata specific table attributes. e.g. UNIQUE PRIMARY INDEX Column_name | ( Column_name, ... ) NO PRIMARY INDEX ... """ type = "td_table_constraint" match_grammar = AnyNumberOf( # PRIMARY Index OneOf( Sequence( # UNIQUE PRIMARY INDEX Column_name | ( Column_name, ... ) Ref.keyword("UNIQUE", optional=True), "PRIMARY", "INDEX", Ref("ObjectReferenceSegment", optional=True), # primary index name OneOf( Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ) ), Ref("SingleIdentifierGrammar"), ), ), Sequence("NO", "PRIMARY", "INDEX"), # NO PRIMARY INDEX ), # PARTITION BY ... 
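        # e.g. (illustrative; the column name is hypothetical)
        # PARTITION BY RANGE_N(order_date BETWEEN DATE '2020-01-01'
        #                      AND DATE '2024-12-31' EACH INTERVAL '1' MONTH)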
Sequence( # INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA ); "PARTITION", "BY", Ref("TdTablePartitioningLevel"), ), # Index Sequence( # INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA ); Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("ObjectReferenceSegment"), # Index name Ref.keyword("ALL", optional=True), Bracketed( # Columns making up constraint Delimited(Ref("ColumnReferenceSegment")), ), ), # WITH DATA Sequence("WITH", Sequence("NO", optional=True), "DATA"), # AND STATISITCS Sequence( "AND", Sequence("NO", optional=True), OneOf("STAT", "STATS", "STATISTICS"), optional=True, ), # ON COMMIT PRESERVE ROWS Sequence("ON", "COMMIT", OneOf("PRESERVE", "DELETE"), "ROWS"), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE [MULTISET| SET] TABLE` statement.""" type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), AnySetOf( OneOf("SET", "MULTISET"), OneOf(Sequence("GLOBAL", "TEMPORARY"), "VOLATILE"), optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # , NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL Ref("TdCreateTableOptions", optional=True), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("ColumnDefinitionSegment"), Ref("TableConstraintSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), ), # Create AS syntax: Sequence("AS", Ref("SelectableGrammar")), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), # PRIMARY INDEX( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) OneOf(Ref("TdTableConstraints"), optional=True), ) class CreateViewStatementSegment(BaseSegment): """A `[CREATE | REPLACE] VIEW` statement.""" type = "create_view_statement" match_grammar: Matchable = Sequence( OneOf("CREATE", "REPLACE"), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) # Update class UpdateStatementSegment(BaseSegment): """A `Update from` statement. The UPDATE statement FROM clause is a Teradata extension to the ANSI SQL:2011 standard. UPDATE (
    <table name>
| FROM Statement) SET [ WHERE ] """ type = "update_statement" match_grammar = Sequence( "UPDATE", Indent, OneOf( Ref("TableReferenceSegment"), Ref("FromUpdateClauseSegment"), Sequence( Ref("TableReferenceSegment"), Ref("FromUpdateClauseSegment"), ), ), Dedent, Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class FromUpdateClauseSegment(BaseSegment): """A `FROM` clause like in `SELECT` but terminated by SET.""" type = "from_in_update_clause" match_grammar = Sequence( "FROM", Delimited( # Optional old school delimited joins Ref("FromExpressionElementSegment"), ), ) # Adding Teradata specific statements class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("TdCollectStatisticsStatementSegment"), Ref("BteqStatementSegment"), Ref("TdRenameStatementSegment"), Ref("QualifyClauseSegment"), Ref("TdCommentStatementSegment"), Ref("DatabaseStatementSegment"), Ref("SetSessionStatementSegment"), Ref("SetQueryBandStatementSegment"), ], ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class LockingClauseSegment(BaseSegment): """A `LOCKING` clause for Teradata. https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/SQL-Data-Manipulation-Language/Statement-Syntax/LOCKING-Request-Modifier """ type = "locking_clause" match_grammar = Sequence( OneOf( "LOCKING", "LOCK", ), OneOf( "ROW", Sequence("TABLE", Ref("ObjectReferenceSegment", optional=True)), Sequence("VIEW", Ref("ObjectReferenceSegment", optional=True)), Sequence("DATABASE", Ref("ObjectReferenceSegment", optional=True)), ), OneOf( "FOR", "IN", ), OneOf( "ACCESS", "WRITE", "EXCLUSIVE", "EXCL", Sequence("READ", Sequence("OVERRIDE", optional=True)), "SHARE", "CHECKSUM", Sequence("LOAD", "COMMITTED"), ), Sequence("MODE", optional=True), Sequence("NOWAIT", optional=True), ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ match_grammar_with_qualify_clause = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) match_grammar = match_grammar_with_qualify_clause.copy( insert=[Ref("LockingClauseSegment", optional=True)], before=Ref("SelectClauseSegment"), ) class WithCompoundStatementSegment(ansi.WithCompoundStatementSegment): """A `SELECT` statement preceded by a selection of `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` """ match_grammar = ansi.WithCompoundStatementSegment.match_grammar.copy( insert=[Ref("LockingClauseSegment", optional=True)], at=0 ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """An unordered `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement. 
    Remove OVERLAPS as a terminator, as this can be part of
    SelectClauseModifierSegment.
    """

    match_grammar = ansi.SelectClauseSegment.match_grammar.copy(
        # Allow "SEL" in place of just "SELECT"
        insert=[OneOf("SELECT", "SEL")],
        before=Ref.keyword("SELECT"),
        remove=[Ref.keyword("SELECT")],
        terminators=[
            "FROM",
            "WHERE",
            Sequence("ORDER", "BY"),
            "LIMIT",
            Ref("SetOperatorSegment"),
        ],
        replace_terminators=True,
    )


class DeleteStatementSegment(BaseSegment):
    """A `DELETE` statement.

    DEL[ETE] FROM <table name>
[ WHERE ] """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar: Matchable = Sequence( OneOf("DELETE", "DEL"), Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns. Adds NORMALIZE clause: https://docs.teradata.com/r/2_MC9vCtAJRlKle2Rpb0mA/UuxiA0mklFgv~33X5nyKMA """ type = "select_clause_modifier" match_grammar = OneOf( "DISTINCT", "ALL", Sequence( "TOP", Ref("ExpressionSegment"), Sequence("PERCENT", optional=True), Sequence("WITH", "TIES", optional=True), ), Sequence( "NORMALIZE", OneOf( Sequence( "ON", "MEETS", "OR", "OVERLAPS", ), Sequence( "ON", "OVERLAPS", ), Sequence( "ON", "OVERLAPS", "OR", "MEETS", ), optional=True, ), ), ) class DatabaseStatementSegment(BaseSegment): """A `DATABASE` statement. https://docs.teradata.com/r/Teradata-Database-SQL-Data-Definition-Language-Syntax-and-Examples/December-2015/Database-Statements/DATABASE """ type = "database_statement" match_grammar: Matchable = Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ) # Limited to SET SESSION DATABASE for now. # Many other session parameters may be set via SET SESSION. class SetSessionStatementSegment(BaseSegment): """A `SET SESSION` statement. https://docs.teradata.com/r/Teradata-Database-SQL-Data-Definition-Language-Syntax-and-Examples/December-2015/Session-Statements/SET-SESSION-DATABASE """ type = "set_session_statement" match_grammar: Matchable = Sequence( OneOf( Sequence("SET", "SESSION"), "SS", ), Ref("DatabaseStatementSegment"), ) class SetQueryBandStatementSegment(BaseSegment): """A `SET QUERY_BAND` statement. SET QUERY_BAND = { 'band_specification [...]' | NONE } [ UPDATE ] FOR { SESSION [VOLATILE] | TRANSACTION } [;] https://docs.teradata.com/r/Teradata-VantageTM-SQL-Data-Definition-Language-Syntax-and-Examples/July-2021/Session-Statements/SET-QUERY_BAND """ type = "set_query_band_statement" match_grammar: Matchable = Sequence( "SET", "QUERY_BAND", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "NONE"), Sequence("UPDATE", optional=True), "FOR", OneOf(Sequence("SESSION", Sequence("VOLATILE", optional=True)), "TRANSACTION"), ) class NotEqualToSegment_b(CompositeComparisonOperatorSegment): """The comparison operator extension NOT=. https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators """ match_grammar = Sequence( Ref("NotOperatorGrammar"), Ref("RawEqualsSegment"), allow_gaps=False ) class NotEqualToSegment_c(CompositeComparisonOperatorSegment): """The comparison operator extension ^=. https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators """ match_grammar = Sequence( Ref("BitwiseXorSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_trino.py000066400000000000000000000665451503426445100227610ustar00rootroot00000000000000"""The Trino dialect. 
See https://trino.io/docs/current/language.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexLexer, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_trino_keywords import ( trino_reserved_keywords, trino_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") trino_dialect = ansi_dialect.copy_as( "trino", formatted_name="Trino", docstring="""**Default Casing**: ``lowercase``, although the case of a reference is used in the result set column label. If a column is defined using :code:`CREATE TEMPORARY TABLE foo (COL1 int)`, then :code:`SELECT * FROM foo` returns a column labelled :code:`col1`, however :code:`SELECT COL1 FROM foo` returns a column labelled :code:`COL1`. **Quotes**: String Literals: ``''``, Identifiers: ``""`` The dialect for `Trino `_.""", ) # Set the bare functions: https://trino.io/docs/current/functions/datetime.html trino_dialect.sets("bare_functions").update( ["current_date", "current_time", "current_timestamp", "localtime", "localtimestamp"] ) # Set keywords trino_dialect.sets("unreserved_keywords").clear() trino_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", trino_unreserved_keywords ) trino_dialect.sets("reserved_keywords").clear() trino_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", trino_reserved_keywords ) trino_dialect.insert_lexer_matchers( # Regexp Replace w/ Lambda: https://trino.io/docs/422/functions/regexp.html [ StringLexer("right_arrow", "->", CodeSegment), StringLexer("fat_right_arrow", "=>", CodeSegment), ], before="like_operator", ) trino_dialect.add( RightArrowOperator=StringParser("->", SymbolSegment, type="binary_operator"), LambdaArrowSegment=StringParser("->", SymbolSegment, type="lambda_arrow"), ExecuteArrowSegment=StringParser("=>", SymbolSegment, type="execute_arrow"), StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), FormatJsonEncodingGrammar=Sequence( "FORMAT", "JSON", Sequence("ENCODING", OneOf("UTF8", "UTF16", "UTF32"), optional=True), ), ) trino_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) trino_dialect.patch_lexer_matchers( [ RegexLexer( "double_quote", r'"([^"]|"")*"', CodeSegment, segment_kwargs={ "quoted_value": (r'"((?:[^"]|"")*)"', 1), "escape_replacements": [(r'""', '"')], }, ), ] ) trino_dialect.replace( DateTimeLiteralGrammar=OneOf( Sequence( OneOf("DATE", "TIME", "TIMESTAMP"), TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal" ), ), Ref("IntervalExpressionSegment"), ), LikeGrammar=Sequence("LIKE"), # TODO: There are no custom SQL functions in Trino! How to handle this? 
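    # Nothing() never matches, so the ANSI ML table expression grammar is
    # effectively disabled for Trino.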
MLTableExpressionSegment=Nothing(), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "FETCH", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "FETCH", ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), "FETCH", ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "WINDOW", "FETCH", ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "WINDOW", "FETCH", ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "WINDOW", "FETCH", ), # NOTE: This block was copy/pasted from dialect_ansi.py with these changes made: # - "PRIOR" keyword removed Expression_A_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), ), PostFunctionGrammar=ansi_dialect.get_grammar("PostFunctionGrammar").copy( insert=[ Ref("WithinGroupClauseSegment"), Ref("WithOrdinalityClauseSegment"), ], ), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "OrderByClauseSegment" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), # For JSON_QUERY function # https://trino.io/docs/current/functions/json.html#json-query Sequence( Ref("ExpressionSegment"), # json_input Ref("FormatJsonEncodingGrammar", optional=True), Ref("CommaSegment"), Ref("ExpressionSegment"), # json_path OneOf( Sequence("WITHOUT", Ref.keyword("ARRAY", optional=True), "WRAPPER"), Sequence( "WITH", OneOf("CONDITIONAL", "UNCONDITIONAL", optional=True), Ref.keyword("ARRAY", optional=True), "WRAPPER", ), optional=True, ), ), Ref("IgnoreRespectNullsGrammar"), Ref("IndexColumnDefinitionSegment"), Ref("EmptyStructLiteralSegment"), Ref("ListaggOverflowClauseSegment"), ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add arrow operators for functions (e.g. regexp_replace) Ref("RightArrowOperator"), ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), # match ANSI's naked identifier casefold, trino is case-insensitive. 
QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper ), FunctionContentsExpressionGrammar=OneOf( Ref("LambdaExpressionSegment"), Ref("ExpressionSegment"), ), TemporaryTransientGrammar=Nothing(), PrimaryKeyGrammar=Nothing(), ForeignKeyGrammar=Nothing(), UniqueKeyGrammar=Nothing(), TemporaryGrammar=Nothing(), ) class DatatypeSegment(BaseSegment): """Data type segment. See https://trino.io/docs/current/language/types.html """ type = "data_type" match_grammar = OneOf( # Boolean "BOOLEAN", # Integer "TINYINT", "SMALLINT", "INTEGER", "INT", "BIGINT", # Floating-point "REAL", "DOUBLE", # Fixed-precision Sequence( "DECIMAL", Ref("BracketedArguments", optional=True), ), # String Sequence( OneOf("CHAR", "VARCHAR"), Ref("BracketedArguments", optional=True), ), "VARBINARY", "JSON", # Date and time "DATE", Ref("TimeWithTZGrammar"), # Structural Ref("ArrayTypeSegment"), "MAP", Ref("RowTypeSegment"), # Others "IPADDRESS", "UUID", ) class RowTypeSegment(ansi.StructTypeSegment): """Expression to construct a ROW datatype.""" match_grammar = Sequence( "ROW", Ref("RowTypeSchemaSegment", optional=True), ) class RowTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a ROW datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( # Comma-separated list of field names/types Sequence( OneOf( # ParameterNames can look like Datatypes so can't use # Optional=True here and instead do a OneOf in order # with DataType only first, followed by both. Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment"), ), ) ) ) ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment.""" type = "semi_structured_expression" match_grammar = Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class OverlapsClauseSegment(BaseSegment): """An `OVERLAPS` clause like in `SELECT.""" type = "overlaps_clause" match_grammar: Matchable = Nothing() class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later.""" match_grammar: Matchable = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause within in `WITH`, `SELECT`, `INSERT`.""" match_grammar = Sequence( "VALUES", Delimited(Ref("ExpressionSegment")), ) class IntervalExpressionSegment(BaseSegment): """An interval representing a span of time. https://trino.io/docs/current/language/types.html#interval-year-to-month https://trino.io/docs/current/functions/datetime.html#date-and-time-operators """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", Ref("QuotedLiteralSegment"), OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), ) class FrameClauseSegment(BaseSegment): """A frame clause for window functions. 
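    e.g. (illustrative) ROWS BETWEEN 1 PRECEDING AND CURRENT ROW
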
https://trino.io/blog/2021/03/10/introducing-new-window-features.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Ref("DateTimeLiteralGrammar"), "UNBOUNDED" ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Intersect or Except.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar: Matchable = OneOf( Ref("AlterTableStatementSegment"), Ref("AnalyzeStatementSegment"), Ref("CommentOnStatementSegment"), Ref("CreateFunctionStatementSegment"), Ref("CreateRoleStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DeleteStatementSegment"), Ref("DescribeStatementSegment"), Ref("DropFunctionStatementSegment"), Ref("DropRoleStatementSegment"), Ref("DropSchemaStatementSegment"), Ref("DropTableStatementSegment"), Ref("DropViewStatementSegment"), Ref("ExplainStatementSegment"), Ref("InsertStatementSegment"), Ref("MergeStatementSegment"), Ref("SelectableGrammar"), Ref("SetSchemaStatementSegment"), Ref("TransactionStatementSegment"), Ref("UpdateStatementSegment"), Ref("UseStatementSegment"), Ref("SetSessionStatementSegment"), terminators=[Ref("DelimiterGrammar")], ) class AnalyzeStatementSegment(BaseSegment): """An 'ANALYZE' statement. As per docs https://trino.io/docs/current/sql/analyze.html """ type = "analyze_statement" match_grammar = Sequence( "ANALYZE", Ref("TableReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ), ), optional=True, ), ) class WithOrdinalityClauseSegment(BaseSegment): """A WITH ORDINALITY AS t(name1, name2) clause for CROSS JOIN UNNEST(...). https://trino.io/docs/current/sql/select.html#unnest Trino supports an optional WITH ORDINALITY clause on UNNEST, which adds a numerical ordinality column to the UNNEST result. """ type = "withordinality_clause" match_grammar = Sequence( "WITH", "ORDINALITY", ) class WithinGroupClauseSegment(BaseSegment): """An WITHIN GROUP clause for window functions. https://trino.io/docs/current/functions/aggregate.html#array_agg Trino supports an optional FILTER during aggregation that comes immediately after the WITHIN GROUP clause. https://trino.io/docs/current/functions/aggregate.html#filtering-during-aggregation """ type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment", optional=False)), Ref("FilterClauseGrammar", optional=True), ) class ListaggOverflowClauseSegment(BaseSegment): """ON OVERFLOW clause of listagg function. 
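    e.g. (illustrative; the column name is hypothetical)
    LISTAGG(name, ',' ON OVERFLOW TRUNCATE '...' WITH COUNT)
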
https://trino.io/docs/current/functions/aggregate.html#array_agg """ type = "listagg_overflow_clause" match_grammar = Sequence( "ON", "OVERFLOW", OneOf( "ERROR", Sequence( "TRUNCATE", Ref("QuotedLiteralSegment", optional=True), OneOf("WITH", "WITHOUT", optional=True), Ref.keyword("COUNT", optional=True), ), ), ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals optionally specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Ref("ArrayTypeSchemaSegment", optional=True), ) class ArrayTypeSchemaSegment(ansi.ArrayTypeSegment): """Data type segment of the array. Trino supports ARRAY(DATA_TYPE) and ARRAY """ type = "array_type_schema" match_grammar = OneOf( Bracketed( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), Bracketed( Ref("DatatypeSegment"), bracket_pairs_set="bracket_pairs", bracket_type="round", ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, OneOf( "ALL", Ref("CubeRollupClauseSegment"), # Add GROUPING SETS support Ref("GroupingSetsClauseSegment"), Sequence( Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), ), ), Dedent, ) class CommentOnStatementSegment(BaseSegment): """`COMMENT ON` statement. https://trino.io/docs/current/sql/comment.html """ type = "comment_clause" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( OneOf( "TABLE", # TODO: Create a ViewReferenceSegment "VIEW", ), Ref("TableReferenceSegment"), ), Sequence( "COLUMN", # TODO: Does this correctly emit a Table Reference? Ref("ColumnReferenceSegment"), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) class LambdaExpressionSegment(BaseSegment): """Lambda function used in a function.""" type = "lambda_function" match_grammar = Sequence( OneOf( Ref("ParameterNameSegment"), Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Ref("LambdaArrowSegment"), Ref("ExpressionSegment"), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement.""" type = "create_table_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ColumnDefinitionSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), Sequence( OneOf("INCLUDING", "EXCLUDING"), "PROPERTIES", optional=True, ), ), ), optional=True, ), Ref("CommentClauseSegment", optional=True), Sequence( "WITH", Bracketed( Delimited( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ), optional=True, ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), # Create like syntax Sequence("WITH", Ref.keyword("NO", optional=True), "DATA", optional=True), ) class ColumnDefinitionSegment(ansi.ColumnDefinitionSegment): """A column definition, e.g. 
for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type AnySetOf( Sequence( "NOT", "NULL", ), Ref("CommentClauseSegment"), Sequence( "WITH", Bracketed( Delimited( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ), ), ), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement. https://trino.io/docs/current/sql/commit.html https://trino.io/docs/current/sql/rollback.html https://trino.io/docs/current/sql/start-transaction.html """ type = "transaction_statement" match_grammar: Matchable = OneOf( Sequence( "COMMIT", Ref.keyword("WORK", optional=True), ), Sequence( "ROLLBACK", Ref.keyword("WORK", optional=True), ), Sequence( "START", "TRANSACTION", Delimited( Sequence( "ISOLATION", "LEVEL", OneOf( Sequence("READ", "UNCOMMITTED"), Sequence("READ", "COMMITTED"), Sequence("REPEATABLE", "READ"), "SERIALIZABLE", ), ), Sequence( "READ", OneOf("ONLY", "WRITE"), ), optional=True, ), ), ) class InsertStatementSegment(ansi.InsertStatementSegment): """An `INSERT` statement. https://trino.io/docs/current/sql/insert.html """ type = "insert_statement" match_grammar: Matchable = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), OneOf( Ref("SelectableGrammar"), Sequence( Ref("BracketedColumnReferenceListGrammar"), Ref("SelectableGrammar"), ), Ref("DefaultValuesGrammar"), ), ) class SetSessionStatementSegment(BaseSegment): """A `SET SESSION` statement. https://trino.io/docs/current/sql/set-session.html """ type = "set_session_statement" match_grammar: Matchable = Sequence( "SET", "SESSION", Sequence( Ref("ParameterNameSegment"), Ref("DotSegment"), optional=True, ), Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement. 
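    e.g. (illustrative; the table name and procedure arguments are hypothetical)

        ALTER TABLE orders EXECUTE optimize(file_size_threshold => '128MB')
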
https://trino.io/docs/current/sql/alter-table.html """ match_grammar = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence( "ADD", "COLUMN", Ref("IfNotExistsGrammar", optional=True), Ref("ColumnDefinitionSegment"), OneOf( "FIRST", "LAST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), ), optional=True, ), ), Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), ), Sequence( "RENAME", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "ALTER", "COLUMN", Ref("ColumnReferenceSegment"), OneOf( Sequence( "SET", "DATA", "TYPE", Ref("DatatypeSegment"), ), Sequence( "DROP", "NOT", "NULL", ), ), ), Sequence( "SET", "AUTHORIZATION", OneOf( Ref("RoleReferenceSegment"), Sequence( "USER", Ref("RoleReferenceSegment"), ), Sequence( "ROLE", Ref("RoleReferenceSegment"), ), ), ), Sequence( "SET", "PROPERTIES", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), ), ), Sequence( "EXECUTE", Ref("FunctionNameSegment"), Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("ExecuteArrowSegment"), Ref("ExpressionSegment"), ), ), optional=True, ), Sequence( "WHERE", Ref("ExpressionSegment"), optional=True, ), ), ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_trino_keywords.py000066400000000000000000000045511503426445100246750ustar00rootroot00000000000000"""Keywords in the Trino Dialect. - https://trino.io/docs/current/language/reserved.html - https://github.com/trinodb/trino/blob/ master/core/trino-parser/src/main/antlr4/io/trino/sql/parser/SqlBase.g4 """ trino_reserved_keywords = """ALTER AND AS BETWEEN BY CASE CAST CONSTRAINT CREATE CROSS CUBE CURRENT_CATALOG CURRENT_DATE CURRENT_PATH CURRENT_ROLE CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEALLOCATE DELETE DESCRIBE DISTINCT DROP ELSE END ESCAPE EXCEPT EXECUTE EXISTS EXTRACT FALSE FOR FROM FULL FUNCTION GROUP GROUPING HAVING IN INNER INSERT INTERSECT INTO IS JOIN JSON_ARRAY JSON_EXISTS JSON_OBJECT JSON_QUERY JSON_TABLE JSON_VALUE LEFT LIKE LISTAGG LOCALTIME LOCALTIMESTAMP NATURAL NORMALIZE NOT NULL ON OR ORDER OUTER PREPARE RECURSIVE RIGHT ROLLUP SELECT SKIP TABLE THEN TRIM TRUE UESCAPE UNION UNNEST USING VALUES WHEN WHERE WITH """ trino_unreserved_keywords = """ABSENT ADD ADMIN AFTER ALL ANALYZE ANY ARRAY ASC AT AUTHORIZATION BERNOULLI BIGINT BOOLEAN BOTH CALL CASCADE CATALOG CATALOGS CHAR COLUMN COLUMNS COMMENT COMMIT COMMITTED CONDITIONAL COPARTITION COUNT CURRENT DATA DATE DAY DECIMAL DEFAULT DEFINE DEFINER DENY DESC DESCRIPTOR DISTRIBUTED DOUBLE EMPTY ENCODING ERROR EXCLUDING EXPLAIN FETCH FILTER FINAL FIRST FOLLOWING FORMAT FUNCTIONS GRACE GRANT GRANTED GRANTS GRAPHVIZ GROUPS HOUR IF IGNORE IMMEDIATE INCLUDING INITIAL INPUT INT INTEGER INTERVAL INVOKER IO IPADDRESS ISOLATION JSON KEEP KEY KEYS LAST LATERAL LEADING LEVEL LIMIT LOCAL LOGICAL MAP MATCH MATCHED MATCHES MATCH_RECOGNIZE MATERIALIZED MEASURES MERGE MINUTE MONTH NESTED NEXT NFC NFD NFKC NFKD NO NONE NULLIF NULLS OBJECT OF OFFSET OMIT ONE ONLY OPTION ORDINALITY OUTPUT OVER OVERFLOW PARTITION PARTITIONS PASSING PAST PATH PATTERN PER PERIOD PERMUTE PLAN POSITION PRECEDING PRECISION PRIVILEGES PROPERTIES PRUNE QUOTES RANGE READ REAL REFRESH RENAME REPEATABLE REPLACE RESET RESPECT RESTRICT RETURNING REVOKE ROLE ROLES ROLLBACK ROW ROWS RUNNING SCALAR SCHEMA SCHEMAS SECOND SECURITY 
SEEK SERIALIZABLE SESSION SET SETS SHOW SMALLINT SOME START STATS SUBSET SUBSTRING SYSTEM TABLES TABLESAMPLE TEXT TEXT_STRING TIES TIME TIMESTAMP TINYINT TO TRAILING TRANSACTION TRUNCATE TRY_CAST TYPE UNBOUNDED UNCOMMITTED UNCONDITIONAL UNIQUE UNKNOWN UNMATCHED UPDATE USE USER UTF16 UTF32 UTF8 UUID VALIDATE VALUE VARBINARY VARCHAR VERBOSE VERSION VIEW WINDOW WITHIN WITHOUT WORK WRAPPER WRITE YEAR ZONE """ sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_tsql.py000066400000000000000000006574471503426445100226200ustar00rootroot00000000000000"""The MSSQL T-SQL dialect. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/language-elements-transact-sql """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseFileSegment, BaseSegment, Bracketed, CodeSegment, CommentSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralSegment, Matchable, MultiStringParser, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringParser, SymbolSegment, TypedParser, WhitespaceSegment, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_tsql_keywords import ( FUTURE_RESERVED_KEYWORDS, RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") tsql_dialect = ansi_dialect.copy_as( "tsql", formatted_name="Microsoft T-SQL", docstring="""The dialect for `T-SQL`_ (aka Transact-SQL). .. _`T-SQL`: https://docs.microsoft.com/en-us/sql/t-sql/language-reference""", ) tsql_dialect.sets("reserved_keywords").clear() tsql_dialect.sets("unreserved_keywords").clear() tsql_dialect.sets("future_reserved_keywords").clear() tsql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) tsql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) tsql_dialect.sets("future_reserved_keywords").update(FUTURE_RESERVED_KEYWORDS) # Set the datetime units tsql_dialect.sets("datetime_units").clear() tsql_dialect.sets("datetime_units").update( [ "D", "DAY", "DAYS", "DAYOFYEAR", "DD", "DW", "DY", "HH", "HOUR", "ISO_WEEK", "ISOWK", "ISOWW", "INFINITE", "M", "MCS", "MI", "MICROSECOND", "MILLISECOND", "MINUTE", "MM", "MONTH", "MONTHS", "MS", "N", "NANOSECOND", "NS", "Q", "QQ", "QUARTER", "S", "SECOND", "SS", "TZ", "TZOFFSET", "W", "WEEK", "WEEKS", "WEEKDAY", "WK", "WW", "YEAR", "YEARS", "Y", "YY", "YYYY", ] ) tsql_dialect.sets("date_part_function_name").clear() tsql_dialect.sets("date_part_function_name").update( ["DATEADD", "DATEDIFF", "DATEDIFF_BIG", "DATENAME", "DATEPART", "DATETRUNC"] ) tsql_dialect.sets("date_format").clear() tsql_dialect.sets("date_format").update( [ "mdy", "dmy", "ymd", "myd", "dym", ] ) tsql_dialect.sets("bare_functions").update( ["CURRENT_USER", "SESSION_USER", "SYSTEM_USER", "USER"] ) tsql_dialect.sets("sqlcmd_operators").clear() tsql_dialect.sets("sqlcmd_operators").update(["r", "setvar"]) tsql_dialect.sets("file_compression").clear() tsql_dialect.sets("file_compression").update( [ "'org.apache.hadoop.io.compress.GzipCodec'", "'org.apache.hadoop.io.compress.DefaultCodec'", "'org.apache.hadoop.io.compress.SnappyCodec'", ] ) tsql_dialect.sets("file_encoding").clear() tsql_dialect.sets("file_encoding").update( [ "'UTF8'", "'UTF16'", ] ) tsql_dialect.sets("serde_method").clear() tsql_dialect.sets("serde_method").update( [ "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'", 
"'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'", ] ) tsql_dialect.insert_lexer_matchers( [ RegexLexer( "atsign", r"[@][a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "var_prefix", r"[$][a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "square_quote", r"\[([^\[\]]*)*\]", CodeSegment, segment_kwargs={ "quoted_value": (r"\[([^\[\]]*)\]", 1), }, ), # T-SQL unicode strings RegexLexer( "single_quote_with_n", r"N'([^']|'')*'", CodeSegment, segment_kwargs={ "quoted_value": (r"N'((?:[^']|'')*)'", 1), }, ), RegexLexer( "hash_prefix", r"[#][#]?[a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "unquoted_relative_sql_file_path", # currently there is no way to pass `regex.IGNORECASE` flag to `RegexLexer` r"[.\w\\/#-]+\.[sS][qQ][lL]\b", CodeSegment, ), ], before="back_quote", ) tsql_dialect.patch_lexer_matchers( [ # Patching single_quote to allow for TSQL-style escaped quotes RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, ), # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # Patching block comments to account for nested blocks. # N.B. this syntax is only possible via the non-standard-library # (but still backwards compatible) `regex` package. # https://pypi.org/project/regex/ # Pattern breakdown: # /\* Match opening slash. # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # [^*/]+ Non forward-slash or asterisk characters. # |\*(?!\/) Negative lookahead assertion to match # asterisks not followed by a forward-slash. # |/[^*] Match lone forward-slashes not followed by an asterisk. # )* Match any number of the atomic group contents. # (?> # (?R) Recursively match the block comment pattern # to match nested block comments. # (?> # [^*/]+ # |\*(?!\/) # |/[^*] # )* # )* # \*/ Match closing slash. 
RegexLexer( "block_comment", r"/\*(?>[^*/]+|\*(?!\/)|/[^*])*(?>(?R)(?>[^*/]+|\*(?!\/)|/[^*])*)*\*/", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer( "word", r"[0-9a-zA-Z_#@\p{L}]+", WordSegment ), # overriding to allow hash mark and at-sign in code ] ) tsql_dialect.add( BracketedIdentifierSegment=TypedParser( "square_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper, ), HashIdentifierSegment=TypedParser( "hash_prefix", IdentifierSegment, type="hash_identifier", casefold=str.upper, ), VariableIdentifierSegment=TypedParser( "var_prefix", IdentifierSegment, type="variable_identifier", casefold=str.upper, ), BatchDelimiterGrammar=Ref("GoStatementSegment"), QuotedLiteralSegmentWithN=TypedParser( "single_quote_with_n", LiteralSegment, type="quoted_literal" ), QuotedLiteralSegmentOptWithN=OneOf( Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), ), TransactionGrammar=OneOf( "TRANSACTION", "TRAN", ), SystemVariableSegment=RegexParser( r"@@[A-Za-z0-9_]+", CodeSegment, type="system_variable" ), StatementAndDelimiterGrammar=Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar", optional=True), ), OneOrMoreStatementsGrammar=AnyNumberOf( Ref("StatementAndDelimiterGrammar"), min_times=1, ), TopPercentGrammar=Sequence( "TOP", OptionallyBracketed(Ref("ExpressionSegment")), Ref.keyword("PERCENT", optional=True), ), CursorNameGrammar=OneOf( Sequence(Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")), Ref("ParameterNameSegment"), ), CredentialGrammar=Sequence( "IDENTITY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), Sequence( Ref("CommaSegment"), "SECRET", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), AzureBlobStoragePath=RegexParser( r"'https://[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\.blob\.core\.windows\.net/[a-z0-9]" r"[a-z0-9\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="external_location", ), AzureDataLakeStorageGen2Path=RegexParser( r"'https://[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\.dfs\.core\.windows\.net/[a-z0-9]" r"[a-z0-9\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="external_location", ), SqlcmdOperatorSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("sqlcmd_operators"), CodeSegment, type="sqlcmd_operator", ) ), SqlcmdFilePathSegment=TypedParser( "unquoted_relative_sql_file_path", CodeSegment, type="unquoted_relative_sql_file_path", ), FileCompressionSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("file_compression"), CodeSegment, type="file_compression", ) ), FileEncodingSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("file_encoding"), CodeSegment, type="file_encoding", ) ), SerdeMethodSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("serde_method"), CodeSegment, type="serde_method", ) ), ProcedureParameterGrammar=Sequence( Ref("ParameterNameSegment", optional=True), Sequence("AS", optional=True), Ref("DatatypeSegment"), AnySetOf("VARYING", Sequence("NOT", optional=True), "NULL"), Sequence(Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True), ), DateFormatSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("date_format"), CodeSegment, type="date_format", ) ), # Here we add a special case for a DotSegment where we don't want to apply # LT01's respace rule. 
LeadingDotSegment=StringParser(".", SymbolSegment, type="leading_dot"), HexadecimalLiteralSegment=RegexParser( r"([xX]'([\da-fA-F][\da-fA-F])+'|0x[\da-fA-F]+)", LiteralSegment, type="numeric_literal", ), PlusComparisonSegment=StringParser( "+", SymbolSegment, type="raw_comparison_operator" ), MinusComparisonSegment=StringParser( "-", SymbolSegment, type="raw_comparison_operator" ), MultiplyComparisonSegment=StringParser( "*", SymbolSegment, type="raw_comparison_operator" ), DivideComparisonSegment=StringParser( "/", SymbolSegment, type="raw_comparison_operator" ), ModuloComparisonSegment=StringParser( "%", SymbolSegment, type="raw_comparison_operator" ), ) tsql_dialect.replace( # Overriding to cover TSQL allowed identifier name characters # https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-identifiers NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z_\p{L}][A-Z0-9_@$#\p{L}]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords") | dialect.sets("future_reserved_keywords") ) + r")$", casefold=str.upper, ) ), QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper, ), # Overriding ANSI BaseExpressionElement to remove Interval Expression Segment BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( remove=[ Ref("IntervalExpressionSegment"), ] ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("BracketedIdentifierSegment"), Ref("HashIdentifierSegment"), Ref("ParameterNameSegment"), Ref("VariableIdentifierSegment"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar") .copy( insert=[ Ref("QuotedLiteralSegmentWithN"), ], before=Ref("NumericLiteralSegment"), remove=[ Ref("ArrayLiteralSegment"), Ref("ObjectLiteralSegment"), ], ) .copy( insert=[ Ref("ParameterNameSegment"), Ref("SystemVariableSegment"), ], ), ParameterNameSegment=RegexParser(r"@[A-Za-z0-9_]+", CodeSegment, type="parameter"), FunctionParameterGrammar=Sequence( Ref("ParameterNameSegment", optional=True), Sequence("AS", optional=True), Ref("DatatypeSegment"), Sequence("NULL", optional=True), Sequence(Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True), ), FunctionNameIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords # minus the function names that are reserved words.
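# (e.g. "UPDATE" is excluded from the anti-template below so that trigger-style # calls such as UPDATE(some_column) can still be parsed as function invocations.)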
lambda dialect: RegexParser( r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]", CodeSegment, type="function_name_identifier", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords").difference({"UPDATE"}) | dialect.sets("future_reserved_keywords") ) + r")$", ) ), NanLiteralSegment=Nothing(), DatatypeIdentifierSegment=SegmentGenerator( # Generate the anti template reserved keywords lambda dialect: OneOf( RegexParser( r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]", CodeSegment, type="data_type_identifier", # anti_template=r"^(NOT)$", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords") | dialect.sets("future_reserved_keywords") ) + r")$", # TODO - this is a stopgap until we implement explicit data types ), Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")), ), ), PrimaryKeyGrammar=Sequence( OneOf( Sequence( "PRIMARY", "KEY", ), "UNIQUE", ), OneOf( "CLUSTERED", "NONCLUSTERED", optional=True, ), ), FromClauseTerminatorGrammar=OneOf( "WHERE", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("DelimiterGrammar"), "WINDOW", ), # Replace ANSI LikeGrammar to remove TSQL non-keywords RLIKE and ILIKE LikeGrammar=Sequence( "LIKE", ), # Replace ANSI FunctionContentsGrammar to remove TSQL non-keyword Separator # TODO: fully represent TSQL functionality FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref("OrderByClauseSegment"), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake)... # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"), ), JoinTypeKeywordsGrammar=Sequence( OneOf( "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), ), OneOf( "LOOP", "HASH", "MERGE", optional=True, ), optional=True, ), JoinKeywordsGrammar=OneOf("JOIN", "APPLY"), ConditionalCrossJoinKeywordsGrammar=Nothing(), NaturalJoinKeywordsGrammar=Ref.keyword("CROSS"), ExtendedNaturalJoinKeywordsGrammar=Sequence("OUTER", "APPLY"), NestedJoinGrammar=Sequence( Indent, Ref("JoinClauseSegment"), Dedent, ), # Replace Expression_D_Grammar to remove casting syntax invalid in TSQL Expression_D_Grammar=Sequence( OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) 
), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("ColumnReferenceSegment"), Ref("TypedArrayLiteralSegment"), Ref("ArrayLiteralSegment"), "DEFAULT", ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), MergeIntoLiteralGrammar=Sequence( "MERGE", Ref("TopPercentGrammar", optional=True), Ref.keyword("INTO", optional=True), ), TrimParametersGrammar=Nothing(), TemporaryGrammar=Nothing(), JoinLikeClauseGrammar=AnySetOf( Ref("PivotUnpivotStatementSegment"), min_times=1, ), CollateGrammar=Sequence("COLLATE", Ref("CollationReferenceSegment")), ArithmeticBinaryOperatorGrammar=ansi_dialect.get_grammar( "ArithmeticBinaryOperatorGrammar" ).copy( insert=[ Ref("AdditionAssignmentSegment"), Ref("SubtractionAssignmentSegment"), Ref("MultiplicationAssignmentSegment"), Ref("DivisionAssignmentSegment"), Ref("ModulusAssignmentSegment"), ] ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("IfExpressionStatement"), Ref("DeclareStatementSegment"), Ref("DeclareCursorStatementSegment"), Ref("SetStatementSegment"), Ref("AlterTableSwitchStatementSegment"), Ref("PrintStatementSegment"), Ref("CreateTableGraphStatementSegment"), Ref( "CreateTableAsSelectStatementSegment" ), # Azure Synapse Analytics specific Ref("RenameStatementSegment"), # Azure Synapse Analytics specific Ref("ExecuteScriptSegment"), Ref("DropStatisticsStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("UpdateStatisticsStatementSegment"), Ref("BeginEndSegment"), Ref("TryCatchSegment"), Ref("MergeStatementSegment"), Ref("ThrowStatementSegment"), Ref("RaiserrorStatementSegment"), Ref("ReturnStatementSegment"), Ref("GotoStatement"), Ref("LabelStatementSegment"), Ref("DisableTriggerStatementSegment"), Ref("WhileExpressionStatement"), Ref("BreakStatement"), Ref("ContinueStatement"), Ref("WaitForStatementSegment"), Ref("OpenCursorStatementSegment"), Ref("CloseCursorStatementSegment"), Ref("DeallocateCursorStatementSegment"), Ref("FetchCursorStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("CreateSynonymStatementSegment"), Ref("DropSynonymStatementSegment"), Ref("BulkInsertStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("CreateDatabaseScopedCredentialStatementSegment"), Ref("CreateExternalDataSourceStatementSegment"), Ref("SqlcmdCommandSegment"), Ref("CreateExternalFileFormat"), Ref("CreateExternalTableStatementSegment"), Ref("DropExternalTableStatementSegment"), Ref("CopyIntoTableStatementSegment"), Ref("CreateFullTextIndexStatementSegment"), Ref("AtomicBeginEndSegment"), Ref("ReconfigureStatementSegment"), Ref("CreateColumnstoreIndexStatementSegment"), Ref("CreatePartitionFunctionSegment"), Ref("AlterPartitionSchemeSegment"), Ref("CreatePartitionSchemeSegment"), Ref("AlterPartitionFunctionSegment"), Ref("CreateMasterKeySegment"), Ref("AlterMasterKeySegment"), Ref("DropMasterKeySegment"), Ref("OpenSymmetricKeySegment"), Ref("CreateLoginStatementSegment"), Ref("SetContextInfoSegment"), Ref("CreateSecurityPolicySegment"), Ref("AlterSecurityPolicySegment"), Ref("DropSecurityPolicySegment"), ], remove=[ Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), Ref("DescribeStatementSegment"), ], ) class GreaterThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator. N.B. Patching to add !< and to allow spaces between operators. 
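For example, both `a >= b` and the legacy form `a !< b` ("not less than") should match this segment.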
""" match_grammar = OneOf( Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawLessThanSegment"), ), ) class LessThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator. N.B. Patching to add !> and to allow spaces between operators. """ match_grammar = OneOf( Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawGreaterThanSegment"), ), ) class NotEqualToSegment(CompositeComparisonOperatorSegment): """Not equal to operator. N.B. Patching to allow spaces between operators. """ match_grammar = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment")), Sequence(Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment")), ) class SelectClauseElementSegment(ansi.SelectClauseElementSegment): """An element in the targets of a select statement. Overriding ANSI to remove greedy logic which assumes statements have been delimited """ # Important to split elements before parsing, otherwise debugging is really hard. match_grammar = OneOf( # *, blah.*, blah.blah.*, etc. Ref("WildcardExpressionSegment"), Sequence( Ref("AltAliasExpressionSegment"), Ref("BaseExpressionElementGrammar"), ), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) class AltAliasExpressionSegment(BaseSegment): """An alternative alias clause as used by tsql using `=`.""" type = "alias_expression" match_grammar = Sequence( OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("BracketedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), ), Indent, Ref("EqualAliasOperatorSegment"), Dedent, ) class EqualAliasOperatorSegment(BaseSegment): """The as alias expression operator.""" type = "alias_operator" match_grammar: Matchable = Sequence(Ref("RawEqualsSegment")) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar = AnyNumberOf( "DISTINCT", "ALL", Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/queries/top-transact-sql "TOP", OptionallyBracketed(Ref("ExpressionSegment")), Sequence("PERCENT", optional=True), Sequence("WITH", "TIES", optional=True), ), ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement. Overriding ANSI to remove greedy logic which assumes statements have been delimited """ type = "select_clause" match_grammar: Matchable = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, # NOTE: Don't allow trailing. Delimited(Ref("SelectClauseElementSegment")), Dedent, # NOTE: In TSQL - this grammar is NOT greedy. ) class UnorderedSelectStatementSegment(BaseSegment): """A `SELECT` statement without any ORDER clauses or later. We need to change ANSI slightly to remove LimitClauseSegment and NamedWindowSegment which don't exist in T-SQL. We also need to get away from ANSI's use of terminators. There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "select_statement" match_grammar = Sequence( Ref("SelectClauseSegment"), Ref("IntoTableSegment", optional=True), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. 
Overriding ANSI definition to remove terminator logic that doesn't handle optional delimitation well. """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( Sequence( Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), ), Ref("OpenQuerySegment"), ), Ref("PostTableExpressionGrammar", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("OutputClauseSegment", optional=True), OneOf( Ref("SelectableGrammar"), Ref("ExecuteScriptSegment"), Ref("DefaultValuesGrammar"), ), ) class BulkInsertStatementSegment(BaseSegment): """A `BULK INSERT` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql """ type = "bulk_insert_statement" match_grammar = Sequence( "BULK", "INSERT", Ref("TableReferenceSegment"), "FROM", Ref("QuotedLiteralSegment"), Ref("BulkInsertStatementWithSegment", optional=True), ) class BulkInsertStatementWithSegment(BaseSegment): """A `WITH` segment in the BULK INSERT statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql """ type = "bulk_insert_with_segment" match_grammar = Sequence( "WITH", Bracketed( Delimited( AnyNumberOf( Sequence( OneOf( "BATCHSIZE", "FIRSTROW", "KILOBYTES_PER_BATCH", "LASTROW", "MAXERRORS", "ROWS_PER_BATCH", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( OneOf( "CODEPAGE", "DATAFILETYPE", "DATA_SOURCE", "ERRORFILE", "ERRORFILE_DATA_SOURCE", "FORMATFILE_DATA_SOURCE", "ROWTERMINATOR", "FORMAT", "FIELDQUOTE", "FORMATFILE", "FIELDTERMINATOR", ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ORDER", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ), ), ), ), "CHECK_CONSTRAINTS", "FIRE_TRIGGERS", "KEEPIDENTITY", "KEEPNULLS", "TABLOCK", ) ) ), ) class WithCompoundStatementSegment(BaseSegment): """A `SELECT` statement preceded by a selection of `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` Overriding ANSI to remove the greedy use of terminators. """ type = "with_compound_statement" # match grammar match_grammar = Sequence( "WITH", Ref.keyword("RECURSIVE", optional=True), Conditional(Indent, indented_ctes=True), Delimited( Ref("CTEDefinitionSegment"), terminators=["SELECT"], ), Conditional(Dedent, indented_ctes=True), OneOf( Ref("NonWithSelectableGrammar"), Ref("NonWithNonSelectableGrammar"), Ref("MergeStatementSegment"), ), ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement. We need to change ANSI slightly to remove LimitClauseSegment and NamedWindowSegment which don't exist in T-SQL. We also need to get away from ANSI's use of terminators. There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "select_statement" # Remove the Limit and Window statements from ANSI match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), Ref("ForClauseSegment", optional=True), ] ) class IntoTableSegment(BaseSegment): """`INTO` clause within `SELECT`. https://docs.microsoft.com/en-us/sql/t-sql/queries/select-into-clause-transact-sql """ type = "into_table_clause" match_grammar = Sequence("INTO", Ref("ObjectReferenceSegment")) class WhereClauseSegment(BaseSegment): """A `WHERE` clause like in `SELECT` or `INSERT`. Overriding ANSI in order to get away from the use of terminators. 
There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "where_clause" match_grammar = Sequence( "WHERE", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` or `CREATE STATISTICS` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/statements/create-statistics-transact-sql """ type = "create_index_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("UNIQUE", optional=True), OneOf("CLUSTERED", "NONCLUSTERED", optional=True), OneOf("INDEX", "STATISTICS"), Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), Indent, "ON", Ref("TableReferenceSegment"), Ref("BracketedIndexColumnListGrammar"), Sequence( "INCLUDE", Ref("BracketedColumnReferenceListGrammar"), optional=True, ), Ref("WhereClauseSegment", optional=True), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), Ref("DelimiterGrammar", optional=True), Dedent, ) class CreateColumnstoreIndexStatementSegment(BaseSegment): """A `CREATE COLUMNSTORE INDEX` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-columnstore-index-transact-sql """ type = "create_columnstore_index_statement" match_grammar = Sequence( "CREATE", OneOf("CLUSTERED", "NONCLUSTERED", optional=True), "COLUMNSTORE", "INDEX", Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("BracketedIndexColumnListGrammar", optional=True), Sequence( "ORDER", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), optional=True, ), Ref("WhereClauseSegment", optional=True), Sequence( "WITH", Bracketed( OneOf( Sequence( "DROP_EXISTING", Ref("EqualsSegment", optional=True), OneOf( "ON", "OFF", ), ), Sequence( "MAXDOP", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), ), Sequence( "ONLINE", Ref("EqualsSegment", optional=True), OneOf( "ON", "OFF", ), ), Sequence( "COMPRESSION_DELAY", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), "MINUTES", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment", optional=True), OneOf( "COLUMNSTORE", "COLUMNSTORE_ARCHIVE", ), Sequence( Sequence( "ON", "PARTITIONS", ), Bracketed( Delimited( Ref("NumericLiteralSegment"), ), Sequence( "TO", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ), ), ), ), optional=True, ), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), ) class CreateFullTextIndexStatementSegment(BaseSegment): """A `CREATE FULLTEXT INDEX` statement. 
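For example (names illustrative): `CREATE FULLTEXT INDEX ON dbo.docs (content) KEY INDEX pk_docs`.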
https://learn.microsoft.com/fr-fr/sql/t-sql/statements/create-fulltext-index-transact-sql """ type = "create_fulltext_index_statement" _catalog_filegroup_option = Sequence( "ON", Delimited( AnySetOf( Ref("ObjectReferenceSegment"), Sequence( "FILEGROUP", Ref("ObjectReferenceSegment"), ), ), allow_trailing=True, ), optional=True, ) _with_option = Sequence( "WITH", Bracketed( OneOf( Sequence( "CHANGE_TRACKING", Ref("EqualsSegment", optional=True), OneOf( "MANUAL", "AUTO", Delimited( "OFF", Sequence( "NO", "POPULATION", optional=True, ), ), ), ), Sequence( "STOPLIST", Ref("EqualsSegment", optional=True), OneOf( "OFF", "SYSTEM", Ref("ObjectReferenceSegment"), ), ), Sequence( "SEARCH", "PROPERTY", "LIST", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), ), ), optional=True, ) match_grammar = Sequence( "CREATE", "FULLTEXT", "INDEX", "ON", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), AnySetOf( Sequence( "TYPE", "COLUMN", Ref("DatatypeSegment"), ), Sequence( "LANGUAGE", OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "STATISTICAL_SEMANTICS", ), ), ), ), Sequence( "KEY", "INDEX", Ref("ObjectReferenceSegment"), _catalog_filegroup_option, ), _with_option, ) class AlterIndexStatementSegment(BaseSegment): """An ALTER INDEX statement. As per. https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-index-transact-sql """ type = "alter_index_statement" _low_priority_lock_wait = Sequence( "WAIT_AT_LOW_PRIORITY", Bracketed( Sequence( "MAX_DURATION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Ref("CommaSegment"), Sequence( "ABORT_AFTER_WAIT", Ref("EqualsSegment"), OneOf( "NONE", "SELF", "BLOCKERS", ), ), ), ) _on_partitions = Sequence( Sequence( "ON", "PARTITIONS", ), Bracketed( Delimited( Ref("NumericLiteralSegment"), ), Sequence( "TO", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ) _rebuild_index_option = AnyNumberOf( Sequence( OneOf( "PAD_INDEX", "SORT_IN_TEMPDB", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", "STATISTICS_INCREMENTAL", "RESUMABLE", "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( OneOf( "MAXDOP", "FILLFACTOR", "MAX_DURATION", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( _low_priority_lock_wait, optional=True, ), ), "OFF", ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", "COLUMNSTORE_ARCHIVE", ), _on_partitions, ), Sequence( "XML_COMPRESSION", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), _on_partitions, ), ) _single_partition_rebuild_index_option = AnyNumberOf( Sequence( OneOf( "XML_COMPRESSION", "SORT_IN_TEMPDB", "RESUMABLE", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( OneOf( "MAXDOP", "MAX_DURATION", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", "COLUMNSTORE_ARCHIVE", ), ), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( _low_priority_lock_wait, optional=True, ), ), "OFF", ), ), ) match_grammar = Sequence( "ALTER", "INDEX", OneOf( Ref("ObjectReferenceSegment"), "ALL", ), "ON", Ref("TableReferenceSegment"), OneOf( Sequence( "REBUILD", OneOf( Sequence( Sequence( "PARTITION", 
Ref("EqualsSegment"), "ALL", optional=True, ), Sequence( "WITH", Bracketed( Delimited( _rebuild_index_option, ) ), optional=True, ), ), Sequence( Sequence( "PARTITION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "WITH", Bracketed( Delimited( _single_partition_rebuild_index_option, ), ), optional=True, ), ), optional=True, ), ), "DISABLE", Sequence( "REORGANIZE", Sequence( "PARTITION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "WITH", Bracketed( Sequence( OneOf( "LOB_COMPACTION", "COMPRESS_ALL_ROW_GROUPS", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), ), optional=True, ), ), Sequence( "SET", Bracketed( Delimited( AnyNumberOf( Sequence( OneOf( "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( "COMPRESSION_DELAY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), ), ), ), ), Sequence( "RESUME", Sequence( "WITH", Bracketed( Delimited( Sequence( OneOf( "MAX_DURATION", "MAXDOP", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), _low_priority_lock_wait, ), ), optional=True, ), ), "PAUSE", "ABORT", ), ) class OnPartitionOrFilegroupOptionSegment(BaseSegment): """ON partition scheme or filegroup option. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ type = "on_partition_or_filegroup_statement" match_grammar = OneOf( Ref("PartitionSchemeClause"), Ref("FilegroupClause"), Ref("LiteralGrammar"), # for "default" value ) class FilestreamOnOptionSegment(BaseSegment): """FILESTREAM_ON index option in `CREATE INDEX` and 'CREATE TABLE' statements. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ type = "filestream_on_option_statement" match_grammar = Sequence( "FILESTREAM_ON", OneOf( Ref("FilegroupNameSegment"), Ref("PartitionSchemeNameSegment"), OneOf( "NULL", Ref("LiteralGrammar"), # for "default" value ), ), ) class TextimageOnOptionSegment(BaseSegment): """TEXTIMAGE ON option in `CREATE TABLE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ type = "textimage_on_option_statement" match_grammar = Sequence( "TEXTIMAGE_ON", OneOf( Ref("FilegroupNameSegment"), Ref("LiteralGrammar"), # for "default" value ), ) class TableOptionSegment(BaseSegment): """TABLE option in `CREATE TABLE` statement. 
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ _ledger_view_option = Delimited( Sequence( OneOf( "TRANSACTION_ID_COLUMN_NAME", "SEQUENCE_NUMBER_COLUMN_NAME", "OPERATION_TYPE_COLUMN_NAME", "OPERATION_TYPE_DESC_COLUMN_NAME", ), Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), optional=True, ), ) _on_partitions = Sequence( Sequence( "ON", "PARTITIONS", ), Bracketed( Delimited( Ref("NumericLiteralSegment"), ), Sequence( "TO", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ) type = "table_option_statement" match_grammar = Sequence( "WITH", Bracketed( Delimited( AnyNumberOf( Sequence("MEMORY_OPTIMIZED", Ref("EqualsSegment"), "ON"), Sequence( "DURABILITY", Ref("EqualsSegment"), OneOf("SCHEMA_ONLY", "SCHEMA_AND_DATA"), ), Sequence( "SYSTEM_VERSIONING", Ref("EqualsSegment"), "ON", Bracketed( Delimited( AnyNumberOf( Sequence( "HISTORY_TABLE", Ref("EqualsSegment"), Ref("TableReferenceSegment"), ), Sequence( "HISTORY_RETENTION_PERIOD", Ref("EqualsSegment"), OneOf( "INFINITE", Sequence( Ref( "NumericLiteralSegment", optional=True, ), OneOf( "DAYS", "WEEKS", "MONTHS", "YEARS", ), optional=True, ), ), ), Sequence( Ref("CommaSegment"), "DATA_CONSISTENCY_CHECK", Ref("EqualsSegment"), OneOf("ON", "OFF"), ), ), ), ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", ), _on_partitions, ), Sequence( "XML_COMPRESSION", Ref("EqualsSegment"), OneOf("ON", "OFF"), _on_partitions, ), Sequence( "FILETABLE_DIRECTORY", Ref("EqualsSegment"), Ref("LiteralGrammar"), ), Sequence( OneOf( "FILETABLE_COLLATE_FILENAME", "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME", "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME", "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME", ), Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "REMOTE_DATA_ARCHIVE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( Delimited( Sequence( "FILTER_PREDICATE", Ref("EqualsSegment"), OneOf( "NULL", Ref("FunctionNameSegment"), ), optional=True, ), Sequence( "MIGRATION_STATE", Ref("EqualsSegment"), OneOf("OUTBOUND", "INBOUND", "PAUSED"), ), ), optional=True, ), ), Sequence( "OFF", Bracketed( "MIGRATION_STATE", Ref("EqualsSegment"), "PAUSED", ), ), ), ), Sequence( "DATA_DELETION", Ref("EqualsSegment"), "ON", Bracketed( "FILTER_COLUMN", Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), Ref("CommaSegment"), "RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), ), ), Sequence( "LEDGER", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( Delimited( Sequence( "LEDGER_VIEW", Ref("EqualsSegment"), Ref("TableReferenceSegment"), Bracketed( _ledger_view_option, optional=True ), optional=True, ), Sequence( "APPEND_ONLY", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), ), optional=True, ), ), "OFF", ), ), ) ) ), ) class ReferencesConstraintGrammar(BaseSegment): """REFERENCES constraint option in `CREATE TABLE` statement. 
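For example (names illustrative): `REFERENCES dbo.parent (parent_id) ON DELETE CASCADE`.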
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ type = "references_constraint_grammar" match_grammar = Sequence( # REFERENCES reftable [ ( refcolumn) ] "REFERENCES", Ref("TableReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar", optional=True), AnySetOf( Sequence("ON", "DELETE", Ref("ReferentialActionGrammar")), Sequence("ON", "UPDATE", Ref("ReferentialActionGrammar")), Sequence("NOT", "FOR", "REPLICATION"), ), ) class CheckConstraintGrammar(BaseSegment): """CHECK constraint option in `CREATE TABLE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql """ type = "check_constraint_grammar" match_grammar = Sequence( "CHECK", Sequence("NOT", "FOR", "REPLICATION", optional=True), Bracketed( Ref("ExpressionSegment"), ), ) class ConnectionConstraintGrammar(BaseSegment): """CONNECTION constraint option in `CREATE TABLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-sql-graph """ type = "connection_constraint_grammar" match_grammar = Sequence( "CONNECTION", Bracketed( Delimited( Sequence( Ref("TableReferenceSegment"), "TO", Ref("TableReferenceSegment"), optional=True, ), allow_trailing=True, ) ), AnySetOf( Sequence("ON", "DELETE", OneOf(Sequence("NO", "ACTION"), "CASCADE")), Sequence("ON", "UPDATE", OneOf(Sequence("NO", "ACTION"), "CASCADE")), ), ) class RelationalIndexOptionsSegment(BaseSegment): """Relational index options in a `CREATE INDEX` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql """ type = "relational_index_options" match_grammar = Sequence( "WITH", OptionallyBracketed( Delimited( AnyNumberOf( Sequence( OneOf( "PAD_INDEX", "FILLFACTOR", "SORT_IN_TEMPDB", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", "STATISTICS_INCREMENTAL", "DROP_EXISTING", "RESUMABLE", "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "MAXDOP", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", Ref("LiteralGrammar"), ), ), Ref("MaxDurationSegment"), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( "OFF", Sequence( "ON", Bracketed( Sequence( "WAIT_AT_LOW_PRIORITY", Bracketed( Delimited( Ref("MaxDurationSegment"), Sequence( "ABORT_AFTER_WAIT", Ref("EqualsSegment"), OneOf( "NONE", "SELF", "BLOCKERS", ), ), ), ), ), optional=True, ), ), ), ), # for table constraints Sequence( "COMPRESSION_DELAY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Sequence( "MINUTES", optional=True, ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", # for table constraints "COLUMNSTORE_ARCHIVE", # for table constraints ), Ref("OnPartitionsSegment", optional=True), ), min_times=1, ), ), ), ) class MaxDurationSegment(BaseSegment): """A `MAX DURATION` clause. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql """ type = "max_duration" match_grammar = Sequence( "MAX_DURATION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Sequence( "MINUTES", optional=True, ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. Overriding ANSI to include the required ON clause.
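For example (names illustrative): `DROP INDEX ix_orders_date ON dbo.orders`.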
""" match_grammar = Sequence( "DROP", "INDEX", Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("DelimiterGrammar", optional=True), ) class DropStatisticsStatementSegment(BaseSegment): """A `DROP STATISTICS` statement.""" type = "drop_statement" # DROP INDEX [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE} match_grammar = Sequence( "DROP", OneOf("STATISTICS"), Ref("IndexReferenceSegment"), Ref("DelimiterGrammar", optional=True), ) class UpdateStatisticsStatementSegment(BaseSegment): """An `UPDATE STATISTICS` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql """ type = "update_statistics_statement" match_grammar = Sequence( "UPDATE", "STATISTICS", Ref("ObjectReferenceSegment"), OneOf( Ref("SingleIdentifierGrammar"), Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), ), optional=True, ), Ref("DelimiterGrammar", optional=True), Sequence("WITH", OneOf("FULLSCAN", "RESAMPLE"), optional=True), ) class ReconfigureStatementSegment(BaseSegment): """Reconfigure statement. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/reconfigure-transact-sql """ type = "reconfigure_statement" match_grammar = Sequence( "RECONFIGURE", Sequence( "WITH", "OVERRIDE", optional=True, ), ) class ObjectReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object. Update ObjectReferenceSegment to only allow dot separated SingleIdentifierGrammar So Square Bracketed identifiers can be matched. """ # match grammar (allow whitespace) match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar", optional=True), ), min_times=0, max_times=3, ), ) class TableReferenceSegment(ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "table_reference" match_grammar: Matchable = OneOf( Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar", optional=True), ), min_times=0, max_times=3, ), ), # This can have a leading number of dots. If the table reference starts with a # dot segment, apply a special type of DotSegment to prevent removal of spaces Sequence( Ref("LeadingDotSegment"), AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar", optional=True), Ref("DotSegment"), ), min_times=0, max_times=2, ), Ref("SingleIdentifierGrammar"), ), ) class SchemaReferenceSegment(ObjectReferenceSegment): """A reference to a schema. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "schema_reference" class DatabaseReferenceSegment(ObjectReferenceSegment): """A reference to a database. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "database_reference" class IndexReferenceSegment(ObjectReferenceSegment): """A reference to an index. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "index_reference" class ExtensionReferenceSegment(ObjectReferenceSegment): """A reference to an extension. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "extension_reference" class ColumnReferenceSegment(ObjectReferenceSegment): """A reference to column, field or alias. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "column_reference" class SequenceReferenceSegment(ObjectReferenceSegment): """A reference to a sequence. 
Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "sequence_reference" class PivotColumnReferenceSegment(ObjectReferenceSegment): """A reference to a PIVOT column. Used to differentiate it from a regular column reference. """ type = "pivot_column_reference" class PivotUnpivotStatementSegment(BaseSegment): """A PIVOT or UNPIVOT expression within a FROM clause. https://docs.microsoft.com/en-us/sql/t-sql/queries/from-using-pivot-and-unpivot """ type = "from_pivot_expression" match_grammar = Sequence( OneOf( Sequence( "PIVOT", OptionallyBracketed( Sequence( OptionallyBracketed(Ref("FunctionSegment")), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))), ) ), ), Sequence( "UNPIVOT", OptionallyBracketed( Sequence( OptionallyBracketed(Ref("ColumnReferenceSegment")), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))), ) ), ), ), Sequence("AS", optional=True), Ref("TableReferenceSegment"), ) class DeclareStatementSegment(BaseSegment): """Declaration of a variable. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-local-variable-transact-sql """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Indent, Delimited( Sequence( Ref("ParameterNameSegment"), Sequence("AS", optional=True), OneOf( Sequence( Ref("DatatypeSegment"), Sequence( Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True, ), ), Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), allow_trailing=True, ) ), ), ), ), ), Dedent, Ref("DelimiterGrammar", optional=True), ) class DeclareCursorStatementSegment(BaseSegment): """Declaration of a cursor. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-cursor-transact-sql """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CURSOR", OneOf("LOCAL", "GLOBAL", optional=True), OneOf("FORWARD_ONLY", "SCROLL", optional=True), OneOf("STATIC", "KEYSET", "DYNAMIC", "FAST_FORWARD", optional=True), OneOf("READ_ONLY", "SCROLL_LOCKS", "OPTIMISTIC", optional=True), Sequence("TYPE_WARNING", optional=True), "FOR", Ref("SelectStatementSegment"), ) class GoStatementSegment(BaseSegment): """GO signals the end of a batch of Transact-SQL statements. GO statements are not part of the TSQL language. They are used to signal the end of a batch so that clients know how statements should be grouped together into batches for execution. """ type = "go_statement" match_grammar = Ref.keyword("GO") class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( Delimited( OneOf( # TSQL allows optional MAX in some data types "MAX", Ref("ExpressionSegment"), ), # The brackets might be empty for some cases... optional=True, ), ) class DatatypeSegment(BaseSegment): """A data type segment. Updated for Transact-SQL to allow bracketed data types with bracketed schemas.
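For example `NVARCHAR(MAX)`, `DECIMAL(18, 2)`, or a bracketed, schema-qualified type such as `[dbo].[MyType]` (the last name being illustrative).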
""" type = "data_type" match_grammar = Sequence( # Some dialects allow optional qualification of data types with schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), OneOf( Ref("DatatypeIdentifierSegment"), Bracketed(Ref("DatatypeIdentifierSegment"), bracket_type="square"), ), # Stop Gap until explicit Data Types as only relevant for character Ref.keyword("VARYING", optional=True), Ref("BracketedArguments", optional=True), Ref("CharCharacterSetGrammar", optional=True), ) class CreateSequenceOptionsSegment(BaseSegment): """Options for Create Sequence statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-sequence-transact-sql """ type = "create_sequence_options_segment" match_grammar = OneOf( Sequence( "AS", Ref("DatatypeSegment"), ), Sequence("START", "WITH", Ref("NumericLiteralSegment")), Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), Sequence( Sequence("NO", optional=True), "CYCLE", ), Sequence( "CACHE", Ref("NumericLiteralSegment"), ), Sequence( "NO", "CACHE", ), ) class NextValueSequenceSegment(BaseSegment): """Segment to get next value from a sequence.""" type = "sequence_next_value" match_grammar = Sequence( "NEXT", "VALUE", "FOR", Ref("ObjectReferenceSegment"), ) class IfExpressionStatement(BaseSegment): """IF-ELSE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/if-else-transact-sql """ type = "if_then_statement" match_grammar = Sequence( Ref("IfClauseSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, AnyNumberOf( # ELSE IF included explicitly to allow for correct indentation Sequence( "ELSE", Ref("IfClauseSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, ), ), Sequence( "ELSE", Indent, Ref("StatementAndDelimiterGrammar"), Dedent, optional=True, ), ) class IfClauseSegment(BaseSegment): """IF clause.""" type = "if_clause" match_grammar = Sequence( "IF", Indent, Ref("ExpressionSegment"), Dedent, ) class WhileExpressionStatement(BaseSegment): """WHILE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/while-transact-sql """ type = "while_statement" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, ) class BreakStatement(BaseSegment): """BREAK statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/break-transact-sql """ type = "break_statement" match_grammar = Sequence( "BREAK", ) class ContinueStatement(BaseSegment): """CONTINUE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/continue-transact-sql """ type = "continue_statement" match_grammar = Sequence( "CONTINUE", ) class WaitForStatementSegment(BaseSegment): """WAITFOR statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/waitfor-transact-sql Partially implemented, lacking Receive and Get Conversation Group statements for now. 
""" type = "waitfor_statement" match_grammar = Sequence( "WAITFOR", OneOf( Sequence("DELAY", Ref("ExpressionSegment")), Sequence("TIME", Ref("ExpressionSegment")), ), Sequence("TIMEOUT", Ref("NumericLiteralSegment"), optional=True), ) class ColumnConstraintSegment(BaseSegment): """A column option; each CREATE TABLE column can have 0 or more.""" type = "column_constraint_segment" # Column constraint from # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql match_grammar = OneOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( "FILESTREAM", Sequence( "COLLATE", Ref("CollationReferenceSegment") ), # [COLLATE collation_name] "SPARSE", Sequence( "MASKED", "WITH", Bracketed("FUNCTION", Ref("EqualsSegment"), Ref("LiteralGrammar")), ), Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), # DEFAULT "DEFAULT", OptionallyBracketed( OneOf( OptionallyBracketed(Ref("LiteralGrammar")), # ((-1)) Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("NextValueSequenceSegment"), Ref("HexadecimalLiteralSegment"), ), ), ), Ref("IdentityGrammar"), Sequence("NOT", "FOR", "REPLICATION"), Sequence( Sequence("GENERATED", "ALWAYS", "AS"), OneOf("ROW", "TRANSACTION_ID", "SEQUENCE_NUMBER"), OneOf("START", "END"), Ref.keyword("HIDDEN", optional=True), ), Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL "ROWGUIDCOL", Ref("EncryptedWithGrammar"), # Primary Key without a column list Ref("PrimaryKeyGrammar"), Ref("RelationalIndexOptionsSegment"), Ref("OnPartitionOrFilegroupOptionSegment"), # Foreign Key without a column list Ref("ForeignKeyGrammar"), Ref("ReferencesConstraintGrammar"), Ref("CheckConstraintGrammar"), Ref("FilestreamOnOptionSegment", optional=True), # column_index Sequence( "INDEX", Ref("ObjectReferenceSegment"), # index name OneOf("CLUSTERED", "NONCLUSTERED", optional=True), # other optional blocks (RelationalIndexOptionsSegment, # OnIndexOptionSegment,FilestreamOnOptionSegment) are mentioned # above ), # other optional blocks (RelationalIndexOptionsSegment, # OnIndexOptionSegment, ReferencesConstraintGrammar, # CheckConstraintGrammar) are mentioned above ), ), # This is used where a PK or FK may have a column list plus additional # options set. Ref("TableConstraintSegment"), ) class FunctionParameterListGrammar(BaseSegment): """The parameters for a function ie. `(@city_name NVARCHAR(30), @postal_code NVARCHAR(15))`. Overriding ANSI (1) to optionally bracket and (2) remove Delimited """ type = "function_parameter_list" # Function parameter list match_grammar = Bracketed( Delimited( Sequence( Ref("FunctionParameterGrammar"), Sequence("READONLY", optional=True), ), optional=True, ), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. This version in the TSQL dialect should be a "common subset" of the structure of the code for those dialects. Updated to include AS after declaration of RETURNS. Might be integrated in ANSI though. 
https://www.postgresql.org/docs/9.1/sql-createfunction.html https://docs.snowflake.com/en/sql-reference/sql/create-function.html https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions https://docs.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-function-transact-sql """ type = "create_function_statement" match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), "FUNCTION", Ref("ObjectReferenceSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Ref("DatatypeSegment"), "TABLE", Sequence( Ref("ParameterNameSegment"), "TABLE", Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ), ), ), ), optional=True, ), Ref("FunctionOptionSegment", optional=True), Ref.keyword("AS", optional=True), Ref("ProcedureDefinitionGrammar"), ) class FunctionOptionSegment(BaseSegment): """A function option segment.""" type = "function_option_segment" match_grammar = Sequence( "WITH", Delimited( AnyNumberOf( "ENCRYPTION", "SCHEMABINDING", Sequence( OneOf( Sequence( "RETURNS", "NULL", ), "CALLED", ), "ON", "NULL", "INPUT", ), Ref("ExecuteAsClauseSegment"), Sequence( "INLINE", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), min_times=1, ), ), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-function-transact-sql """ type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("FunctionNameSegment")), Ref("DelimiterGrammar", optional=True), ) class ReturnStatementSegment(BaseSegment): """A RETURN statement.""" type = "return_segment" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class ExecuteAsClauseSegment(BaseSegment): """An EXECUTE AS clause. https://docs.microsoft.com/en-us/sql/t-sql/statements/execute-as-clause-transact-sql """ type = "execute_as_clause" match_grammar = Sequence( OneOf("EXEC", "EXECUTE"), "AS", OneOf( "CALLER", "SELF", "OWNER", Ref("QuotedLiteralSegment"), ), ) class SetStatementSegment(BaseSegment): """A Set statement. Setting an already declared variable or global variable. 
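For example `SET @counter = @counter + 1` or `SET NOCOUNT ON`.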
https://docs.microsoft.com/en-us/sql/t-sql/statements/set-statements-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/language-elements/set-local-variable-transact-sql """ type = "set_segment" match_grammar = Sequence( "SET", Indent, Delimited( OneOf( Sequence( "TRANSACTION", "ISOLATION", "LEVEL", OneOf( "SNAPSHOT", "SERIALIZABLE", Sequence( "REPEATABLE", "READ", ), Sequence( "READ", OneOf( "COMMITTED", "UNCOMMITTED", ), ), ), ), Sequence( Delimited( "DATEFIRST", "DATEFORMAT", "DEADLOCK_PRIORITY", "LOCK_TIMEOUT", "CONCAT_NULL_YIELDS_NULL", "CURSOR_CLOSE_ON_COMMIT", "FIPS_FLAGGER", Sequence("IDENTITY_INSERT", Ref("TableReferenceSegment")), "LANGUAGE", "OFFSETS", "QUOTED_IDENTIFIER", "ARITHABORT", "ARITHIGNORE", "FMTONLY", "NOCOUNT", "NOEXEC", "NUMERIC_ROUNDABORT", "PARSEONLY", "QUERY_GOVERNOR_COST_LIMIT", "RESULT_SET_CACHING", # Azure Synapse Analytics specific "ROWCOUNT", "TEXTSIZE", "ANSI_DEFAULTS", "ANSI_NULL_DFLT_OFF", "ANSI_NULL_DFLT_ON", "ANSI_NULLS", "ANSI_PADDING", "ANSI_WARNINGS", "FORCEPLAN", "SHOWPLAN_ALL", "SHOWPLAN_TEXT", "SHOWPLAN_XML", Sequence( "STATISTICS", OneOf( "IO", "PROFILE", "TIME", "XML", ), ), "IMPLICIT_TRANSACTIONS", "REMOTE_PROC_TRANSACTIONS", "XACT_ABORT", ), OneOf( "ON", "OFF", Sequence( Ref("EqualsSegment"), Ref("ExpressionSegment"), ), # The below for https://learn.microsoft.com/en-us/sql/t-sql/statements/set-deadlock-priority-transact-sql # noqa "LOW", "NORMAL", "HIGH", Ref("ParameterNameSegment"), Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), ), ), Sequence( Ref("ParameterNameSegment"), Ref("AssignmentOperatorSegment"), OneOf( Ref("ExpressionSegment"), Ref("SelectableGrammar"), ), ), ), ), Dedent, Ref("DelimiterGrammar", optional=True), ) class AssignmentOperatorSegment(BaseSegment): """One of the assignment operators. Includes simpler equals but also +=, -=, etc. """ type = "assignment_operator" match_grammar = OneOf( Ref("RawEqualsSegment"), Sequence( OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), ), Ref("RawEqualsSegment"), allow_gaps=False, ), ) class ProcedureParameterListGrammar(BaseSegment): """The parameters for a procedure ie. `@city_name NVARCHAR(30), @postal_code NVARCHAR(15)`. """ type = "procedure_parameter_list" # Function parameter list match_grammar = OptionallyBracketed( Delimited( Sequence( Ref("ProcedureParameterGrammar"), OneOf("OUT", "OUTPUT", optional=True), Sequence("READONLY", optional=True), ), optional=True, ), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE OR ALTER PROCEDURE` statement. 
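For example (names illustrative): `CREATE OR ALTER PROCEDURE dbo.usp_count_rows AS SELECT COUNT(*) FROM dbo.t`.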
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-procedure-transact-sql https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-procedure-transact-sql """ type = "create_procedure_statement" _procedure_option = Sequence( "WITH", Delimited( AnySetOf( "ENCRYPTION", "RECOMPILE", "NATIVE_COMPILATION", # natively compiled stored procedure "SCHEMABINDING", # natively compiled stored procedure Ref("ExecuteAsClauseSegment", optional=True), ), ), optional=True, ) match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), OneOf("PROC", "PROCEDURE"), Ref("ObjectReferenceSegment"), # Not for natively compiled stored procedures Sequence( Ref("SemicolonSegment"), Ref("NumericLiteralSegment"), optional=True, ), Indent, Ref("ProcedureParameterListGrammar", optional=True), _procedure_option, Sequence("FOR", "REPLICATION", optional=True), Dedent, "AS", Ref("ProcedureDefinitionGrammar"), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-procedure-transact-sql """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", OneOf("PROCEDURE", "PROC"), Ref("IfExistsGrammar", optional=True), Delimited(Ref("ObjectReferenceSegment")), Ref("DelimiterGrammar", optional=True), ) class ProcedureDefinitionGrammar(BaseSegment): """This is the body of a `CREATE OR ALTER PROCEDURE AS` statement. This also handles the body of a `CREATE FUNCTION AS` statement. """ type = "procedure_statement" name = "procedure_statement" match_grammar = OneOf( Ref("OneOrMoreStatementsGrammar"), Ref("AtomicBeginEndSegment"), Sequence( "EXTERNAL", "NAME", Ref("ObjectReferenceSegment"), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. Adjusted to allow CREATE OR ALTER instead of CREATE OR REPLACE. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql#examples https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-view-transact-sql#examples """ type = "create_view_statement" match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), "VIEW", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), optional=True, ), Sequence( "WITH", Delimited("ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Sequence("WITH", "CHECK", "OPTION", optional=True), Ref("DelimiterGrammar", optional=True), ) class MLTableExpressionSegment(BaseSegment): """An ML table expression. Not present in T-SQL. TODO: Consider whether this segment can be used to represent a PREDICT statement. """ type = "ml_table_expression" match_grammar = Nothing() class ConvertFunctionNameSegment(BaseSegment): """CONVERT function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf("CONVERT", "TRY_CONVERT") class CastFunctionNameSegment(BaseSegment): """CAST function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = Sequence("CAST") class ReplicateFunctionNameSegment(BaseSegment): """REPLICATE function name segment. 
https://learn.microsoft.com/en-us/sql/t-sql/functions/replicate-transact-sql Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = Sequence("REPLICATE") class JsonFunctionNameSegment(BaseSegment): """JSON functions name segment. https://learn.microsoft.com/en-us/sql/t-sql/functions/json-object-transact-sql Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf("JSON_ARRAY", "JSON_OBJECT") class RankFunctionNameSegment(BaseSegment): """Rank function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf("DENSE_RANK", "NTILE", "RANK", "ROW_NUMBER") class ReservedKeywordFunctionNameSegment(BaseSegment): """Reserved keywords that are also functions. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "COALESCE", "LEFT", "NULLIF", "RIGHT", ) class ReservedKeywordBareFunctionNameSegment(BaseSegment): """Reserved keywords that are functions without parentheses. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "CURRENT_TIMESTAMP", "CURRENT_USER", "SESSION_USER", "SYSTEM_USER", ) class WithinGroupFunctionNameSegment(BaseSegment): """WITHIN GROUP function name segment. For aggregation functions that use the WITHIN GROUP clause. https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "STRING_AGG", "PERCENTILE_CONT", "PERCENTILE_DISC", ) class WithinGroupClause(BaseSegment): """WITHIN GROUP clause. For a small set of aggregation functions. https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql """ type = "within_group_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed( Ref("OrderByClauseSegment"), ), Sequence( "OVER", Bracketed(Ref("PartitionClauseSegment")), optional=True, ), ) class PartitionClauseSegment(ansi.PartitionClauseSegment): """PARTITION BY clause. https://docs.microsoft.com/en-us/sql/t-sql/queries/select-over-clause-transact-sql#partition-by """ type = "partitionby_clause" match_grammar = Sequence( "PARTITION", "BY", Delimited( OptionallyBracketed( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ) ), ) class OnPartitionsSegment(BaseSegment): """ON PARTITIONS clause. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql """ type = "on_partitions_clause" match_grammar = Sequence( "ON", "PARTITIONS", Bracketed( Delimited( OneOf( Ref("NumericLiteralSegment"), Sequence( Ref("NumericLiteralSegment"), "TO", Ref("NumericLiteralSegment") ), ) ) ), ) class PartitionSchemeNameSegment(BaseSegment): """Partition Scheme Name.""" type = "partition_scheme_name" match_grammar = Ref("SingleIdentifierGrammar") class PartitionSchemeClause(BaseSegment): """Partition Scheme Clause segment. 
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql """ type = "partition_scheme_clause" match_grammar = Sequence( "ON", Ref("PartitionSchemeNameSegment"), Bracketed(Ref("ColumnReferenceSegment")), ) class CastFunctionContentsSegment(BaseSegment): """Cast Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment"), ), ) class ConvertFunctionContentsSegment(BaseSegment): """Convert Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("DatatypeSegment"), Bracketed(Ref("NumericLiteralSegment"), optional=True), Ref("CommaSegment"), Ref("ExpressionSegment"), Sequence(Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True), ), ) class ReplicateFunctionContentsSegment(BaseSegment): """REPLICATE Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( OneOf( Ref("ExpressionSegment"), Ref("HexadecimalLiteralSegment"), ), Ref("CommaSegment"), Ref("ExpressionSegment"), ), ) class JsonFunctionContentsSegment(BaseSegment): """JSON function contents.""" type = "function_contents" _json_null_clause = OneOf( Sequence("NULL", "ON", "NULL"), Sequence("ABSENT", "ON", "NULL"), optional=True, ) _json_key_value = Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment"), ), Ref("ColonSegment"), Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("LiteralGrammar"), Ref("NumericLiteralSegment"), Ref("ColumnReferenceSegment"), Ref("ParameterNameSegment"), Ref("FunctionSegment"), Bracketed(Ref("SelectStatementSegment")), "NULL", ), _json_null_clause, ), allow_gaps=True, ) match_grammar = OneOf( Bracketed( Delimited( AnyNumberOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ColumnReferenceSegment"), Ref("ParameterNameSegment"), "NULL", _json_null_clause, ) ) ), Bracketed( Delimited(_json_key_value, _json_null_clause), ), ) class RankFunctionContentsSegment(BaseSegment): """Rank Function contents.""" type = "function_contents" match_grammar = Sequence( Bracketed( Ref("NumericLiteralSegment", optional=True), ), ) class FunctionSegment(BaseSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now we treat them the same because they look the same for our purposes. """ type = "function" match_grammar = OneOf( Ref("ReservedKeywordBareFunctionNameSegment"), Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. 
Ref("DatePartFunctionNameSegment"), Ref("DateTimeFunctionContentsSegment"), ), Sequence( Ref("RankFunctionNameSegment"), Ref("RankFunctionContentsSegment"), Ref("OverClauseSegment"), ), Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql Ref("ConvertFunctionNameSegment"), Ref("ConvertFunctionContentsSegment"), ), Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql Ref("CastFunctionNameSegment"), Ref("CastFunctionContentsSegment"), ), Sequence( Ref("ReplicateFunctionNameSegment"), Ref("ReplicateFunctionContentsSegment"), ), Sequence( Ref("WithinGroupFunctionNameSegment"), Ref("FunctionContentsSegment"), Ref("WithinGroupClause", optional=True), ), Sequence( OneOf( Ref( "FunctionNameSegment", exclude=OneOf( Ref("ValuesClauseSegment"), # List of special functions handled differently Ref("CastFunctionNameSegment"), Ref("ConvertFunctionNameSegment"), Ref("DatePartFunctionNameSegment"), Ref("WithinGroupFunctionNameSegment"), Ref("RankFunctionNameSegment"), ), ), Ref("ReservedKeywordFunctionNameSegment"), ), Ref("FunctionContentsSegment"), Ref("PostFunctionGrammar", optional=True), ), Sequence( Ref("JsonFunctionNameSegment"), Ref("JsonFunctionContentsSegment"), ), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement.""" type = "create_table_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7 match_grammar = Sequence( "CREATE", "TABLE", Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Bracketed( Delimited( Ref("TableConstraintSegment"), Ref("ComputedColumnDefinitionSegment"), Ref("ColumnDefinitionSegment"), Ref("TableIndexSegment"), Ref("PeriodSegment"), allow_trailing=True, ), optional=True, ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref( "TableDistributionIndexClause", optional=True ), # Azure Synapse Analytics specific Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), Ref("TextimageOnOptionSegment", optional=True), Ref("TableOptionSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class CreateTableGraphStatementSegment(BaseSegment): """A `CREATE TABLE` GRAPH statement.""" type = "create_table_graph_statement" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-sql-graph match_grammar = Sequence( "CREATE", "TABLE", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("GraphTableConstraintSegment"), Ref("ComputedColumnDefinitionSegment"), Ref("ColumnDefinitionSegment"), Ref("TableIndexSegment"), Ref("PeriodSegment"), allow_trailing=True, ), optional=True, ), # GRAPH Sequence( "AS", OneOf( "NODE", "EDGE", ), ), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE` statement. 
https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql Overriding ANSI to remove TSQL non-keywords MODIFY, FIRST TODO: Flesh out TSQL-specific functionality """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), Sequence( "ALTER", "COLUMN", Ref("ColumnDefinitionSegment"), ), Sequence( "ADD", Delimited( Ref("ComputedColumnDefinitionSegment"), Ref("ColumnDefinitionSegment"), ), ), Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Delimited(Ref("ColumnReferenceSegment")), ), Sequence( "ADD", Ref("ColumnConstraintSegment"), "FOR", Ref("ColumnReferenceSegment"), ), Sequence(OneOf("ADD", "DROP"), Ref("PeriodSegment")), Sequence( Sequence( "WITH", "CHECK", optional=True, ), "ADD", Ref("TableConstraintSegment"), ), Sequence( "CHECK", "CONSTRAINT", Ref("ObjectReferenceSegment"), ), Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ), # Rename Sequence( "RENAME", OneOf("AS", "TO", optional=True), Ref("TableReferenceSegment"), ), Sequence( "SET", OneOf( Bracketed( Sequence( "FILESTREAM_ON", Ref("EqualsSegment"), OneOf( Ref("FilegroupNameSegment"), Ref("PartitionSchemeNameSegment"), OneOf( "NULL", Ref("LiteralGrammar"), # for "default" value ), ), ) ), Bracketed( Sequence( "SYSTEM_VERSIONING", Ref("EqualsSegment"), OneOf("ON", "OFF"), Sequence( Bracketed( "HISTORY_TABLE", Ref("EqualsSegment"), Ref("TableReferenceSegment"), Sequence( Ref("CommaSegment"), "DATA_CONSISTENCY_CHECK", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), Sequence( Ref("CommaSegment"), "HISTORY_RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), optional=True, ), ), optional=True, ), ) ), Bracketed( Sequence( "DATA_DELETION", Ref("EqualsSegment"), OneOf("ON", "OFF"), Sequence( Bracketed( "FILTER_COLUMN", Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), Sequence( Ref("CommaSegment"), "RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), optional=True, ), ), optional=True, ), ), ), ), ), ) ), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE.""" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql type = "table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedIndexColumnListGrammar"), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... 
] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), # REFERENCES reftable [ ( refcolumn) ] + ON DELETE/ON UPDATE Ref("ReferencesConstraintGrammar"), ), Ref("CheckConstraintGrammar", optional=True), ), ) class GraphTableConstraintSegment(BaseSegment): """A table constraint segment for graph tables, including connection constraints.""" type = "graph_table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedIndexColumnListGrammar"), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), # REFERENCES reftable [ ( refcolumn) ] + ON DELETE/ON UPDATE Ref("ReferencesConstraintGrammar"), ), Ref("ConnectionConstraintGrammar", optional=True), Ref("CheckConstraintGrammar", optional=True), ), ) class TableIndexSegment(BaseSegment): """A table index, e.g. for CREATE TABLE.""" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql type = "table_index_segment" match_grammar = Sequence( Sequence("INDEX", Ref("ObjectReferenceSegment"), optional=True), OneOf( Sequence( Sequence("UNIQUE", optional=True), OneOf("CLUSTERED", "NONCLUSTERED", optional=True), Ref("BracketedIndexColumnListGrammar"), ), Sequence("CLUSTERED", "COLUMNSTORE"), Sequence( Sequence("NONCLUSTERED", optional=True), "COLUMNSTORE", Ref("BracketedColumnReferenceListGrammar"), ), ), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), ) class BracketedIndexColumnListGrammar(BaseSegment): """list of columns used for CREATE INDEX, constraints.""" type = "bracketed_index_column_list_grammar" match_grammar = Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ) ) ) class FilegroupNameSegment(BaseSegment): """Filegroup Name Segment.""" type = "filegroup_name" match_grammar = Ref("SingleIdentifierGrammar") class FilegroupClause(BaseSegment): """Filegroup Clause segment. https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-files-and-filegroups """ type = "filegroup_clause" match_grammar = Sequence( "ON", Ref("FilegroupNameSegment"), ) class IdentityGrammar(BaseSegment): """`IDENTITY (1,1)` in table schemas. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property """ type = "identity_grammar" match_grammar = Sequence( "IDENTITY", # optional (seed, increment) e.g. (1, 1) Bracketed( Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), optional=True, ), ) class EncryptedWithGrammar(BaseSegment): """ENCRYPTED WITH in table schemas. 
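For example (key name illustrative):

```
ENCRYPTED WITH (
    COLUMN_ENCRYPTION_KEY = my_cek,
    ENCRYPTION_TYPE = DETERMINISTIC,
    ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256'
)
```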
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property """ type = "encrypted_with_grammar" match_grammar = Sequence( "ENCRYPTED", "WITH", Bracketed( Delimited( Sequence( "COLUMN_ENCRYPTION_KEY", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "ENCRYPTION_TYPE", Ref("EqualsSegment"), OneOf("DETERMINISTIC", "RANDOMIZED"), ), Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ), ) class TableDistributionIndexClause(BaseSegment): """`CREATE TABLE` distribution / index clause. This is specific to Azure Synapse Analytics. """ type = "table_distribution_index_clause" match_grammar = Sequence( "WITH", Bracketed( Delimited( Ref("TableDistributionClause"), Ref("TableIndexClause"), Ref("TableLocationClause"), ), ), ) class TableDistributionClause(BaseSegment): """`CREATE TABLE` distribution clause. This is specific to Azure Synapse Analytics. """ type = "table_distribution_clause" match_grammar = Sequence( "DISTRIBUTION", Ref("EqualsSegment"), OneOf( "REPLICATE", "ROUND_ROBIN", Sequence( "HASH", Bracketed(Ref("ColumnReferenceSegment")), ), ), ) class TableIndexClause(BaseSegment): """`CREATE TABLE` table index clause. This is specific to Azure Synapse Analytics. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7#TableOptions """ type = "table_index_clause" match_grammar = Sequence( OneOf( "HEAP", Sequence( "CLUSTERED", "COLUMNSTORE", "INDEX", Sequence( "ORDER", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), optional=True, ), ), Sequence( "CLUSTERED", "INDEX", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf( "ASC", "DESC", optional=True, ), ), ), ), ), ), ) class TableLocationClause(BaseSegment): """`CREATE TABLE` location clause. This is specific to Azure Synapse Analytics (deprecated) or to an external table. """ type = "table_location_clause" match_grammar = Sequence( "LOCATION", Ref("EqualsSegment"), OneOf( "USER_DB", # Azure Synapse Analytics specific Ref("QuotedLiteralSegmentOptWithN"), # External Table ), ) class AlterTableSwitchStatementSegment(BaseSegment): """An `ALTER TABLE SWITCH` statement.""" type = "alter_table_switch_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql # T-SQL's ALTER TABLE SWITCH grammar is different enough to core ALTER TABLE grammar # to merit its own definition match_grammar = Sequence( "ALTER", "TABLE", Ref("ObjectReferenceSegment"), "SWITCH", Sequence("PARTITION", Ref("NumericLiteralSegment"), optional=True), "TO", Ref("ObjectReferenceSegment"), Sequence("PARTITION", Ref("NumericLiteralSegment"), optional=True), Sequence( "WITH", OneOf( Bracketed( "WAIT_AT_LOW_PRIORITY", Bracketed( Delimited( Sequence( "MAX_DURATION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Sequence( "ABORT_AFTER_WAIT", Ref("EqualsSegment"), OneOf("NONE", "SELF", "BLOCKERS"), ), ), ), ), # Azure Synapse Analytics specific: Bracketed( "TRUNCATE_TARGET", Ref("EqualsSegment"), OneOf("ON", "OFF"), ), ), optional=True, ), Ref("DelimiterGrammar", optional=True), ) class CreateTableAsSelectStatementSegment(BaseSegment): """A `CREATE TABLE AS SELECT` statement. This is specific to Azure Synapse Analytics. 
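For example (names illustrative):

```
CREATE TABLE dbo.my_table
WITH (DISTRIBUTION = ROUND_ROBIN)
AS SELECT * FROM dbo.my_source_table
```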
""" type = "create_table_as_select_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-as-select-azure-sql-data-warehouse?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true match_grammar = Sequence( "CREATE", "TABLE", Ref("TableReferenceSegment"), Ref("TableDistributionIndexClause"), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.""" type = "transaction_statement" match_grammar = OneOf( # [ BEGIN | SAVE ] [ TRANSACTION | TRAN ] [ | ] # COMMIT [ TRANSACTION | TRAN | WORK ] # ROLLBACK [ TRANSACTION | TRAN | WORK ] [ | ] # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-transaction-transact-sql Sequence( "BEGIN", Sequence("DISTRIBUTED", optional=True), Ref("TransactionGrammar"), Ref("SingleIdentifierGrammar", optional=True), Sequence("WITH", "MARK", Ref("QuotedIdentifierSegment"), optional=True), Ref("DelimiterGrammar", optional=True), ), Sequence( OneOf("COMMIT", "ROLLBACK"), Ref("TransactionGrammar", optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("VariableIdentifierSegment"), optional=True, ), Ref("DelimiterGrammar", optional=True), ), Sequence( OneOf("COMMIT", "ROLLBACK"), Sequence("WORK", optional=True), Ref("DelimiterGrammar", optional=True), ), Sequence( "SAVE", Ref("TransactionGrammar"), OneOf( Ref("SingleIdentifierGrammar"), Ref("VariableIdentifierSegment"), optional=True, ), Ref("DelimiterGrammar", optional=True), ), ) class BeginEndSegment(BaseSegment): """A `BEGIN/END` block. Encloses multiple statements into a single statement object. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-end-transact-sql """ type = "begin_end_block" match_grammar = Sequence( "BEGIN", Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, "END", ) class AtomicBeginEndSegment(BaseSegment): """A special `BEGIN/END` block with atomic options. This is only dedicated to natively compiled stored procedures. Encloses multiple statements into a single statement object. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-end-transact-sql https://learn.microsoft.com/en-us/sql/t-sql/statements/create-procedure-transact-sql#syntax """ type = "atomic_begin_end_block" match_grammar = Sequence( "BEGIN", Sequence( "ATOMIC", "WITH", Bracketed( Delimited( Sequence( "LANGUAGE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "TRANSACTION", "ISOLATION", "LEVEL", Ref("EqualsSegment"), OneOf( "SNAPSHOT", Sequence("REPEATABLE", "READ"), "SERIALIZABLE", ), ), Sequence( "DATEFIRST", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "DATEFORMAT", Ref("EqualsSegment"), Ref("DateFormatSegment"), optional=True, ), Sequence( "DELAYED_DURABILITY", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), ), ), ), Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, Sequence("END", optional=True), ) class TryCatchSegment(BaseSegment): """A `TRY/CATCH` block pair. 
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/try-catch-transact-sql """ type = "try_catch" match_grammar = Sequence( "BEGIN", "TRY", Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, "END", "TRY", "BEGIN", "CATCH", Ref("DelimiterGrammar", optional=True), Indent, # A catch block may be empty AnyNumberOf(Ref("StatementAndDelimiterGrammar")), Dedent, "END", "CATCH", ) class BatchSegment(BaseSegment): """A segment representing a GO batch within a file or script.""" type = "batch" match_grammar = OneOf( # Things that can be bundled Ref("OneOrMoreStatementsGrammar"), # Things that can't be bundled Ref("CreateProcedureStatementSegment"), ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. We override default as T-SQL allows concept of several batches of commands separated by GO as well as usual semicolon-separated statement lines. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. """ match_grammar = Sequence( AnyNumberOf(Ref("BatchDelimiterGrammar")), Delimited( Ref("BatchSegment"), delimiter=AnyNumberOf( Sequence( Ref("DelimiterGrammar", optional=True), Ref("BatchDelimiterGrammar") ), min_times=1, ), allow_gaps=True, allow_trailing=True, ), ) class OpenRowSetSegment(BaseSegment): """A `OPENROWSET` segment. https://docs.microsoft.com/en-us/sql/t-sql/functions/openrowset-transact-sql """ type = "openrowset_segment" match_grammar = Sequence( "OPENROWSET", Bracketed( OneOf( Sequence( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), OneOf( Sequence( Ref("QuotedLiteralSegment"), Ref("DelimiterGrammar"), Ref("QuotedLiteralSegment"), Ref("DelimiterGrammar"), Ref("QuotedLiteralSegment"), ), Ref("QuotedLiteralSegment"), ), Ref("CommaSegment"), OneOf( Ref("TableReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "BULK", Ref("QuotedLiteralSegmentOptWithN"), Ref("CommaSegment"), OneOf( Sequence( Sequence( "FORMATFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), Ref("CommaSegment"), optional=True, ), Delimited( AnyNumberOf( Sequence( "DATASOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "ERRORFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "ERRORFILE_DATA_SOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "MAXERRORS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "FIRSTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "LASTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "CODEPAGE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDQUOTE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "FORMATFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "FORMATFILE_DATA_SOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), ), optional=True, ), ), "SINGLE_BLOB", "SINGLE_CLOB", "SINGLE_NCLOB", ), ), ), ), Ref("OpenRowSetWithClauseSegment", optional=True), ) class OpenRowSetWithClauseSegment(BaseSegment): """A `WITH` clause of an `OPENROWSET()` segment. 
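e.g. `WITH (my_col INT, my_json_col VARCHAR(8) '$.my_path')` (column names and JSON path illustrative).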
https://learn.microsoft.com/en-us/azure/synapse-analytics/sql/develop-openrowset#syntax """ type = "openrowset_with_clause" match_grammar = Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Ref("NumericLiteralSegment"), optional=True), Ref("CollateGrammar", optional=True), OneOf( Ref("NumericLiteralSegment"), # Column ordinal Ref("QuotedLiteralSegment"), # JSON path optional=True, ), ) ) ), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/delete-transact-sql Overriding ANSI to remove greedy logic which assumes statements have been delimited and to allow for Azure Synapse Analytics-specific DELETE statements """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar = Sequence( "DELETE", OneOf( Sequence( Ref("TopPercentGrammar", optional=True), Ref.keyword("FROM", optional=True), OneOf( Sequence( Sequence( "OPENDATASOURCE", Bracketed( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), Ref("DotSegment"), optional=True, ), Ref("TableReferenceSegment"), Ref("PostTableExpressionGrammar", optional=True), ), Sequence( "OPENQUERY", Bracketed( Ref("NakedIdentifierSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), ), Ref("OpenRowSetSegment"), ), Ref("OutputClauseSegment", optional=True), Ref("FromClauseSegment", optional=True), OneOf( Ref("WhereClauseSegment"), Sequence( "WHERE", "CURRENT", "OF", Ref("CursorNameGrammar"), ), optional=True, ), ), # Azure Synapse Analytics-specific Sequence( "FROM", Ref("TableReferenceSegment"), "JOIN", Ref("TableReferenceSegment"), Ref("JoinOnConditionSegment"), Ref("WhereClauseSegment", optional=True), ), Ref("OpenQuerySegment"), ), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class FromClauseSegment(ansi.FromClauseSegment): """A `FROM` clause like in `SELECT`. NOTE: this is a delimited set of table expressions, with a variable number of optional join clauses with those table expressions. The delimited aspect is the higher of the two such that the following is valid (albeit unusual): ``` SELECT * FROM a JOIN b, c JOIN d ``` Overriding ANSI to remove Delimited logic which assumes statements have been delimited """ type = "from_clause" match_grammar = Sequence( "FROM", Delimited(Ref("FromExpressionSegment")), Ref("DelimiterGrammar", optional=True), ) class TableExpressionSegment(BaseSegment): """The main table expression e.g. within a FROM clause. In SQL standard, as well as T-SQL, table expressions (`table reference` in SQL standard) can also be join tables, optionally bracketed, allowing for nested joins. """ type = "table_expression" match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Sequence(Ref("TableReferenceSegment"), Ref("PostTableExpressionGrammar")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("OpenRowSetSegment"), Ref("OpenJsonSegment"), Ref("OpenQuerySegment"), Ref("TableReferenceSegment"), Ref("StorageLocationSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), Bracketed( Sequence( Ref("TableExpressionSegment"), # TODO: Revisit this to make sure it's sensible.
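# Handles bracketed joins, e.g. FROM (a JOIN b ON a.x = b.x)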
Conditional(Dedent, indented_joins=False), Conditional(Indent, indented_joins=True), OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")), Conditional(Dedent, indented_joins=True), Conditional(Indent, indented_joins=True), ) ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`. Overriding ANSI to remove Delimited logic which assumes statements have been delimited """ type = "groupby_clause" match_grammar = Sequence( "GROUP", "BY", Indent, OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), AnyNumberOf( Ref("CommaSegment"), OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), ), Ref("WithRollupClauseSegment", optional=True), Dedent, ) class WithRollupClauseSegment(BaseSegment): """A `WITH ROLLUP` clause after the `GROUP BY` clause.""" type = "with_rollup_clause" match_grammar = Sequence( "WITH", "ROLLUP", ) class HavingClauseSegment(BaseSegment): """A `HAVING` clause like in `SELECT`. Overriding ANSI to remove greedy terminator """ type = "having_clause" match_grammar = Sequence( "HAVING", Indent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class OrderByClauseSegment(BaseSegment): """A `ORDER BY` clause like in `SELECT`. Overriding ANSI to remove Greedy logic which assumes statements have been delimited """ type = "orderby_clause" match_grammar = Sequence( "ORDER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), ), terminators=[Ref("OffsetClauseSegment")], ), Sequence( Ref("OffsetClauseSegment"), Ref("FetchClauseSegment", optional=True), optional=True, ), Dedent, ) class OffsetClauseSegment(BaseSegment): """OFFSET clause as in a SELECT statement.""" type = "offset_clause" match_grammar = Sequence( "OFFSET", OneOf( Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), OneOf("ROW", "ROWS"), ) class RenameStatementSegment(BaseSegment): """`RENAME` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/rename-transact-sql Azure Synapse Analytics-specific. """ type = "rename_statement" match_grammar = Sequence( "RENAME", "OBJECT", Ref("ObjectReferenceSegment"), "TO", Ref("SingleIdentifierGrammar"), Ref("DelimiterGrammar", optional=True), ) class DropTableStatementSegment(ansi.DropTableStatementSegment): """A `DROP TABLE` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropTableStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class DropViewStatementSegment(ansi.DropViewStatementSegment): """A `DROP VIEW` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropViewStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class DropUserStatementSegment(ansi.DropUserStatementSegment): """A `DROP USER` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropUserStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class UpdateStatementSegment(BaseSegment): """An `Update` statement. UPDATE
<table> SET <set clause list> [ WHERE <expression> ] Overriding ANSI in order to allow for PostTableExpressionGrammar (table hints) """ type = "update_statement" match_grammar = Sequence( "UPDATE", Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), Ref("OpenQuerySegment"), ), Ref("PostTableExpressionGrammar", optional=True), Dedent, Ref("SetClauseListSegment"), Ref("OutputClauseSegment", optional=True), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class SetClauseListSegment(BaseSegment): """set clause list. Overriding ANSI to remove Delimited """ type = "set_clause_list" match_grammar = Sequence( "SET", Indent, Ref("SetClauseSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("SetClauseSegment"), ), Dedent, ) class SetClauseSegment(BaseSegment): """Set clause. Overriding ANSI to allow for ExpressionSegment on the right """ type = "set_clause" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("AssignmentOperatorSegment"), Ref("ExpressionSegment"), ) class SetContextInfoSegment(BaseSegment): """SET CONTEXT_INFO Statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/set-context-info-transact-sql """ type = "set_context_info_statement" match_grammar = Sequence( "SET", "CONTEXT_INFO", OneOf( Ref("HexadecimalLiteralSegment"), Ref("ParameterNameSegment"), ), ) class PrintStatementSegment(BaseSegment): """PRINT statement segment.""" type = "print_statement" match_grammar = Sequence( "PRINT", Ref("ExpressionSegment"), Ref("DelimiterGrammar", optional=True), ) class OptionClauseSegment(BaseSegment): """Query Hint clause. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query """ type = "option_clause" match_grammar = Sequence( "OPTION", Bracketed( Delimited(Ref("QueryHintSegment")), ), ) class QueryHintSegment(BaseSegment): """Query Hint segment. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query """ type = "query_hint_segment" match_grammar = OneOf( Sequence( # Azure Synapse Analytics specific "LABEL", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( OneOf("HASH", "ORDER"), "GROUP", ), Sequence(OneOf("MERGE", "HASH", "CONCAT"), "UNION"), Sequence(OneOf("LOOP", "MERGE", "HASH"), "JOIN"), Sequence("EXPAND", "VIEWS"), Sequence( OneOf( "FAST", "MAXDOP", "MAXRECURSION", "QUERYTRACEON", Sequence( OneOf( "MAX_GRANT_PERCENT", "MIN_GRANT_PERCENT", ), Ref("EqualsSegment"), ), ), Ref("NumericLiteralSegment"), ), Sequence("FORCE", "ORDER"), Sequence( OneOf("FORCE", "DISABLE"), OneOf("EXTERNALPUSHDOWN", "SCALEOUTEXECUTION"), ), Sequence( OneOf( "KEEP", "KEEPFIXED", "ROBUST", ), "PLAN", ), "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX", "NO_PERFORMANCE_SPOOL", Sequence( "OPTIMIZE", "FOR", OneOf( "UNKNOWN", Bracketed( Ref("ParameterNameSegment"), OneOf( "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")) ), AnyNumberOf( Ref("CommaSegment"), Ref("ParameterNameSegment"), OneOf( "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")), ), ), ), ), ), Sequence("PARAMETERIZATION", OneOf("SIMPLE", "FORCED")), "RECOMPILE", Sequence( "USE", "HINT", Bracketed( Ref("QuotedLiteralSegment"), AnyNumberOf(Ref("CommaSegment"), Ref("QuotedLiteralSegment")), ), ), Sequence( "USE", "PLAN", Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "TABLE", "HINT", Ref("ObjectReferenceSegment"), Delimited(Ref("TableHintSegment")), ), ) class PostTableExpressionGrammar(BaseSegment): """Table Hint clause.
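e.g. `WITH (NOLOCK)` or `WITH (NOLOCK, INDEX(my_index))` after a table reference (index name illustrative).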
Overloading the PostTableExpressionGrammar to implement. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table """ type = "post_table_expression" match_grammar = Sequence( Sequence("WITH", optional=True), Bracketed( Ref("TableHintSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TableHintSegment"), ), ), ) class TableHintSegment(BaseSegment): """Table Hint segment. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table """ type = "query_hint_segment" match_grammar = OneOf( "NOEXPAND", Sequence( "INDEX", Bracketed( Delimited( OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")), ), ), ), Sequence( "INDEX", Ref("EqualsSegment"), Bracketed( OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")), ), ), "KEEPIDENTITY", "KEEPDEFAULTS", Sequence( "FORCESEEK", Bracketed( Ref("IndexReferenceSegment"), Bracketed( Ref("SingleIdentifierGrammar"), AnyNumberOf(Ref("CommaSegment"), Ref("SingleIdentifierGrammar")), ), optional=True, ), ), "FORCESCAN", "HOLDLOCK", "IGNORE_CONSTRAINTS", "IGNORE_TRIGGERS", "NOLOCK", "NOWAIT", "PAGLOCK", "READCOMMITTED", "READCOMMITTEDLOCK", "READPAST", "READUNCOMMITTED", "REPEATABLEREAD", "ROWLOCK", "SERIALIZABLE", "SNAPSHOT", Sequence( "SPATIAL_WINDOW_MAX_CELLS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), "TABLOCK", "TABLOCKX", "UPDLOCK", "XLOCK", ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Except or Intersect. Override ANSI to remove TSQL non-keyword MINUS. """ type = "set_operator" match_grammar = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), "INTERSECT", "EXCEPT", ) class SetExpressionSegment(BaseSegment): """A set expression with either Union, Minus, Except or Intersect. Overriding ANSI to include OPTION clause. """ type = "set_expression" # match grammar match_grammar = Sequence( Ref("NonSetSelectableGrammar"), AnyNumberOf( Sequence( Ref("SetOperatorSegment"), Ref("NonSetSelectableGrammar"), ), min_times=1, ), Ref("OrderByClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class ForClauseSegment(BaseSegment): """A For Clause segment for TSQL. 
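e.g. `SELECT ... FOR JSON AUTO` or `SELECT ... FOR XML PATH('row')`.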
This is used to format results into XML or JSON """ type = "for_clause" _common_directives_for_xml = Sequence( Sequence( "BINARY", "BASE64", ), "TYPE", Sequence( "ROOT", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), optional=True, ) _elements = Sequence("ELEMENTS", OneOf("XSINIL", "ABSENT", optional=True)) match_grammar = Sequence( "FOR", OneOf( "BROWSE", Sequence( "JSON", Delimited( OneOf( "AUTO", "PATH", ), Sequence( "ROOT", Bracketed( Ref("LiteralGrammar"), optional=True, ), optional=True, ), Ref.keyword("INCLUDE_NULL_VALUES", optional=True), Ref.keyword("WITHOUT_ARRAY_WRAPPER", optional=True), ), ), Sequence( "XML", OneOf( Delimited( Sequence( "PATH", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), _common_directives_for_xml, _elements, ), Delimited( "EXPLICIT", _common_directives_for_xml, Ref.keyword("XMLDATA", optional=True), ), Delimited( OneOf( "AUTO", Sequence( "RAW", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), ), _common_directives_for_xml, _elements, Sequence( OneOf( "XMLDATA", Sequence( "XMLSCHEMA", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), ), optional=True, ), ), ), ), ), ) class ExecuteOptionSegment(BaseSegment): """An option for EXEC/EXECUTE WITH clause.""" type = "execute_option" _result_sets_definition = OneOf( # ( { column_name data_type [ COLLATE collation_name ] # [ NULL | NOT NULL ] } [,...n ] ) Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("ObjectReferenceSegment"), optional=True, ), OneOf("NULL", Sequence("NOT", "NULL"), optional=True), ), ) ), # AS OBJECT [ db_name . [ schema_name ] . | schema_name . ] # {table_name | view_name | table_valued_function_name } Sequence( "AS", "OBJECT", Sequence( Ref("SingleIdentifierGrammar", optional=True), Ref("SingleIdentifierGrammar"), optional=True, ), Ref("ObjectReferenceSegment"), ), # AS TYPE [ schema_name.]table_type_name Sequence( "AS", "TYPE", Sequence( Ref("ObjectReferenceSegment"), Ref("DotSegment"), optional=True, ), Ref("ObjectReferenceSegment"), ), # AS FOR XML Sequence("AS", "FOR", "XML"), ) match_grammar = OneOf( "RECOMPILE", Sequence("RESULT", "SETS", "UNDEFINED"), Sequence("RESULT", "SETS", "NONE"), Sequence( "RESULT", "SETS", Bracketed( Delimited(_result_sets_definition), ), ), ) class LoginUserSegment(BaseSegment): """A `LOGIN` or `USER` segment. This is used in the EXECUTE statement to specify the login or user context. """ type = "login_user_segment" match_grammar = Sequence( "AS", OneOf("LOGIN", "USER"), Ref("RawEqualsSegment"), Ref("QuotedLiteralSegment"), ) class ExecuteScriptSegment(BaseSegment): """`EXECUTE` statement. Matching segment name and type from exasol. 
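For example `EXEC dbo.my_proc @my_param = 1;` or `EXECUTE ('SELECT 1');` (names illustrative).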
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/execute-transact-sql """ # Execute a stored procedure or function _execute_stored_procedure_or_function = Sequence( # [ @return_status = ] Sequence( Ref("ParameterNameSegment"), Ref("RawEqualsSegment"), optional=True, ), OneOf( # module_name [;number] or @module_name_var Sequence( Ref("ObjectReferenceSegment"), Sequence( Ref("SemicolonSegment"), Ref("NumericLiteralSegment"), optional=True, ), ), Ref("ParameterNameSegment"), ), # Parameter list (optional, comma-separated) Indent, AnyNumberOf( Delimited( Sequence( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True, ), OneOf( Ref("ExpressionSegment"), Sequence( Ref("ParameterNameSegment"), Sequence("OUTPUT", optional=True), ), "DEFAULT", ), ) ) ), Dedent, Sequence( "WITH", Ref("ExecuteOptionSegment"), optional=True, ), ) # Execute a character string _execute_a_characters_string = Sequence( Bracketed( Delimited( OneOf( Ref("ParameterNameSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), delimiter=Ref("PlusSegment"), ) ), Ref("LoginUserSegment", optional=True), ) # Execute a pass-through command against a linked server _execute_pass_through_command = Sequence( Bracketed( Delimited( OneOf( Ref("ParameterNameSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), delimiter=Ref("PlusSegment"), ), # Optional: , { value | @variable [ OUTPUT ] } [,...n] Sequence( Ref("CommaSegment"), Delimited( Sequence( OneOf( Ref("ExpressionSegment"), Ref("ParameterNameSegment"), ), Sequence("OUTPUT", optional=True), ), ), optional=True, ), ), # Optional: [ AS { LOGIN | USER } = ' name ' ] Ref("LoginUserSegment", optional=True), # Optional: [ AT linked_server_name ] # Optional: [ AT DATA_SOURCE data_source_name ] Sequence( "AT", Sequence("DATA_SOURCE", optional=True), Ref("ObjectReferenceSegment"), optional=True, ), ) type = "execute_script_statement" match_grammar = Sequence( OneOf("EXEC", "EXECUTE", optional=True), OneOf( _execute_stored_procedure_or_function, _execute_a_characters_string, _execute_pass_through_command, ), Ref("DelimiterGrammar", optional=True), ) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement. Overriding ANSI to allow for AUTHORIZATION clause https://docs.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql Not yet implemented: proper schema_element parsing. Once we have an AccessStatementSegment that works for TSQL, this definition should be tweaked to include schema elements. 
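Currently parses forms such as `CREATE SCHEMA my_schema AUTHORIZATION dbo;` (schema name illustrative).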
""" type = "create_schema_statement" match_grammar = Sequence( "CREATE", "SCHEMA", Ref("SchemaReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), Ref( "DelimiterGrammar", optional=True, ), ) class MergeStatementSegment(ansi.MergeStatementSegment): """Contains dialect specific `MERGE` statement.""" type = "merge_statement" match_grammar = Sequence( Ref("MergeIntoLiteralGrammar"), Indent, Ref("TableReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Ref("TableHintSegment", optional=True), ) ), optional=True, ), Ref("AliasExpressionSegment", optional=True, exclude=Ref.keyword("USING")), Dedent, "USING", Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), Sequence( Bracketed( Ref("SelectableGrammar"), ), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, Conditional(Indent, indented_using_on=True), Ref("JoinOnConditionSegment"), Conditional(Dedent, indented_using_on=True), Ref("MergeMatchSegment"), ) class MergeMatchSegment(BaseSegment): """Contains dialect specific merge operations.""" type = "merge_match" match_grammar = Sequence( AnyNumberOf( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment"), min_times=1, ), Ref("OutputClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar = Sequence( "WHEN", "MATCHED", Sequence( "AND", Ref("ExpressionSegment"), optional=True, ), Indent, "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar = OneOf( Sequence( "WHEN", "NOT", "MATCHED", Sequence("BY", "TARGET", optional=True), Sequence("AND", Ref("ExpressionSegment"), optional=True), Indent, "THEN", Ref("MergeInsertClauseSegment"), Dedent, ), Sequence( "WHEN", "NOT", "MATCHED", "BY", "SOURCE", Sequence("AND", Ref("ExpressionSegment"), optional=True), Indent, "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ), ) class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, "VALUES", Indent, OneOf( Bracketed( Delimited( AnyNumberOf( Ref("ExpressionSegment"), ), ), ), Sequence( "DEFAULT", "VALUES", ), ), Dedent, ) class OutputClauseSegment(BaseSegment): """OUTPUT Clause used within DELETE, INSERT, UPDATE, MERGE. https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql """ type = "output_clause" match_grammar = AnyNumberOf( Sequence( "OUTPUT", Indent, Delimited( AnyNumberOf( Ref("WildcardExpressionSegment"), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), Ref("SingleIdentifierGrammar"), terminators=[Ref.keyword("INTO")], ), ), Dedent, Sequence( "INTO", Indent, Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Dedent, optional=True, ), ), ) class ThrowStatementSegment(BaseSegment): """A THROW statement. 
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/throw-transact-sql """ type = "throw_statement" match_grammar = Sequence( "THROW", Sequence( OneOf( # error_number Ref("NumericLiteralSegment"), Ref("ParameterNameSegment"), ), Ref("CommaSegment"), OneOf( # message Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), Ref("ParameterNameSegment"), ), Ref("CommaSegment"), OneOf( # state Ref("NumericLiteralSegment"), Ref("ParameterNameSegment"), ), optional=True, ), ) class RaiserrorStatementSegment(BaseSegment): """RAISERROR statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/raiserror-transact-sql """ type = "raiserror_statement" match_grammar = Sequence( "RAISERROR", Bracketed( Delimited( OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), Ref("ParameterNameSegment"), ), OneOf( Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), Ref("ParameterNameSegment"), ), OneOf( Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), Ref("ParameterNameSegment"), ), AnyNumberOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), min_times=0, max_times=20, ), ), ), Sequence( "WITH", Delimited( "LOG", "NOWAIT", "SETERROR", ), optional=True, ), ) class GotoStatement(BaseSegment): """GOTO statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql """ type = "goto_statement" match_grammar = Sequence("GOTO", Ref("SingleIdentifierGrammar")) class ExecuteAsClause(BaseSegment): """EXECUTE AS Clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/execute-as-clause-transact-sql """ type = "execute_as_clause" match_grammar = Sequence( "EXECUTE", "AS", Ref("SingleQuotedIdentifierSegment"), ) class CreateTriggerStatementSegment(BaseSegment): """Create Trigger Statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql """ type = "create_trigger" match_grammar: Matchable = Sequence( "CREATE", Sequence("OR", "ALTER", optional=True), "TRIGGER", Ref("TriggerReferenceSegment"), "ON", OneOf( Ref("TableReferenceSegment"), Sequence("ALL", "SERVER"), "DATABASE", ), Sequence( "WITH", AnySetOf( # NOTE: Technically, ENCRYPTION can't be combined with the other two, # but this slightly more generous parsing is ok for SQLFluff. Ref.keyword("ENCRYPTION"), Ref.keyword("NATIVE_COMPILATION"), Ref.keyword("SCHEMABINDING"), ), Ref("ExecuteAsClause", optional=True), optional=True, ), OneOf( Sequence("FOR", Delimited(Ref("SingleIdentifierGrammar"), optional=True)), "AFTER", Sequence("INSTEAD", "OF"), optional=True, ), Delimited( "INSERT", "UPDATE", "DELETE", optional=True, ), Sequence("WITH", "APPEND", optional=True), Sequence("NOT", "FOR", "REPLICATION", optional=True), "AS", Ref("OneOrMoreStatementsGrammar"), # TODO: EXTERNAL NAME ) class DropTriggerStatementSegment(BaseSegment): """Drop Trigger Statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-trigger-transact-sql """ type = "drop_trigger" match_grammar: Matchable = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TriggerReferenceSegment")), Sequence("ON", OneOf("DATABASE", Sequence("ALL", "SERVER")), optional=True), ) class DisableTriggerStatementSegment(BaseSegment): """Disable Trigger Statement. 
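e.g. `DISABLE TRIGGER my_trigger ON dbo.my_table;` or `DISABLE TRIGGER ALL ON DATABASE;` (names illustrative).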
https://docs.microsoft.com/en-us/sql/t-sql/statements/disable-trigger-transact-sql """ type = "disable_trigger" match_grammar: Matchable = Sequence( "DISABLE", "TRIGGER", OneOf( Delimited(Ref("TriggerReferenceSegment")), "ALL", ), Sequence( "ON", OneOf(Ref("ObjectReferenceSegment"), "DATABASE", Sequence("ALL", "SERVER")), optional=True, ), ) class LabelStatementSegment(BaseSegment): """Label Statement, for a GOTO statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql """ type = "label_segment" match_grammar: Matchable = Sequence( Ref("NakedIdentifierSegment"), Ref("ColonSegment"), allow_gaps=False ) class AccessStatementSegment(BaseSegment): """A `GRANT` or `REVOKE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/grant-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/statements/deny-transact-sql https://docs.microsoft.com/en-us/sql/t-sql/statements/revoke-transact-sql """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", ), ), Sequence("APPLY", "MASKING", "POLICY"), "EXECUTE", ) _schema_object_names = [ "TABLE", "VIEW", "FUNCTION", "PROCEDURE", "SEQUENCE", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("EXTERNAL", "TABLE"), Sequence("FILE", "FORMAT"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( "ALTER", "CONTROL", "DELETE", "EXECUTE", "INSERT", "RECEIVE", "REFERENCES", "SELECT", Sequence("TAKE", "OWNERSHIP"), "UPDATE", Sequence("VIEW", "CHANGE", "TRACKING"), Sequence("VIEW", "DEFINITION"), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on. # This list will contain ansi sql objects as well as dialect specific ones. 
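# e.g. the object targeted in GRANT SELECT ON dbo.my_table TO my_role
# (names illustrative).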
_objects = Sequence( OneOf( "DATABASE", "LANGUAGE", "SCHEMA", "ROLE", "TYPE", Sequence( "FOREIGN", OneOf("SERVER", Sequence("DATA", "WRAPPER")), ), Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"), optional=True, ), Delimited(Ref("ObjectReferenceSegment"), terminators=["TO", "FROM"]), Ref("FunctionParameterListGrammar", optional=True), ) match_grammar: Matchable = OneOf( # Based on https://www.postgresql.org/docs/13/sql-grant.html # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, "TO", Delimited( OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment")), ), OneOf( Sequence("WITH", "GRANT", "OPTION"), optional=True, ), Sequence( "AS", Ref("ObjectReferenceSegment"), optional=True, ), ), Sequence( "DENY", OneOf( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, OneOf("TO"), Delimited( Ref("RoleReferenceSegment"), ), Sequence( Ref.keyword("CASCADE", optional=True), Ref("ObjectReferenceSegment", optional=True), optional=True, ), ), Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), OneOf( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, OneOf("TO", "FROM"), Delimited( Ref("RoleReferenceSegment"), ), Sequence( Ref.keyword("CASCADE", optional=True), Ref("ObjectReferenceSegment", optional=True), optional=True, ), ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-type-transact-sql """ type = "create_type_statement" match_grammar: Matchable = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence("FROM", Ref("ObjectReferenceSegment")), Sequence( "AS", "TABLE", Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("TableIndexSegment"), ), allow_trailing=True, ) ), ), ), ), ) class OpenCursorStatementSegment(BaseSegment): """An `OPEN` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/open-transact-sql """ type = "open_cursor_statement" match_grammar: Matchable = Sequence( "OPEN", Ref("CursorNameGrammar"), ) class CloseCursorStatementSegment(BaseSegment): """A `CLOSE` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/close-transact-sql """ type = "close_cursor_statement" match_grammar: Matchable = Sequence( "CLOSE", Ref("CursorNameGrammar"), ) class DeallocateCursorStatementSegment(BaseSegment): """A `DEALLOCATE` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/deallocate-transact-sql """ type = "deallocate_cursor_statement" match_grammar: Matchable = Sequence( "DEALLOCATE", Ref("CursorNameGrammar"), ) class FetchCursorStatementSegment(BaseSegment): """A `FETCH` cursor statement. 
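e.g. `FETCH NEXT FROM my_cursor INTO @my_var;` (cursor and variable names illustrative).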
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/fetch-transact-sql """ type = "fetch_cursor_statement" match_grammar: Matchable = Sequence( "FETCH", OneOf("NEXT", "PRIOR", "FIRST", "LAST", optional=True), "FROM", Ref("CursorNameGrammar"), Sequence("INTO", Delimited(Ref("ParameterNameSegment")), optional=True), ) class ConcatSegment(CompositeBinaryOperatorSegment): """Concat operator.""" match_grammar: Matchable = Ref("PlusSegment") class CreateSynonymStatementSegment(BaseSegment): """A `CREATE SYNONYM` statement.""" type = "create_synonym_statement" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-synonym-transact-sql match_grammar: Matchable = Sequence( "CREATE", "SYNONYM", Ref("SynonymReferenceSegment"), "FOR", Ref("ObjectReferenceSegment"), ) class DropSynonymStatementSegment(BaseSegment): """A `DROP SYNONYM` statement.""" type = "drop_synonym_statement" # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-synonym-transact-sql match_grammar: Matchable = Sequence( "DROP", "SYNONYM", Ref("IfExistsGrammar", optional=True), Ref("SynonymReferenceSegment"), ) class SynonymReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a synonym. A synonym may only (optionally) specify a schema. It may not specify a server or database name. """ type = "synonym_reference" # match grammar (allow whitespace) match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar", optional=True), ), min_times=0, max_times=1, ), ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """Override ANSI to use TSQL TABLESAMPLE expression.""" type = "sample_expression" match_grammar: Matchable = Sequence( "TABLESAMPLE", Sequence("SYSTEM", optional=True), Bracketed( Sequence( Ref("NumericLiteralSegment"), OneOf("PERCENT", "ROWS", optional=True) ) ), Sequence( OneOf("REPEATABLE"), Bracketed(Ref("NumericLiteralSegment")), optional=True, ), ) class TemporalQuerySegment(ansi.TemporalQuerySegment): """A segment that allows Temporal Queries to be run. https://learn.microsoft.com/en-us/sql/relational-databases/tables/temporal-tables """ type = "temporal_query" match_grammar: Matchable = Sequence( "FOR", "SYSTEM_TIME", OneOf( "ALL", Sequence( "AS", "OF", OneOf(Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment")), ), Sequence( "FROM", OneOf(Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment")), "TO", OneOf(Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment")), ), Sequence( "BETWEEN", OneOf(Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment")), "AND", OneOf(Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment")), ), Sequence( "CONTAINED", "IN", Bracketed( Delimited( Ref("QuotedLiteralSegment"), ) ), ), ), ) class CreateDatabaseScopedCredentialStatementSegment(BaseSegment): """A statement to create a database scoped credential. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-database-scoped-credential-transact-sql """ type = "create_database_scoped_credential_statement" match_grammar: Matchable = Sequence( "CREATE", "DATABASE", "SCOPED", "CREDENTIAL", Ref("ObjectReferenceSegment"), "WITH", Ref("CredentialGrammar"), ) class CreateExternalDataSourceStatementSegment(BaseSegment): """A statement to create an external data source. 
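    For illustration, a statement of the shape this segment is intended
    to match (the data source name, location and credential are
    placeholders):

        CREATE EXTERNAL DATA SOURCE my_source
        WITH (
            LOCATION = 'https://myaccount.blob.core.windows.net/my-container',
            CREDENTIAL = my_credential
        );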
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-data-source-transact-sql&tabs=dedicated#syntax """ type = "create_external_data_source_statement" match_grammar: Matchable = Sequence( "CREATE", "EXTERNAL", "DATA", "SOURCE", Ref("ObjectReferenceSegment"), "WITH", Bracketed( Delimited( Ref("TableLocationClause"), Sequence( "CONNECTION_OPTIONS", Ref("EqualsSegment"), AnyNumberOf(Ref("QuotedLiteralSegmentOptWithN")), ), Sequence( "CREDENTIAL", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "PUSHDOWN", Ref("EqualsSegment"), OneOf("ON", "OFF"), ), ), ), ) class PeriodSegment(BaseSegment): """A `PERIOD FOR SYSTEM_TIME` for `CREATE TABLE` of temporal tables. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql#generated-always-as--row--transaction_id--sequence_number----start--end---hidden---not-null- """ type = "period_segment" match_grammar = Sequence( "PERIOD", "FOR", "SYSTEM_TIME", Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ColumnReferenceSegment"), ), optional=True, ), ) class SqlcmdCommandSegment(BaseSegment): """A `sqlcmd` command. Microsoft allows professional CI/CD deployment through so called 'SQL Database Projects'. There are proprietary `sqlcmd Commands` that can be part of an SQL file. https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility#sqlcmd-commands """ type = "sqlcmd_command_segment" match_grammar: Matchable = OneOf( Sequence( Sequence( Ref("ColonSegment"), Ref("SqlcmdOperatorSegment"), # `:r` allow_gaps=False, ), Ref("SqlcmdFilePathSegment"), ), Sequence( Sequence( Ref("ColonSegment"), Ref("SqlcmdOperatorSegment"), # `:setvar` allow_gaps=False, ), Ref("ObjectReferenceSegment"), Ref("CodeSegment"), ), ) class ExternalFileFormatDelimitedTextFormatOptionClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` Delimited text `FORMAT_OPTIONS` clause.""" type = "external_file_delimited_text_format_options_clause" match_grammar = OneOf( Sequence( OneOf( "FIELD_TERMINATOR", "STRING_DELIMITER", "DATE_FORMAT", "PARSER_VERSION" ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIRST_ROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "USE_TYPE_DEFAULT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "ENCODING", Ref("EqualsSegment"), Ref("FileEncodingSegment"), ), ) class ExternalFileFormatDelimitedTextClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Delimited text* clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=delimited#syntax """ type = "external_file_delimited_text_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "DELIMITEDTEXT", ), Sequence( "FORMAT_OPTIONS", Bracketed( Delimited( Ref("ExternalFileFormatDelimitedTextFormatOptionClause"), ), ), optional=True, ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatRcClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Record Columnar file format (RcFile)* clause. 
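    For illustration, a clause body this is intended to match (the SerDe
    class is one of the values listed in the Microsoft documentation):

        FORMAT_TYPE = RCFILE,
        SERDE_METHOD = 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'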
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=rc#syntax """ type = "external_file_rc_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "RCFILE", ), Sequence( "SERDE_METHOD", Ref("EqualsSegment"), Ref("SerdeMethodSegment"), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatOrcClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Optimized Row Columnar (ORC)* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=orc#syntax """ type = "external_file_orc_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "ORC", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatParquetClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *PARQUET* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=parquet#syntax """ type = "external_file_parquet_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "PARQUET", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatJsonClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *JSON* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=json#syntax """ type = "external_file_json_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "JSON", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatDeltaClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Delta Lake* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=delta#syntax """ type = "external_file_delta_clause" match_grammar = Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "DELTA", ) class CreateExternalFileFormat(BaseSegment): """A statement to create an `EXTERNAL FILE FORMAT` object. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql&tabs=delta#syntax """ type = "create_external_file_format" match_grammar: Matchable = Sequence( "CREATE", "EXTERNAL", "FILE", "FORMAT", Ref("ObjectReferenceSegment"), "WITH", Bracketed( OneOf( Ref("ExternalFileFormatDelimitedTextClause"), Ref("ExternalFileFormatRcClause"), Ref("ExternalFileFormatOrcClause"), Ref("ExternalFileFormatParquetClause"), Ref("ExternalFileFormatJsonClause"), Ref("ExternalFileFormatDeltaClause"), ), ), ) class OpenJsonWithClauseSegment(BaseSegment): """A `WITH` clause of an `OPENJSON()` table-valued function. https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql#with_clause """ type = "openjson_with_clause" match_grammar = Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Ref("QuotedLiteralSegment", optional=True), # column_path Sequence( "AS", "JSON", optional=True, ), ), ), ), ) class OpenJsonSegment(BaseSegment): """An `OPENJSON()` table-valued function. 
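    For illustration, a query using the function this segment is intended
    to match (variable, path and column names are placeholders):

        SELECT * FROM OPENJSON(@json, '$.orders') WITH (order_id INT '$.id');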
https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql#syntax """ type = "openjson_segment" match_grammar = Sequence( "OPENJSON", Bracketed( Delimited( Ref("QuotedLiteralSegmentOptWithN"), # jsonExpression Ref("ColumnReferenceSegment"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), # path ), ), Ref("OpenJsonWithClauseSegment", optional=True), ) class OpenQuerySegment(BaseSegment): """An `OPENQUERY()` table-valued function. https://learn.microsoft.com/en-us/sql/t-sql/functions/openquery-transact-sql#syntax """ type = "openquery_segment" match_grammar = Sequence( "OPENQUERY", Bracketed( Delimited( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ) ), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-table-transact-sql&tabs=dedicated """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnDefinitionSegment"), ), ), "WITH", Bracketed( Delimited( Ref("TableLocationClause"), Sequence( "DATA_SOURCE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "REJECT_TYPE", Ref("EqualsSegment"), OneOf("value", "percentage"), ), Sequence( "REJECT_VALUE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "REJECT_SAMPLE_VALUE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "REJECTED_ROW_LOCATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-role-transact-sql """ type = "create_role_statement" match_grammar = Sequence( "CREATE", "ROLE", Ref("RoleReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), ) class CreateLoginStatementSegment(BaseSegment): """A `CREATE LOGIN` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-login-transact-sql """ type = "create_login_statement" _default_database = Sequence( "DEFAULT_DATABASE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) _default_language = Sequence( "DEFAULT_LANGUAGE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) _option_list_2 = AnyNumberOf( Sequence( "SID", Ref("EqualsSegment"), Ref("HexadecimalLiteralSegment"), ), _default_database, _default_language, Sequence( "CHECK_EXPIRATION", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( "CHECK_POLICY", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( "CREDENTIAL", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), ) _option_list_1 = Sequence( "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), Ref.keyword("MUST_CHANGE", optional=True), Ref("CommaSegment", optional=True), Delimited(_option_list_2, optional=True), ) _windows_options = AnyNumberOf( _default_database, _default_language, ) _sources = OneOf( "WINDOWS", Sequence("EXTERNAL", "PROVIDER"), Sequence("CERTIFICATE", Ref("ObjectReferenceSegment")), Sequence( Sequence("ASYMMETRIC", "KEY"), Ref("ObjectReferenceSegment"), ), ) match_grammar: Matchable = Sequence( "CREATE", "LOGIN", Ref("ObjectReferenceSegment"), AnyNumberOf( Sequence("FROM", _sources), Sequence("WITH", _option_list_1), ), ) class DropExternalTableStatementSegment(BaseSegment): """A `DROP EXTERNAL TABLE ...` statement. 
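    For illustration (the table name is a placeholder):

        DROP EXTERNAL TABLE dbo.my_external_table;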
https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-external-table-transact-sql """ type = "drop_external_table_statement" match_grammar = Sequence( "DROP", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), ) class StorageLocationSegment(BaseSegment): """A tsql external storage location. https://learn.microsoft.com/en-us/sql/t-sql/statements/copy-into-transact-sql#external-locations """ type = "storage_location" match_grammar = OneOf( Ref("AzureBlobStoragePath"), Ref("AzureDataLakeStorageGen2Path"), ) class CopyIntoTableStatementSegment(BaseSegment): """A tsql `COPY INTO
` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/copy-into-transact-sql """ type = "copy_into_table_statement" match_grammar = Sequence( "COPY", "INTO", Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnDefinitionSegment")), optional=True), Ref("FromClauseSegment"), Sequence( "WITH", Bracketed( Delimited( AnySetOf( Sequence( "FILE_TYPE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "CREDENTIAL", Ref("EqualsSegment"), Bracketed(Ref("CredentialGrammar")), ), Sequence( "ERRORFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ERRORFILE_CREDENTIAL", Ref("EqualsSegment"), Bracketed(Ref("CredentialGrammar")), ), Sequence( "MAXERRORS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDQUOTE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDTERMINATOR", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ROWTERMINATOR", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIRSTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DATEFORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ENCODING", Ref("EqualsSegment"), Ref("FileEncodingSegment"), ), Sequence( "IDENTITY_INSERT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AUTO_CREATE_TABLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ) ), optional=True, ), ) class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-user-transact-sql#syntax """ _allow_encrypted_value = Sequence( "ALLOW_ENCRYPTED_VALUE_MODIFICATIONS", Ref("EqualsSegment"), OneOf("ON", "OFF"), ) _default_schema = Sequence( "DEFAULT_SCHEMA", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ) _default_language = Sequence( "DEFAULT_LANGUAGE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ) _external_provider = Sequence( "FROM", "EXTERNAL", "PROVIDER", Sequence( "WITH", "OBJECT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ) _limited_option_list = Sequence( "WITH", Delimited( _default_schema, _default_language, _allow_encrypted_value, ), optional=True, ) _options_list = Delimited( _default_schema, _default_language, Sequence( "SID", Ref("EqualsSegment"), Ref("HexadecimalLiteralSegment"), ), _allow_encrypted_value, Sequence( "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) match_grammar = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), AnyNumberOf( Sequence("WITH", _options_list), Sequence( OneOf("FROM", "FOR"), "LOGIN", Ref("ObjectReferenceSegment"), _limited_option_list, ), Sequence( OneOf("FROM", "FOR"), OneOf( "CERTIFICATE", Sequence("ASYMMETRIC", "KEY"), ), Ref("ObjectReferenceSegment"), ), Sequence( "WITHOUT", "LOGIN", _limited_option_list, ), _external_provider, ), ) class ComputedColumnDefinitionSegment(BaseSegment): """A computed column definition, e.g. for CREATE TABLE or ALTER TABLE. 
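    For illustration, a computed column of the kind this is intended to
    match (table and column names are placeholders):

        CREATE TABLE dbo.order_lines (
            price MONEY,
            qty INT,
            line_total AS (price * qty) PERSISTED
        );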
https://learn.microsoft.com/en-us/sql/relational-databases/tables/specify-computed-columns-in-a-table """ type = "computed_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name "AS", OptionallyBracketed( OneOf( Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), Sequence( "PERSISTED", Sequence("NOT", "NULL", optional=True), optional=True, ), AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class CreatePartitionFunctionSegment(BaseSegment): """A `CREATE PARTITION FUNCTION` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-partition-function-transact-sql type = "create_partition_function_statement" match_grammar: Matchable = Sequence( "CREATE", "PARTITION", "FUNCTION", Ref("ObjectReferenceSegment"), Bracketed( Ref("DatatypeSegment"), ), "AS", "RANGE", OneOf( "LEFT", "RIGHT", ), "FOR", "VALUES", Bracketed(Delimited(Ref("LiteralGrammar"))), # Bracketed(Delimited("LEFT")), ) class AlterPartitionFunctionSegment(BaseSegment): """A `ALTER PARTITION FUNCTION` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-partition-function-transact-sql # https://learn.microsoft.com/en-us/sql/relational-databases/partitions/modify-a-partition-function type = "alter_partition_function_statement" match_grammar: Matchable = Sequence( "ALTER", "PARTITION", "FUNCTION", Ref("ObjectReferenceSegment"), Bracketed(), OneOf( Sequence("SPLIT", "RANGE", Bracketed(Ref("LiteralGrammar"))), Sequence("MERGE", "RANGE", Bracketed(Ref("LiteralGrammar"))), ), ) class CreatePartitionSchemeSegment(BaseSegment): """A `CREATE PARTITION SCHEME` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-partition-scheme-transact-sql type = "create_partition_scheme_statement" match_grammar: Matchable = Sequence( "CREATE", "PARTITION", "SCHEME", Ref("ObjectReferenceSegment"), "AS", "PARTITION", Ref("ObjectReferenceSegment"), Ref.keyword("ALL", optional=True), "TO", Bracketed( Delimited( OneOf(Ref("ObjectReferenceSegment"), "PRIMARY"), ), ), ) class AlterPartitionSchemeSegment(BaseSegment): """A `ALTER PARTITION SCHEME` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-partition-scheme-transact-sql # https://learn.microsoft.com/en-us/sql/relational-databases/partitions/modify-a-partition-scheme type = "alter_partition_scheme_statement" match_grammar: Matchable = Sequence( "ALTER", "PARTITION", "SCHEME", Ref("ObjectReferenceSegment"), "NEXT", "USED", Ref("ObjectReferenceSegment", optional=True), ) class CreateMasterKeySegment(BaseSegment): """A `CREATE MASTER KEY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-master-key-transact-sql type = "create_master_key_statement" match_grammar: Matchable = Sequence( "CREATE", "MASTER", "KEY", Sequence( "ENCRYPTION", "BY", "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ) class MasterKeyEncryptionSegment(BaseSegment): """Master key encryption option.""" type = "master_key_encryption_option" match_grammar: Matchable = OneOf( Sequence("SERVICE", "MASTER", "KEY"), Sequence( "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) class AlterMasterKeySegment(BaseSegment): """A `ALTER MASTER KEY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-master-key-transact-sql type = "alter_master_key_statement" match_grammar: Matchable = Sequence( "ALTER", "MASTER", "KEY", OneOf( Sequence( Ref.keyword("FORCE",
optional=True), "REGENERATE", "WITH", "ENCRYPTION", "BY", Ref("MasterKeyEncryptionSegment"), ), Sequence( OneOf("ADD", "DROP"), "ENCRYPTION", "BY", Ref("MasterKeyEncryptionSegment"), ), ), ) class DropMasterKeySegment(BaseSegment): """A `DROP MASTER KEY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-master-key-transact-sql type = "drop_master_key_statement" match_grammar: Matchable = Sequence( "DROP", "MASTER", "KEY", ) class CreateSecurityPolicySegment(BaseSegment): """A `CREATE SECURITY POLICY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-security-policy-transact-sql type = "create_security_policy_statement" match_grammar: Matchable = Sequence( "CREATE", "SECURITY", "POLICY", Ref("ObjectReferenceSegment"), Delimited( Sequence( "ADD", OneOf("FILTER", "BLOCK", optional=True), "PREDICATE", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ), "ON", Ref("ObjectReferenceSegment"), OneOf( Sequence( "AFTER", OneOf("INSERT", "UPDATE"), ), Sequence( "BEFORE", OneOf("UPDATE", "DELETE"), ), optional=True, ), ), ), Sequence( "WITH", Bracketed( Delimited( Sequence("STATE", Ref("EqualsSegment"), OneOf("ON", "OFF")), Sequence("SCHEMABINDING", Ref("EqualsSegment"), OneOf("ON", "OFF")), optional=True, ), ), optional=True, ), Sequence( "NOT", "FOR", "REPLICATION", optional=True, ), ) class AlterSecurityPolicySegment(BaseSegment): """A `ALTER SECURITY POLICY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-security-policy-transact-sql type = "alter_security_policy_statement" match_grammar: Matchable = Sequence( "ALTER", "SECURITY", "POLICY", Ref("ObjectReferenceSegment"), Delimited( Sequence( OneOf("ADD", "ALTER"), OneOf("FILTER", "BLOCK", optional=True), "PREDICATE", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ), "ON", Ref("ObjectReferenceSegment"), OneOf( Sequence( "AFTER", OneOf("INSERT", "UPDATE"), ), Sequence( "BEFORE", OneOf("UPDATE", "DELETE"), ), optional=True, ), ), Sequence( "DROP", OneOf("FILTER", "BLOCK", optional=True), "PREDICATE", "ON", Ref("ObjectReferenceSegment"), ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Sequence("STATE", Ref("EqualsSegment"), OneOf("ON", "OFF")), Sequence("SCHEMABINDING", Ref("EqualsSegment"), OneOf("ON", "OFF")), optional=True, ), ), optional=True, ), Sequence( "NOT", "FOR", "REPLICATION", optional=True, ), ) class DropSecurityPolicySegment(BaseSegment): """A `DROP SECURITY POLICY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-security-policy-transact-sql type = "drop_security_policy" match_grammar: Matchable = Sequence( "DROP", "SECURITY", "POLICY", Sequence("IF", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), ) class OpenSymmetricKeySegment(BaseSegment): """A `OPEN SYMMETRIC KEY` statement.""" # https://learn.microsoft.com/en-us/sql/t-sql/statements/open-symmetric-key-transact-sql type = "open_symmetric_key_statement" # WITH PASSWORD = 'password' _with_password = Sequence( "WITH", "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ) _decryption_mechanism = OneOf( Sequence("CERTIFICATE", Ref("ObjectReferenceSegment"), _with_password), Sequence("ASYMMETRIC", "KEY", Ref("ObjectReferenceSegment"), _with_password), Sequence("SYMMETRIC", "KEY", Ref("ObjectReferenceSegment")), Sequence("PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), ) match_grammar: 
Matchable = Sequence( "OPEN", "SYMMETRIC", "KEY", Ref("ObjectReferenceSegment"), "DECRYPTION", "BY", _decryption_mechanism, ) class ExpressionSegment(BaseSegment): """An expression, either arithmetic or boolean. Extended for TSQL to include the `NEXT VALUE FOR` segment. """ type = "expression" match_grammar: Matchable = OneOf( Ref("Expression_A_Grammar"), Ref("NextValueSequenceSegment") ) class AdditionAssignmentSegment(CompositeBinaryOperatorSegment): """An addition assignment (`+=`) segment. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/add-equals-transact-sql?view=sql-server-ver16 """ match_grammar = Sequence(Ref("PlusComparisonSegment"), Ref("RawEqualsSegment")) class SubtractionAssignmentSegment(CompositeBinaryOperatorSegment): """A subtraction assignment (`-=`) segment. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/subtract-equals-transact-sql?view=sql-server-ver16 """ match_grammar = Sequence(Ref("MinusComparisonSegment"), Ref("RawEqualsSegment")) class MultiplicationAssignmentSegment(CompositeBinaryOperatorSegment): """A multiplication assignment (`*=`) segment. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/multiply-equals-transact-sql?view=sql-server-ver16 """ match_grammar = Sequence(Ref("MultiplyComparisonSegment"), Ref("RawEqualsSegment")) class DivisionAssignmentSegment(CompositeBinaryOperatorSegment): """A division assignment (`/=`) segment. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/divide-equals-transact-sql?view=sql-server-ver16 """ match_grammar = Sequence(Ref("DivideComparisonSegment"), Ref("RawEqualsSegment")) class ModulusAssignmentSegment(CompositeBinaryOperatorSegment): """A modulus assignment (`%=`) segment. https://learn.microsoft.com/en-us/sql/t-sql/language-elements/multiply-equals-transact-sql?view=sql-server-ver16 """ match_grammar = Sequence(Ref("ModuloComparisonSegment"), Ref("RawEqualsSegment")) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_tsql_keywords.py000066400000000000000000000321121503426445100245170ustar00rootroot00000000000000r"""A list of all SQL key words. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/reserved-keywords-transact-sql?view=sql-server-ver16 Run the script in a browser console to extract all reserved keywords: ```js (function () { const xpathResult = document.evaluate( '//div[@class=\'column\']/p[not(descendant::strong)]', document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null ); const list = new Set(); for (let index = 0; index < xpathResult.snapshotLength; ++index) { const node = xpathResult.snapshotItem(index); list.add(node.textContent.trim()); } console.log([...list].sort().map(value => ` "${value}"`).join(',\n')); })(); ``` Be careful, some keywords are present in `UNRESERVED_KEYWORDS`. 
""" RESERVED_KEYWORDS = [ "ADD", "ALL", "ALTER", "AND", "ANY", "APPEND", "AS", "ASC", "AUTHORIZATION", "BACKUP", "BATCHSIZE", "BEGIN", "BETWEEN", "BREAK", "BROWSE", "BULK", "BY", "CASCADE", "CASE", "CHECK_CONSTRAINTS", "CHECK", "CHECKPOINT", "CLOSE", "CLUSTERED", "COALESCE", "COLLATE", "COLUMN", "COMMIT", "COMPUTE", "CONSTRAINT", "CONTAINS", "CONTAINSTABLE", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT_CATALOG", # *future* "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", # *future* "CURRENT_PATH", # *future* "CURRENT_ROLE", # *future* "CURRENT_SCHEMA", # *future* "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", # *future* "CURRENT_USER", "CURRENT", "CURSOR", "DATABASE", "DBCC", "DEALLOCATE", "DECLARE", "DECRYPTION", "DEFAULT", "DELETE", "DENY", "DESC", "DISTINCT", "DISTRIBUTED", "DOUBLE", "DROP", "DYNAMIC", "ELSE", "END", "ERRLVL", "ESCAPE", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXIT", "EXTERNAL", "FAST_FORWARD", "FETCH", "FILE", "FILLFACTOR", "FOR", "FOREIGN", "FORWARD_ONLY", "FREETEXT", "FREETEXTTABLE", "FROM", "FULL", "FULLSCAN", "FUNCTION", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", "HAVING", "HOLDLOCK", "IDENTITY_INSERT", "IDENTITY", "IDENTITYCOL", "IF", "IN", "INDEX", "INNER", "INSERT", "INTERSECT", "INTO", "IS", "JOIN", "KEY", "KEYSET", "KILL", "LEFT", "LIKE", "LINENO", "LIST", "LOCAL", "MERGE", "NATIONAL", "NATIVE_COMPILATION", "NOCHECK", "NONCLUSTERED", "NOT", "NULL", "NULLIF", "OF", "OFF", "OFFSETS", "ON", "OPEN", "OPENDATASOURCE", "OPENQUERY", "OPENROWSET", "OPENXML", "OPTIMISTIC", "OPTION", "OR", "ORDER", "OUTER", "OVER", "OVERLAY", # *future* "PERCENT", "PIVOT", "PLAN", "PRIMARY", "PRINT", "PROC", "PROCEDURE", "PUBLIC", "RAISERROR", "READ_ONLY", "READ", "READTEXT", "RECONFIGURE", "REFERENCES", "REPLICATION", "RESAMPLE", "RESTORE", "RESTRICT", "RETURN", "REVERT", "REVOKE", "RIGHT", "ROLLBACK", "ROWCOUNT", "ROWGUIDCOL", "RULE", "SAVE", "SCHEMA", "SCROLL_LOCKS", "SCROLL", "SELECT", "SEMANTICKEYPHRASETABLE", "SEMANTICSIMILARITYDETAILSTABLE", "SEMANTICSIMILARITYTABLE", "SESSION_USER", "SET", "SETUSER", "SHUTDOWN", "SOME", "STATIC", "STATISTICS", "SYMMETRIC", "SYSTEM_USER", "TABLE", "TABLESAMPLE", "TEXTSIZE", "THEN", "THROW", "TO", "TOP", "TRAN", "TRAN", "TRANSACTION", "TRIGGER", "TRUNCATE", "TRY_CONVERT", "TSEQUAL", "TYPE_WARNING", "UNION", "UNIQUE", "UNPIVOT", "UPDATE", "UPDATETEXT", "USE", "USER", "VALUES", "VARYING", "VIEW", "WAITFOR", "WHEN", "WHERE", "WHILE", "WINDOW", "WITH", "WRITETEXT", ] # Future reserved keywords extracted from the documentation FUTURE_RESERVED_KEYWORDS = [ "ALIAS", "ARRAY", "CLASS", "DESTROY", "END-EXEC", "EVERY", "LIKE_REGEX", ] UNRESERVED_KEYWORDS = [ "ABORT_AFTER_WAIT", "ABORT", "ABSENT", "ACTION", "AFTER", "ALGORITHM", "ALLOW_ENCRYPTED_VALUE_MODIFICATIONS", "ALLOW_PAGE_LOCKS", "ALLOW_ROW_LOCKS", "ALLOWED", "ALWAYS", "ANSI_DEFAULTS", "ANSI_NULL_DFLT_OFF", "ANSI_NULL_DFLT_ON", "ANSI_NULLS", "ANSI_PADDING", "ANSI_WARNINGS", "APPEND_ONLY", "APPLY", "ARITHABORT", "ARITHIGNORE", "ASYMMETRIC", "AT", "ATOMIC", "AUTO_CREATE_TABLE", "AUTO", "BEFORE", # *future* "BERNOULLI", "BINARY", "BLOCK", "BLOCKERS", "BREAK", "CACHE", "CALLED", "CALLER", "CAST", "CATCH", "CERTIFICATE", "CHANGE_TRACKING", "CHECK_EXPIRATION", "CHECK_POLICY", "CODEPAGE", "COLUMN_ENCRYPTION_KEY", "COLUMNSTORE_ARCHIVE", "COLUMNSTORE", "COMMITTED", "COMPRESS_ALL_ROW_GROUPS", "COMPRESSION_DELAY", "COMPRESSION", "CONCAT_NULL_YIELDS_NULL", "CONCAT", "CONNECTION", "CONNECTION_OPTIONS", "CONTAINED", "CONTEXT_INFO", "CONTINUE", "CONTROL", "COPY", "CREDENTIAL", 
"CURSOR_CLOSE_ON_COMMIT", "CYCLE", "DATA_COMPRESSION", "DATA_CONSISTENCY_CHECK", "DATA_DELETION", "DATA_SOURCE", "DATA", "DATAFILETYPE", "DATASOURCE", "DATE_FORMAT", "DATE", "DATEFIRST", "DATEFORMAT", "DAY", "DAYS", "DEADLOCK_PRIORITY", "DEFAULT_DATABASE", "DEFAULT_LANGUAGE", "DEFAULT_SCHEMA", "DELAY", "DELAYED_DURABILITY", "DELIMITEDTEXT", "DELTA", "DENSE_RANK", "DETERMINISTIC", "DISABLE", "DISK", # listed as reserved but functionally unreserved "DISTRIBUTION", # Azure Synapse Analytics specific "DROP_EXISTING", "DUMP", # listed as reserved but functionally unreserved "DURABILITY", "EDGE", "ELEMENT", # *future* "ELEMENTS", "ENCODING", "ENCRYPTED", "ENCRYPTION_TYPE", "ENCRYPTION", "ERRORFILE_CREDENTIAL", "ERRORFILE_DATA_SOURCE", "ERRORFILE", "EXPAND", "EXPLAIN", # Azure Synapse Analytics specific "EXPLICIT", "EXTERNALPUSHDOWN", "FAST", "FIELD_TERMINATOR", "FIELDQUOTE", "FIELDTERMINATOR", "FILE_FORMAT", "FILE_TYPE", "FILEGROUP", "FILESTREAM_ON", "FILESTREAM", "FILESTREAM", "FILETABLE_COLLATE_FILENAME", "FILETABLE_DIRECTORY", "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME", "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME", "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME", "FILTER_COLUMN", "FILTER_PREDICATE", "FILTER", "FIPS_FLAGGER", "FIRE_TRIGGERS", "FIRST_ROW", "FIRST", "FIRSTROW", "FMTONLY", "FOLLOWING", "FORCE", "FORCED", "FORCEPLAN", "FORCESCAN", "FORCESEEK", "FORMAT_OPTIONS", "FORMAT_TYPE", "FORMAT", "FORMATFILE_DATA_SOURCE", "FORMATFILE", "FULLTEXT", "GENERATED", "HASH", "HEAP", # Azure Synapse Analytics specific "HIDDEN", "HIGH", "HINT", "HISTORY_RETENTION_PERIOD", "HISTORY_TABLE", "IGNORE_CONSTRAINTS", "IGNORE_DUP_KEY", "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX", "IGNORE_TRIGGERS", "IGNORE", "IMPLICIT_TRANSACTIONS", "INBOUND", "INCLUDE_NULL_VALUES", "INCLUDE", "INCREMENT", "INFINITE", "INLINE", "INPUT", "INSTEAD", "INTERVAL", "IO", "ISOLATION", "JSON", "JSON_ARRAY", "JSON_OBJECT", "KEEP", "KEEPDEFAULTS", "KEEPFIXED", "KEEPIDENTITY", "KEEPNULLS", "KILOBYTES_PER_BATCH", "LABEL", # *reserved* keyword in Azure Synapse; but would break TSQL parsing "LANGUAGE", "LAST", "LASTROW", "LEDGER_VIEW", "LEDGER", "LEGACY_CARDINALITY_ESTIMATION", "LEVEL", "LOAD", # listed as reserved but functionally unreserved "LOB_COMPACTION", "LOCATION", "LOCK_TIMEOUT", "LOG", "LOGIN", "LOOP", "LOW", "MANUAL", "MASKED", "MASTER", "MATCHED", "MAX_DURATION", "MAX_GRANT_PERCENT", "MAX", "MAXDOP", "MAXERRORS", "MAXRECURSION", "MAXVALUE", "MEMORY_OPTIMIZED", "MIGRATION_STATE", "MIN_GRANT_PERCENT", "MINUTES", "MINVALUE", "MONTH", "MONTHS", "MUST_CHANGE", "NAME", "NEXT", "NO_PERFORMANCE_SPOOL", "NO", "NOCOUNT", "NODE", "NOEXEC", "NOEXPAND", "NOLOCK", "NONE", "NORMAL", "NOWAIT", "NTILE", "NULLS", "NUMERIC_ROUNDABORT", "OBJECT", "OFFSET", "ONLINE", "ONLY", "OPENJSON", "OPENQUERY", "OPERATION_TYPE_COLUMN_NAME", "OPERATION_TYPE_DESC_COLUMN_NAME", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "OPTIMIZE", "ORC", "OUT", "OUTBOUND", "OUTPUT", "OVERRIDE", "OWNER", "PAD_INDEX", "PAGE", "PAGLOCK", "PARAMETER", "PARAMETERIZATION", "PARAMETERS", # *future* "PARQUET", "PARSEONLY", "PARSER_VERSION", "PARTIAL", # *future* "PARTITION", "PARTITIONS", "PASSWORD", "PATH", "PAUSE", "PAUSED", "PERCENTAGE", "PERCENTILE_CONT", "PERCENTILE_DISC", "PERIOD", "PERSISTED", "POLICY", "POPULATION", "PRECEDING", "PRECISION", # listed as reserved but functionally unreserved "PREDICATE", "PRIOR", "PROFILE", "PROPERTY", "PROVIDER", "PUSHDOWN", "QUERY_GOVERNOR_COST_LIMIT", "QUERYTRACEON", "QUOTED_IDENTIFIER", "R", # sqlcmd command "RANDOMIZED", "RANGE", "RANK", "RAW", "RCFILE", 
"READCOMMITTED", "READCOMMITTEDLOCK", "READONLY", "READPAST", "READUNCOMMITTED", "REBUILD", "RECEIVE", "RECOMPILE", "RECURSIVE", "REGENERATE", "REGR_AVGX", # *future* "REGR_AVGY", # *future* "REGR_COUNT", # *future* "REGR_INTERCEPT", # *future* "REGR_R2", # *future* "REGR_SLOPE", # *future* "REGR_SXX", # *future* "REGR_SXY", # *future* "REGR_SYY", # *future* "REJECT_SAMPLE_VALUE", "REJECT_TYPE", "REJECT_VALUE", "REJECTED_ROW_LOCATION", "REMOTE_DATA_ARCHIVE", "REMOTE_PROC_TRANSACTIONS", "RENAME", # Azure Synapse Analytics specific "REORGANIZE", "REPEATABLE", "REPEATABLEREAD", "REPLACE", "REPLICATE", # Azure Synapse Analytics "RESPECT", "RESULT", "RESULT_SET_CACHING", # Azure Synapse Analytics specific "RESUMABLE", "RESUME", "RETENTION_PERIOD", "RETURNS", "ROBUST", "ROLE", "ROLLUP", "ROOT", "ROUND_ROBIN", # Azure Synapse Analytics specific "ROW_NUMBER", "ROW", "ROWGUIDCOL", "ROWLOCK", "ROWS_PER_BATCH", "ROWS", "ROWTERMINATOR", "S", "SCALEOUTEXECUTION", "SCHEMA_AND_DATA", "SCHEMA_ONLY", "SCHEMABINDING", "SCHEME", "SCOPED", "SEARCH", "SECRET", "SECURITY", "SECURITYAUDIT", # listed as reserved but functionally unreserved "SELF", "SEQUENCE_NUMBER_COLUMN_NAME", "SEQUENCE_NUMBER", "SEQUENCE", "SERDE_METHOD", "SERIALIZABLE", "SERVER", "SERVICE", "SETERROR", "SETS", "SETVAR", # sqlcmd command "SHOWPLAN_ALL", "SHOWPLAN_TEXT", "SHOWPLAN_XML", "SID", "SINGLE_BLOB", "SINGLE_CLOB", "SINGLE_NCLOB", "SNAPSHOT", "SORT_IN_TEMPDB", "SOURCE", "SPARSE", "SPATIAL_WINDOW_MAX_CELLS", "SPLIT", "START", "STATE", "STATISTICAL_SEMANTICS", "STATISTICS_INCREMENTAL", "STATISTICS_NORECOMPUTE", "STOPLIST", "STRING_AGG", "STRING_DELIMITER", "SWITCH", "SYNONYM", "SYSTEM_TIME", "SYSTEM_VERSIONING", "SYSTEM", "TABLOCK", "TABLOCKX", "TAKE", "TARGET", "TEXTIMAGE_ON", "TIES", "TIME", "TIMEOUT", "TIMESTAMP", "TRANSACTION_ID_COLUMN_NAME", "TRANSACTION_ID", "TRUNCATE_TARGET", # Azure Synapse Analytics specific "TRY", "TYPE", "UNBOUNDED", "UNCOMMITTED", "UNDEFINED", "UNKNOWN", "UPDLOCK", "USE_TYPE_DEFAULT", "USED", "USER_DB", # Azure Synapse Analytics specific, deprecated "USING", "VALUE", "VIEW_METADATA", "WAIT_AT_LOW_PRIORITY", "WAITFOR", "WEEK", "WEEKS", "WHILE", "WINDOWS", "WITHIN", "WITHOUT", "WITHOUT_ARRAY_WRAPPER", "WORK", "XACT_ABORT", "XLOCK", "XML_COMPRESSION", "XML", "XMLAGG", # *future* "XMLATTRIBUTES", # *future* "XMLBINARY", # *future* "XMLCAST", # *future* "XMLCOMMENT", # *future* "XMLCONCAT", # *future* "XMLDATA", "XMLDOCUMENT", # *future* "XMLELEMENT", # *future* "XMLEXISTS", # *future* "XMLFOREST", # *future* "XMLITERATE", # *future* "XMLNAMESPACES", # *future* "XMLPARSE", # *future* "XMLPI", # *future* "XMLQUERY", # *future* "XMLSCHEMA", "XMLSERIALIZE", # *future* "XMLTABLE", # *future* "XMLTEXT", # *future* "XMLVALIDATE", # *future* "XSINIL", "YEAR", "YEARS", "ZONE", ] sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_vertica.py000066400000000000000000002032551503426445100232520ustar00rootroot00000000000000"""The Vertica dialect. 
https://docs.vertica.com/latest/en/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CompositeComparisonOperatorSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, MultiStringParser, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_vertica_keywords import ( vertica_reserved_keywords, vertica_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") vertica_dialect = ansi_dialect.copy_as( "vertica", formatted_name="Vertica", docstring="""The dialect for `Vertica `_.""", ) vertica_dialect.insert_lexer_matchers( # Allow ::! operator as in # https://docs.vertica.com/latest/en/sql-reference/language-elements/operators/data-type-coercion-operators-cast/cast-failures/ [ StringLexer("null_casting_operator", "::!", CodeSegment), ], before="casting_operator", ) vertica_dialect.insert_lexer_matchers( # Allow <==> operator as in # https://docs.vertica.com/latest/en/sql-reference/language-elements/operators/comparison-operators/ [ StringLexer("null_equals_operator", "<=>", CodeSegment), ], before="less_than", ) vertica_dialect.insert_lexer_matchers( # Allow additional math operators as in # https://docs.vertica.com/latest/en/sql-reference/language-elements/operators/mathematical-operators/ # TODO: add other math operators [ StringLexer("integer_division", "//", CodeSegment), ], before="divide", ) vertica_dialect.insert_lexer_matchers( [ # This is similar to the Unicode regex, the key differences being: # - [eE] - must start with e or E # - The final quote character must be preceded by: # (?", SymbolSegment, type="null_equals_operator" ), IntervalUnitsGrammar=OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), InterpolateGrammar=Sequence("INTERPOLATE", OneOf("PREVIOUS", "NEXT"), "VALUE"), IntervalLiteralGrammar=Sequence( Ref("IntervalUnitsGrammar"), Sequence( "TO", Sequence( Ref("IntervalUnitsGrammar"), Ref("BracketedArguments", optional=True), ), optional=True, ), ), IntegerDivideSegment=StringParser("//", SymbolSegment, type="binary_operator"), ) vertica_dialect.replace( FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), OptionallyBracketed(Ref("SetExpressionSegment")), # A Cast-like function Sequence( Ref("ExpressionSegment"), "AS", OneOf(Ref("DatatypeSegment"), Ref("DateTimeLiteralGrammar")), ), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function # https://www.postgresql.org/docs/current/functions-string.html Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), AnySetOf( Sequence("FROM", Ref("ExpressionSegment")), Sequence("FOR", Ref("ExpressionSegment")), optional=True, ), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "AggregateOrderByClause" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. 
Sequence(Ref.keyword("SEPARATOR"), Ref("LiteralGrammar")), # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( Ref("DatatypeSegment", optional=True), OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", Ref("DatatypeSegment", optional=True), OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), # used by listagg, explode, unnest Sequence( Ref.keyword("DISTINCT", optional=True), OneOf( Ref("QuotedLiteralSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), Sequence( "USING", "PARAMETERS", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NumericLiteralSegment"), ), ), ), ), Ref("OverClauseSegment", optional=True), ), Ref("IgnoreRespectNullsGrammar"), Ref("EmptyStructLiteralSegment"), ), ObjectReferenceTerminatorGrammar=OneOf( "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("NullCastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ), PostFunctionGrammar=AnySetOf( # Optional OVER suffix for window functions. # This is supported in bigquery & postgres (and its derivatives) # and so is included here for now. Ref("OverClauseSegment"), # Filter clause supported by both Postgres and SQLite Ref("FilterClauseGrammar"), # Within group clause supported by some analytic functions in Vertica Ref("WithinGroupClauseSegment"), ), DateTimeLiteralGrammar=Sequence( # analog of postgres dialect but with treating expressions like # as interval hour TO SECOND(6) OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal", optional=True, ), Ref("IntervalLiteralGrammar", optional=True), ), Expression_A_Grammar=Sequence( # It's a copy of ansi Expression_A_Grammar Ref("Tail_Recurse_Expression_A_Grammar"), AnyNumberOf( OneOf( Ref("LikeExpressionGrammar"), Sequence( Ref("BinaryOperatorGrammar"), Ref("Tail_Recurse_Expression_A_Grammar"), ), Ref("InOperatorGrammar"), Sequence( "IS", Ref.keyword("NOT", optional=True), Ref("IsClauseGrammar"), ), Ref("IsNullGrammar"), Sequence( Sequence( Ref("InterpolateGrammar"), ), Ref("Expression_A_Grammar"), ), Ref("NotNullGrammar"), Ref("CollateGrammar"), Sequence( Ref.keyword("NOT", optional=True), "BETWEEN", Ref("Expression_B_Grammar"), "AND", Ref("Tail_Recurse_Expression_A_Grammar"), ), ) ), ), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("NullEqualsSegment"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Ref("IsDistinctFromGrammar"), ), JoinTypeKeywordsGrammar=OneOf( "ANTI", "SEMIALL", "SEMI", Sequence("NULLAWARE", "ANTI"), "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), Sequence( "NATURAL", OneOf( "INNER", Sequence(OneOf("RIGHT", "LEFT", "FULL"), "OUTER"), ), ), ), ArithmeticBinaryOperatorGrammar=OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("IntegerDivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), Ref("BitwiseLShiftSegment"), Ref("BitwiseRShiftSegment"), ), # Vertica supports the non-standard ISNULL and NONNULL 
comparison operators. See # https://docs.vertica.com/latest/en/sql-reference/language-elements/operators/null-operators/ IsNullGrammar=Ref.keyword("ISNULL"), NotNullGrammar=Ref.keyword("NOTNULL"), QuotedLiteralSegment=OneOf( Sequence( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), ), # Support Extended string literals Sequence( TypedParser( "escaped_single_quote", LiteralSegment, type="quoted_literal", ), ), ), QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier", casefold=str.upper ), NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( # https://docs.vertica.com/24.3.x/en/sql-reference/language-elements/identifiers/ # Unquoted SQL identifiers must begin with one of the following: # * Non-Unicode letters: A–Z or a-z # -- /actually Vertica accepts also non-ASCII UTF-8 Unicode # characters here, which is not well documented/ # * Underscore (_) # Subsequent characters in an identifier can be any combination of # the following: # * Non-Unicode letters: A–Z or a-z # * Underscore (_) # * Digits(0–9) # * Unicode letters (letters with diacriticals or not in the Latin # alphabet), unsupported for model names # * Dollar sign ($), unsupported for model names # # Vertica accepts **non-ASCII UTF-8 Unicode characters** for table # names, column names, and other identifiers, # extending the cases where upper/lower case distinctions are # ignored (case-folded) to all alphabets, # including Latin, Cyrillic, and Greek. # \p{L} matches any kind of letter from any language; # \p{N} matches any kind of numeric character in any script r"[\p{L}_][\p{L}\p{N}$_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", casefold=str.upper, ) ), ParameterNameSegment=RegexParser( # need to cover cases where non-ascii word is parameter # like ```ALTER TABLE some_table TO utf8_identifier_eg_Verkäufer;``` r"[\p{L}_][\p{L}\p{N}$_]*", CodeSegment, type="parameter", ), ) class ShorthandCastSegment(ansi.ShorthandCastSegment): """A casting operation using '::' or '::!'.""" match_grammar: Matchable = Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf( Sequence( OneOf(Ref("CastOperatorSegment"), Ref("NullCastOperatorSegment")), Ref("DatatypeSegment"), Ref("TimeZoneGrammar", optional=True), allow_gaps=True, ), min_times=1, ), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CreateExternalTableSegment"), Ref("CreateTableLikeStatementSegment"), Ref("CreateTableAsStatementSegment"), Ref("CreateProjectionStatementSegment"), Ref("AlterDefaultPrivilegesGrantSegment"), Ref("DropProjectionStatementSegment"), Ref("AlterViewStatementSegment"), Ref("SetStatementSegment"), Ref("CommentOnStatementSegment"), Ref("TransactionalStatements"), Ref("AlterSessionStatements"), Ref("CopyStatementSegment"), Ref("AlterSchemaStatementSegment"), ], ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( Ref.keyword("ARRAY"), Bracketed( Ref("DatatypeSegment"), bracket_type="square", bracket_pairs_set="bracket_pairs", optional=True, ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A vertica `LIMIT` clause. 
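    For illustration, clauses this segment is intended to match (column
    names are placeholders):

        LIMIT 10 OFFSET 5
        LIMIT 3 OVER (PARTITION BY region ORDER BY sales DESC)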
https://docs.vertica.com/latest/en/sql-reference/statements/select/limit-clause/ """ match_grammar: Matchable = Sequence( "LIMIT", Indent, OptionallyBracketed( OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), "ALL", ) ), OneOf( Sequence( "OFFSET", OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), ), ), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), Ref("OverClauseSegment"), optional=True, ), Dedent, ) class ColumnEncodingSegment(BaseSegment): """The `ENCODING` clause within a `CREATE TABLE` statement for a column.""" type = "column_encoding" match_grammar: Matchable = Sequence( "ENCODING", Ref("EncodingType"), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE. As specified in https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/table-constraint/ """ match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( # PRIMARY KEY (column[,...]) [ ENABLED | DISABLED] Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), OneOf("ENABLED", "DISABLED", optional=True), ), Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), OneOf("ENABLED", "DISABLED", optional=True), ), Sequence( # UNIQUE (column[,...]) [ENABLED | DISABLED] "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), OneOf("ENABLED", "DISABLED", optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] "FOREIGN", "KEY", # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), ) class LikeOptionSegment(BaseSegment): """Like Option Segment. As specified in https://docs.vertica.com/latest/en/admin/working-with-native-tables/creating-table-from-other-tables/replicating-table/ """ type = "like_option_segment" match_grammar = Sequence( OneOf( Sequence(OneOf("INCLUDING", "EXCLUDING"), "PROJECTIONS"), Ref("SchemaPrivilegesSegment"), ), ) class DiskQuotaSegment(BaseSegment): """Disk Quota Segment. https://docs.vertica.com/latest/en/admin/working-with-native-tables/disk-quotas/ Available from Vertica 12.x """ type = "disk_quota_segment" match_grammar = Sequence("DISK_QUOTA", Ref("QuotedLiteralSegment")) class KsafeSegment(BaseSegment): """Ksafe Segment. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/ https://docs.vertica.com/latest/en/architecture/enterprise-concepts/k-safety-an-enterprise-db/ """ type = "ksafe_segment" match_grammar = Sequence( "KSAFE", Ref("NumericLiteralSegment", optional=True), ) class SchemaPrivilegesSegment(BaseSegment): """Schema Privileges Segment. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/ """ type = "schema_privileges_segment" match_grammar: Matchable = Sequence( # MATERIALIZE available only in ALTER TABLE statement, # but we keep it here to not duplicate the code OneOf("INCLUDE", "EXCLUDE", "MATERIALIZE"), Ref.keyword("SCHEMA", optional=True), "PRIVILEGES", ) class SegmentedByClauseSegment(BaseSegment): """A `SEGMENTED BY` or `UNSEGMENTED` clause. 
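    For illustration, clauses this segment is intended to match (the
    column name is a placeholder):

        SEGMENTED BY HASH(order_id) ALL NODES
        UNSEGMENTED ALL NODES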
As specified in https://docs.vertica.com/latest/en/sql-reference/statements/ create-statements/create-projection/hash-segmentation-clause/ Vertica allows different expressions in segmented by clause, but using hash function is recommended one As specified in https://docs.vertica.com/latest/en/sql-reference/statements/ create-statements/create-projection/unsegmented-clause/ """ type = "segmentedby_clause" match_grammar: Matchable = Sequence( OneOf( Sequence("UNSEGMENTED", "ALL", "NODES"), Sequence( "SEGMENTED", "BY", OneOf( Ref("FunctionSegment"), Bracketed( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), Ref("ShorthandCastSegment"), ), ), ), ), ), "ALL", "NODES", ), ), ) class PartitionByClauseSegment(BaseSegment): """A `PARTITION BY` clause. As specified in https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/partition-clause/ """ type = "partitionby_clause" match_grammar: Matchable = Sequence( "PARTITION", "BY", AnyNumberOf( Delimited( Sequence( AnyNumberOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Ref("FunctionSegment"), Ref("ShorthandCastSegment"), ), ), ), Bracketed( Delimited( Sequence( AnyNumberOf( Ref("ColumnReferenceSegment"), Ref("FunctionSegment"), Ref("ShorthandCastSegment"), ), ), ), ), terminators=[Sequence("GROUP", "BY"), "REORGANIZE"], ), Ref("GroupByClauseSegment", optional=True), Ref.keyword("REORGANIZE", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/ """ match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( Ref("ColumnConstraintSegment"), Ref("ColumnEncodingSegment"), Sequence( "ACCESSRANK", Ref("IntegerSegment"), optional=True ), ), ), Ref("TableConstraintSegment"), ), ), ), AnySetOf( Ref("OrderByClauseSegment"), Ref("SegmentedByClauseSegment"), Ref("KsafeSegment"), Ref("SchemaPrivilegesSegment"), Ref("DiskQuotaSegment"), Ref("PartitionByClauseSegment"), ), AnySetOf( # these options are available only for temp table, so it's kind of a hack Sequence("ON", "COMMIT", OneOf("DELETE", "PRESERVE"), "ROWS"), Sequence("NO", "PROJECTION"), ), ) class CreateTableAsStatementSegment(BaseSegment): """A `CREATE TABLE AS` statement. 
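    For illustration, a statement of this shape (all names are
    placeholders):

        CREATE TABLE my_copy AS
        SELECT id, amount FROM my_source
        ORDER BY id
        SEGMENTED BY HASH(id) ALL NODES;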
As specified in https://docs.vertica.com/latest/en/admin/working-with-native-tables/creating-table-from-other-tables/creating-table-from-query/ """ type = "create_table_as_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnySetOf( # these options are available only for temp table, so it's kind of a hack Sequence("ON", "COMMIT", OneOf("DELETE", "PRESERVE"), "ROWS"), Sequence("NO", "PROJECTION"), ), AnyNumberOf( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("ColumnEncodingSegment", optional=True), Sequence("ACCESSRANK", Ref("IntegerSegment"), optional=True), # TODO: need to add GROUPED clause # https://docs.vertica.com/latest/en/sql-reference/statements/ # create-statements/create-projection/grouped-clause/ ), ), optional=True, ), Ref("SchemaPrivilegesSegment", optional=True), ), "AS", # TODO: need to add LABEL clause # https://docs.vertica.com/latest/en/admin/ # working-with-native-tables/creating-table-from-other-tables/creating-table-from-query/ Sequence( "AT", OneOf("LATEST", Ref("NumericLiteralSegment"), Ref("DatetimeUnitSegment")), optional=True, ), Ref( "SelectableGrammar", terminators=[Ref("SegmentedByClauseSegment"), Ref("OrderByClauseSegment")], ), Ref("OrderByClauseSegment", optional=True), Ref("SegmentedByClauseSegment", optional=True), ) class CreateTableLikeStatementSegment(BaseSegment): """A `CREATE TABLE LIKE` statement. As specified in https://docs.vertica.com/latest/en/admin/working-with-native-tables/creating-table-from-other-tables/replicating-table/ """ type = "create_table_like_statement" match_grammar = Sequence( "CREATE", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), Ref("DiskQuotaSegment", optional=True), ) class CopyOptionsForColumnsSegment(BaseSegment): """A vertica options for columns in COPY. https://docs.vertica.com/latest/en/sql-reference/statements/copy/ """ type = "copy_options_for_columns" match_grammar = Sequence( AnySetOf( Sequence( "DELIMITER", Sequence("AS", optional=True), Ref("QuotedLiteralSegment") ), Sequence( "ENCLOSED", Sequence("BY", optional=True), Ref("QuotedLiteralSegment") ), "ENFORCELENGTH", OneOf( Sequence( "ESCAPE", Sequence("AS", optional=True), Ref("QuotedLiteralSegment") ), Sequence("NO", "ESCAPE"), ), Sequence("FILLER", Ref("DatatypeSegment")), Sequence("FORMAT", Ref("QuotedLiteralSegment")), Sequence( "NULL", Sequence("AS", optional=True), Ref("QuotedLiteralSegment") ), Sequence("TRIM", Ref("QuotedLiteralSegment")), ), ) class CopyColumnOptionsSegment(BaseSegment): """A vertica column description in COPY. https://docs.vertica.com/latest/en/sql-reference/statements/copy/ """ type = "copy_column_options" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("CopyOptionsForColumnsSegment", optional=True), ) class CopyOptionsSegment(BaseSegment): """A vertica options for COPY. 
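    For illustration, a COPY statement using options handled here (the
    table name and file path are placeholders):

        COPY my_table FROM '/data/my_file.csv' DELIMITER ',' SKIP 1 ABORT ON ERROR;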
https://docs.vertica.com/latest/en/sql-reference/statements/copy/ """ type = "copy_options" match_grammar = Sequence( AnyNumberOf( # TODO: add WITH FILTER, WITH PARSER, and on nodename support Sequence("ABORT", "ON", "ERROR"), Sequence("ERROR", "TOLERANCE"), Sequence("EXCEPTION", Ref("QuotedLiteralSegment")), Sequence("RECORD", "TERMINATOR", Ref("QuotedLiteralSegment")), Sequence("REJECTED", "DATA", Ref("QuotedLiteralSegment")), Sequence("REJECTMAX", Ref("IntegerSegment")), Sequence("SKIP", Ref("IntegerSegment")), Sequence("SKIP", "BYTES", Ref("IntegerSegment")), Sequence("TRAILING", "NULLCOLS"), Ref("CopyOptionsForColumnsSegment", optional=True), ), ) class CreateExternalTableSegment(BaseSegment): """A vertica `CREATE EXTERNAL TABLE` statement. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-external-table-as-copy/ """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns: Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( Ref("ColumnConstraintSegment"), Ref("ColumnEncodingSegment"), Sequence( "ACCESSRANK", Ref("IntegerSegment"), optional=True ), ), ), Ref("TableConstraintSegment"), ), ), ), Ref("SchemaPrivilegesSegment", optional=True), "AS", "COPY", OneOf( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CopyColumnOptionsSegment", optional=True), ), ), ), Sequence( "COLUMN", "OPTION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CopyColumnOptionsSegment", optional=True), ), ), ), ), optional=True, ), "FROM", Ref("QuotedLiteralSegment"), OneOf("NATIVE", Sequence("NATIVE", "VARCHAR"), "ORC", "PARQUET", optional=True), Ref("CopyOptionsSegment", optional=True), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, Sequence( Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), ), Dedent, ) class CreateProjectionStatementSegment(BaseSegment): """A `CREATE PROJECTION` statement. 
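    For illustration, a standard projection of this shape (all names are
    placeholders):

        CREATE PROJECTION sales_proj AS
        SELECT order_id, amount FROM sales
        ORDER BY order_id
        SEGMENTED BY HASH(order_id) ALL NODES
        KSAFE 1;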
As specified in https://docs.vertica.com/latest/en/sql-reference/statements/ create-statements/create-projection/standard-projection/ """ type = "create_projection_statement" match_grammar = Sequence( "CREATE", "PROJECTION", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("ColumnEncodingSegment", optional=True), Sequence("ACCESSRANK", Ref("IntegerSegment"), optional=True), # TODO: need to add GROUPED clause # https://docs.vertica.com/latest/en/sql-reference/statements/ # create-statements/create-projection/grouped-clause/ ), ), optional=True, ), "AS", Ref( "SelectableGrammar", terminators=[ Ref("SegmentedByClauseSegment"), Ref("OrderByClauseSegment"), Ref("LimitClauseSegment"), Ref("GroupByClauseSegment"), "ON", ], ), OneOf( # TODO: add udtf projection type AnyNumberOf( Ref("OrderByClauseSegment"), Ref("SegmentedByClauseSegment"), Sequence( "ON", "PARTITION", "RANGE", "BETWEEN", Ref("QuotedLiteralSegment"), "AND", Ref("QuotedLiteralSegment"), ), ), Ref("GroupByClauseSegment"), Ref("LimitClauseSegment"), ), Ref("KsafeSegment", optional=True), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-table/ """ match_grammar = Sequence( "ALTER", "TABLE", Delimited(Ref("TableReferenceSegment")), OneOf( Sequence( Delimited(Ref("AlterTableActionSegment")), ), Sequence( "ADD", Ref.keyword("COLUMN"), Ref("IfNotExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf(Ref("ColumnConstraintSegment")), Ref("ColumnEncodingSegment", optional=True), OneOf( Sequence( "PROJECTIONS", Bracketed(Delimited(Ref("TableReferenceSegment"))), ), Sequence("ALL", "PROJECTIONS"), optional=True, ), Ref("ColumnConstraintSegment", optional=True), ), Sequence( "ALTER", Ref.keyword("COLUMN"), Ref("ColumnReferenceSegment"), OneOf( Sequence( Ref("ColumnEncodingSegment"), "PROJECTIONS", Bracketed(Delimited(Ref("TableReferenceSegment"))), ), Sequence("SET", Ref("ColumnSetSegment")), Sequence("SET", "NOT", "NULL"), Sequence("SET", "DATA", "TYPE", Ref("DatatypeSegment")), Sequence( "DROP", OneOf( "DEFAULT", Sequence("SET", "USING"), Sequence("DEFAULT", "USING"), Sequence("NOT", "NULL"), ), ), ), ), Sequence( "DROP", "CONSTRAINT", Ref("ParameterNameSegment"), OneOf("CASCADE", "RESTRICT", optional=True), ), Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), Ref("PartitionByClauseSegment"), Sequence("REMOVE", "PARTITIONING"), Sequence( "RENAME", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "RENAME", "TO", Delimited(Ref("TableReferenceSegment")), ), "REORGANIZE", Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), ), ) class AlterTableActionSegment(BaseSegment): """Alter Table Action Segment. 
https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-table/ """ type = "alter_table_action_segment" match_grammar = OneOf( Sequence("ADD", Ref("TableConstraintSegment")), Sequence( "ALTER", "CONSTRAINT", Ref("ParameterNameSegment"), OneOf("ENABLED", "DISABLED"), ), Ref("DiskQuotaSegment"), Sequence("FORCE", "OUTER", Ref("IntegerSegment")), Ref("SchemaPrivilegesSegment"), Sequence( "OWNER", "TO", Ref("ParameterNameSegment"), ), Sequence( "SET", OneOf( Sequence( "ActivePartitionCount", OneOf(Ref("IntegerSegment"), "DEFAULT") ), Sequence("IMMUTABLE", "ROWS"), Sequence("MERGEOUT", OneOf("1", "2")), ), ), ) class AlterDefaultPrivilegesObjectPrivilegesSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` object privileges. https://docs.vertica.com/latest/en/sql-reference/statements/grant-statements/grant-table/ """ type = "alter_default_privileges_object_privilege" match_grammar = OneOf( Sequence( "ALL", Ref.keyword("PRIVILEGES", optional=True), Ref.keyword("EXTEND", optional=True), ), Delimited( "SELECT", "INSERT", "UPDATE", "DELETE", "REFERENCES", "TRUNCATE", "ALTER", "DROP", terminators=["ON"], ), ) class AlterDefaultPrivilegesGrantSegment(BaseSegment): """`GRANT` for `ALTER DEFAULT PRIVILEGES`. https://docs.vertica.com/latest/en/sql-reference/statements/grant-statements/grant-table/ """ type = "alter_default_privileges_grant" match_grammar = Sequence( "GRANT", Ref("AlterDefaultPrivilegesObjectPrivilegesSegment"), "ON", OneOf( Delimited( Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment") ) ), Delimited( Sequence( "ALL", "TABLES", "IN", "SCHEMA", Ref("SchemaReferenceSegment") ), ), terminators=["WITH"], ), "TO", Delimited( Ref("RoleReferenceSegment"), terminators=["WITH"], ), Sequence("WITH", "GRANT", "OPTION", optional=True), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/column-constraint/ """ match_grammar = Sequence( # TODO: add auto increment OneOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( Sequence( Ref.keyword("NOT", optional=True), "NULL" ), # NOT NULL or NULL Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), OneOf("ENABLED", "DISABLED", optional=True), ), Sequence( "UNIQUE", OneOf("ENABLED", "DISABLED", optional=True), ), Sequence( "PRIMARY", "KEY", OneOf("ENABLED", "DISABLED", optional=True), ), # REFERENCES reftable [ ( refcolumn) ] Ref("ReferenceDefinitionGrammar"), ), ), Ref("ColumnSetSegment"), ), ) class ColumnSetSegment(BaseSegment): """A SET DEFAULT | USING | DEFAULT USING. https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-table/ """ type = "column_set_segment" match_grammar = Sequence( # DEFAULT OneOf( "DEFAULT", # Depends on the place where we use it (in alter table or create table) # we need or don't need set keyword Sequence(Ref.keyword("SET", optional=True), "USING"), Sequence("DEFAULT", "USING"), ), OneOf( Ref("ShorthandCastSegment"), Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), Bracketed(Ref("SelectableGrammar")), ), ) class DropProjectionStatementSegment(BaseSegment): """A `DROP PROJECTION` statement. 
https://docs.vertica.com/latest/en/sql-reference/statements/drop-statements/drop-projection/ """ type = "drop_projection_statement" match_grammar: Matchable = Sequence( "DROP", "PROJECTION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement. https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-view/ """ type = "alter_view_statement" match_grammar: Matchable = Sequence( "ALTER", "VIEW", Delimited(Ref("TableReferenceSegment")), AnyNumberOf( Sequence("OWNER", "TO", Ref("ParameterNameSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Ref("SchemaPrivilegesSegment"), Sequence("RENAME", "TO", Delimited(Ref("ParameterNameSegment"))), ), ) class SetStatementSegment(BaseSegment): """Set Statement. https://docs.vertica.com/latest/en/sql-reference/statements/set-statements/ """ type = "set_statement" match_grammar = Sequence( "SET", OneOf( Sequence( OneOf( "DATESTYLE", "ESCAPE_STRING_WARNING", "INTERVALSTYLE", "LOCALE", "STANDARD_CONFORMING_STRINGS", ), "TO", Ref("ParameterNameSegment"), ), Sequence( "SEARCH_PATH", OneOf("TO", Ref("EqualsSegment")), OneOf(Delimited(Ref("ParameterNameSegment")), "DEFAULT"), ), Sequence( "ROLE", OneOf( "NONE", "DEFAULT", Sequence( "ALL", Sequence( "EXCEPT", Delimited(Ref("ParameterNameSegment")), optional=True, ), ), Delimited(Ref("ParameterNameSegment")), ), ), Sequence( "TIME", "ZONE", Ref.keyword("TO", optional=True), OneOf(Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment")), ), Sequence( Ref.keyword("SESSION", optional=True), "RESOURCE_POOL", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ParameterNameSegment"), "DEFAULT" ), ), Sequence( "SESSION", OneOf( Sequence( "AUTHORIZATION", OneOf(Ref("ParameterNameSegment"), "DEFAULT") ), Sequence("AUTOCOMMIT", "TO", OneOf("ON", "OFF")), Sequence( "CHARACTERISTICS", "AS", "TRANSACTION", Ref("ParameterNameSegment"), ), Sequence( OneOf("GRACEPERIOD", "IDLESESSIONTIMEOUT", "RUNTIMECAP"), OneOf( "NONE", Sequence(Ref("EqualsSegment"), "DEFAULT"), Ref("QuotedLiteralSegment"), ), ), Sequence( OneOf("MEMORYCAP", "TEMPSPACECAP"), OneOf( "NONE", Sequence(Ref("EqualsSegment"), "DEFAULT"), Ref("QuotedLiteralSegment"), ), ), Sequence("MULTIPLEACTIVERESULTSETS", "TO", OneOf("ON", "OFF")), Sequence( "WORKLOAD", Ref.keyword("TO", optional=True), OneOf(Ref("ParameterNameSegment"), "DEFAULT", "NONE"), ), ), ), ), ) class CommentOnStatementSegment(BaseSegment): """`COMMENT ON` statement. https://www.postgresql.org/docs/13/sql-comment.html """ type = "comment_clause" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( OneOf( "TABLE", "VIEW", "PROJECTION", ), Ref("TableReferenceSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), ), Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), Sequence( "ON", Ref("ObjectReferenceSegment"), ), ), Sequence( OneOf("AGGREGATE", "ANALYTIC", "TRANSFORM", optional=True), "FUNCTION", Ref("FunctionNameSegment"), Sequence(Ref("FunctionParameterListGrammar"), optional=True), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), Sequence( "NODE", Ref("ParameterNameSegment"), ), Sequence( OneOf("SEQUENCE", "LIBRARY"), Ref("ObjectReferenceSegment"), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) class TransactionalStatements(BaseSegment): """DML commands wrapped by BEGIN and END. 
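For example (an illustrative sketch of the statement shape this matches):: BEGIN INSERT INTO t1 VALUES (1); UPDATE t1 SET a = 2; END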
As in https://docs.vertica.com/latest/en/sql-reference/statements/begin/ https://docs.vertica.com/latest/en/sql-reference/statements/end/ """ type = "transactional_statement" match_grammar: Matchable = Sequence( # TODO add rollback, commit logic and optional keywords "BEGIN", AnyNumberOf( Sequence( Ref("InsertStatementSegment"), Ref("SemicolonSegment"), ), Sequence( Ref("UpdateStatementSegment"), Ref("SemicolonSegment"), ), Sequence( Ref("DeleteStatementSegment"), Ref("SemicolonSegment"), ), Sequence( Ref("SelectStatementSegment"), Ref("SemicolonSegment"), ), ), "END", ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment.""" match_grammar: Matchable = Sequence( OneOf( # Date / Datetime Ref("TimeWithTZGrammar"), "DATE", "DATETIME", "SMALLDATETIME", Sequence("INTERVAL", Ref("IntervalLiteralGrammar", optional=True)), # Approximate Numeric Sequence( "DOUBLE", "PRECISION", ), Sequence("FLOAT", Ref("BracketedArguments", optional=True)), "FLOAT8", "REAL", # Exact Numeric "INTEGER", "INT", "BIGINT", "INT8", "SMALLINT", "TINYINT", Sequence( OneOf("DECIMAL", "NUMERIC", "NUMBER", "MONEY"), Ref("BracketedArguments", optional=True), ), # Spatial Sequence( OneOf("GEOMETRY", "GEOGRAPHY"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), # UUID "UUID", # Text Sequence( Ref.keyword("LONG", optional=True), "VARCHAR", Ref("BracketedArguments", optional=True), ), Sequence("CHAR", Ref("BracketedArguments", optional=True)), # Binary types OneOf( "BINARY", Sequence(Ref.keyword("LONG", optional=True), "VARBINARY"), "BYTEA", "RAW", ), "BOOLEAN", # array types OneOf( # TODO: need to add an opportunity to specify size of array AnyNumberOf( Bracketed( Ref("ExpressionSegment", optional=True), bracket_type="square" ) ), Ref("ArrayTypeSegment"), Ref("SizedArrayTypeSegment"), ), # TODO: add row data type support Sequence( OneOf( Sequence( OneOf("CHARACTER", "BINARY"), OneOf("VARYING", Sequence("LARGE", "OBJECT")), ), Sequence( # Some dialects allow optional qualification of data types with # schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), Ref("DatatypeIdentifierSegment"), allow_gaps=False, ), ), # There may be no brackets for some data types Ref("BracketedArguments", optional=True), ), ), ) class AlterSessionStatements(BaseSegment): """An ALTER SESSION statement. https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-session/ """ type = "alter_session_statement" match_grammar: Matchable = Sequence( "ALTER", "SESSION", OneOf( Sequence( "SET", Ref.keyword("PARAMETER", optional=True), OptionallyBracketed( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "CLEAR", Ref.keyword("PARAMETER", optional=True), OneOf( Bracketed( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence("PARAMETER", "ALL"), ), ), Sequence( "SET", "UDPARAMETER", Sequence("FOR", Ref("ParameterNameSegment"), optional=True), Bracketed( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "CLEAR", "UDPARAMETER", Sequence("FOR", Ref("ParameterNameSegment"), optional=True), OneOf( "ALL", Bracketed(Ref("ParameterNameSegment")), ), ), ), ) class CopyStatementSegment(BaseSegment): """A `COPY` statement. 
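For example (illustrative):: COPY my_table FROM LOCAL '/tmp/data.csv' DELIMITER ',' ABORT ON ERROR;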
As specified in https://docs.vertica.com/latest/en/sql-reference/statements/copy/ """ # TODO: this grammar is incomplete and needs further work type = "copy_statement" match_grammar = Sequence( "COPY", Ref("TableReferenceSegment"), OneOf( Bracketed( Delimited(Ref("CopyColumnOptionsSegment")), ), Sequence( "COLUMN", "OPTION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CopyColumnOptionsSegment", optional=True), ), ), ), ), optional=True, ), "FROM", OneOf( Sequence( Ref.keyword("LOCAL", optional=True), "STDIN", Ref("CompressionType", optional=True), Ref("QuotedLiteralSegment", optional=True), ), Sequence( "LOCAL", Ref("QuotedLiteralSegment"), Ref("CompressionType", optional=True), ), Sequence( "VERTICA", Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), ), Sequence(Delimited(Ref("QuotedLiteralSegment"))), ), OneOf("NATIVE", Sequence("NATIVE", "VARCHAR"), "ORC", "PARQUET", optional=True), Ref("CopyOptionsSegment", optional=True), ) class FunctionSegment(ansi.FunctionSegment): """A scalar or aggregate function. https://docs.vertica.com/latest/en/sql-reference/functions/aggregate-functions/ """ match_grammar: Matchable = OneOf( Sequence( # Treat functions which take date parts separately, # so those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Sequence( Ref("DatePartFunctionNameSegment"), Ref("DateTimeFunctionContentsSegment"), ), ), Ref("ColumnsExpressionGrammar"), Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ColumnsExpressionFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Ref("FunctionContentsSegment"), ), AnySetOf(Ref("PostFunctionGrammar")), ), ) class WithinGroupClauseSegment(BaseSegment): """A `WITHIN GROUP` clause for some analytic functions. https://docs.vertica.com/latest/en/sql-reference/functions/analytic-functions/percentile-cont-analytic/ """ type = "within_group_clause_statement" match_grammar = Sequence("WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment"))) class TimeseriesClauseSegment(BaseSegment): """A Vertica `TIMESERIES` clause. https://docs.vertica.com/latest/en/sql-reference/statements/select/timeseries-clause/ """ type = "timeseries_clause_statement" match_grammar: Matchable = Sequence( "TIMESERIES", Ref("AliasExpressionSegment"), Ref.keyword("AS"), Ref("QuotedLiteralSegment"), Indent, Ref("OverClauseSegment"), # TODO: add optional ORDER BY Dedent, ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. A copy of the ANSI class, except with TimeseriesClauseSegment as an additional terminator. """ match_grammar: Matchable = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("OverlapsClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), Ref("OrderByClauseSegment"), Ref("LimitClauseSegment"), Ref("TimeseriesClauseSegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement.
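In this dialect it also covers the Vertica ``TIMESERIES`` clause, e.g. (illustrative):: SELECT slice_time, symbol, TS_LAST_VALUE(bid) AS last_bid FROM tick_store TIMESERIES slice_time AS '3 seconds' OVER (PARTITION BY symbol ORDER BY ts);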
Copy of ansi class except additional TimeseriesClauseSegment grammar """ match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("TimeseriesClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ], # Overwrite the terminators, because we want to remove some. replace_terminators=True, terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ], ) class NullEqualsSegment(CompositeComparisonOperatorSegment): """Null Equals operator.""" match_grammar: Matchable = Ref("NullEqualsOperatorSegment") class PartitionClauseSegment(ansi.PartitionClauseSegment): """A `PARTITION BY` for window functions. https://docs.vertica.com/latest/en/sql-reference/language-elements/window-clauses/window-partition-clause/ """ match_grammar: Matchable = Sequence( "PARTITION", OneOf( Sequence( "BY", Indent, # Brackets are optional in a partition by statement OptionallyBracketed(Delimited(Ref("ExpressionSegment"))), Dedent, ), "BEST", "NODES", "ROW", Sequence("LEFT", "JOIN"), ), ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. https://docs.vertica.com/latest/en/sql-reference/language-elements/window-clauses/window-partition-clause/ """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), OneOf( Sequence( Ref("QuotedLiteralSegment"), Ref("CastOperatorSegment"), "INTERVAL", ), Sequence( # TODO maybe this logic should be in an additional segment? # because there are so many options # for the interval representation. Ref.keyword("INTERVAL", optional=True), OneOf( Ref("IntervalLiteralGrammar"), Ref("QuotedLiteralSegment") ), Ref("DatetimeUnitSegment", optional=True), ), ), "UNBOUNDED", ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-schema/ """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Delimited(Ref("SchemaReferenceSegment")), OneOf( Sequence("DEFAULT", Ref("SchemaPrivilegesSegment")), Sequence( "RENAME", "TO", Delimited(Ref("SchemaReferenceSegment")), ), Sequence( "OWNER", "TO", Ref("RoleReferenceSegment"), Ref.keyword("CASCADE", optional=True), ), Ref("DiskQuotaSegment"), ), ) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-schema/ """ match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), AnySetOf( Sequence("AUTHORIZATION", Ref("RoleReferenceSegment")), Sequence("DEFAULT", Ref("SchemaPrivilegesSegment")), Ref("DiskQuotaSegment"), ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. 
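For example (illustrative), all of the following alias forms are matched:: SELECT a AS total, b subtotal FROM t; SELECT explode(arr) OVER (PARTITION BEST) AS (pos, val) FROM t;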
""" match_grammar: Matchable = OneOf( Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), # Column alias in VALUES clause Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Sequence(Bracketed(Ref("SingleIdentifierListSegment"), optional=True)), Ref("SingleQuotedIdentifierSegment"), ), Dedent, ), # Some functions alias several columns in brackets () like mapkeys or explode Sequence( Indent, Ref("AsAliasOperatorSegment", optional=True), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Dedent, ), ) sqlfluff-3.4.2/src/sqlfluff/dialects/dialect_vertica_keywords.py000066400000000000000000000142051503426445100251740ustar00rootroot00000000000000"""A list of all SQL keywords. Based on query: select * from keywords """ # From reserved keywords were removed keywords that work correctly # in create table(keyword some_datatype) without quotes. # Put them in unreserved_keywords. vertica_reserved_keywords = """ALL AND ANY ARRAY AS ASC AUTHORIZATION BETWEEN BINARY BOTH CASE CAST CHECK COLLATE COLUMN CONSTRAINT CORRELATION CREATE CURRENT_DATABASE CURRENT_DATE CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEFAULT DEFERRABLE DESC DISTINCT ELSE END EXCEPT FALSE FOR FOREIGN FROM GRANT HAVING IN INITIALLY INTERSECT INTERVAL INTERVALYM INTO IS LATERAL LEADING LIKE LIMIT LOCALTIME LOCALTIMESTAMP MATCH NEW NOT NOTNULL NULL NULLSEQUAL OFFSET OLD ON ONLY OR ORDER OVER OVERLAPS PRIMARY REFERENCES SELECT SESSION_USER SIMILAR SOME SYSDATE TABLE THEN TIMESERIES TO TRAILING TRUE UNBOUNDED UNION UNIQUE USER USING WHEN WHERE WINDOW WITH WITHIN """ vertica_unreserved_keywords = """ABORT ABSOLUTE ACCESS ACCESSRANK ACCOUNT ACTION ACTIVATE ACTIVEPARTITIONCOUNT ADD ADDRESS ADMIN AFTER AGGREGATE ALSO ALTER ANALYSE ANALYTIC ANALYZE ANNOTATED ANTI ARCHIVE ASSERTION ASSIGNMENT AT AUTHENTICATION AUTO AUTOCOMMIT AUTO_INCREMENT AVAILABLE BACKWARD BALANCE BASENAME BATCH BEFORE BEGIN BEST BIGINT BIT BLOCK BLOCKDICT_COMP BLOCK_DICT BOOLEAN BROADCAST BUNDLE BY BYTEA BYTES BZIP BZIP_COMP CA CACHE CALL CALLED CASCADE CATALOGPATH CERTIFICATE CERTIFICATES CHAIN CHAR CHARACTER CHARACTERISTICS CHARACTERS CHARACTER_LENGTH CHAR_LENGTH CHECKPOINT CIPHER CLASS CLEAR CLOSE CLUSTER COLLECTIONCLOSE COLLECTIONDELIMITER COLLECTIONENCLOSE COLLECTIONNULLELEMENT COLLECTIONOPEN COLSIZES COLUMNS COLUMNS_COUNT COMMENT COMMIT COMMITTED COMMONDELTA_COMP COMMUNAL COMPLEX CONFIGURATION CONNECT CONSTRAINTS CONTROL COPY COUNT CPUAFFINITYMODE CPUAFFINITYSET CREATEDB CREATEUSER CRON CROSS CSV CUBE CURRENT CURSOR CUSTOM CUSTOM_PARTITIONS CYCLE DATA DATABASE DATAPATH DATEDIFF DATESTYLE DATETIME DATETIMES DAY DEACTIVATE DEALLOCATE DEBUG DEC DECIMAL DECLARE DECODE DEFAULTS DEFERRED DEFINE DEFINER DELETE DELIMITED DELIMITER DELIMITERS DELTARANGE_COMP DELTARANGE_COMP_SP DELTAVAL DEPENDS DETERMINES DIRECT DIRECTCOLS DIRECTED DIRECTGROUPED DIRECTPROJ DISABLE DISABLED DISCONNECT DISK_QUOTA DISTVALINDEX DO DOMAIN DOUBLE DROP DRYRUN DURABLE EACH ENABLE ENABLED ENCLOSED ENCODED ENCODING ENCRYPTED ENFORCELENGTH EPHEMERAL EPOCH ERROR ESCAPE ESCAPE_STRING_WARNING EVENT EVENTS EXCEPTION EXCEPTIONS EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXECUTIONPARALLELISM EXISTS EXPIRE EXPLAIN EXPORT EXTEND EXTENSIONS EXTERNAL EXTRACT FAILED_LOGIN_ATTEMPTS FALLTHROUGH FAULT FENCED FETCH FILESYSTEM FILLER FILTER FIRST FIXEDWIDTH FLEX FLEXIBLE FLOAT FOLLOWING FORCE FORMAT FORWARD FREEZE FULL FUNCTION FUNCTIONS GCDDELTA GEOMETRY GEOGRAPHY GET GLOB GLOBAL GRACEPERIOD GROUP GROUPED GROUPING GZIP GZIP_COMP HANDLER 
HCATALOG HCATALOG_CONNECTION_TIMEOUT HCATALOG_DB HCATALOG_SCHEMA HCATALOG_SLOW_TRANSFER_LIMIT HCATALOG_SLOW_TRANSFER_TIME HCATALOG_USER HIGH HIVESERVER2_HOSTNAME HOLD HOST HOSTNAME HOUR HOURS ICEBERG ID IDENTIFIED IDENTITY IDLESESSIONTIMEOUT IF IGNORE ILIKE ILIKEB IMMEDIATE IMMUTABLE IMPLICIT INCLUDE INCLUDING INCREMENT INDEX INHERITS INNER INOUT INPUT INSENSITIVE INSERT INSTEAD INT INTEGER INTERFACE INTERPOLATE INTERVALSTYLE INVOKER ISNULL ISOLATION JOIN JSON KEY KEYMANAGER KSAFE LABEL LANCOMPILER LANGUAGE LARGE LAST LATEST LEFT LENGTH LESS LEVEL LIBRARY LIKEB LISTEN LOAD LOADER LOCAL LOCALE LOCATION LOCK LONG LOW LZO MANAGED MAP MASK MATCHED MATERIALIZE MAXCONCURRENCY MAXCONCURRENCYGRACE MAXCONNECTIONS MAXMEMORYSIZE MAXPAYLOAD MAXQUERYMEMORYSIZE MAXVALUE MEDIUM MEMORYCAP MEMORYSIZE MERGE MERGEOUT METHOD MICROSECONDS MILLISECONDS MINUS MINUTE MINUTES MINVALUE MODE MODEL MONEY MONTH MOVE MOVEOUT MULTIPLEACTIVERESULTSETS NAME NAMESPACE NATIONAL NATIVE NATURAL NCHAR NETWORK NEXT NO NOCREATEDB NOCREATEUSER NODE NODES NOGLOB NONE NOTHING NOTIFIER NOTIFY NOWAIT NULLAWARE NULLCOLS NULLS NUMBER NUMERIC OBJECT OBJECTS OCTETS OF OFF OIDS OPERATOR OPT OPTIMIZER OPTION OPTVER ORC OTHERS OUT OUTER OVERLAY OWNER PARAMETER PARAMETERS PARQUET PARSER PARTIAL PARTITION PARTITIONING PASSWORD PASSWORD_GRACE_TIME PASSWORD_LIFE_TIME PASSWORD_LOCK_TIME PASSWORD_MAX_LENGTH PASSWORD_MIN_CHAR_CHANGE PASSWORD_MIN_DIGITS PASSWORD_MIN_LENGTH PASSWORD_MIN_LETTERS PASSWORD_MIN_LIFE_TIME PASSWORD_MIN_LOWERCASE_LETTERS PASSWORD_MIN_SYMBOLS PASSWORD_MIN_UPPERCASE_LETTERS PASSWORD_REUSE_MAX PASSWORD_REUSE_TIME PATTERN PERCENT PERMANENT PINNED PLACING PLANNEDCONCURRENCY POINT POLICY POOL PORT POSITION PRECEDING PRECISION PREFER PREFIX PREPARE PREPASS PRESERVE PREVIOUS PRIOR PRIORITY PRIVILEGES PROCEDURAL PROCEDURE PROFILE PROJECTION PROJECTIONS PSDATE QUERY QUEUETIMEOUT QUOTE RANDOM RANGE RAW READ REAL RECHECK RECORD RECOVER RECURSIVE REFRESH REINDEX REJECTED REJECTMAX RELATIVE RELEASE REMOVE RENAME REORGANIZE REPEATABLE REPLACE REPLICATE RESET RESOURCE RESOURCE_POOL RESTART RESTORE RESTRICT RESULTS RETENTION RETRY RETURN RETURNREJECTED REVOKE RIGHT RLE ROLE ROLES ROLLBACK ROLLUP ROUTE ROUTING ROW ROWS RULE RUNTIMECAP RUNTIMEPRIORITY RUNTIMEPRIORITYTHRESHOLD RWITH SALT SAVE SAVEPOINT SCHEDULE SCHEMA SCHEMATA SCROLL SEARCH_PATH SECOND SECONDARY SECONDS SECURITY SECURITY_ALGORITHM SEGMENTED SEMI SEMIALL SEQUENCE SEQUENCES SERIAL SERIALIZABLE SESSION SET SETOF SETS SHARD SHARE SHARED SHOW SIGNED SIMPLE SINGLEINITIATOR SITE SITES SKIP SMALLDATETIME SMALLINT SOURCE SPLIT SSL_CONFIG STABLE STANDARD_CONFORMING_STRINGS STANDBY START STATEMENT STATISTICS STDIN STDOUT STEMMER STORAGE STORED STREAM STRENGTH STRICT SUBCLUSTER SUBJECT SUBNET SUBSTRING SUITES SYSID SYSTEM TABLES TABLESAMPLE TABLESPACE TEMP TEMPLATE TEMPORARY TEMPSPACECAP TERMINATOR TEXT THAN TIES TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TIMESTAMPTZ TIMETZ TIMEZONE TINYINT TLS TLSMODE TOAST TOKENIZER TOLERANCE TRANSACTION TRANSFORM TREAT TRICKLE TRIGGER TRIM TRUNCATE TRUSTED TUNING TYPE TYPES UDPARAMETER UNCOMMITTED UNCOMPRESSED UNI UNINDEXED UNKNOWN UNLIMITED UNLISTEN UNLOCK UNSEGMENTED UPDATE USAGE UUID VACUUM VALID VALIDATE VALIDATOR VALINDEX VALUE VALUES VARBINARY VARCHAR VARCHAR2 VARYING VERBOSE VERTICA VIEW VOLATILE WAIT WEBHDFS_ADDRESS WEBSERVICE_HOSTNAME WEBSERVICE_PORT WITHOUT WORK WORKLOAD WRITE YEAR ZONE ZSTD ZSTD_COMP ZSTD_FAST_COMP ZSTD_HIGH_COMP """ 
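# Illustrative sketch only: keyword lists like the above are typically registered with the dialect in the main dialect module (assumed here to be dialect_vertica.py), along these lines: # vertica_dialect.sets("unreserved_keywords").clear() # vertica_dialect.update_keywords_set_from_multiline_string( # "unreserved_keywords", vertica_unreserved_keywords # ) # vertica_dialect.sets("reserved_keywords").clear() # vertica_dialect.update_keywords_set_from_multiline_string( # "reserved_keywords", vertica_reserved_keywords # )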
sqlfluff-3.4.2/src/sqlfluff/diff_quality_plugin.py000066400000000000000000000111041503426445100223640ustar00rootroot00000000000000"""This module integrates SQLFluff with diff_cover's "diff-quality" tool.""" import copy import json import logging import os import pathlib import sys import tempfile from diff_cover.command_runner import execute, run_command_for_code from diff_cover.hook import hookimpl as diff_cover_hookimpl from diff_cover.violationsreporters.base import ( QualityDriver, QualityReporter, Violation, ) logger = logging.getLogger(__name__) class SQLFluffDriver(QualityDriver): """SQLFluff driver for use by SQLFluffViolationReporter.""" def __init__(self) -> None: super().__init__( [sys.executable, "-m", "sqlfluff.cli.commands"], [".sql"], [ s.encode(sys.getfilesystemencoding()) for s in ["sqlfluff", "lint", "--format=json"] ], exit_codes=[0, 1], ) def parse_reports(self, reports) -> None: # pragma: no cover """Parse report output. Not used by SQLFluff.""" pass def installed(self) -> bool: """Check if SQLFluff is installed.""" return run_command_for_code(["sqlfluff", "--version"]) == 0 class SQLFluffViolationReporter(QualityReporter): """Class that implements diff-quality integration.""" supported_extensions = ["sql"] def __init__(self, **kw) -> None: """Calls the base class constructor to set the object's name.""" super().__init__(SQLFluffDriver(), **kw) def violations_batch(self, src_paths): """Return a dictionary of Violations recorded in `src_paths`.""" # Check if SQLFluff is installed. if self.driver_tool_installed is None: self.driver_tool_installed = self.driver.installed() if not self.driver_tool_installed: # pragma: no cover raise OSError(f"{self.driver.name} is not installed") if src_paths: output = self.reports if self.reports else self._run_sqlfluff(src_paths) for o in output: # Load and parse SQLFluff JSON output. try: report = json.loads(o) except json.JSONDecodeError as e: # pragma: no cover print(f"Error parsing JSON output ({e}): {repr(o)}") raise else: for file in report: self.violations_dict[file["filepath"]] = [ Violation(v["start_line_no"], v["description"]) for v in file["violations"] ] else: logger.warning("Not running SQLFluff: No files to check") return self.violations_dict def _run_sqlfluff(self, src_paths) -> list[str]: # Prepare the SQLFluff command to run. command = copy.deepcopy(self.driver.command) if self.options: for arg in self.options.split(): command.append(arg) for src_path in src_paths: if src_path.endswith(".sql") and os.path.exists(src_path): command.append(src_path.encode(sys.getfilesystemencoding())) with tempfile.NamedTemporaryFile( prefix="sqlfluff-", suffix=".json", delete=False ) as f: f.close() try: # Write output to a temporary file. This avoids issues where # extraneous SQLFluff or dbt output results in the JSON output # being invalid. command += ["--write-output", f.name] # Run SQLFluff. printable_command = " ".join( [ ( c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c ) for c in command ] ) logger.warning(f"{printable_command}") execute(command, self.driver.exit_codes) return [pathlib.Path(f.name).read_text()] finally: os.remove(f.name) def measured_lines(self, src_path: str) -> None: # pragma: no cover """Return list of the lines in src_path that were measured.""" @diff_cover_hookimpl def diff_cover_report_quality(**kw) -> SQLFluffViolationReporter: """Returns the SQLFluff plugin. This function is registered as a diff_cover entry point. diff-quality calls it in order to "discover" the SQLFluff plugin. 
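A typical invocation from within a git repository (illustrative) is:: diff-quality --violations sqlfluff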
:return: Object that implements the BaseViolationReporter ABC """ return SQLFluffViolationReporter(**kw) sqlfluff-3.4.2/src/sqlfluff/py.typed000066400000000000000000000002431503426445100174550ustar00rootroot00000000000000In line with PEP 561 this file indicates to people using this package that its contents have type annotations which can be used. https://peps.python.org/pep-0561/ sqlfluff-3.4.2/src/sqlfluff/rules/000077500000000000000000000000001503426445100191115ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/__init__.py000066400000000000000000000000551503426445100212220ustar00rootroot00000000000000"""Standard Rules packaged with sqlfluff.""" sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/000077500000000000000000000000001503426445100207005ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL01.py000066400000000000000000000100671503426445100217130ustar00rootroot00000000000000"""Implementation of Rule AL01.""" from typing import Optional from sqlfluff.core.parser import BaseSegment, KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import AsAliasOperatorSegment class Rule_AL01(BaseRule): """Implicit/explicit aliasing of table. Aliasing of table to follow preference (requiring an explicit ``AS`` is the default). **Anti-pattern** In this example, the alias ``voo`` is implicit. .. code-block:: sql SELECT voo.a FROM foo voo **Best practice** Add ``AS`` to make it explicit. .. code-block:: sql SELECT voo.a FROM foo AS voo """ name = "aliasing.table" aliases = ("L011",) groups: tuple[str, ...] = ("all", "aliasing") config_keywords = ["aliasing"] crawl_behaviour = SegmentSeekerCrawler({"alias_expression"}, provide_raw_stack=True) is_fix_compatible = True _target_parent_types: tuple[str, ...] = ( "from_expression_element", "merge_statement", ) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Implicit aliasing of table/column not allowed. Use explicit `AS` clause. We look for the alias segment, and then evaluate its parent and whether it contains an AS keyword. This is the _eval function for both AL01 and AL02. """ # Config type hints self.aliasing: str # AL01 is disabled for Oracle, still run for AL02. if context.dialect.name == "oracle" and self.name == "aliasing.table": return None assert context.segment.is_type("alias_expression") if context.parent_stack[-1].is_type(*self._target_parent_types): # Search for an AS keyword. as_keyword: Optional[BaseSegment] = context.segment.get_child( "alias_operator" ) if as_keyword: if self.aliasing == "implicit": self.logger.debug("Removing AS keyword and respacing.") whitespace: Optional[BaseSegment] = context.segment.get_child( "whitespace" ) if whitespace: fixes = [LintFix.delete(whitespace), LintFix.delete(as_keyword)] else: fixes = [LintFix.delete(as_keyword)] # pragma: no cover return LintResult( anchor=as_keyword, fixes=fixes, ) elif self.aliasing != "implicit": self.logger.debug("Inserting AS keyword and respacing.") for identifier in context.segment.raw_segments: if identifier.is_code: break else: # pragma: no cover raise NotImplementedError( "Failed to find identifier. Raise this as a bug on GitHub."
) as_alias_operator_segment = AsAliasOperatorSegment( segments=(KeywordSegment("AS"),) ) # If the preceding sibling already ends with whitespace at its tail, # we do not need an additional leading whitespace. has_leading_whitespace = context.siblings_pre and isinstance( context.siblings_pre[-1], WhitespaceSegment ) if has_leading_whitespace: edit_segments = [as_alias_operator_segment, WhitespaceSegment()] else: edit_segments = [ WhitespaceSegment(), as_alias_operator_segment, WhitespaceSegment(), ] return LintResult( anchor=context.segment, fixes=[LintFix.create_before(identifier, edit_segments)], ) return None sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL02.py000066400000000000000000000025031503426445100217100ustar00rootroot00000000000000"""Implementation of Rule AL02.""" from typing import Optional from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.rules.aliasing.AL01 import Rule_AL01 class Rule_AL02(Rule_AL01): """Implicit/explicit aliasing of columns. Aliasing of columns to follow preference (explicit aliasing using an ``AS`` clause is the default). **Anti-pattern** In this example, the alias for column ``a`` is implicit. .. code-block:: sql SELECT a alias_col FROM foo **Best practice** Add ``AS`` to make it explicit. .. code-block:: sql SELECT a AS alias_col FROM foo """ name = "aliasing.column" aliases = ("L012",) groups = ("all", "core", "aliasing") config_keywords = ["aliasing"] # NB: crawl_behaviour is the same as Rule AL01 _target_parent_types = ("select_clause_element",) def _eval(self, context: RuleContext) -> Optional[LintResult]: # T-SQL supports alternative alias expressions for AL02 # select alias = value # instead of # select value as alias # Recognise this and exit early alias_expression = context.segment if getattr(alias_expression.get_child("alias_operator"), "raw", None) == "=": return None return super()._eval(context) sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL03.py000066400000000000000000000112451503426445100217140ustar00rootroot00000000000000"""Implementation of Rule AL03.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_AL03(BaseRule): """Column expression without alias. Use explicit `AS` clause. **Anti-pattern** In this example, neither sum has an alias. .. code-block:: sql SELECT sum(a), sum(b) FROM foo **Best practice** Add aliases. .. code-block:: sql SELECT sum(a) AS a_sum, sum(b) AS b_sum FROM foo """ name = "aliasing.expression" aliases = ("L013",) groups = ("all", "core", "aliasing") config_keywords = ["allow_scalar"] crawl_behaviour = SegmentSeekerCrawler({"select_clause_element"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Column expression without alias. Use explicit `AS` clause. We look for the select_clause_element segment, and then evaluate whether it has an alias segment or not and whether the expression is complicated enough. `parent_stack` is used to assess how many other elements there are.
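For example (illustrative), ``SELECT upper(a) FROM t`` is flagged because the expression carries no alias, while a bare ``SELECT a FROM t`` is not.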
""" functional_context = FunctionalContext(context) segment = functional_context.segment children = segment.children() # If we have an alias its all good if children.any(sp.is_type("alias_expression")): return None # Ignore if it's a function with EMITS clause as EMITS is equivalent to AS if ( children.select(sp.is_type("function")) .children() .select(sp.is_type("emits_segment")) ): return None # Ignore if it's a cast_expression with non-function enclosed children # For example, we do not want to ignore something like func()::type # but we can ignore something like a::type if children.children().select( sp.is_type("cast_expression") ) and not children.children().select( sp.is_type("cast_expression") ).children().any( sp.is_type("function") ): return None parent_stack = functional_context.parent_stack # Ignore if it is part of a CTE with column names if ( parent_stack.last(sp.is_type("common_table_expression")) .children() .any(sp.is_type("cte_column_list")) ): return None # Ignore if using a columns expression. A nested function such as # ``MIN(COLUMNS(*))`` will assign the same alias to all columns. if len(children.recursive_crawl("columns_expression")) > 0: return None select_clause_children = children.select(sp.not_(sp.is_type("star"))) is_complex_clause = _recursively_check_is_complex(select_clause_children) if not is_complex_clause: return None # No fixes, because we don't know what the alias should be, # the user should document it themselves. if self.allow_scalar: # type: ignore # Check *how many* elements/columns there are in the select # statement. If this is the only one, then we won't # report an error. immediate_parent = parent_stack.last() elements = immediate_parent.children(sp.is_type("select_clause_element")) num_elements = len(elements) if num_elements > 1: return LintResult(anchor=context.segment) return None return LintResult(anchor=context.segment) def _recursively_check_is_complex(select_clause_or_exp_children: Segments) -> bool: forgiveable_types = [ "whitespace", "newline", "column_reference", "wildcard_expression", "bracketed", ] selector = sp.not_(sp.is_type(*forgiveable_types)) filtered = select_clause_or_exp_children.select(selector) remaining_count = len(filtered) # Once we have removed the above if nothing remains, # then this statement/expression was simple if remaining_count == 0: return False first_el = filtered.first() # If the element has a select statement inside, this is likely a subquery if first_el.recursive_crawl("select_statement").any(): return True # Anything except a single expression seg remains # Then it was complex if remaining_count > 1 or not first_el.all(sp.is_type("expression")): return True # If we have just an expression check if it was simple return _recursively_check_is_complex(first_el.children()) sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL04.py000066400000000000000000000112221503426445100217100ustar00rootroot00000000000000"""Implementation of Rule AL04.""" import itertools from typing import Optional from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.select import get_select_statement_info class Rule_AL04(BaseRule): """Table aliases should be unique within each clause. Reusing table aliases is very likely a coding error. 
**Anti-pattern** In this example, the alias ``t`` is reused for two different tables: .. code-block:: sql SELECT t.a, t.b FROM foo AS t, bar AS t -- This can also happen when using schemas where the -- implicit alias is the table name: SELECT a, b FROM 2020.foo, 2021.foo **Best practice** Make all tables have a unique alias. .. code-block:: sql SELECT f.a, b.b FROM foo AS f, bar AS b -- Also use explicit aliases when referencing two tables -- with the same name from two different schemas. SELECT f1.a, f2.b FROM 2020.foo AS f1, 2021.foo AS f2 """ name = "aliasing.unique.table" aliases = ("L020",) groups: tuple[str, ...] = ("all", "core", "aliasing", "aliasing.unique") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _lint_references_and_aliases( self, table_aliases: list[AliasInfo], standalone_aliases: list[BaseSegment], references: list[ObjectReferenceSegment], col_aliases: list[ColumnAliasInfo], using_cols: list[BaseSegment], parent_select: Optional[BaseSegment], rule_context: RuleContext, ) -> Optional[list[LintResult]]: """Check whether any aliases are duplicates. NB: Subclasses of this error should override this function. """ if parent_select: parent_select_info = get_select_statement_info( parent_select, rule_context.dialect ) if parent_select_info: # If we are looking at a subquery, include any table references for table_alias in parent_select_info.table_aliases: if table_alias.from_expression_element.path_to( rule_context.segment ): # Skip the subquery alias itself continue table_aliases.append(table_alias) # Are any of the aliases the same? duplicate = set() for a1, a2 in itertools.combinations(table_aliases, 2): # Compare the strings if a1.ref_str == a2.ref_str and a1.ref_str: duplicate.add(a2) if duplicate: return [ LintResult( # Reference the element, not the string. anchor=aliases.segment, description=( "Duplicate table alias {!r}. Table aliases should be unique." ).format(aliases.ref_str), ) for aliases in duplicate ] else: return None def _eval(self, context: RuleContext) -> EvalResultType: """Get References and Aliases and allow linting. This rule covers a lot of potential cases of odd usages of references, see the code for each of the potential cases. Subclasses of this rule should override the `_lint_references_and_aliases` method. """ assert context.segment.is_type("select_statement") select_info = get_select_statement_info(context.segment, context.dialect) if not select_info: return None # Work out if we have a parent select function parent_select = None for seg in reversed(context.parent_stack): if seg.is_type("select_statement"): parent_select = seg break # Pass them all to the function that does all the work. 
# NB: Subclasses of this rules should override the function below return self._lint_references_and_aliases( select_info.table_aliases, select_info.standalone_aliases, select_info.reference_buffer, select_info.col_aliases, select_info.using_cols, parent_select, context, ) sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL05.py000066400000000000000000000302561503426445100217210ustar00rootroot00000000000000"""Implementation of Rule AL05.""" from collections import Counter from dataclasses import dataclass, field from typing import cast from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser.segments import BaseSegment, RawSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import Segments, sp @dataclass class AL05Query(Query): """Query subclass with custom AL05 info.""" aliases: list[AliasInfo] = field(default_factory=list) tbl_refs: set[str] = field(default_factory=set) class Rule_AL05(BaseRule): """Tables should not be aliased if that alias is not used. **Anti-pattern** .. code-block:: sql SELECT a FROM foo AS zoo **Best practice** Use the alias or remove it. An unused alias makes code harder to read without changing any functionality. .. code-block:: sql SELECT zoo.a FROM foo AS zoo -- Alternatively... SELECT a FROM foo """ name = "aliasing.unused" aliases = ("L025",) groups = ("all", "core", "aliasing") config_keywords = ["alias_case_check"] crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) _dialects_requiring_alias_for_values_clause = [ "athena", "snowflake", "tsql", "postgres", ] is_fix_compatible = True # config alias_case_check: str def _eval(self, context: RuleContext) -> EvalResultType: violations: list[LintResult] = [] assert context.segment.is_type("select_statement") # Exit early if the SELECT does not define any aliases. select_info = get_select_statement_info(context.segment, context.dialect) if not select_info or not select_info.table_aliases: return None # Analyze the SELECT. alias: AliasInfo query = cast( AL05Query, AL05Query.from_segment(context.segment, dialect=context.dialect) ) self._analyze_table_aliases(query) if context.dialect.name in ("redshift", "bigquery"): # Redshift supports un-nesting using aliases. # Detect that situation and ignore. # https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unnest # Do any references refer to aliases in the same list? references = set() aliases = set() for alias in query.aliases: aliases.add(alias.ref_str) if alias.segment: aliases.add(self._cs_str_id(alias.segment)) if not alias.object_reference: continue # pragma: no cover for seg in alias.object_reference.segments: if seg.is_type("identifier"): references.add(self._cs_str_id(seg)) # If there's any overlap between aliases and reference if aliases.intersection(references): self.logger.debug( "Overlapping references found. Assuming %s semi-structured.", context.dialect.name, ) return None # Get the number of times an object (table/view) is referenced. While some # dialects can handle the same table name reference with different schemas, # we don't want to allow a conflict with AL04's uniqueness rule so we grab # the base table name instead of the fully qualified one to determine naming # collisions. 
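# Illustrative example (hypothetical names): for "FROM 2020.foo AS a JOIN 2021.foo AS b", both references share the base name "foo", so it is counted twice and neither alias is treated as removable below.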
ref_counter = Counter( self._cs_str_id(a.object_reference.segments[-1]) for a in query.aliases if a.object_reference and a.object_reference.segments ) for alias in query.aliases: # Skip alias if it's required (some dialects require aliases for # VALUES clauses). if alias.from_expression_element and self._is_alias_required( alias.from_expression_element, context.dialect.name ): continue # Skip alias if the table is referenced more than once, some dialects # require the referenced table names to be unique even if not returned # by the statement. if ( alias.object_reference and alias.object_reference.segments and ref_counter.get( self._cs_str_id(alias.object_reference.segments[-1]), 0, ) > 1 ): continue # Redshift requires an alias when a `QUALIFY` statement immediately follows # the `FROM` clause. # https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html if ( context.dialect.name == "redshift" and alias.alias_expression and self._followed_by_qualify(context, alias) ): continue # If the alias is for a _function_ rather than just a table, it's possible # that it's an array function, like `unnest` or `jsonb_array_elements_text` # So if that alias appears as what looks like a _column reference_ then # also skip it. # https://github.com/sqlfluff/sqlfluff/issues/4623 _table_expression = alias.from_expression_element.get_child( "table_expression" ) if _table_expression and _table_expression.get_child("function"): # Case insensitive match for conservatism if alias.ref_str.strip("\"'`[]").upper() in [ seg.raw_upper.strip("\"'`[]") for seg in select_info.reference_buffer ]: self.logger.debug( f"Alias for function {alias.ref_str} found as apparent " "column reference in select. Skipping" ) continue if ( alias.aliased and alias.segment and self._cs_str_id(alias.segment) not in query.tbl_refs ): # Unused alias. Report and fix. violations.append(self._report_unused_alias(alias)) return violations or None def _cs_str_id(self, identifier: BaseSegment): _normal_val = identifier.raw_normalized(self.alias_case_check == "dialect") if self.alias_case_check == "case_insensitive": _normal_val = _normal_val.upper() elif self.alias_case_check == "quoted_cs_naked_upper": if identifier.is_type("naked_identifier"): _normal_val = _normal_val.upper() elif self.alias_case_check == "quoted_cs_naked_lower": if identifier.is_type("naked_identifier"): _normal_val = _normal_val.lower() return _normal_val def _followed_by_qualify(self, context: RuleContext, alias: AliasInfo) -> bool: curr_from_seen = False assert alias.alias_expression for seg in context.segment.segments: if alias.alias_expression.get_end_loc() == seg.get_end_loc(): curr_from_seen = True elif curr_from_seen and not seg.is_code: continue elif curr_from_seen and seg.is_type("qualify_clause"): return True elif curr_from_seen: return False return False def _is_alias_required( self, from_expression_element: BaseSegment, dialect_name: str ) -> bool: """Given an alias, is it REQUIRED to be present? There are a few circumstances where an alias is either required by the dialect, or recommended by SQLFluff: * Aliases are required in SOME, but not all dialects when there's a VALUES clause. * In the case of a nested SELECT, all dialect checked (MySQL, Postgres, T-SQL) require an alias. """ # Look for a table_expression (i.e. VALUES clause) as a descendant of # the FROM expression, potentially nested inside brackets. The reason we # allow nesting in brackets is that in some dialects (e.g. TSQL), this # is actually *required* in order for SQL Server to parse it. 
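# e.g. (illustrative, hypothetical names): in TSQL, "SELECT * FROM (VALUES (1), (2)) AS v(n)" nests the VALUES clause in brackets, and the alias "v" is required for the statement to parse.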
for segment in from_expression_element.iter_segments(expanding=("bracketed",)): if segment.is_type("table_expression"): # Found a table expression. Does it have a VALUES clause? if segment.get_child("values_clause"): # Found a VALUES clause. Is this a dialect that requires # VALUE clauses to be aliased? return ( dialect_name in self._dialects_requiring_alias_for_values_clause ) elif any( seg.is_type( "select_statement", "set_expression", "with_compound_statement" ) for seg in segment.iter_segments(expanding=("bracketed",)) ): # The FROM expression is a derived table, i.e. a nested # SELECT. In this case, the alias is required in every # dialect we checked (MySQL, Postgres, T-SQL). # https://pganalyze.com/docs/log-insights/app-errors/U115 return True else: # None of the special cases above applies, so the alias is # not required. return False # This should never happen. Return False just to be safe. return False # pragma: no cover def _analyze_table_aliases(self, query: AL05Query) -> None: # Get table aliases defined in query. for selectable in query.selectables: select_info = selectable.select_info if select_info: # Record the aliases. query.aliases += select_info.table_aliases # Look at each table reference; if it's an alias reference, # resolve the alias: could be an alias defined in "query" # itself or an "ancestor" query. for r in ( select_info.reference_buffer + select_info.table_reference_buffer ): for tr in r.extract_possible_references( level=r.ObjectReferenceLevel.TABLE ): # This function walks up the query's parent stack if necessary. self._resolve_and_mark_reference(query, tr.segments[0]) # Visit children. for child in query.children: self._analyze_table_aliases(cast(AL05Query, child)) def _resolve_and_mark_reference(self, query: AL05Query, ref: RawSegment) -> None: # Does this query define the referenced alias? _ref = self._cs_str_id(ref) if any(_ref == self._cs_str_id(a.segment) for a in query.aliases if a.segment): # Yes. Record the reference. query.tbl_refs.add(_ref) elif query.parent: # No. Recursively check the query's parent hierarchy. self._resolve_and_mark_reference(query.parent, ref) def _report_unused_alias(self, alias: AliasInfo) -> LintResult: fixes = [LintFix.delete(alias.alias_expression)] # type: ignore # Walk back to remove indents/whitespaces to_delete = ( Segments(*alias.from_expression_element.segments) .reversed() .select( start_seg=alias.alias_expression, # Stop once we reach an other, "regular" segment. loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), ) ) fixes += [LintFix.delete(seg) for seg in to_delete] return LintResult( anchor=alias.segment, description="Alias {!r} is never used in SELECT statement.".format( alias.ref_str ), fixes=fixes, ) sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL06.py000066400000000000000000000100371503426445100217150ustar00rootroot00000000000000"""Implementation of Rule AL06.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext class Rule_AL06(BaseRule): """Enforce table alias lengths in from clauses and join conditions. **Anti-pattern** In this example, alias ``o`` is used for the orders table. .. code-block:: sql SELECT SUM(o.amount) as order_amount, FROM orders as o **Best practice** Avoid aliases. Avoid short aliases when aliases are necessary. See also: :sqlfluff:ref:`AL07`. .. 
code-block:: sql SELECT SUM(orders.amount) as order_amount, FROM orders SELECT replacement_orders.amount, previous_orders.amount FROM orders AS replacement_orders JOIN orders AS previous_orders ON replacement_orders.id = previous_orders.replacement_id """ name = "aliasing.length" aliases = ("L066",) groups = ("all", "core", "aliasing") config_keywords = ["min_alias_length", "max_alias_length"] crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Identify aliases in from clause and join conditions. Find base table, table expressions in join, and other expressions in select clause and decide if it's needed to report them. """ self.min_alias_length: Optional[int] self.max_alias_length: Optional[int] assert context.segment.is_type("select_statement") children = FunctionalContext(context).segment.children() from_expression_elements = children.recursive_crawl("from_expression_element") return self._lint_aliases(from_expression_elements) or None def _lint_aliases(self, from_expression_elements) -> Optional[list[LintResult]]: """Lint all table aliases.""" # A buffer to keep any violations. violation_buff = [] # For each table, check whether it is aliased, and if so check the # lengths. for from_expression_element in from_expression_elements: table_expression = from_expression_element.get_child("table_expression") table_ref = ( table_expression.get_child("object_reference") if table_expression else None ) # If the from_expression_element has no object_reference - skip it # An example case is a lateral flatten, where we have a function segment # instead of a table_reference segment. if not table_ref: continue # If there's no alias expression - skip it alias_exp_ref = from_expression_element.get_child("alias_expression") if alias_exp_ref is None: continue alias_identifier_ref = alias_exp_ref.get_child("identifier") if self.min_alias_length is not None: if len(alias_identifier_ref.raw) < self.min_alias_length: violation_buff.append( LintResult( anchor=alias_identifier_ref, description=( "Aliases should be at least {} character(s) long." ).format(self.min_alias_length), ) ) if self.max_alias_length is not None: if len(alias_identifier_ref.raw) > self.max_alias_length: violation_buff.append( LintResult( anchor=alias_identifier_ref, description=( "Aliases should be no more than {} character(s) long." ).format(self.max_alias_length), ) ) return violation_buff or None sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL07.py000066400000000000000000000253751503426445100217310ustar00rootroot00000000000000"""Implementation of Rule AL07.""" from collections import Counter, defaultdict from collections.abc import Generator from typing import NamedTuple, Optional from sqlfluff.core.parser import BaseSegment, IdentifierSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class TableAliasInfo(NamedTuple): """Structure yielded by_filter_table_expressions().""" table_ref: BaseSegment whitespace_ref: BaseSegment alias_exp_ref: BaseSegment alias_identifier_ref: BaseSegment class Rule_AL07(BaseRule): """Avoid table aliases in from clauses and join conditions. .. note:: This rule was taken from the `dbt Style Guide `_ which notes that: Avoid table aliases in join conditions (especially initialisms) - it's harder to understand what the table called "c" is compared to "customers". 
This rule is controversial and, for many larger databases, avoiding aliases is neither realistic nor desirable. In particular for BigQuery, due to the complexity of backtick requirements and of determining whether a name refers to a project or a dataset, automated fixes can potentially break working SQL code. For most users :sqlfluff:ref:`AL06` is likely a more appropriate linting rule to drive sensible behaviour around aliasing. The stricter treatment of aliases in this rule may be useful for more focused projects, or temporarily as a refactoring tool, because the :code:`fix` routine of the rule can remove aliases. This rule is disabled by default for all dialects; it can be enabled with the ``force_enable = True`` flag. **Anti-pattern** In this example, alias ``o`` is used for the orders table, and ``c`` is used for the ``customers`` table. .. code-block:: sql SELECT COUNT(o.customer_id) as order_amount, c.name FROM orders as o JOIN customers as c on o.id = c.user_id **Best practice** Avoid aliases. .. code-block:: sql SELECT COUNT(orders.customer_id) as order_amount, customers.name FROM orders JOIN customers on orders.id = customers.user_id -- Self-join will not raise issue SELECT table1.a, table_alias.b, FROM table1 LEFT JOIN table1 AS table_alias ON table1.foreign_key = table_alias.foreign_key """ name = "aliasing.forbid" aliases = ("L031",) groups = ("all", "aliasing") config_keywords = ["force_enable"] crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Identify aliases in from clause and join conditions. Find base table, table expressions in join, and other expressions in select clause and decide if it's needed to report them. """ # Config type hints self.force_enable: bool # Issue 2810: BigQuery has some tricky expectations (apparently not # documented, but subject to change, e.g.: # https://www.reddit.com/r/bigquery/comments/fgk31y/new_in_bigquery_no_more_backticks_around_table/) # about whether backticks are required (and whether the query is valid # or not, even with them), depending on whether the GCP project name is # present, or just the dataset name. Since SQLFluff doesn't have access # to BigQuery when it is looking at the query, it would be complex for # this rule to do the right thing. For now, the rule simply disables # itself.
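# Illustrative sketch (hypothetical query, not taken from the rule's own logic): with force_enable = True, a query such as SELECT o.id FROM orders AS o JOIN customers AS c ON o.id = c.user_id would be flagged, and the fix routine would rewrite it to SELECT orders.id FROM orders JOIN customers ON orders.id = customers.user_id, assuming neither alias belongs to a self-join (which is skipped below).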
if not self.force_enable: return None assert context.segment.is_type("select_statement") children = FunctionalContext(context).segment.children() from_clause_segment = children.select(sp.is_type("from_clause")).first() base_table = ( from_clause_segment.children(sp.is_type("from_expression")) .first() .children(sp.is_type("from_expression_element")) .first() .children(sp.is_type("table_expression")) .first() .children(sp.is_type("object_reference")) .first() ) if not base_table: return None # A buffer for all table expressions in join conditions from_expression_elements = [] column_reference_segments = [] after_from_clause = children.select(start_seg=from_clause_segment[0]) for clause in from_clause_segment + after_from_clause: for from_expression_element in clause.recursive_crawl( "from_expression_element" ): from_expression_elements.append(from_expression_element) for column_reference in clause.recursive_crawl("column_reference"): column_reference_segments.append(column_reference) return ( self._lint_aliases_in_join( base_table[0] if base_table else None, from_expression_elements, column_reference_segments, context.segment, ) or None ) @classmethod def _filter_table_expressions( cls, base_table, from_expression_elements ) -> Generator[TableAliasInfo, None, None]: for from_expression in from_expression_elements: table_expression = from_expression.get_child("table_expression") if not table_expression: continue # pragma: no cover table_ref = table_expression.get_child("object_reference") # If the from_expression_element has no object_references - skip it # An example case is a lateral flatten, where we have a function segment # instead of a table_reference segment. if not table_ref: continue # If this is self-join - skip it if ( base_table and base_table.raw == table_ref.raw and base_table != table_ref ): continue whitespace_ref = from_expression.get_child("whitespace") # If there's no alias expression - skip it alias_exp_ref = from_expression.get_child("alias_expression") if alias_exp_ref is None: continue alias_identifier_ref = alias_exp_ref.get_child("identifier") yield TableAliasInfo( table_ref, whitespace_ref, alias_exp_ref, alias_identifier_ref ) def _lint_aliases_in_join( self, base_table, from_expression_elements, column_reference_segments, segment ) -> Optional[list[LintResult]]: """Lint and fix all aliases in joins - except for self-joins.""" # A buffer to keep any violations. violation_buff = [] to_check = list( self._filter_table_expressions(base_table, from_expression_elements) ) # How many times does each table appear in the FROM clause? table_counts = Counter(ai.table_ref.raw for ai in to_check) # What is the set of aliases used for each table? (We are mainly # interested in the NUMBER of different aliases used.) table_aliases = defaultdict(set) for ai in to_check: if ai and ai.table_ref and ai.alias_identifier_ref: table_aliases[ai.table_ref.raw].add(ai.alias_identifier_ref.raw) # For each aliased table, check whether to keep or remove it. for alias_info in to_check: # If the same table appears more than once in the FROM clause with # different alias names, do not consider removing its aliases. # The aliases may have been introduced simply to make each # occurrence of the table independent within the query. 
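# Hypothetical example of the case skipped here: in SELECT * FROM orders AS o1 JOIN orders AS o2 ON o1.id = o2.parent_id the table "orders" appears twice under two different aliases, so removing either alias would leave the column references ambiguous.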
if ( table_counts[alias_info.table_ref.raw] > 1 and len(table_aliases[alias_info.table_ref.raw]) > 1 ): continue select_clause = segment.get_child("select_clause") ids_refs = [] # Find all references to alias in select clause if alias_info.alias_identifier_ref: alias_name = alias_info.alias_identifier_ref.raw for alias_with_column in select_clause.recursive_crawl( "object_reference" ): used_alias_ref = alias_with_column.get_child("identifier") if used_alias_ref and used_alias_ref.raw == alias_name: ids_refs.append(used_alias_ref) # Find all references to alias in column references for exp_ref in column_reference_segments: used_alias_ref = exp_ref.get_child("identifier") # exp_ref.get_child('dot') ensures that the column reference includes a # table reference if ( used_alias_ref and used_alias_ref.raw == alias_name and exp_ref.get_child("dot") ): ids_refs.append(used_alias_ref) # Fixes for deleting ` as sth` and for editing references to aliased tables # Note unparsable errors have caused the delete to fail (see #2484) # so check there is a ``d`` before doing deletes. fixes: list[LintFix] = [] fixes += [ LintFix.delete(d) for d in [alias_info.alias_exp_ref, alias_info.whitespace_ref] if d ] for alias in [alias_info.alias_identifier_ref, *ids_refs]: if alias: identifier_parts = alias_info.table_ref.raw.split(".") edits: list[BaseSegment] = [] for part in identifier_parts: if edits: edits.append(SymbolSegment(".", type="dot")) edits.append(IdentifierSegment(part, type="naked_identifier")) fixes.append( LintFix.replace( alias, edits, source=[alias_info.table_ref], ) ) violation_buff.append( LintResult( anchor=alias_info.alias_identifier_ref, description="Avoid aliases in from clauses and join conditions.", fixes=fixes, ) ) return violation_buff or None sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL08.py000066400000000000000000000103141503426445100217150ustar00rootroot00000000000000"""Implementation of Rule AL08.""" from typing import Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AL08(BaseRule): """Column aliases should be unique within each clause. Reusing column aliases is very likely a coding error. Note that while in many dialects, quoting an identifier makes it case-sensitive, this rule always compares in a case-insensitive way. This is because columns with the same name, but different case, are still confusing and potentially ambiguous to other readers. In situations where it is *necessary* to have columns with the same name (whether they differ in case or not) we recommend disabling this rule for either just the line, or the whole file. **Anti-pattern** In this example, the alias ``foo`` is reused for two different columns: .. code-block:: sql SELECT a as foo, b as foo FROM tbl; -- This can also happen when referencing the same -- column twice, or aliasing an expression to the same -- name as a column: SELECT foo, foo, a as foo FROM tbl; **Best practice** Make all columns have a unique alias. .. code-block:: sql SELECT a as foo, b as bar FROM tbl; -- Avoid also using the same column twice unless aliased: SELECT foo as foo1, foo as foo2, a as foo3 FROM tbl; """ name = "aliasing.unique.column" aliases = () groups: tuple[str, ...]
= ("all", "core", "aliasing", "aliasing.unique") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) def _eval(self, context: RuleContext) -> EvalResultType: """Walk through select clauses, looking for matching identifiers.""" assert context.segment.is_type("select_clause") used_aliases: dict[str, BaseSegment] = {} violations = [] # Work through each of the elements for clause_element in context.segment.get_children("select_clause_element"): # Is there an alias expression? alias_expression = clause_element.get_child("alias_expression") column_alias: Optional[BaseSegment] = None if alias_expression: # The alias can be the naked_identifier or the quoted_identifier column_alias = alias_expression.get_child( "naked_identifier", "quoted_identifier" ) # No alias, the only other thing we'll track are column references. else: column_reference = clause_element.get_child("column_reference") if column_reference: # We don't want the whole reference, just the last section. # If it is qualified, take the last bit. Otherwise, we still # take the last bit but it shouldn't make a difference. column_alias = column_reference.segments[-1] # If we don't have an alias to work with, just skip this element if not column_alias: continue # NOTE: Always case insensitive, see docstring for why. _key = column_alias.raw_upper # Strip any quote tokens _key = _key.strip("\"'`") # Otherwise check whether it's been used before if _key in used_aliases: # It has. previous = used_aliases[_key] assert previous.pos_marker violations.append( LintResult( anchor=column_alias, description=( "Reuse of column alias " f"{column_alias.raw!r} from line " f"{previous.pos_marker.line_no}." ), ) ) else: # It's not, save it to check against others. used_aliases[_key] = clause_element return violations sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/AL09.py000066400000000000000000000221051503426445100217170ustar00rootroot00000000000000"""Implementation of rule AL09.""" from sqlfluff.core.rules import BaseRule, LintFix, LintResult from sqlfluff.core.rules.base import EvalResultType from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_AL09(BaseRule): """Column aliases should not alias to itself, i.e. self-alias. Renaming the column to itself is a redundant piece of SQL, which doesn't affect its functionality. This rule only applies when aliasing to an exact copy of the column reference (e.g. :code:`foo as foo` or :code:`"BAR" as "BAR"`, see note below on more complex examples). Aliases which effectively change the casing of an identifier are still allowed. .. note:: This rule works in conjunction with :sqlfluff:ref:`references.quoting` (:sqlfluff:ref:`RF06`) and :sqlfluff:ref:`capitalisation.identifiers` (:sqlfluff:ref:`CP02`) to handle self aliases with mixed quoting and casing. In the situation that these two rules are not enabled then this rule will only fix the strict case where the quoting and casing of the alias and reference are the same. If those two rules are enabled, the fixes applied may result in a situation where this rule can kick in as a secondary effect. For example this :ref:`snowflake_dialect_ref` query: .. code-block:: sql -- Original Query. AL09 will not trigger because casing and -- quoting are different. RF06 will however fix the unnecessary -- quoting of "COL". 
SELECT "COL" AS col FROM table; -- After RF06, the query will look like this, at which point -- CP02 will see the inconsistent capitalisation. Depending -- on the configuration it will change one of the identifiers. -- Let's assume the default configuration of "consistent". SELECT COL AS col FROM table; -- After CP02, the alias and the reference will be the same -- and at this point AL09 can take over and remove the alias. SELECT COL AS COL FROM table; -- ..resulting in: SELECT COL FROM table; This interdependence between the rules, and the configuration options offered by each one means a variety of outcomes can be achieved by enabling and disabling each one. See :ref:`ruleselection` and :ref:`ruleconfig` for more details. **Anti-pattern** Aliasing the column to itself, where not necessary for changing the case of an identifier. .. code-block:: sql SELECT col AS col, "Col" AS "Col", COL AS col FROM table; **Best practice** Not to use alias to rename the column to its original name. Self-aliasing leads to redundant code without changing any functionality, unless used to effectively change the case of the identifier. .. code-block:: sql SELECT col, "Col" COL, FROM table; -- Re-casing aliasing is still allowed where necessary, i.e. SELECT col as "Col", "col" as "COL" FROM table; """ name = "aliasing.self_alias.column" groups = ("all", "core", "aliasing") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> EvalResultType: """Find self-aliased columns and fix them. Checks the alias in the `SELECT` clause and see if the alias identifier is same as the column identifier (self-alias). If the column is self-aliased, then the `AS` keyword, whitespaces and alias identifier is removed as part of the fix. For example: `col_a as col_a,` is fixed to `col_a,` """ assert context.segment.is_type("select_clause") violations = [] children: Segments = FunctionalContext(context).segment.children() for clause_element in children.select(sp.is_type("select_clause_element")): clause_element_raw_segments = ( clause_element.get_raw_segments() ) # col_a as col_a column = clause_element.get_child("column_reference") # `col_a` alias_expression = clause_element.get_child( "alias_expression" ) # `as col_a` # We're only interested in direct aliasing of columns (i.e. not # and expression), so if that isn't the case, move on. if not (alias_expression and column): continue # The column needs to be a naked_identifier or quoted_identifier # (not positional identifier like $n in snowflake). # Move on if not. Some column references have multiple elements # (e.g. my_table.my_column), so only fetch the last available. _column_elements = column.get_children( "naked_identifier", "quoted_identifier" ) if not _column_elements: # pragma: no cover continue column_identifier = _column_elements[-1] # Fetch the whitespace between the reference and the alias. 
whitespace = clause_element.get_child("whitespace") # ` ` # The alias can be the naked_identifier or the quoted_identifier alias_identifier = alias_expression.get_child( "naked_identifier", "quoted_identifier" ) # If we do not have an alias identifier, we can continue if not alias_identifier: # pragma: no cover continue alias_keyword_raw = getattr( alias_expression.get_child("alias_operator"), "raw", None ) # If the alias keyword is '=', then no whitespace has to be present # between the alias_keyword and the alias_identifier if alias_keyword_raw != "=": if not (whitespace and alias_identifier): # pragma: no cover # We *should* expect all of these to be non-null, but some bug # reports suggest that that isn't always the case for some # dialects. In those cases, log a warning here, but don't # flag it as a linting issue. Hopefully this will help # produce better bug reports in future. self.logger.warning( "AL09 found an unexpected syntax in an alias expression. " "Unable to determine if this is a self-alias. Please " "report this as a bug on GitHub.\n\n" f"Debug details: dialect: {context.dialect.name}, " f"whitespace: {whitespace is not None}, " f"alias_identifier: {alias_identifier is not None}, " f"alias_expression: {clause_element.raw!r}." ) continue case_sensitive_dialects = ["clickhouse"] # We compare the _exact_ raw value of the column identifier # and the alias identifier (i.e. including quoting and casing). # Resolving aliases & references with differing quoting and casing # should be done in conjunction with RF06 & CP02 (see docstring). if column_identifier.raw == alias_identifier.raw: fixes: list[LintFix] = [] if whitespace is not None: fixes.append(LintFix.delete(whitespace)) fixes.append(LintFix.delete(alias_expression)) violations.append( LintResult( anchor=clause_element_raw_segments[0], description="Column should not be self-aliased.", fixes=fixes, ) ) # If *both* are unquoted, and we're in a dialect which isn't case # sensitive for unquoted identifiers, then flag an error but don't # suggest a fix. It's ambiguous what the user's intent was: # i.e. did they mean to change the case (and so the correct # resolution is quoting), or did they mistakenly add an unnecessary # alias? elif ( context.dialect.name not in case_sensitive_dialects and column_identifier.is_type("naked_identifier") and alias_identifier is not None and alias_identifier.is_type("naked_identifier") and column_identifier.raw_upper == alias_identifier.raw_upper ): violations.append( LintResult( anchor=clause_element_raw_segments[0], description=( "Ambiguous self alias. Either remove unnecessary " "alias, or quote alias/reference to make case " "change explicit." ), ) ) return violations or None sqlfluff-3.4.2/src/sqlfluff/rules/aliasing/__init__.py000066400000000000000000000044741503426445100230200ustar00rootroot00000000000000"""The aliasing plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "aliasing": { "validation": ["implicit", "explicit"], "definition": ( "Should alias have an explicit AS or is implicit aliasing required?" ), }, "allow_scalar": { "validation": [True, False], "definition": ( "Whether or not to allow a single element in the " "select clause to be without an alias."
), }, "alias_case_check": { "validation": [ "dialect", "case_insensitive", "quoted_cs_naked_upper", "quoted_cs_naked_lower", "case_sensitive", ], "definition": "How to handle comparison casefolding in an alias.", }, "min_alias_length": { "validation": range(1000), "definition": ( "The minimum length of an alias to allow without raising a violation." ), }, "max_alias_length": { "validation": range(1000), "definition": ( "The maximum length of an alias to allow without raising a violation." ), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.aliasing.AL01 import Rule_AL01 from sqlfluff.rules.aliasing.AL02 import Rule_AL02 from sqlfluff.rules.aliasing.AL03 import Rule_AL03 from sqlfluff.rules.aliasing.AL04 import Rule_AL04 from sqlfluff.rules.aliasing.AL05 import Rule_AL05 from sqlfluff.rules.aliasing.AL06 import Rule_AL06 from sqlfluff.rules.aliasing.AL07 import Rule_AL07 from sqlfluff.rules.aliasing.AL08 import Rule_AL08 from sqlfluff.rules.aliasing.AL09 import Rule_AL09 return [ Rule_AL01, Rule_AL02, Rule_AL03, Rule_AL04, Rule_AL05, Rule_AL06, Rule_AL07, Rule_AL08, Rule_AL09, ] sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/000077500000000000000000000000001503426445100211040ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM01.py000066400000000000000000000036161503426445100221200ustar00rootroot00000000000000"""Implementation of Rule AM01.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_AM01(BaseRule): """Ambiguous use of ``DISTINCT`` in a ``SELECT`` statement with ``GROUP BY``. When using ``GROUP BY``, a ``DISTINCT`` clause should not be necessary, as every non-distinct ``SELECT`` clause must be included in the ``GROUP BY`` clause. **Anti-pattern** ``DISTINCT`` and ``GROUP BY`` are conflicting. .. code-block:: sql SELECT DISTINCT a FROM foo GROUP BY a **Best practice** Remove ``DISTINCT`` or ``GROUP BY``. In our case, removing ``GROUP BY`` is better. .. code-block:: sql SELECT DISTINCT a FROM foo """ name = "ambiguous.distinct" aliases = ("L021",) groups: tuple[str, ...]
= ("all", "core", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Ambiguous use of DISTINCT in select statement with GROUP BY.""" segment = FunctionalContext(context).segment # We know it's a select_statement from the seeker crawler assert segment.all(sp.is_type("select_statement")) # Do we have a group by clause if segment.children(sp.is_type("groupby_clause")): # Do we have the "DISTINCT" keyword in the select clause distinct = ( segment.children(sp.is_type("select_clause")) .children(sp.is_type("select_clause_modifier")) .children(sp.is_type("keyword")) .select(sp.is_keyword("distinct")) ) if distinct: return LintResult(anchor=distinct[0]) return None sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM02.py000066400000000000000000000065701503426445100221250ustar00rootroot00000000000000"""Implementation of Rule AM02.""" from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AM02(BaseRule): """``UNION [DISTINCT|ALL]`` is preferred over just ``UNION``. .. note:: This rule is only enabled for dialects that support ``UNION`` and ``UNION DISTINCT`` (``ansi``, ``bigquery``, ``clickhouse``, ``databricks``, ``db2``, ``hive``, ``mysql``, ``redshift``, ``snowflake``, and ``trino``). **Anti-pattern** In this example, ``UNION DISTINCT`` should be preferred over ``UNION``, because explicit is better than implicit. .. code-block:: sql SELECT a, b FROM table_1 UNION SELECT a, b FROM table_2 **Best practice** Specify ``DISTINCT`` or ``ALL`` after ``UNION`` (note that ``DISTINCT`` is the default behavior). .. code-block:: sql SELECT a, b FROM table_1 UNION DISTINCT SELECT a, b FROM table_2 """ name = "ambiguous.union" aliases = ("L033",) groups: tuple[str, ...] = ("all", "core", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"set_operator"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Look for UNION keyword not immediately followed by DISTINCT or ALL. Note that UNION DISTINCT is valid, rule only applies to bare UNION. The function does this by looking for a segment of type set_operator which has a UNION but no DISTINCT or ALL. Note only some dialects have concept of UNION DISTINCT, so rule is only applied to dialects that are known to support this syntax. 
""" if context.dialect.name not in [ "ansi", "bigquery", "clickhouse", "databricks", "db2", "hive", "mysql", "redshift", "snowflake", "trino", ]: return LintResult() assert context.segment.is_type("set_operator") if "union" in context.segment.raw and not ( "ALL" in context.segment.raw.upper() or "DISTINCT" in context.segment.raw.upper() ): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment.segments[0], [ KeywordSegment("union"), WhitespaceSegment(), KeywordSegment("distinct"), ], ) ], ) elif "UNION" in context.segment.raw.upper() and not ( "ALL" in context.segment.raw.upper() or "DISTINCT" in context.segment.raw.upper() ): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment.segments[0], [ KeywordSegment("UNION"), WhitespaceSegment(), KeywordSegment("DISTINCT"), ], ) ], ) return LintResult() sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM03.py000066400000000000000000000073741503426445100221310ustar00rootroot00000000000000"""Implementation of Rule AM03.""" from typing import NamedTuple, Optional from sqlfluff.core.parser import BaseSegment, KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class OrderByColumnInfo(NamedTuple): """For AM03, segment that ends an ORDER BY column and any order provided.""" column_reference: BaseSegment order: Optional[str] # One of 'ASC'/'DESC'/None class Rule_AM03(BaseRule): """Ambiguous ordering directions for columns in order by clause. **Anti-pattern** .. code-block:: sql SELECT a, b FROM foo ORDER BY a, b DESC **Best practice** If any columns in the ``ORDER BY`` clause specify ``ASC`` or ``DESC``, they should all do so. .. code-block:: sql SELECT a, b FROM foo ORDER BY a ASC, b DESC """ name = "ambiguous.order_by" aliases = ("L037",) groups: tuple[str, ...] = ("all", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"}) is_fix_compatible = True @staticmethod def _get_orderby_info(segment: BaseSegment) -> list[OrderByColumnInfo]: assert segment.is_type("orderby_clause") result = [] column_reference = None ordering_reference = None for child_segment in segment.segments: if child_segment.is_type("column_reference"): column_reference = child_segment elif child_segment.is_type("keyword") and child_segment.raw_upper in ( "ASC", "DESC", ): ordering_reference = child_segment.raw_upper if column_reference and child_segment.raw == ",": result.append( OrderByColumnInfo( column_reference=column_reference, order=ordering_reference ) ) # Reset findings column_reference = None ordering_reference = None # Special handling for last column if column_reference: result.append( OrderByColumnInfo( column_reference=column_reference, order=ordering_reference ) ) return result def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Ambiguous ordering directions for columns in order by clause. This rule checks if some ORDER BY columns explicitly specify ASC or DESC and some don't. """ # We only trigger on orderby_clause lint_fixes = [] orderby_spec = self._get_orderby_info(context.segment) order_types = {o.order for o in orderby_spec} # If ALL columns or NO columns explicitly specify ASC/DESC, all is # well. if None not in order_types or order_types == {None}: return None # There's a mix of explicit and default sort order. Make everything # explicit. 
for col_info in orderby_spec: if not col_info.order: # Since ASC is default in SQL, add in ASC for fix lint_fixes.append( LintFix.create_after( col_info.column_reference, [WhitespaceSegment(), KeywordSegment("ASC")], ) ) return [ LintResult( anchor=context.segment, fixes=lint_fixes, description=( "Ambiguous order by clause. Order by clauses should specify " "order direction for ALL columns or NO columns." ), ) ] sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM04.py000066400000000000000000000140311503426445100221160ustar00rootroot00000000000000"""Implementation of Rule AM04.""" from typing import Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query _START_TYPES = ["select_statement", "set_expression", "with_compound_statement"] class RuleFailure(Exception): """Exception class for reporting lint failure inside deeply nested code.""" def __init__(self, anchor: BaseSegment): self.anchor: BaseSegment = anchor class Rule_AM04(BaseRule): """Query produces an unknown number of result columns. **Anti-pattern** Querying all columns using ``*`` produces a query result where the number or ordering of columns changes if the upstream table's schema changes. This should generally be avoided because it can cause slow performance, cause important schema changes to go undetected, or break production code. For example: * If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``, and ``c``, the actual columns returned will be wrong/different if columns are added to or deleted from the input table. * ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number of columns (and compatible types). * ``JOIN`` queries may break due to new column name conflicts, e.g. the query references a column ``c`` which initially existed in only one input table but a column of the same name is added to another table. * ``CREATE TABLE (<>) AS SELECT *`` .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT * FROM cte UNION SELECT a, b FROM t **Best practice** Somewhere along the "path" to the source data, specify columns explicitly. .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT a, b FROM cte UNION SELECT a, b FROM t """ name = "ambiguous.column_count" aliases = ("L044",) groups: tuple[str, ...] = ("all", "ambiguous") # Only evaluate the outermost query. crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) def _handle_alias(self, selectable, alias_info, query) -> None: select_info_target = next( query.crawl_sources(alias_info.from_expression_element, True) ) if isinstance(select_info_target, str): # It's an alias to an external table whose # number of columns could vary without our # knowledge. Thus, warn. self.logger.debug( f"Query target {select_info_target} is external. Generating warning." ) raise RuleFailure(selectable.selectable) else: # Handle nested SELECT. self._analyze_result_columns(select_info_target) def _analyze_result_columns(self, query: Query) -> None: """Given info on a list of SELECTs, determine whether to warn.""" # Recursively walk from the given query (select_info_list) to any # wildcard columns in the select targets. If every wildcard eventually # resolves to a query without wildcards, all is well. Otherwise, warn.
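# Hypothetical walk-through: in WITH cte AS (SELECT * FROM foo) SELECT * FROM cte, the outer "*" resolves to the CTE, whose own "*" targets the external table "foo". Since "foo" could change shape without our knowledge, a RuleFailure is raised and reported as a lint result.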
if not query.selectables: return None # pragma: no cover for selectable in query.selectables: self.logger.debug(f"Analyzing query: {selectable.selectable.raw}") for wildcard in selectable.get_wildcard_info(): if wildcard.tables: for wildcard_table in wildcard.tables: self.logger.debug( f"Wildcard: {wildcard.segment.raw} has target " f"{wildcard_table}" ) # Is it an alias? alias_info = selectable.find_alias(wildcard_table) if alias_info: # Found the alias matching the wildcard. Recurse, # analyzing the query associated with that alias. self._handle_alias(selectable, alias_info, query) else: # Not an alias. Is it a CTE? cte = query.lookup_cte(wildcard_table) if cte: # Wildcard refers to a CTE. Analyze it. self._analyze_result_columns(cte) else: # Not CTE, not table alias. Presumably an # external table. Warn. self.logger.debug( f"Query target {wildcard_table} is external. " "Generating warning." ) raise RuleFailure(selectable.selectable) else: # No table was specified with the wildcard. Assume we're # querying from a nested select in FROM. for o in query.crawl_sources(query.selectables[0].selectable, True): if isinstance(o, Query): self._analyze_result_columns(o) return None self.logger.debug( f'Query target "{query.selectables[0].selectable.raw}" has no ' "targets. Generating warning." ) raise RuleFailure(query.selectables[0].selectable) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Outermost query should produce known number of columns.""" query: Query = Query.from_segment(context.segment, context.dialect) try: # Begin analysis at the outer query. self._analyze_result_columns(query) return None except RuleFailure as e: return LintResult(anchor=e.anchor) sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM05.py000066400000000000000000000061241503426445100221230ustar00rootroot00000000000000"""Implementation of Rule AM05.""" from typing import Optional from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AM05(BaseRule): """Join clauses should be fully qualified. By default this rule is configured to enforce fully qualified ``INNER JOIN`` clauses, but not ``[LEFT/RIGHT/FULL] OUTER JOIN``. If you prefer a stricter lint then this is configurable. **Anti-pattern** A join is used without specifying the **kind** of join. .. code-block:: sql :force: SELECT foo FROM bar JOIN baz; **Best practice** Use ``INNER JOIN`` rather than ``JOIN``. .. code-block:: sql :force: SELECT foo FROM bar INNER JOIN baz; """ name = "ambiguous.join" aliases = ("L051",) groups: tuple[str, ...] = ("all", "ambiguous") config_keywords = ["fully_qualify_join_types"] crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Fully qualify JOINs.""" # Config type hints self.fully_qualify_join_types: str # We are only interested in JOIN clauses. assert context.segment.is_type("join_clause") join_clause_keywords = [ segment for segment in context.segment.segments if segment.type == "keyword" ] # Identify LEFT/RIGHT/OUTER JOIN and if the next keyword is JOIN.
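# Sketch of the two rewrites performed below (capitalisation follows the existing JOIN keyword): LEFT JOIN -> LEFT OUTER JOIN (when qualifying "outer" joins), and JOIN -> INNER JOIN (when qualifying "inner" joins).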
if ( self.fully_qualify_join_types in ["outer", "both"] and join_clause_keywords[0].raw_upper in ["RIGHT", "LEFT", "FULL"] and join_clause_keywords[1].raw_upper == "JOIN" ): # Define basic-level OUTER capitalization based on JOIN outer_kw = ("outer", "OUTER")[join_clause_keywords[1].raw == "JOIN"] # Insert OUTER after LEFT/RIGHT/FULL return LintResult( context.segment.segments[0], fixes=[ LintFix.create_after( context.segment.segments[0], [WhitespaceSegment(), KeywordSegment(outer_kw)], ) ], ) # Identify lone JOIN by looking at first child segment. if ( self.fully_qualify_join_types in ["inner", "both"] and join_clause_keywords[0].raw_upper == "JOIN" ): # Define basic-level INNER capitalization based on JOIN inner_kw = ("inner", "INNER")[join_clause_keywords[0].raw == "JOIN"] # Replace lone JOIN with INNER JOIN. return LintResult( context.segment.segments[0], fixes=[ LintFix.create_before( context.segment.segments[0], [KeywordSegment(inner_kw), WhitespaceSegment()], ) ], ) return None sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM06.py000066400000000000000000000134311503426445100221230ustar00rootroot00000000000000"""Implementation of Rule AM06.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_AM06(BaseRule): """Inconsistent column references in ``GROUP BY/ORDER BY`` clauses. .. note:: ``ORDER BY`` clauses from ``WINDOW`` clauses are ignored by this rule. **Anti-pattern** A mix of implicit and explicit column references are used in a ``GROUP BY`` clause. .. code-block:: sql :force: SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, 2; -- The same also applies to column -- references in ORDER BY clauses. SELECT foo, bar FROM fake_table ORDER BY 1, bar; **Best practice** Reference all ``GROUP BY``/``ORDER BY`` columns either by name or by position. .. code-block:: sql :force: -- GROUP BY: Explicit SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; -- ORDER BY: Explicit SELECT foo, bar FROM fake_table ORDER BY foo, bar; -- GROUP BY: Implicit SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; -- ORDER BY: Implicit SELECT foo, bar FROM fake_table ORDER BY 1, 2; """ name = "ambiguous.column_references" aliases = ("L054",) groups: tuple[str, ...] = ("all", "core", "ambiguous") config_keywords = ["group_by_and_order_by_style"] crawl_behaviour = SegmentSeekerCrawler( {"groupby_clause", "orderby_clause", "grouping_expression_list"} ) _ignore_types: list[str] = [ "withingroup_clause", "window_specification", "aggregate_order_by", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Inconsistent column references in GROUP BY/ORDER BY clauses.""" # Config type hints self.group_by_and_order_by_style: str # We only care about GROUP BY/ORDER BY clauses. assert context.segment.is_type( "groupby_clause", "orderby_clause", "grouping_expression_list" ) # Ignore Windowing clauses if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)): return LintResult(memory=context.memory) # Ignore Array expressions in BigQuery # BigQuery doesn't support implicit ordering inside an array expression, # these aren't going to be caught by ignoring any of the listed types # above. 
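# Hypothetical BigQuery example of what is ignored here: SELECT ARRAY(SELECT x FROM UNNEST(arr) AS x ORDER BY x) AS xs. Implicit (positional) ordering isn't supported inside an array expression, so such clauses can't mix the two reference styles.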
if context.dialect.name == "bigquery" and FunctionalContext( context ).parent_stack.any(sp.is_type("array_expression")): return LintResult(memory=context.memory) # Look at child segments and map column references to either the implicit or # explicit category. # N.B. segment names are used as the numeric literal type is 'raw', so best to # be specific with the name. column_reference_category_map = { "column_reference": "explicit", "expression": "explicit", "numeric_literal": "implicit", } column_reference_category_set = { column_reference_category_map[segment.get_type()] for segment in context.segment.segments if segment.is_type(*column_reference_category_map.keys()) } # If there are no column references then just return if not column_reference_category_set: # pragma: no cover return LintResult(memory=context.memory) if self.group_by_and_order_by_style == "consistent": # If consistent naming then raise lint error if either: if len(column_reference_category_set) > 1: # 1. Both implicit and explicit column references are found in the same # clause. return LintResult( anchor=context.segment, memory=context.memory, ) else: # 2. A clause is found to contain column name references that # contradict the precedent set in earlier clauses. current_group_by_order_by_convention = ( column_reference_category_set.pop() ) prior_group_by_order_by_convention = context.memory.get( "prior_group_by_order_by_convention" ) if prior_group_by_order_by_convention and ( prior_group_by_order_by_convention != current_group_by_order_by_convention ): return LintResult( anchor=context.segment, memory=context.memory, ) context.memory["prior_group_by_order_by_convention"] = ( current_group_by_order_by_convention ) else: # If explicit or implicit naming then raise lint error # if the opposite reference type is detected. if any( category != self.group_by_and_order_by_style for category in column_reference_category_set ): return LintResult( anchor=context.segment, memory=context.memory, ) # Return memory for later clauses. return LintResult(memory=context.memory) sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM07.py000066400000000000000000000206411503426445100221250ustar00rootroot00000000000000"""Implementation of Rule AM07.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query, Selectable, WildcardInfo class Rule_AM07(BaseRule): """Queries within set query produce different numbers of columns. **Anti-pattern** When writing set expressions, all queries must return the same number of columns. .. code-block:: sql WITH cte AS ( SELECT a, b FROM foo ) SELECT * FROM cte UNION SELECT c, d, e FROM t **Best practice** Always specify columns when writing set queries and ensure that they all seleect same number of columns .. code-block:: sql WITH cte AS ( SELECT a, b FROM foo ) SELECT a, b FROM cte UNION SELECT c, d FROM t """ name = "ambiguous.set_columns" aliases = ("L068",) groups: tuple[str, ...] = ("all", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"set_expression"}, provide_raw_stack=True) def __resolve_wild_query( self, query: Query, ) -> tuple[int, bool]: """Attempt to resolve a full query which may contain wildcards. NOTE: This requires a ``Query`` as input rather than just a ``Selectable`` and will delegate to ``__resolve_selectable`` once any Selectables have been identified. 
This method is *not* called on the initial set expression as that is evaluated as a series of Selectables. This method is only called on any subqueries (which may themselves be SELECT, WITH or set expressions) found during the resolution of any wildcards. """ self.logger.debug("Resolving query of type %s", query.query_type) for s in query.selectables: self.logger.debug(" ...with selectable %r", s.selectable.raw) # if one of the source queries for a query within the set is a # set expression, just use the first query. If that first query isn't # reflective of the others, that will be caught when that segment # is processed. We'll know if we're in a set based on whether there # is more than one selectable. i.e. Just take the first selectable. return self.__resolve_selectable(query.selectables[0], query) def __resolve_selectable_wildcard( self, wildcard: WildcardInfo, selectable: Selectable, root_query: Query ) -> tuple[int, bool]: """Attempt to resolve a single wildcard (*) within a Selectable. NOTE: This means resolving the number of columns implied by a single *. This method would be run multiple times if there are multiple wildcards in a single selectable. """ resolved = True # If there is no table specified, it is likely a subquery. # Handle that first. if not wildcard.tables: # Crawl the Query looking for the subquery, probably in the FROM. for o in root_query.crawl_sources(selectable.selectable): if isinstance(o, Query): return self.__resolve_wild_query(o) # We should find one. This is not an expected path to be in. return 0, False # pragma: no cover # There might be multiple tables referenced in some wildcard cases. num_cols = 0 for wildcard_table in wildcard.tables: cte_name = wildcard_table # Get the AliasInfo for the table referenced in the wildcard # expression. alias_info = selectable.find_alias(wildcard_table) # attempt to resolve alias or table name to a cte if alias_info: # Crawl inside the FROM expression looking for something to # resolve to. select_info_target = next( root_query.crawl_sources(alias_info.from_expression_element) ) if isinstance(select_info_target, str): cte_name = select_info_target else: _cols, _resolved = self.__resolve_wild_query(select_info_target) num_cols += _cols resolved = resolved and _resolved continue cte = root_query.lookup_cte(cte_name) if cte: _cols, _resolved = self.__resolve_wild_query(cte) num_cols += _cols resolved = resolved and _resolved else: # Unable to resolve resolved = False return num_cols, resolved def __resolve_selectable( self, selectable: Selectable, root_query: Query ) -> tuple[int, bool]: """Resolve the number of columns in a single Selectable. The selectable may or may not have wildcard (*) expressions. If it does, we attempt to resolve them. """ self.logger.debug("Resolving selectable: %r", selectable.selectable.raw) assert selectable.select_info wildcard_info = selectable.get_wildcard_info() # Start with the number of non-wild columns. num_cols = len(selectable.select_info.select_targets) - len(wildcard_info) # If there's no wildcard, just count the columns and move on. if not wildcard_info: # if there is no wildcard in the query use the count of select targets self.logger.debug("Resolved N=%s: %r", num_cols, selectable.selectable.raw) return num_cols, True resolved = True # If the set query contains one or more wildcards, attempt to resolve it to a # list of select targets that can be counted.
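# Illustrative sketch: for a selectable like "SELECT a, b, t.*", num_cols starts at 2 and each resolved wildcard adds the column count of its source, so a three-column target for "t.*" would give a total of 5.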
for wildcard in wildcard_info: _cols, _resolved = self.__resolve_selectable_wildcard( wildcard, selectable, root_query ) resolved = resolved and _resolved # Add on the number of columns which the wildcard resolves to. num_cols += _cols self.logger.debug( "%s N=%s: %r", "Resolved" if resolved else "Unresolved", num_cols, selectable.selectable.raw, ) return num_cols, resolved def _get_select_target_counts(self, query: Query) -> tuple[set[int], bool]: """Given a set expression, get the number of select targets in each query. We keep track of the number of columns in each selectable using a ``set``. Ideally at the end there is only one item in the set, showing that all selectables have the same size. Importantly we can't guarantee that we can always resolve any wildcards (*), so we also return a flag to indicate whether any present have been fully resolved. """ select_target_counts = set() resolved_wildcard = True for selectable in query.selectables: cnt, res = self.__resolve_selectable(selectable, query) if not res: resolved_wildcard = False select_target_counts.add(cnt) return select_target_counts, resolved_wildcard def _eval(self, context: RuleContext) -> Optional[LintResult]: """All queries in set expression should return the same number of columns.""" assert context.segment.is_type("set_expression") root = context.segment # Is the parent of the set expression a WITH expression? # NOTE: Backward slice to work outward. for parent in context.parent_stack[::-1]: if parent.is_type("with_compound_statement"): # If it is, work from there instead. root = parent break query: Query = Query.from_segment(root, dialect=context.dialect) set_segment_select_sizes, resolve_wildcard = self._get_select_target_counts( query ) self.logger.info( "Resolved select sizes (resolved wildcard: %s) : %s", resolve_wildcard, set_segment_select_sizes, ) # if queries had different select target counts # and all wildcards have been resolved; fail if len(set_segment_select_sizes) > 1 and resolve_wildcard: return LintResult(anchor=context.segment) return LintResult() sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/AM08.py000066400000000000000000000101011503426445100221140ustar00rootroot00000000000000"""Implementation of Rule AM08.""" from typing import Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AM08(BaseRule): """Implicit cross join detected. **Anti-pattern** Cross joins are valid, but rare in the wild - and more often created by mistake than on purpose. This rule catches situations where a cross join has been specified, but not explicitly, and so a mistaken cross join is highly likely. .. code-block:: sql :force: SELECT foo FROM bar JOIN baz; **Best practice** Use CROSS JOIN. .. code-block:: sql :force: SELECT foo FROM bar CROSS JOIN baz; """ name = "ambiguous.join_condition" aliases = () groups: tuple[str, ...] = ("all", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find joins without ON clause. Fix them into CROSS JOIN (if dialect allows it). """ if not self._cross_join_supported(context): # pragma: no cover # At the time of implementation, all dialects supports CROSS JOIN syntax. # Therefore, no cover is used on if statement. return None # We are only interested in JOIN clauses.
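# Sketch of the anti-pattern targeted here (from the docstring above): SELECT foo FROM bar JOIN baz; is flagged (no join condition), while SELECT foo FROM bar CROSS JOIN baz; is not (explicit CROSS JOIN).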
join_clause = context.segment assert join_clause.is_type("join_clause") join_clause_keywords = [ seg for seg in join_clause.segments if seg.type == "keyword" ] if any( kw.raw_upper in ("CROSS", "POSITIONAL", "USING", "NATURAL") for kw in join_clause_keywords ): # If explicit CROSS JOIN is used, disregard lack of condition # If explicit POSITIONAL JOIN is used, disregard lack of condition # If explicit NATURAL JOIN is used, disregard lack of condition # If explicit JOIN USING is used, disregard lack of condition return None this_join_condition = join_clause.get_child("join_on_condition") if this_join_condition: # Join condition is present, no error reported. return None select_stmt = self._get_select_stmt(context.parent_stack) if select_stmt is None: # Do not emit this warning for JOIN in UPDATE or DELETE return None maybe_where_clause = select_stmt.get_child("where_clause") if maybe_where_clause: # See CV12 return None join_keywords = [kw for kw in join_clause_keywords if kw.raw_upper == "JOIN"] if len(join_keywords) != 1: # This can happen in T-SQL CROSS APPLY / OUTER APPLY return None # Skip if join is part of flattening logic maybe_from_expression_element = join_clause.get_child("from_expression_element") if maybe_from_expression_element: for ( function_name_identifier ) in maybe_from_expression_element.recursive_crawl( "function_name_identifier" ): if function_name_identifier.raw_upper == "UNNEST": return None return LintResult(join_clause) @staticmethod def _cross_join_supported(context: RuleContext) -> bool: return ( False or "CROSS" in context.dialect.sets("reserved_keywords") or "CROSS" in context.dialect.sets("unreserved_keywords") ) @staticmethod def _get_select_stmt(stack: tuple[BaseSegment, ...]) -> Optional[BaseSegment]: for seg in reversed(stack): if seg.is_type("select_statement"): return seg elif seg.is_type("update_statement", "delete_statement"): return None # According to grammar, this is not reachable. # Do not emit any error instead of crashing. return None # pragma: no cover sqlfluff-3.4.2/src/sqlfluff/rules/ambiguous/__init__.py000066400000000000000000000031221503426445100232130ustar00rootroot00000000000000"""The ambiguous plugin bundle. NOTE: Yes the title of this bundle is ...ambiguous. 😁 """ from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "fully_qualify_join_types": { "validation": ["inner", "outer", "both"], "definition": ("Which types of JOIN clauses should be fully qualified?"), }, "group_by_and_order_by_style": { "validation": ["consistent", "implicit", "explicit"], "definition": ( "The expectation for using explicit column name references " "or implicit positional references." ), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
""" from sqlfluff.rules.ambiguous.AM01 import Rule_AM01 from sqlfluff.rules.ambiguous.AM02 import Rule_AM02 from sqlfluff.rules.ambiguous.AM03 import Rule_AM03 from sqlfluff.rules.ambiguous.AM04 import Rule_AM04 from sqlfluff.rules.ambiguous.AM05 import Rule_AM05 from sqlfluff.rules.ambiguous.AM06 import Rule_AM06 from sqlfluff.rules.ambiguous.AM07 import Rule_AM07 from sqlfluff.rules.ambiguous.AM08 import Rule_AM08 return [ Rule_AM01, Rule_AM02, Rule_AM03, Rule_AM04, Rule_AM05, Rule_AM06, Rule_AM07, Rule_AM08, ] sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/000077500000000000000000000000001503426445100221155ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/CP01.py000066400000000000000000000300061503426445100231310ustar00rootroot00000000000000"""Implementation of Rule CP01.""" from typing import Optional import regex from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.config_info import get_config_info from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler def is_capitalizable(character: str) -> bool: """Does the character have differing lower and upper-case versions?""" if character.lower() == character.upper(): return False return True class Rule_CP01(BaseRule): """Inconsistent capitalisation of keywords. **Anti-pattern** In this example, ``select`` is in lower-case whereas ``FROM`` is in upper-case. .. code-block:: sql select a FROM foo **Best practice** Make all keywords either in upper-case or in lower-case. .. code-block:: sql SELECT a FROM foo -- Also good select a from foo """ name = "capitalisation.keywords" aliases = ("L010",) groups: tuple[str, ...] = ("all", "core", "capitalisation") is_fix_compatible = True lint_phase = "post" # Binary operators behave like keywords too. crawl_behaviour = SegmentSeekerCrawler({"keyword", "binary_operator", "date_part"}) # Skip literals (which are also keywords) as they have their own rule (CP04) _exclude_types: tuple[str, ...] = ("literal",) _exclude_parent_types: tuple[str, ...] = ( "data_type", "datetime_type_identifier", "primitive_type", ) config_keywords = ["capitalisation_policy", "ignore_words", "ignore_words_regex"] # Human readable target elem for description _description_elem = "Keywords" def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Inconsistent capitalisation of keywords. We use the `memory` feature here to keep track of cases known to be INconsistent with what we've seen so far as well as the top choice for what the possible case is. """ # NOTE: Given the dialect structure we can assume the targets have a parent. parent: BaseSegment = context.parent_stack[-1] if context.segment.is_type(*self._exclude_types) or parent.is_type( *self._exclude_parent_types ): return [LintResult(memory=context.memory)] # Used by CP03 (that inherits from this rule) # If it's a qualified function_name (i.e with more than one part to # function_name). Then it is likely an existing user defined function (UDF) # which are case sensitive so ignore for this. if parent.get_type() == "function_name" and len(parent.segments) != 1: return [LintResult(memory=context.memory)] return [self._handle_segment(context.segment, context)] def _handle_segment(self, segment: BaseSegment, context: RuleContext) -> LintResult: # NOTE: this mutates the memory field. 
memory = context.memory self.logger.info("_handle_segment: %s, %s", segment, segment.get_type()) # Config type hints self.ignore_words_regex: str # Get the capitalisation policy configuration. try: cap_policy = self.cap_policy cap_policy_opts = self.cap_policy_opts ignore_words_list = self.ignore_words_list ignore_templated_areas = self.ignore_templated_areas except AttributeError: # First-time only, read the settings from configuration. This is # very slow. ( cap_policy, cap_policy_opts, ignore_words_list, ignore_templated_areas, ) = self._init_capitalisation_policy(context) # Skip if in ignore list if ignore_words_list and segment.raw.lower() in ignore_words_list: return LintResult(memory=memory) # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, segment.raw ): return LintResult(memory=memory) # Skip if templated. If the user wants to ignore templated areas, we don't # even want to look at them to avoid affecting flagging non-template areas # that are inconsistent with the template areas. if segment.is_templated and ignore_templated_areas: return LintResult(memory=memory) # Skip if empty. if not segment.raw: return LintResult(memory=memory) refuted_cases = memory.get("refuted_cases", set()) # Which cases are definitely inconsistent with the segment? first_letter_is_lowercase = False for character in segment.raw: if is_capitalizable(character): first_letter_is_lowercase = character != character.upper() break # We refute inference of camel, pascal, and snake case. # snake, if not explicitly set, can be destructive to # variable names, adding underscores. # camel and Pascal could allow poorly linted code in, # so must be explicitly chosen. refuted_cases.update(["camel", "pascal", "snake"]) if first_letter_is_lowercase: refuted_cases.update(["upper", "capitalise"]) if segment.raw != segment.raw.lower(): refuted_cases.update(["lower"]) else: refuted_cases.update(["lower"]) if segment.raw != segment.raw.upper(): refuted_cases.update(["upper"]) if segment.raw != segment.raw.capitalize(): refuted_cases.update(["capitalise"]) # Update the memory memory["refuted_cases"] = refuted_cases self.logger.debug( f"Refuted cases after segment '{segment.raw}': {refuted_cases}" ) # Skip if no inconsistencies, otherwise compute a concrete policy # to convert to. 
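# Hypothetical walk-through of the "consistent" branch below: after an upper-case "SELECT", only "upper" remains possible and is stored in memory; a later lower-case "from" refutes "upper" too, so the stored "upper" becomes the concrete policy and "from" is fixed to "FROM".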
if cap_policy == "consistent": possible_cases = [c for c in cap_policy_opts if c not in refuted_cases] self.logger.debug( f"Possible cases after segment '{segment.raw}': {possible_cases}" ) if possible_cases: # Save the latest possible case and skip memory["latest_possible_case"] = possible_cases[0] self.logger.debug( f"Consistent capitalization, returning with memory: {memory}" ) return LintResult(memory=memory) else: concrete_policy = memory.get("latest_possible_case", "upper") self.logger.debug( f"Getting concrete policy '{concrete_policy}' from memory" ) else: if cap_policy not in refuted_cases: # Skip self.logger.debug( f"Consistent capitalization {cap_policy}, returning with " f"memory: {memory}" ) return LintResult(memory=memory) else: concrete_policy = cap_policy self.logger.debug( f"Setting concrete policy '{concrete_policy}' from cap_policy" ) # Set the fixed to same as initial in case any of below don't match fixed_raw = segment.raw # We need to change the segment to match the concrete policy if concrete_policy in ["upper", "lower", "capitalise"]: if concrete_policy == "upper": fixed_raw = fixed_raw.upper() elif concrete_policy == "lower": fixed_raw = fixed_raw.lower() elif concrete_policy == "capitalise": fixed_raw = fixed_raw.capitalize() elif concrete_policy == "pascal": # For Pascal we set the first letter in each "word" to uppercase # We do not lowercase other letters to allow for PascalCase style # words. This does mean we allow all UPPERCASE and also don't # correct Pascalcase to PascalCase, but there's only so much we can # do. We do correct underscore_words to Underscore_Words. fixed_raw = regex.sub( "([^a-zA-Z0-9]+|^)([a-zA-Z0-9])([a-zA-Z0-9]*)", lambda match: match.group(1) + match.group(2).upper() + match.group(3), segment.raw, ) elif concrete_policy == "camel": # Similar to Pascal, for Camel, we can only do a best efforts approach. # This presents as us never changing case mid-string. fixed_raw = regex.sub( "([^a-zA-Z0-9]+|^)([a-zA-Z0-9])([a-zA-Z0-9]*)", lambda match: match.group(1) + match.group(2).lower() + match.group(3), segment.raw, ) elif concrete_policy == "snake": if segment.raw.isupper(): fixed_raw = segment.raw.lower() else: fixed_raw = regex.sub( r"(?<=[a-z0-9])([A-Z])|(?<=[A-Za-z])([0-9])|(?<=[0-9])([A-Za-z])", lambda match: "_" + match.group(), segment.raw, ).lower() if fixed_raw == segment.raw: # No need to fix self.logger.debug( f"Capitalisation of segment '{segment.raw}' already OK with " f"policy '{concrete_policy}', returning with memory {memory}" ) return LintResult(memory=memory) else: # build description based on the policy in use consistency = "consistently " if cap_policy == "consistent" else "" if concrete_policy in ["upper", "lower", "pascal", "camel", "snake"]: policy = f"{concrete_policy} case." elif concrete_policy == "capitalise": policy = "capitalised." # Return the fixed segment self.logger.debug( f"INCONSISTENT Capitalisation of segment '{segment.raw}', " f"fixing to '{fixed_raw}' and returning with memory {memory}" ) return LintResult( anchor=segment, fixes=[self._get_fix(segment, fixed_raw)], memory=memory, description=f"{self._description_elem} must be {consistency}{policy}", ) def _get_fix(self, segment: BaseSegment, fixed_raw: str) -> LintFix: """Given a segment found to have a fix, returns a LintFix for it. May be overridden by subclasses, which is useful when the parse tree structure varies from this simple base case. 
""" return LintFix.replace(segment, [segment.edit(fixed_raw)]) def _init_capitalisation_policy(self, context: RuleContext): """Called first time rule is evaluated to fetch & cache the policy.""" cap_policy_name = next( k for k in self.config_keywords if k.endswith("capitalisation_policy") ) self.cap_policy = getattr(self, cap_policy_name) valid_options = get_config_info()[cap_policy_name]["validation"] or [] self.cap_policy_opts = [opt for opt in valid_options if opt != "consistent"] # Use str() as CP04 uses bools which might otherwise be read as bool ignore_words_config = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] self.ignore_templated_areas = context.config.get("ignore_templated_areas") self.logger.debug( f"Selected '{cap_policy_name}': '{self.cap_policy}' from options " f"{self.cap_policy_opts}" ) cap_policy = self.cap_policy cap_policy_opts = self.cap_policy_opts ignore_words_list = self.ignore_words_list ignore_templated_areas = self.ignore_templated_areas return cap_policy, cap_policy_opts, ignore_words_list, ignore_templated_areas sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/CP02.py000066400000000000000000000071611503426445100231400ustar00rootroot00000000000000"""Implementation of Rule CP02.""" from typing import Optional from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_CP02(Rule_CP01): """Inconsistent capitalisation of unquoted identifiers. This rule applies to all unquoted identifiers, whether references or aliases, and whether they refer to columns or other objects (such as tables or schemas). .. note:: In **most** dialects, unquoted identifiers are treated as case-insensitive and so the fixes proposed by this rule do not change the interpretation of the query. **HOWEVER**, some databases (notably :ref:`bigquery_dialect_ref`, :ref:`trino_dialect_ref` and :ref:`clickhouse_dialect_ref`) do take the casing of *unquoted* identifiers into account when determining the casing of the column heading in the *result*. As this feature is only present in a few dialects, and not widely understood by users, we regard it as *an antipattern*. It is more widely understood that if the case of an identifier *matters*, then it should be quoted. If you, or your organisation, do wish to rely on this feature, we recommend that you disabled this rule (see :ref:`ruleselection`). **Anti-pattern** In this example, unquoted identifier ``a`` is in lower-case but ``B`` is in upper-case. .. code-block:: sql select a, B from foo In this more complicated example, there are a mix of capitalisations in both reference and aliases of columns and tables. That inconsistency is acceptable when those identifiers are quoted, but not when unquoted. .. code-block:: sql select col_1 + Col_2 as COL_3, "COL_4" as Col_5 from Foo as BAR **Best practice** Ensure all unquoted identifiers are either in upper-case or in lower-case. .. code-block:: sql select a, b from foo; -- ...also good... 
select A, B from foo; -- ...or for comparison with our more complex example, this too: select col_1 + col_2 as col_3, "COL_4" as col_5 from foo as bar """ name = "capitalisation.identifiers" aliases = ("L014",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( {"naked_identifier", "properties_naked_identifier"} ) config_keywords = [ "extended_capitalisation_policy", "unquoted_identifiers_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Unquoted identifiers" def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: # Return None if identifier is case-sensitive property to enable Change # Data Feed # https://docs.delta.io/2.0.0/delta-change-data-feed.html#enable-change-data-feed if ( context.dialect.name in ["databricks", "sparksql"] and context.parent_stack and context.parent_stack[-1].type == "property_name_identifier" and context.segment.raw == "enableChangeDataFeed" ): return None if identifiers_policy_applicable( self.unquoted_identifiers_policy, # type: ignore context.parent_stack, ): return super()._eval(context=context) else: return [LintResult(memory=context.memory)] sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/CP03.py000066400000000000000000000023671503426445100231440ustar00rootroot00000000000000"""Implementation of Rule CP03.""" from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.rules import LintFix from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP03(Rule_CP01): """Inconsistent capitalisation of function names. **Anti-pattern** In this example, the two ``SUM`` functions don't have the same capitalisation. .. code-block:: sql SELECT sum(a) AS aa, SUM(b) AS bb FROM foo **Best practice** Make the case consistent. .. code-block:: sql SELECT sum(a) AS aa, sum(b) AS bb FROM foo """ name = "capitalisation.functions" aliases = ("L030",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( {"function_name_identifier", "bare_function"} ) _exclude_types = () _exclude_parent_types = () config_keywords = [ "extended_capitalisation_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Function names" def _get_fix(self, segment: BaseSegment, fixed_raw: str) -> LintFix: return super()._get_fix(segment, fixed_raw) sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/CP04.py000066400000000000000000000022041503426445100231330ustar00rootroot00000000000000"""Implementation of Rule CP04.""" from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP04(Rule_CP01): """Inconsistent capitalisation of boolean/null literal. **Anti-pattern** In this example, ``null`` and ``false`` are in lower-case whereas ``TRUE`` is in upper-case. .. code-block:: sql select a, null, TRUE, false from foo **Best practice** Ensure all ``null``/``true``/``false`` literals are consistently upper or lower case ..
code-block:: sql select a, NULL, TRUE, FALSE from foo -- Also good select a, null, true, false from foo """ name = "capitalisation.literals" aliases = ("L040",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler({"null_literal", "boolean_literal"}) _exclude_types = () _exclude_parent_types = () _description_elem = "Boolean/null literals" sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/CP05.py000066400000000000000000000060711503426445100231420ustar00rootroot00000000000000"""Implementation of Rule CP05.""" from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules.base import LintResult from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP05(Rule_CP01): """Inconsistent capitalisation of datatypes. **Anti-pattern** In this example, ``int`` and ``unsigned`` are in lower-case whereas ``VARCHAR`` is in upper-case. .. code-block:: sql CREATE TABLE t ( a int unsigned, b VARCHAR(15) ); **Best practice** Ensure all datatypes are consistently upper or lower case .. code-block:: sql CREATE TABLE t ( a INT UNSIGNED, b VARCHAR(15) ); """ name = "capitalisation.types" aliases = ("L063",) groups = ("all", "core", "capitalisation") is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( { "data_type_identifier", "primitive_type", "datetime_type_identifier", "data_type", } ) # NOTE: CP05 overrides `_eval` and then only calls # `_handle_segment` from CP01. Setting `_exclude_types` # and `_exclude_parent_types` therefore has no effect. # They are set here to empty tuples to avoid confusion. _exclude_types = () _exclude_parent_types = () config_keywords = [ "extended_capitalisation_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Datatypes" def _eval(self, context: RuleContext) -> list[LintResult]: """Inconsistent capitalisation of datatypes. We use the `memory` feature here to keep track of cases known to be inconsistent with what we've seen so far as well as the top choice for what the possible case is. """ results = [] # For some of these segments we want to run the code on if context.segment.is_type( "primitive_type", "datetime_type_identifier", "data_type" ): for seg in context.segment.segments: # We don't want to edit symbols, quoted things or identifiers # if they appear. if seg.is_type( "symbol", "identifier", "quoted_literal" ) or not seg.is_type("raw"): continue res = self._handle_segment(seg, context) if res: results.append(res) # NOTE: Given the dialect structure we can assume the targets have a parent. parent: BaseSegment = context.parent_stack[-1] # Don't process it if it's likely to have been processed by the parent. 
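# --- Illustrative note (a hedged sketch, not from the original source):
# for a column defined as ``a VARCHAR(15)`` the crawler can visit both the
# enclosing ``data_type`` segment and its inner identifier. The loop above
# already handled the identifier while walking the parent's children, so
# the guard below skips it to avoid flagging the same token twice.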
if context.segment.is_type("data_type_identifier") and not parent.is_type( "primitive_type", "datetime_type_identifier", "data_type" ): results.append( self._handle_segment(context.segment, context) ) # pragma: no cover return results sqlfluff-3.4.2/src/sqlfluff/rules/capitalisation/__init__.py000066400000000000000000000037231503426445100242330ustar00rootroot00000000000000"""The capitalisation plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "capitalisation_policy": { "validation": ["consistent", "upper", "lower", "capitalise"], "definition": "The capitalisation policy to enforce.", }, "extended_capitalisation_policy": { "validation": [ "consistent", "upper", "lower", "pascal", "capitalise", "snake", "camel", ], "definition": ( "The capitalisation policy to enforce, extended with PascalCase, " "snake_case, and camelCase. " "This is separate from ``capitalisation_policy`` as it should not be " "applied to keywords." "Camel, Pascal, and Snake will never be inferred when the policy is " "set to consistent. This is because snake can cause destructive " "changes to the identifier, and unlinted code is too easily mistaken " "for camel and pascal. If, when set to consistent, no consistent " "case is found, it will default to upper." ), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 from sqlfluff.rules.capitalisation.CP02 import Rule_CP02 from sqlfluff.rules.capitalisation.CP03 import Rule_CP03 from sqlfluff.rules.capitalisation.CP04 import Rule_CP04 from sqlfluff.rules.capitalisation.CP05 import Rule_CP05 return [Rule_CP01, Rule_CP02, Rule_CP03, Rule_CP04, Rule_CP05] sqlfluff-3.4.2/src/sqlfluff/rules/convention/000077500000000000000000000000001503426445100212735ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV01.py000066400000000000000000000061751503426445100223270ustar00rootroot00000000000000"""Implementation of Rule CV01.""" from typing import Optional from sqlfluff.core.parser import SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV01(BaseRule): """Consistent usage of ``!=`` or ``<>`` for "not equal to" operator. **Anti-pattern** .. code-block:: sql SELECT * FROM X WHERE 1 <> 2 AND 3 != 4; **Best practice** Ensure all "not equal to" comparisons are consistent, not mixing ``!=`` and ``<>``. .. 
code-block:: sql SELECT * FROM X WHERE 1 != 2 AND 3 != 4; """ name = "convention.not_equal" aliases = ("L061",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"}) config_keywords = ["preferred_not_equal_style"] is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Enforce consistent "not equal to" style.""" self.preferred_not_equal_style: str # Get the comparison operator children raw_comparison_operators = ( FunctionalContext(context) .segment.children() .select(select_if=sp.is_type("raw_comparison_operator")) ) # Only check ``<>`` or ``!=`` operators raw_operator_list = [r.raw for r in raw_comparison_operators] if raw_operator_list not in [["<", ">"], ["!", "="]]: return None memory = context.memory # If style is consistent, add the style of the first occurrence to memory if self.preferred_not_equal_style == "consistent": preferred_not_equal_style = context.memory.get("preferred_not_equal_style") if not preferred_not_equal_style: preferred_not_equal_style = ( "ansi" if raw_operator_list == ["<", ">"] else "c_style" ) memory["preferred_not_equal_style"] = preferred_not_equal_style else: preferred_not_equal_style = self.preferred_not_equal_style if preferred_not_equal_style == "c_style": replacement = ["!", "="] elif preferred_not_equal_style == "ansi": replacement = ["<", ">"] # This operator already matches the existing style if raw_operator_list == replacement: return LintResult(memory=memory) # Provide a fix and replace ``<>`` with ``!=`` # As each symbol is a separate symbol this is done in two steps: # Depending on style type, flip any inconsistent operators # 1. Flip < and ! # 2. Flip > and = fixes = [ LintFix.replace( raw_comparison_operators[0], [SymbolSegment(raw=replacement[0], type="raw_comparison_operator")], ), LintFix.replace( raw_comparison_operators[1], [SymbolSegment(raw=replacement[1], type="raw_comparison_operator")], ), ] return LintResult(anchor=context.segment, fixes=fixes, memory=memory) sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV02.py000066400000000000000000000040221503426445100223150ustar00rootroot00000000000000"""Implementation of Rule CV02.""" from typing import Optional from sqlfluff.core.parser import WordSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV02(BaseRule): """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``. **Anti-pattern** ``IFNULL`` or ``NVL`` are used to fill ``NULL`` values. .. code-block:: sql SELECT ifnull(foo, 0) AS bar, FROM baz; SELECT nvl(foo, 0) AS bar, FROM baz; **Best practice** Use ``COALESCE`` instead. ``COALESCE`` is universally supported, whereas Redshift doesn't support ``IFNULL`` and BigQuery doesn't support ``NVL``. Additionally, ``COALESCE`` is more flexible and accepts an arbitrary number of arguments. .. code-block:: sql SELECT coalesce(foo, 0) AS bar, FROM baz; """ name = "convention.coalesce" aliases = ("L060",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"function_name_identifier"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``.""" # We only care about function names, and they should be the # only things we get assert context.segment.is_type("function_name_identifier") # Only care if the function is ``IFNULL`` or ``NVL``. 
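# --- Illustrative example (hedged, not from the original source): only
# these two exact spellings trigger the rewrite below, e.g.
#     SELECT ifnull(foo, 0) AS bar FROM baz
# becomes
#     SELECT COALESCE(foo, 0) AS bar FROM baz
# while other functions (e.g. NVL2) are left untouched.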
if context.segment.raw_upper not in {"IFNULL", "NVL"}: return None # Create fix to replace ``IFNULL`` or ``NVL`` with ``COALESCE``. fix = LintFix.replace( context.segment, [ WordSegment( raw="COALESCE", type="function_name_identifier", ) ], ) return LintResult( anchor=context.segment, fixes=[fix], description=f"Use 'COALESCE' instead of '{context.segment.raw_upper}'.", ) sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV03.py000066400000000000000000000105231503426445100223210ustar00rootroot00000000000000"""Implementation of Rule CV03.""" from typing import Optional from sqlfluff.core.parser import BaseSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV03(BaseRule): """Trailing commas within select clause. .. note:: For many database backends this is allowed. For some users this may be something they wish to enforce (in line with Python best practice). Many database backends regard this as a syntax error, and as such the `SQLFluff` default is to forbid trailing commas in the select clause. **Anti-pattern** .. code-block:: sql SELECT a, b, FROM foo **Best practice** .. code-block:: sql SELECT a, b FROM foo """ name = "convention.select_trailing_comma" aliases = ("L038",) groups = ("all", "core", "convention") config_keywords = ["select_clause_trailing_comma"] crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Trailing commas within select clause.""" # Config type hints self.select_clause_trailing_comma: str segment = FunctionalContext(context).segment children = segment.children() # Iterate content to find last element last_content: BaseSegment = children.last(sp.is_code())[0] # What mode are we in? if self.select_clause_trailing_comma == "forbid": # Is it a comma? if last_content.is_type("comma"): # The last content is a comma. Before we try and remove it, we # should check that it's safe. One edge case is that it's a trailing # comma in a loop, but that if we try and remove it, we also break # the previous examples. We should check that this comma doesn't # share a source position with any other commas in the same select. # If there isn't a source position, then it's safe to remove, it's # a recent addition. if not last_content.pos_marker: # pragma: no cover fixes = [LintFix.delete(last_content)] else: comma_pos = last_content.pos_marker.source_position() for seg in context.segment.segments: if seg.is_type("comma"): if not seg.pos_marker: # pragma: no cover continue elif seg.pos_marker.source_position() == comma_pos: if seg is not last_content: # Not safe to fix self.logger.info( "Preventing deletion of %s, because source " "position is the same as %s. Templated " "positions are %s and %s.", last_content, seg, last_content.pos_marker.templated_position(), seg.pos_marker.templated_position(), ) fixes = [] break else: # No matching commas found. It's safe. 
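# --- Hedged illustration (an assumption, not from the original code): in
# a templated loop such as
#     SELECT {% for col in cols %} {{ col }}, {% endfor %} FROM foo
# every rendered comma shares the same source position, so deleting the
# "trailing" one would also delete commas that earlier iterations still
# need. That is the unsafe case guarded against above.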
fixes = [LintFix.delete(last_content)] return LintResult( anchor=last_content, fixes=fixes, description="Trailing comma in select statement forbidden", ) elif self.select_clause_trailing_comma == "require": if not last_content.is_type("comma"): new_comma = SymbolSegment(",", type="comma") return LintResult( anchor=last_content, fixes=[LintFix.replace(last_content, [last_content, new_comma])], description="Trailing comma in select statement required", ) return None sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV04.py000066400000000000000000000122241503426445100223220ustar00rootroot00000000000000"""Implementation of Rule CV04.""" from typing import Optional from sqlfluff.core.parser import LiteralSegment, RawSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV04(BaseRule): """Use consistent syntax to express "count number of rows". Note: If both ``prefer_count_1`` and ``prefer_count_0`` are set to true then ``prefer_count_1`` has precedence. ``COUNT(*)``, ``COUNT(1)``, and even ``COUNT(0)`` are equivalent syntaxes in many SQL engines due to optimizers interpreting these instructions as "count number of rows in result". The ANSI-92_ spec mentions the ``COUNT(*)`` syntax specifically as having a special meaning: If COUNT(*) is specified, then the result is the cardinality of T. So by default, `SQLFluff` enforces the consistent use of ``COUNT(*)``. If the SQL engine you work with, or your team, prefers ``COUNT(1)`` or ``COUNT(0)`` over ``COUNT(*)``, you can configure this rule to consistently enforce your preference. .. _ANSI-92: http://msdn.microsoft.com/en-us/library/ms175997.aspx **Anti-pattern** .. code-block:: sql select count(1) from table_a **Best practice** Use ``count(*)`` unless specified otherwise by config ``prefer_count_1``, or ``prefer_count_0`` as preferred. .. code-block:: sql select count(*) from table_a """ name = "convention.count_rows" aliases = ("L047",) groups = ("all", "core", "convention") config_keywords = ["prefer_count_1", "prefer_count_0"] crawl_behaviour = SegmentSeekerCrawler({"function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes.""" # Config type hints self.prefer_count_0: bool self.prefer_count_1: bool new_segment: RawSegment # We already know we're in a function because of the crawl_behaviour. # This means it's very unlikely that there isn't a function_name here. 
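# --- Hedged behaviour sketch (not from the original source). With the
# default config (prefer_count_1 = False, prefer_count_0 = False) the
# preferred form is the star:
#     select count(1) from table_a   ->   select count(*) from table_a
# and with prefer_count_1 = True the rewrite runs the other way:
#     select count(*) from table_a   ->   select count(1) from table_a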
function_name = context.segment.get_child("function_name") if not function_name: # pragma: no cover return None if function_name.raw_upper == "COUNT": # Get bracketed content f_content = ( FunctionalContext(context) .segment.children(sp.is_type("function_contents")) .children(sp.is_type("bracketed")) .children( sp.and_( sp.not_(sp.is_meta()), sp.not_( sp.is_type( "start_bracket", "end_bracket", "whitespace", "newline" ) ), ) ) ) if len(f_content) != 1: # pragma: no cover return None preferred = "*" if self.prefer_count_1: preferred = "1" elif self.prefer_count_0: preferred = "0" if f_content[0].is_type("star") and ( self.prefer_count_1 or self.prefer_count_0 ): new_segment = LiteralSegment(raw=preferred, type="numeric_literal") return LintResult( anchor=context.segment, fixes=[ LintFix.replace( f_content[0], [new_segment], ), ], ) if f_content[0].is_type("expression"): expression_content = [ seg for seg in f_content[0].segments if not seg.is_meta ] if ( len(expression_content) == 1 and expression_content[0].is_type("literal") and expression_content[0].raw in ["0", "1"] and expression_content[0].raw != preferred ): if preferred == "*": new_segment = SymbolSegment(raw=preferred, type="star") else: new_segment = LiteralSegment( raw=preferred, type="numeric_literal" ) return LintResult( anchor=context.segment, fixes=[ LintFix.replace( expression_content[0], [ expression_content[0].edit( expression_content[0].raw.replace( expression_content[0].raw, preferred ) ), ], ), ], ) return None sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV05.py000066400000000000000000000076671503426445100223420ustar00rootroot00000000000000"""Implementation of Rule CV05.""" from typing import Optional, Union from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import Segments, sp from sqlfluff.utils.reflow import ReflowSequence CorrectionListType = list[Union[WhitespaceSegment, KeywordSegment]] class Rule_CV05(BaseRule): """Comparisons with NULL should use "IS" or "IS NOT". **Anti-pattern** In this example, the ``=`` operator is used to check for ``NULL`` values. .. code-block:: sql SELECT a FROM foo WHERE a = NULL **Best practice** Use ``IS`` or ``IS NOT`` to check for ``NULL`` values. .. code-block:: sql SELECT a FROM foo WHERE a IS NULL """ name = "convention.is_null" aliases = ("L049",) groups = ("all", "core", "convention") crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Relational operators should not be used to check for NULL values.""" # Context/motivation for this rule: # https://news.ycombinator.com/item?id=28772289 # https://stackoverflow.com/questions/9581745/sql-is-null-and-null # Allow assignments in SET clauses if len(context.parent_stack) >= 2 and context.parent_stack[-2].is_type( "set_clause_list", "execute_script_statement", "options_segment" ): return None # Allow assignments in EXEC clauses, or any other explicit assignments if context.parent_stack and context.parent_stack[-1].is_type( "set_clause_list", "execute_script_statement", "assignment_operator" ): return None # If the operator is in an EXCLUDE constraint (PostgreSQL feature), the SQL # could look like: EXCLUDE (field WITH =). In that case, we can exit early # to avoid an assertion failure due to no segment following the operator. 
# Note that if the EXCLUDE is based on an expression, we will still be # checking that expression because it will be under a different child segment. if context.parent_stack and context.parent_stack[-1].is_type( "exclusion_constraint_element" ): return None # We only care about equality operators. if context.segment.raw not in ("=", "!=", "<>"): return None # We only care if it's followed by a NULL literal. siblings = Segments(*context.parent_stack[-1].segments) after_op_list = siblings.select(start_seg=context.segment) next_code = after_op_list.first(sp.is_code()) if not next_code.all(sp.is_type("null_literal")): return None sub_seg = next_code.get() assert sub_seg, "TypeGuard: Segment must exist" self.logger.debug( "Found NULL literal following equals/not equals @%s: %r", sub_seg.pos_marker, sub_seg.raw, ) edit = _create_base_is_null_sequence( is_upper=sub_seg.raw[0] == "N", operator_raw=context.segment.raw, ) return LintResult( anchor=context.segment, fixes=ReflowSequence.from_around_target( context.segment, context.parent_stack[0], config=context.config ) .replace(context.segment, edit) .respace() .get_fixes(), ) def _create_base_is_null_sequence( is_upper: bool, operator_raw: str, ) -> CorrectionListType: is_seg = KeywordSegment("IS" if is_upper else "is") not_seg = KeywordSegment("NOT" if is_upper else "not") if operator_raw == "=": return [is_seg] return [ is_seg, WhitespaceSegment(), not_seg, ] sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV06.py000066400000000000000000000352101503426445100223240ustar00rootroot00000000000000"""Implementation of Rule CV06.""" from collections.abc import Sequence from typing import NamedTuple, Optional, cast from sqlfluff.core.parser import BaseSegment, NewlineSegment, RawSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, sp class SegmentMoveContext(NamedTuple): """Context information for moving a segment.""" anchor_segment: RawSegment is_one_line: bool before_segment: Segments whitespace_deletions: Segments class Rule_CV06(BaseRule): """Statements must end with a semi-colon. **Anti-pattern** A statement is not immediately terminated with a semi-colon. The ``•`` represents space. .. code-block:: sql :force: SELECT a FROM foo ; SELECT b FROM bar••; **Best practice** Immediately terminate the statement with a semi-colon. .. code-block:: sql :force: SELECT a FROM foo; """ name = "convention.terminator" aliases = ("L052",) groups = ("all", "convention") config_keywords = ["multiline_newline", "require_final_semicolon"] crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True @staticmethod def _handle_preceding_inline_comments( before_segment: Sequence[BaseSegment], anchor_segment: BaseSegment ): """Adjust segments to not move preceding inline comments. We don't want to move inline comments that are on the same line as the preceding code segment as they could contain noqa instructions. """ # See if we have a preceding inline comment on the same line as the preceding # segment. same_line_comment = next( ( s for s in before_segment if s.is_comment and not s.is_type("block_comment") and s.pos_marker and s.pos_marker.working_line_no # We don't need to handle the case where raw_segments is empty # because it never is. It's either a segment with raw children # or a raw segment which returns [self] as raw_segments. 
== anchor_segment.raw_segments[-1].pos_marker.working_line_no ), None, ) # If so then make that our new anchor segment and adjust # before_segment accordingly. if same_line_comment: anchor_segment = same_line_comment before_segment = before_segment[: before_segment.index(same_line_comment)] return before_segment, anchor_segment @staticmethod def _handle_trailing_inline_comments( parent_segment: BaseSegment, anchor_segment: BaseSegment ) -> BaseSegment: """Adjust anchor_segment to not move trailing inline comment. We don't want to move inline comments that are on the same line as the preceding code segment as they could contain noqa instructions. """ # See if we have a trailing inline comment on the same line as the preceding # segment. for comment_segment in parent_segment.recursive_crawl("comment"): assert comment_segment.pos_marker assert anchor_segment.pos_marker if ( comment_segment.pos_marker.working_line_no == anchor_segment.pos_marker.working_line_no ) and (not comment_segment.is_type("block_comment")): anchor_segment = comment_segment return anchor_segment @staticmethod def _is_one_line_statement( parent_segment: BaseSegment, segment: BaseSegment ) -> bool: """Check if the statement containing the provided segment is one line.""" # Find statement segment containing the current segment. statement_segment = next( ( ps.segment for ps in (parent_segment.path_to(segment) or []) if ps.segment.is_type("statement") ), None, ) if statement_segment is None: # pragma: no cover # If we can't find a parent statement segment then don't try anything # special. return False if not any(statement_segment.recursive_crawl("newline")): # Statement segment has no newlines therefore starts and ends on the same # line. return True return False def _get_segment_move_context( self, target_segment: RawSegment, parent_segment: BaseSegment ) -> SegmentMoveContext: # Locate the segment to be moved (i.e. context.segment) and search back # over the raw stack to find the end of the preceding statement. reversed_raw_stack = Segments(*parent_segment.raw_segments).reversed() before_code = reversed_raw_stack.select( loop_while=sp.not_(sp.is_code()), start_seg=target_segment ) before_segment = before_code.select(sp.not_(sp.is_meta())) # We're selecting from the raw stack, so we know that before_code is # made of RawSegment elements. anchor_segment = ( cast(RawSegment, before_code[-1]) if before_code else target_segment ) first_code = reversed_raw_stack.select( sp.is_code(), start_seg=target_segment ).first() self.logger.debug("Semicolon: first_code: %s", first_code) is_one_line = ( self._is_one_line_statement(parent_segment, first_code[0]) if first_code else False ) # We can tidy up any whitespace between the segment # and the preceding code/comment segment. # Don't mess with comment spacing/placement. whitespace_deletions = before_segment.select(loop_while=sp.is_whitespace()) return SegmentMoveContext( anchor_segment, is_one_line, before_segment, whitespace_deletions ) def _handle_semicolon( self, target_segment: RawSegment, parent_segment: BaseSegment ) -> Optional[LintResult]: info = self._get_segment_move_context(target_segment, parent_segment) semicolon_newline = self.multiline_newline if not info.is_one_line else False self.logger.debug("Semicolon Newline: %s", semicolon_newline) # Semi-colon on same line. if not semicolon_newline: return self._handle_semicolon_same_line( target_segment, parent_segment, info ) # Semi-colon on new line. 
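# --- Hedged illustration (not from the original source), assuming
# multiline_newline = True and a statement spanning several lines:
#     SELECT a
#     FROM foo  ;     -- flagged: terminator trails the last line
#     SELECT a
#     FROM foo
#     ;               -- fixed: terminator moved onto its own line
# One-line statements always take the same-line branch above instead.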
else: return self._handle_semicolon_newline(target_segment, parent_segment, info) def _handle_semicolon_same_line( self, target_segment: RawSegment, parent_segment: BaseSegment, info: SegmentMoveContext, ) -> Optional[LintResult]: if not info.before_segment: return None # If preceding segments are found then delete the old # semi-colon and its preceding whitespace and then insert # the semi-colon in the correct location. fixes = self._create_semicolon_and_delete_whitespace( target_segment, parent_segment, info.anchor_segment, info.whitespace_deletions, [ SymbolSegment(raw=";", type="statement_terminator"), ], ) return LintResult( anchor=info.anchor_segment, fixes=fixes, ) def _handle_semicolon_newline( self, target_segment: RawSegment, parent_segment: BaseSegment, info: SegmentMoveContext, ) -> Optional[LintResult]: # Adjust before_segment and anchor_segment for preceding inline # comments. Inline comments can contain noqa logic so we need to add the # newline after the inline comment. (before_segment, anchor_segment) = self._handle_preceding_inline_comments( info.before_segment, info.anchor_segment ) if (len(before_segment) == 1) and all( s.is_type("newline") for s in before_segment ): return None # If preceding segment is not a single newline then delete the old # semi-colon/preceding whitespace and then insert the # semi-colon in the correct location. # This handles an edge case in which an inline comment comes after # the semi-colon. anchor_segment = self._handle_trailing_inline_comments( parent_segment, anchor_segment ) fixes = [] if anchor_segment is target_segment: fixes.append( LintFix.replace( anchor_segment, [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ) else: fixes.extend( self._create_semicolon_and_delete_whitespace( target_segment, parent_segment, anchor_segment, info.whitespace_deletions, [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ) return LintResult( anchor=anchor_segment, fixes=fixes, ) def _create_semicolon_and_delete_whitespace( self, target_segment: BaseSegment, parent_segment: BaseSegment, anchor_segment: BaseSegment, whitespace_deletions: Segments, create_segments: list[BaseSegment], ) -> list[LintFix]: anchor_segment = self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True ) lintfix_fn = LintFix.create_after whitespace_deletion_set = set(whitespace_deletions) if anchor_segment in whitespace_deletion_set: # Can't delete() and create_after() the same segment. Use replace() # instead. lintfix_fn = LintFix.replace whitespace_deletions = whitespace_deletions.select( lambda seg: seg is not anchor_segment ) fixes = [ lintfix_fn( anchor_segment, create_segments, ), LintFix.delete( target_segment, ), ] fixes.extend(LintFix.delete(d) for d in whitespace_deletions) return fixes def _ensure_final_semicolon( self, parent_segment: BaseSegment ) -> Optional[LintResult]: # Iterate backwards over complete stack to find # if the final semi-colon is already present. 
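# --- Hedged sketch (an assumption, not from the original source): with
# require_final_semicolon = True a file ending in
#     SELECT a FROM foo
# gains a trailing terminator:
#     SELECT a FROM foo;
# whereas a file already ending in ";", or containing no statements at
# all, falls through without a result.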
anchor_segment = parent_segment.segments[-1] trigger_segment = parent_segment.segments[-1] semi_colon_exist_flag = False is_one_line = False before_segment = [] for segment in parent_segment.segments[::-1]: anchor_segment = segment if segment.is_type("statement_terminator"): semi_colon_exist_flag = True elif segment.is_code: is_one_line = self._is_one_line_statement(parent_segment, segment) break elif not segment.is_meta: before_segment.append(segment) trigger_segment = segment else: return None # File does not contain any statements self.logger.debug("Trigger on: %s", trigger_segment) self.logger.debug("Anchoring on: %s", anchor_segment) semicolon_newline = self.multiline_newline if not is_one_line else False if not semi_colon_exist_flag: # Create the final semi-colon if it does not yet exist. # Semi-colon on same line. if not semicolon_newline: fixes = [ LintFix.create_after( self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True, ), [ SymbolSegment(raw=";", type="statement_terminator"), ], ) ] # Semi-colon on new line. else: # Adjust before_segment and anchor_segment for inline # comments. ( before_segment, anchor_segment, ) = self._handle_preceding_inline_comments( before_segment, anchor_segment ) self.logger.debug("Revised anchor on: %s", anchor_segment) fixes = [ LintFix.create_after( self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True, ), [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ] return LintResult( anchor=trigger_segment, fixes=fixes, ) return None def _eval(self, context: RuleContext) -> list[LintResult]: """Statements must end with a semi-colon.""" # Config type hints self.multiline_newline: bool self.require_final_semicolon: bool # We should only be dealing with a root segment assert context.segment.is_type("file") results = [] for idx, seg in enumerate(context.segment.segments): res = None # First we can simply handle the case of existing semi-colon alignment. if seg.is_type("statement_terminator"): # If it's a terminator then we know it's a raw. seg = cast(RawSegment, seg) self.logger.debug("Handling semi-colon: %s", seg) res = self._handle_semicolon(seg, context.segment) # Otherwise handle the end of the file separately. elif ( self.require_final_semicolon and idx == len(context.segment.segments) - 1 ): self.logger.debug("Handling final segment: %s", seg) res = self._ensure_final_semicolon(context.segment) if res: results.append(res) return results sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV07.py000066400000000000000000000077241503426445100223360ustar00rootroot00000000000000"""Implementation of Rule CV07.""" from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, sp class Rule_CV07(BaseRule): """Top-level statements should not be wrapped in brackets. **Anti-pattern** A top-level statement is wrapped in brackets. .. code-block:: sql :force: (SELECT foo FROM bar) -- This also applies to statements containing a sub-query. (SELECT foo FROM (SELECT * FROM bar)) **Best practice** Don't wrap top-level statements in brackets. .. code-block:: sql :force: SELECT foo FROM bar -- Likewise for statements containing a sub-query. 
SELECT foo FROM (SELECT * FROM bar) """ name = "convention.statement_brackets" aliases = ("L053",) groups = ("all", "convention") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True @staticmethod def _iter_statements(file_segment): """Designed to be used on files. Yields only direct children, or children of batches. """ for seg in file_segment.segments: if seg.is_type("batch"): for subseg in seg.segments: if subseg.is_type("statement"): yield subseg elif seg.is_type("statement"): yield seg @classmethod def _iter_bracketed_statements(cls, file_segment): for stmt in cls._iter_statements(file_segment): for seg in stmt.segments: if seg.is_type("bracketed"): yield stmt, seg def _eval(self, context: RuleContext) -> list[LintResult]: """Top-level statements should not be wrapped in brackets.""" # Because of the root_only_crawler, this can control its own # crawling behaviour. results = [] for parent, bracketed_segment in self._iter_bracketed_statements( context.segment ): self.logger.debug("Evaluating %s in %s", bracketed_segment, parent) # Replace the bracketed segment with it's # children, excluding the bracket symbols. bracket_set = {"start_bracket", "end_bracket"} filtered_children = Segments( *[ segment for segment in bracketed_segment.segments if segment.get_type() not in bracket_set and not segment.is_meta ] ) # Lift leading/trailing whitespace and inline comments to the # segment above. This avoids introducing a parse error (ANSI and other # dialects generally don't allow this at lower levels of the parse # tree). to_lift_predicate = sp.or_(sp.is_whitespace(), sp.is_type("inline_comment")) leading = filtered_children.select(loop_while=to_lift_predicate) self.logger.debug("Leading: %s", leading) trailing = ( filtered_children.reversed() .select(loop_while=to_lift_predicate) .reversed() ) self.logger.debug("Trailing: %s", trailing) lift_nodes = set(leading + trailing) fixes = [] if lift_nodes: fixes.append(LintFix.create_before(parent, list(leading))) fixes.append(LintFix.create_after(parent, list(trailing))) fixes.extend([LintFix.delete(segment) for segment in lift_nodes]) filtered_children = filtered_children[len(leading) : -len(trailing)] fixes.append( LintFix.replace( bracketed_segment, filtered_children, ) ) results.append(LintResult(anchor=bracketed_segment, fixes=fixes)) return results sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV08.py000066400000000000000000000025371503426445100223340ustar00rootroot00000000000000"""Implementation of Rule CV08.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV08(BaseRule): """Use ``LEFT JOIN`` instead of ``RIGHT JOIN``. **Anti-pattern** ``RIGHT JOIN`` is used. .. code-block:: sql :force: SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id; **Best practice** Refactor and use ``LEFT JOIN`` instead. .. code-block:: sql :force: SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id; """ name = "convention.left_join" aliases = ("L055",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Use LEFT JOIN instead of RIGHT JOIN.""" # We are only interested in JOIN clauses. assert context.segment.is_type("join_clause") # Identify if RIGHT JOIN is present. 
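# --- Illustrative note (hedged, not from the original source): this rule
# only detects and does not fix, since rewriting
#     FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id
# as the preferred
#     FROM bar LEFT JOIN foo ON foo.bar_id = bar.id
# means reordering the FROM clause, which is left to the author.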
if {"RIGHT", "JOIN"}.issubset( {segment.raw_upper for segment in context.segment.segments} ): return LintResult(context.segment.segments[0]) return None sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV09.py000066400000000000000000000076631503426445100223420ustar00rootroot00000000000000"""Implementation of Rule CV09.""" from typing import Optional import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV09(BaseRule): """Block a list of configurable words from being used. This generic rule can be useful to prevent certain keywords, functions, or objects from being used. Only whole words can be blocked, not phrases, nor parts of words. This block list is case insensitive. **Example use cases** * We prefer ``BOOL`` over ``BOOLEAN`` and there is no existing rule to enforce this. Until such a rule is written, we can add ``BOOLEAN`` to the deny list to cause a linting error to flag this. * We have deprecated a schema/table/function and want to prevent it being used in future. We can add that to the denylist and then add a ``-- noqa: CV09`` for the few exceptions that still need to be in the code base for now. **Anti-pattern** If the ``blocked_words`` config is set to ``deprecated_table,bool`` then the following will flag: .. code-block:: sql SELECT * FROM deprecated_table WHERE 1 = 1; CREATE TABLE myschema.t1 (a BOOL); **Best practice** Do not used any blocked words: .. code-block:: sql SELECT * FROM another_table WHERE 1 = 1; CREATE TABLE myschema.t1 (a BOOLEAN); """ name = "convention.blocked_words" aliases = ("L062",) groups = ("all", "convention") # It's a broad selector, but only trigger on raw segments. crawl_behaviour = SegmentSeekerCrawler({"raw"}) config_keywords = [ "blocked_words", "blocked_regex", "match_source", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: # Config type hints self.blocked_words: Optional[str] self.blocked_regex: Optional[str] self.match_source: Optional[bool] # Exit early if no block list set if not self.blocked_words and not self.blocked_regex: return None if context.segment.type == "comment": return None # Get the ignore list configuration and cache it try: blocked_words_list = self.blocked_words_list except AttributeError: # First-time only, read the settings from configuration. # So we can cache them for next time for speed. 
blocked_words_list = self._init_blocked_words() if context.segment.raw_upper in blocked_words_list: return LintResult( anchor=context.segment, description=f"Use of blocked word '{context.segment.raw}'.", ) if self.blocked_regex: if regex.search(self.blocked_regex, context.segment.raw): return LintResult( anchor=context.segment, description=f"Use of blocked regex '{context.segment.raw}'.", ) if self.match_source: for segment in context.segment.raw_segments: source_str = segment.pos_marker.source_str() if regex.search(self.blocked_regex, source_str): return LintResult( anchor=context.segment, description=f"Use of blocked regex '{source_str}'.", ) return None def _init_blocked_words(self) -> list[str]: """Called first time rule is evaluated to fetch & cache the blocked_words.""" blocked_words_config = getattr(self, "blocked_words") if blocked_words_config: self.blocked_words_list = self.split_comma_separated_string( blocked_words_config.upper() ) else: # pragma: no cover # Shouldn't get here as we exit early if no block list self.blocked_words_list = [] return self.blocked_words_list sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV10.py000066400000000000000000000272531503426445100223270ustar00rootroot00000000000000"""Implementation of Rule CV10.""" from typing import Optional import regex from sqlfluff.core.parser import LiteralSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, rsp class Rule_CV10(BaseRule): r"""Consistent usage of preferred quotes for quoted literals. Some databases allow quoted literals to use either single or double quotes. Prefer one type of quotes as specified in rule setting, falling back to alternate quotes to reduce the need for escapes. Dollar-quoted raw strings are excluded from this rule, as they are mostly used for literal UDF Body definitions. .. note:: This rule only checks quoted literals and not quoted identifiers as they often cannot interchange single and double quotes This rule is only enabled for dialects that allow single *and* double quotes for quoted literals (currently ``bigquery``, ``databricks``, ``hive``, ``mysql``, ``sparksql``). It can be enabled for other dialects with the ``force_enable = True`` flag. **Anti-pattern** .. code-block:: sql :force: select "abc", 'abc', "\"", "abc" = 'abc' from foo **Best practice** Ensure all quoted literals use preferred quotes, unless escaping can be reduced by using alternate quotes. .. code-block:: sql :force: select "abc", "abc", '"', "abc" = "abc" from foo """ name = "convention.quoted_literals" aliases = ("L064",) groups = ("all", "convention") config_keywords = ["preferred_quoted_literal_style", "force_enable"] crawl_behaviour = SegmentSeekerCrawler({"literal"}) targets_templated = True is_fix_compatible = True _dialects_with_double_quoted_strings = [ "bigquery", "databricks", "hive", "mysql", "sparksql", ] _quotes_mapping = { "single_quotes": { "common_name": "single quotes", "preferred_quote_char": "'", "alternate_quote_char": '"', }, "double_quotes": { "common_name": "double quotes", "preferred_quote_char": '"', "alternate_quote_char": "'", }, } # BigQuery string prefix characters. _string_prefix_chars = "rbRB" def _eval(self, context: RuleContext) -> Optional[LintResult]: # Config type hints self.preferred_quoted_literal_style: str self.force_enable: bool # Only care about quoted literal segments. 
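# --- Hedged illustration (not from the original source), assuming
# preferred_quoted_literal_style = "double_quotes" on a dialect such as
# bigquery:
#     select 'abc', '"' from foo   ->   select "abc", '"' from foo
# The plain literal flips to the preferred quotes, but the literal whose
# body contains a double quote keeps its single quotes, because switching
# would introduce an escape.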
if not context.segment.is_type("quoted_literal"): return None if not ( self.force_enable or context.dialect.name in self._dialects_with_double_quoted_strings ): return LintResult(memory=context.memory) # This rule can also cover quoted literals that are partially templated. # I.e. when the quotes characters are _not_ part of the template we can # meaningfully apply this rule. templated_raw_slices = FunctionalContext(context).segment.raw_slices.select( rsp.is_slice_type("templated") ) for raw_slice in templated_raw_slices: pos_marker = context.segment.pos_marker # This is to make mypy happy. assert isinstance(pos_marker, PositionMarker) # Check whether the quote characters are inside the template. # For the leading quote we need to account for string prefix characters. leading_quote_inside_template = pos_marker.source_str()[:2].lstrip( self._string_prefix_chars )[0] not in ['"', "'"] trailing_quote_inside_template = pos_marker.source_str()[-1] not in [ '"', "'", ] # quotes are not entirely outside of a template, nothing we can do if leading_quote_inside_template or trailing_quote_inside_template: return LintResult(memory=context.memory) # If quoting style is set to consistent we use the quoting style of the first # quoted_literal that we encounter. if self.preferred_quoted_literal_style == "consistent": memory = context.memory preferred_quoted_literal_style = memory.get( "preferred_quoted_literal_style" ) if not preferred_quoted_literal_style: # Getting the quote from LAST character to be able to handle STRING # prefixes preferred_quoted_literal_style = ( "double_quotes" if context.segment.raw[-1] == '"' else "single_quotes" ) memory["preferred_quoted_literal_style"] = ( preferred_quoted_literal_style ) self.logger.debug( "Preferred string quotes is set to `consistent`. Derived quoting " "style %s from first quoted literal.", preferred_quoted_literal_style, ) else: preferred_quoted_literal_style = self.preferred_quoted_literal_style fixed_string = self._normalize_preferred_quoted_literal_style( context.segment.raw, preferred_quote_char=self._quotes_mapping[preferred_quoted_literal_style][ "preferred_quote_char" ], alternate_quote_char=self._quotes_mapping[preferred_quoted_literal_style][ "alternate_quote_char" ], ) if fixed_string != context.segment.raw: # We can't just set the primary type, but we have to ensure that the # subtypes are properly set too so that the re-parse checks pass. if fixed_string[0] == "'": _instance_types = ("quoted_literal", "single_quote") else: _instance_types = ("quoted_literal", "double_quote") return LintResult( anchor=context.segment, memory=context.memory, fixes=[ LintFix.replace( context.segment, [ LiteralSegment( raw=fixed_string, instance_types=_instance_types, ) ], ) ], description=( "Inconsistent use of preferred quote style '" f"{self._quotes_mapping[preferred_quoted_literal_style]['common_name']}" # noqa: E501 f"'. Use {fixed_string} instead of {context.segment.raw}." ), ) return None # Code for preferred quoted_literal style was copied from Black string normalization # and adapted to our use-case. def _regex_sub_with_overlap( self, regex: regex.Pattern, replacement: str, original: str ) -> str: """Replace `regex` with `replacement` twice on `original`. This is used by string normalization to perform replaces on overlapping matches. 
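An illustrative note (added as an assumption, not part of the original docstring): the "preceding character" consumed by one match can hide an immediately adjacent quote, so for a body like ``x""y`` a single pass only rewrites the first quote and the second pass catches the one beside it.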
Source: https://github.com/psf/black/blob/7f7673d941a947a8d392c8c0866d3d588affc174/src/black/strings.py#L23-L29 """ return regex.sub(replacement, regex.sub(replacement, original)) def _normalize_preferred_quoted_literal_style( self, s: str, preferred_quote_char: str, alternate_quote_char: str ) -> str: """Prefer `preferred_quote_char` but only if it doesn't cause more escaping. Adds or removes backslashes as appropriate. Source: https://github.com/psf/black/blob/7f7673d941a947a8d392c8c0866d3d588affc174/src/black/strings.py#L167 """ value = s.lstrip(self._string_prefix_chars) if value[:3] == preferred_quote_char * 3: # In triple-quoted strings we are not replacing escaped quotes. # So nothing left to do and we can exit. return s elif value[0] == preferred_quote_char: # Quotes are alright already. But maybe we can remove some unnecessary # escapes or reduce the number of escapes using alternate_quote_char ? orig_quote = preferred_quote_char new_quote = alternate_quote_char elif value[:3] == alternate_quote_char * 3: orig_quote = alternate_quote_char * 3 new_quote = preferred_quote_char * 3 elif value[0] == alternate_quote_char: orig_quote = alternate_quote_char new_quote = preferred_quote_char else: self.logger.debug( "Found quoted string %s using neither preferred quote char %s " "nor alternate_quote_char %s. Skipping...", s, preferred_quote_char, alternate_quote_char, ) return s first_quote_pos = s.find(orig_quote) prefix = s[:first_quote_pos] unescaped_new_quote = regex.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") escaped_new_quote = regex.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") escaped_orig_quote = regex.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] if "r" in prefix.lower(): if unescaped_new_quote.search(body): self.logger.debug( "There's at least one unescaped new_quote in this raw string " "so converting is impossible." ) return s # Do not modify the body of raw strings by introducing or removing # backslashes as this changes the value of the raw string. new_body = body else: # remove unnecessary escapes new_body = self._regex_sub_with_overlap( escaped_new_quote, rf"\1\2{new_quote}", body ) if body != new_body: # Consider the string without unnecessary escapes as the original self.logger.debug("Removing unnecessary escapes in %s.", body) body = new_body s = f"{prefix}{orig_quote}{body}{orig_quote}" new_body = self._regex_sub_with_overlap( escaped_orig_quote, rf"\1\2{orig_quote}", new_body ) new_body = self._regex_sub_with_overlap( unescaped_new_quote, rf"\1\\{new_quote}", new_body ) if ( new_quote == 3 * preferred_quote_char and new_body[-1:] == preferred_quote_char ): # edge case: for example when converting quotes from '''a"''' # to """a\"""" the last " of the string body needs to be escaped. new_body = new_body[:-1] + f"\\{preferred_quote_char}" orig_escape_count = body.count("\\") new_escape_count = new_body.count("\\") if new_escape_count > orig_escape_count: self.logger.debug( "Changing quote style would introduce more escapes in the body. " "Before: %s After: %s . 
Skipping.", body, new_body, ) return s # Do not introduce more escaping if new_escape_count == orig_escape_count and orig_quote == preferred_quote_char: # Already using preferred_quote_char, and no escape benefit to changing return s return f"{prefix}{new_quote}{new_body}{new_quote}" sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV11.py000066400000000000000000000411341503426445100223220ustar00rootroot00000000000000"""Implementation of Rule CV11.""" from collections.abc import Iterable from typing import Optional from sqlfluff.core.parser import ( BaseSegment, KeywordSegment, SymbolSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_CV11(BaseRule): """Enforce consistent type casting style. .. note:: This is only compatible with 2-arguments CONVERT as some dialects allow an optional 3rd argument e.g TSQL, which cannot be rewritten into CAST. This rule is disabled by default for Teradata because it supports different type casting apart from CONVERT and :: e.g DATE '2007-01-01', '9999-12-31' (DATE). **Anti-pattern** Using mixture of CONVERT, :: and CAST when ``preferred_type_casting_style`` config is set to ``consistent`` (default). .. code-block:: sql SELECT CONVERT(int, 1) AS bar, 100::int::text, CAST(10 AS text) AS coo FROM foo; **Best practice** Use consistent type casting style. .. code-block:: sql SELECT CAST(1 AS int) AS bar, CAST(CAST(100 AS int) AS text), CAST(10 AS text) AS coo FROM foo; """ name = "convention.casting_style" aliases = ("L067",) groups = ("all", "convention") config_keywords = ["preferred_type_casting_style"] crawl_behaviour = SegmentSeekerCrawler({"function", "cast_expression"}) is_fix_compatible = True @staticmethod def _get_children(segments: Segments) -> Segments: return segments.children( sp.and_( sp.not_(sp.is_meta()), sp.not_( sp.is_type( "start_bracket", "end_bracket", "whitespace", "newline", "casting_operator", "comma", "keyword", ) ), ) ) @staticmethod def _cast_fix_list( context: RuleContext, cast_arg_1: Iterable[BaseSegment], cast_arg_2: BaseSegment, later_types: Optional[Segments] = None, ) -> list[LintFix]: """Generate list of fixes to convert CONVERT and ShorthandCast to CAST.""" # Add cast and opening parenthesis. edits = ( [ WordSegment("cast", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] + list(cast_arg_1) + [ WhitespaceSegment(), KeywordSegment("as"), WhitespaceSegment(), cast_arg_2, SymbolSegment(")", type="end_bracket"), ] ) if later_types: pre_edits: list[BaseSegment] = [ WordSegment("cast", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] in_edits: list[BaseSegment] = [ WhitespaceSegment(), KeywordSegment("as"), WhitespaceSegment(), ] post_edits: list[BaseSegment] = [ SymbolSegment(")", type="end_bracket"), ] for _type in later_types: edits = pre_edits + edits + in_edits + [_type] + post_edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _convert_fix_list( context: RuleContext, convert_arg_1: BaseSegment, convert_arg_2: BaseSegment, later_types=None, ) -> list[LintFix]: """Generate list of fixes to convert CAST and ShorthandCast to CONVERT.""" # Add convert and opening parenthesis. 
edits = [ WordSegment("convert", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), convert_arg_1, SymbolSegment(",", type="comma"), WhitespaceSegment(), convert_arg_2, SymbolSegment(")", type="end_bracket"), ] if later_types: pre_edits: list[BaseSegment] = [ WordSegment("convert", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] in_edits: list[BaseSegment] = [ SymbolSegment(",", type="comma"), WhitespaceSegment(), ] post_edits: list[BaseSegment] = [ SymbolSegment(")", type="end_bracket"), ] for _type in later_types: edits = pre_edits + [_type] + in_edits + edits + post_edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _shorthand_fix_list( context: RuleContext, shorthand_arg_1: BaseSegment, shorthand_arg_2: BaseSegment ) -> list[LintFix]: """Generate list of fixes to convert CAST and CONVERT to ShorthandCast.""" if len(shorthand_arg_1.raw_segments) > 1: edits = [ SymbolSegment("(", type="start_bracket"), shorthand_arg_1, SymbolSegment(")", type="end_bracket"), ] else: edits = [shorthand_arg_1] edits.extend( [ SymbolSegment("::", type="casting_operator"), shorthand_arg_2, ] ) fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes def _eval(self, context: RuleContext) -> Optional[LintResult]: """Enforce consistent type casting style.""" # Config type hints self.preferred_type_casting_style: str # Rule disabled for teradata. if context.dialect.name == "teradata": return None # If we're in a templated section, don't consider the current location. # (i.e. if a cast happens in a macro, the end user writing the current # query may not know that or have control over it, so we should just # skip it). if context.segment.pos_marker: if not context.segment.pos_marker.is_literal(): return None # Construct segment type casting if context.segment.is_type("function"): function_name = context.segment.get_child("function_name") # Functions should always have a name, that means this clause should # be unnecessary. if not function_name: # pragma: no cover return None elif function_name.raw_upper == "CAST": current_type_casting_style = "cast" elif function_name.raw_upper == "CONVERT": current_type_casting_style = "convert" else: current_type_casting_style = None elif context.segment.is_type("cast_expression"): current_type_casting_style = "shorthand" else: # pragma: no cover current_type_casting_style = None functional_context = FunctionalContext(context) # If casting style is set to consistent, # we use the casting style of the first segment we encounter. 
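# Worked example (comment added for clarity): in
# ``SELECT CAST(a AS int), b::text`` the first cast encountered is a CAST,
# so ``prior_type_casting_style`` is stored as "cast" and the later
# shorthand ``b::text`` is flagged and rewritten to ``CAST(b AS text)``.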
# convert_content = None if self.preferred_type_casting_style == "consistent": memory = context.memory prior_type_casting_style = context.memory.get("prior_type_casting_style") previous_skipped = context.memory.get("previous_skipped") # if previous_skipped then we can skip the whole fix # Construct fixes if prior_type_casting_style == "cast": if current_type_casting_style == "convert": # Get the content of CONVERT bracketed = functional_context.segment.children( sp.is_type("function_contents") ).children(sp.is_type("bracketed")) convert_content = self._get_children(bracketed) # We only care about 2-arguments convert # some dialects allow an optional 3rd argument e.g TSQL # which cannot be rewritten into CAST if len(convert_content) > 2: # set previous_skipped if previous_skipped is None: # Only update prior_type_casting_style # if it is none, this ultimately # makes sure we maintain the first # casting style we encounter memory["previous_skipped"] = True return None fixes = self._cast_fix_list( context, [convert_content[1]], convert_content[0], ) elif current_type_casting_style == "shorthand": # Get the expression and the datatype segment expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._cast_fix_list( context, [expression_datatype_segment[0]], expression_datatype_segment[1], # We can have multiple shorthandcast e.g 1::int::text # in that case, we need to introduce nested CAST() expression_datatype_segment[2:], ) elif prior_type_casting_style == "convert": bracketed = functional_context.segment.children( sp.is_type("function_contents") ).children(sp.is_type("bracketed")) if current_type_casting_style == "cast": cast_content = self._get_children(bracketed) if len(cast_content) > 2: return None fixes = self._convert_fix_list( context, cast_content[1], cast_content[0], ) elif current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._convert_fix_list( context, expression_datatype_segment[1], expression_datatype_segment[0], expression_datatype_segment[2:], ) elif prior_type_casting_style == "shorthand": bracketed = functional_context.segment.children( sp.is_type("function_contents") ).children(sp.is_type("bracketed")) if current_type_casting_style == "cast": # Get the content of CAST cast_content = self._get_children(bracketed) if len(cast_content) > 2: return None fixes = self._shorthand_fix_list( context, cast_content[0], cast_content[1], ) elif current_type_casting_style == "convert": convert_content = self._get_children(bracketed) if len(convert_content) > 2: return None fixes = self._shorthand_fix_list( context, convert_content[1], convert_content[0], ) if ( prior_type_casting_style and current_type_casting_style and (prior_type_casting_style != current_type_casting_style) ): return LintResult( anchor=context.segment, memory=context.memory, fixes=fixes, description=("Inconsistent type casting styles found."), ) if prior_type_casting_style is None: # Only update prior_type_casting_style if it is none, this ultimately # makes sure we maintain the first casting style we encounter memory["prior_type_casting_style"] = current_type_casting_style elif ( current_type_casting_style and current_type_casting_style != self.preferred_type_casting_style ): convert_content = None cast_content = None if self.preferred_type_casting_style == "cast": if current_type_casting_style == "convert": bracketed = functional_context.segment.children( sp.is_type("function_contents") 
).children(sp.is_type("bracketed")) convert_content = self._get_children(bracketed) fixes = self._cast_fix_list( context, [convert_content[1]], convert_content[0], ) elif current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) for data_type_idx, seg in enumerate(expression_datatype_segment): if seg.is_type("data_type"): break fixes = self._cast_fix_list( context, expression_datatype_segment[:data_type_idx], expression_datatype_segment[data_type_idx], expression_datatype_segment[data_type_idx + 1 :], ) elif self.preferred_type_casting_style == "convert": if current_type_casting_style == "cast": bracketed = functional_context.segment.children( sp.is_type("function_contents") ).children(sp.is_type("bracketed")) cast_content = self._get_children(bracketed) fixes = self._convert_fix_list( context, cast_content[1], cast_content[0], ) elif current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._convert_fix_list( context, expression_datatype_segment[1], expression_datatype_segment[0], expression_datatype_segment[2:], ) elif self.preferred_type_casting_style == "shorthand": bracketed = functional_context.segment.children( sp.is_type("function_contents") ).children(sp.is_type("bracketed")) if current_type_casting_style == "cast": cast_content = self._get_children(bracketed) fixes = self._shorthand_fix_list( context, cast_content[0], cast_content[1], ) elif current_type_casting_style == "convert": convert_content = self._get_children(bracketed) fixes = self._shorthand_fix_list( context, convert_content[1], convert_content[0], ) # Don't fix if there's too much content. if (convert_content and len(convert_content) > 2) or ( cast_content and len(cast_content) > 2 ): fixes = [] return LintResult( anchor=context.segment, memory=context.memory, fixes=fixes, description=( "Used type casting style is different from" " the preferred type casting style." ), ) return None sqlfluff-3.4.2/src/sqlfluff/rules/convention/CV12.py000066400000000000000000000255351503426445100223320ustar00rootroot00000000000000"""Implementation of Rule CV12.""" import collections from collections.abc import Iterator from typing import Deque from sqlfluff.core.parser import BaseSegment from sqlfluff.core.parser.segments.common import ( BinaryOperatorSegment, WhitespaceSegment, ) from sqlfluff.core.parser.segments.keyword import KeywordSegment from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.core.rules.fix import LintFix from sqlfluff.dialects.dialect_ansi import ( ExpressionSegment, JoinClauseSegment, JoinOnConditionSegment, ) class Rule_CV12(BaseRule): """Use `JOIN ... ON ...` instead of `WHERE ...` for join conditions. **Anti-pattern** Using WHERE clause for join conditions. .. code-block:: sql SELECT foo.a , bar.b FROM foo JOIN bar WHERE foo.x = bar.y; **Best practice** Use JOIN ON clause for join condition. .. code-block:: sql SELECT foo.a , bar.b FROM foo JOIN bar ON foo.x = bar.y; """ name = "convention.join_condition" aliases = () groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> EvalResultType: """Find joins with WHERE clause. Fix them into JOIN ON. 
""" return [lint_result for lint_result in self._eval_gen(context)] def _eval_gen(self, context: RuleContext) -> Iterator[LintResult]: # We are only interested in SELECT statement. select_statement = context.segment assert select_statement.is_type("select_statement") maybe_where_clause = select_statement.get_child("where_clause") if not maybe_where_clause: return where_clause = maybe_where_clause where_clause_simplifable = self._is_where_clause_simplifable(where_clause) if where_clause_simplifable: expr = where_clause.get_child("expression") assert expr is not None subexpressions = self._get_subexpression_chunks(expr) else: subexpressions = [] consumed_subexpressions = set() # get references in from clause select_table_references = [ *select_statement.recursive_crawl( "from_expression_element", no_recursive_seg_type=["join_clause", "select_statement"], ) ] # track all seen references (from clause + all previous joins) encountered_references = { self._get_from_expression_element_alias(table_ref) for table_ref in select_table_references } for join_clause in select_statement.recursive_crawl( "join_clause", no_recursive_seg_type=["select_statement"] ): # mark table reference as seen join_table_reference = next( join_clause.recursive_crawl( "from_expression_element", no_recursive_seg_type=["select_statement"], ) ) encountered_references.add( self._get_from_expression_element_alias(join_table_reference) ) join_clause_keywords = [ seg for seg in join_clause.segments if seg.type == "keyword" ] if any( kw.raw_upper in ("CROSS", "POSITIONAL", "USING", "APPLY") for kw in join_clause_keywords ): # If explicit CROSS JOIN is used, disregard lack of condition # If explicit POSITIONAL JOIN is used, disregard lack of condition # If explicit JOIN USING is used, disregard lack of condition # If explicit CROSS/OUTER APPLY is used, disregard lack of condition continue this_join_condition = join_clause.get_child("join_on_condition") if this_join_condition: # Join condition is present, no error reported. continue if not where_clause_simplifable: yield LintResult(anchor=join_clause) else: this_join_clause_subexpressions = set() for subexpr_idx, subexpr_segments in enumerate(subexpressions): if subexpr_idx in consumed_subexpressions: continue qualified_column_references = [ col_ref for seg in subexpr_segments for col_ref in seg.recursive_crawl( "column_reference", no_recursive_seg_type="select_statement", ) if "dot" in col_ref.descendant_type_set ] if len(qualified_column_references) > 1 and all( col_ref.raw_upper.startswith( tuple( f"{table_ref}." 
for table_ref in encountered_references ) ) for col_ref in qualified_column_references ): this_join_clause_subexpressions.add(subexpr_idx) consumed_subexpressions.add(subexpr_idx) if not this_join_clause_subexpressions: yield LintResult(join_clause) else: join_clause_fix_segments: Deque[BaseSegment] = collections.deque() for subexpr_idx, subexpr_segments in enumerate(subexpressions): if subexpr_idx in this_join_clause_subexpressions: join_clause_fix_segments.extend(subexpr_segments) join_clause_fix_segments.append( BinaryOperatorSegment("AND") ) while join_clause_fix_segments and join_clause_fix_segments[ 0 ].is_type("whitespace", "binary_operator"): join_clause_fix_segments.popleft() while join_clause_fix_segments and join_clause_fix_segments[ -1 ].is_type("whitespace", "binary_operator"): join_clause_fix_segments.pop() join_on_expression = ExpressionSegment( tuple(join_clause_fix_segments), ) join_on = JoinOnConditionSegment( ( KeywordSegment("ON"), WhitespaceSegment(), join_on_expression, ) ) join_clause_segment = JoinClauseSegment( ( *join_clause.segments, WhitespaceSegment(), join_on, ) ) yield LintResult( anchor=join_clause, fixes=[ LintFix.replace( join_clause, edit_segments=[join_clause_segment], ) ], ) if not where_clause_simplifable: return if not consumed_subexpressions: return # Rewrite WHERE to keep conditions not moved to ON clauses where_clause_fix_segments: Deque[BaseSegment] = collections.deque() for subexpr_idx, subexpr_segments in enumerate(subexpressions): if subexpr_idx not in consumed_subexpressions: where_clause_fix_segments.extend(subexpr_segments) where_clause_fix_segments.append(BinaryOperatorSegment("AND")) while where_clause_fix_segments and where_clause_fix_segments[0].is_type( "whitespace", "binary_operator" ): where_clause_fix_segments.popleft() while where_clause_fix_segments and where_clause_fix_segments[-1].is_type( "whitespace", "binary_operator" ): where_clause_fix_segments.pop() if where_clause_fix_segments: where_clause_expr = where_clause.get_child("expression") assert where_clause_expr is not None yield LintResult( anchor=where_clause_expr, fixes=[ LintFix.replace( where_clause_expr, edit_segments=[*where_clause_fix_segments] ) ], ) else: assert select_statement.segments[-1].is_type("where_clause") assert select_statement.segments[-2].is_type("whitespace", "newline") yield LintResult( anchor=where_clause, fixes=[ LintFix.delete(select_statement.segments[-2]), LintFix.delete(select_statement.segments[-1]), ], ) @staticmethod def _get_from_expression_element_alias(from_expr_element: BaseSegment) -> str: if "alias_expression" in from_expr_element.direct_descendant_type_set: alias_seg = from_expr_element.get_child("alias_expression") assert alias_seg is not None identifier_seg = alias_seg.get_child("identifier") assert identifier_seg is not None alias_str = identifier_seg.raw_upper else: alias_str = from_expr_element.raw_upper return alias_str @staticmethod def _is_where_clause_simplifable(where_clause: BaseSegment) -> bool: assert where_clause.is_type("where_clause") expr = where_clause.get_child("expression") if not expr: # pragma: no cover # According to grammar, we should always have an ExpressionSegment # See sqlfluff.dialects.dialect_ansi.WhereClauseSegment return False ops = expr.recursive_crawl("binary_operator") return all(op.raw_upper == "AND" for op in ops) @staticmethod def _get_subexpression_chunks(expr: BaseSegment) -> list[list[BaseSegment]]: expr_segments = expr.segments bin_op_indices = [ i for i, e in enumerate(expr_segments) if 
e.is_type("binary_operator") ] split_segments = [None, *[expr_segments[i] for i in bin_op_indices], None] start_segments_iter = iter(split_segments) stop_segments_iter = iter(split_segments) _ = next(stop_segments_iter) return [ expr.select_children(start_seg, stop_seg) for start_seg, stop_seg in zip(start_segments_iter, stop_segments_iter) ] sqlfluff-3.4.2/src/sqlfluff/rules/convention/__init__.py000066400000000000000000000065171503426445100234150ustar00rootroot00000000000000"""The convention plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "preferred_not_equal_style": { "validation": ["consistent", "c_style", "ansi"], "definition": ( "The style for using not equal to operator. Defaults to ``consistent``." ), }, "select_clause_trailing_comma": { "validation": ["forbid", "require"], "definition": ( "Should trailing commas within select clauses be required or forbidden?" ), }, "prefer_count_1": { "validation": [True, False], "definition": ("Should count(1) be preferred over count(*) and count(0)?"), }, "prefer_count_0": { "validation": [True, False], "definition": ("Should count(0) be preferred over count(*) and count(1)?"), }, "multiline_newline": { "validation": [True, False], "definition": ( "Should semi-colons be placed on a new line after multi-line " "statements?" ), }, "require_final_semicolon": { "validation": [True, False], "definition": ( "Should final semi-colons be required? " "(N.B. forcing trailing semi-colons is not recommended for dbt users " "as it can cause issues when wrapping the query within other SQL " "queries)." ), }, "preferred_quoted_literal_style": { "validation": ["consistent", "single_quotes", "double_quotes"], "definition": ( "Preferred quoting style to use for the quoted literals. If set to " "``consistent`` quoting style is derived from the first quoted literal " "in the file." ), }, "preferred_type_casting_style": { "validation": ["consistent", "shorthand", "convert", "cast"], "definition": ("The expectation for using sql type casting"), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
""" from sqlfluff.rules.convention.CV01 import Rule_CV01 from sqlfluff.rules.convention.CV02 import Rule_CV02 from sqlfluff.rules.convention.CV03 import Rule_CV03 from sqlfluff.rules.convention.CV04 import Rule_CV04 from sqlfluff.rules.convention.CV05 import Rule_CV05 from sqlfluff.rules.convention.CV06 import Rule_CV06 from sqlfluff.rules.convention.CV07 import Rule_CV07 from sqlfluff.rules.convention.CV08 import Rule_CV08 from sqlfluff.rules.convention.CV09 import Rule_CV09 from sqlfluff.rules.convention.CV10 import Rule_CV10 from sqlfluff.rules.convention.CV11 import Rule_CV11 from sqlfluff.rules.convention.CV12 import Rule_CV12 return [ Rule_CV01, Rule_CV02, Rule_CV03, Rule_CV04, Rule_CV05, Rule_CV06, Rule_CV07, Rule_CV08, Rule_CV09, Rule_CV10, Rule_CV11, Rule_CV12, ] sqlfluff-3.4.2/src/sqlfluff/rules/jinja/000077500000000000000000000000001503426445100202045ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/jinja/JJ01.py000066400000000000000000000176271503426445100212370ustar00rootroot00000000000000"""Implementation of Rule JJ01.""" from sqlfluff.core.parser.segments import BaseSegment, SourceFix from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.core.templaters import JinjaTemplater class Rule_JJ01(BaseRule): """Jinja tags should have a single whitespace on either side. This rule is only active if the ``jinja`` templater (or one of it's subclasses, like the ``dbt`` templater) are used for the current file. **Anti-pattern** Jinja tags with either no whitespace or very long whitespace are hard to read. .. code-block:: jinja :force: SELECT {{ a }} from {{ref('foo')}} **Best practice** A single whitespace surrounding Jinja tags, alternatively longer gaps containing newlines are acceptable. .. code-block:: jinja :force: SELECT {{ a }} from {{ ref('foo') }}; SELECT {{ a }} from {{ ref('foo') }}; """ name = "jinja.padding" aliases = ("L046",) groups = ("all", "core", "jinja") crawl_behaviour = RootOnlyCrawler() targets_templated = True is_fix_compatible = True @staticmethod def _get_whitespace_ends(s: str) -> tuple[str, str, str, str, str]: """Remove tag ends and partition off any whitespace ends. This function assumes that we've already trimmed the string to just the tag, and will raise an AssertionError if not. >>> Rule_JJ01._get_whitespace_ends(' {{not_trimmed}} ') Traceback (most recent call last): ... AssertionError In essence it divides up a tag into the end tokens, any leading or trailing whitespace and the inner content >>> Rule_JJ01._get_whitespace_ends('{{ my_content }}') ('{{', ' ', 'my_content', ' ', '}}') It also works with block tags and more complicated content and end markers. >>> Rule_JJ01._get_whitespace_ends('{%+if a + b is True -%}') ('{%+', '', 'if a + b is True', ' ', '-%}') """ assert s[0] == "{" and s[-1] == "}" # Jinja tags all have a length of two. We can use slicing # to remove them easily. 
main = s[2:-2] pre = s[:2] post = s[-2:] # Optionally Jinja tags may also have plus or minus notation # https://jinja2docs.readthedocs.io/en/stable/templates.html#whitespace-control modifier_chars = ["+", "-"] if main and main[0] in modifier_chars: main = main[1:] pre = s[:3] if main and main[-1] in modifier_chars: main = main[:-1] post = s[-3:] inner = main.strip() pos = main.find(inner) return pre, main[:pos], inner, main[pos + len(inner) :], post @classmethod def _find_raw_at_src_idx(cls, segment: BaseSegment, src_idx: int): """Recursively search to find a raw segment for a position in the source. NOTE: This assumes it's not being called on a `raw`. In the case that there are multiple potential targets, we will find the first. """ assert segment.segments for seg in segment.segments: if not seg.pos_marker: # pragma: no cover continue src_slice = seg.pos_marker.source_slice # If it's before, skip onward. if src_slice.stop <= src_idx: continue # Is the current segment raw? if seg.is_raw(): return seg # Otherwise recurse return cls._find_raw_at_src_idx(seg, src_idx) def _eval(self, context: RuleContext) -> list[LintResult]: """Look for non-literal segments. NOTE: The existing crawlers don't filter very well for only templated code, and so we process the whole file from the root here. """ # If the position marker for the root segment is literal then there's # no templated code. So we can return early. assert context.segment.pos_marker if context.segment.pos_marker.is_literal(): return [] # We'll need the templated file. If for whatever reason it's # not present, abort. if not context.templated_file: # pragma: no cover return [] # We also only work with setups which use the jinja templater # or a derivative of that. Otherwise return empty. # NOTE: The `templater_obj` is not available in parallel operations # and we don't really want to rehydrate a templater just to check # what type it is, so use `get_templater_class()`. _templater_class = context.config.get_templater_class() if not issubclass(_templater_class, JinjaTemplater): self.logger.debug(f"Detected non-jinja templater: {_templater_class.name}") return [] results = [] # Work through the templated slices for raw_slice in context.templated_file.raw_sliced: # We only want templated slices. if raw_slice.slice_type not in ("templated", "block_start", "block_end"): continue stripped = raw_slice.raw.strip() if not stripped or stripped[0] != "{" or stripped[-1] != "}": continue # pragma: no cover self.logger.debug( "Tag found @ source index %s: %r ", raw_slice.source_idx, stripped ) # Partition and Position src_idx = raw_slice.source_idx tag_pre, ws_pre, inner, ws_post, tag_post = self._get_whitespace_ends( stripped ) position = raw_slice.raw.find(stripped[0]) self.logger.debug( "Tag string segments: %r | %r | %r | %r | %r @ %s + %s", tag_pre, ws_pre, inner, ws_post, tag_post, src_idx, position, ) # For the following section, whitespace should be a single # whitespace OR it should contain a newline. pre_fix = None post_fix = None # Check the initial whitespace. if not ws_pre or (ws_pre != " " and "\n" not in ws_pre): pre_fix = " " # Check the latter whitespace. if not ws_post or (ws_post != " " and "\n" not in ws_post): post_fix = " " # If no fixes, continue if pre_fix is None and post_fix is None: continue fixed = ( tag_pre + (pre_fix or ws_pre) + inner + (post_fix or ws_post) + tag_post ) # We need to identify a raw segment to attach the fix to.
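# (Comment added: ``_find_raw_at_src_idx`` returns the first raw segment
# whose source slice covers ``src_idx``, i.e. the raw segment overlapping
# the start of the tag.)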
raw_seg = self._find_raw_at_src_idx(context.segment, src_idx) # If that raw segment already has fixes, don't apply it again. # We're likely on a second pass. if raw_seg.source_fixes: continue source_fixes = [ SourceFix( fixed, slice( src_idx + position, src_idx + position + len(stripped), ), # This position in the templated file is rough, but # close enough for sequencing. raw_seg.pos_marker.templated_slice, ) ] results.append( LintResult( anchor=raw_seg, description=f"Jinja tags should have a single " f"whitespace on either side: {stripped}", fixes=[ LintFix.replace( raw_seg, [raw_seg.edit(source_fixes=source_fixes)], ) ], ) ) return results sqlfluff-3.4.2/src/sqlfluff/rules/jinja/__init__.py000066400000000000000000000005721503426445100223210ustar00rootroot00000000000000"""The jinja rules plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.jinja.JJ01 import Rule_JJ01 return [Rule_JJ01] sqlfluff-3.4.2/src/sqlfluff/rules/layout/000077500000000000000000000000001503426445100204265ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT01.py000066400000000000000000000044461503426445100214700ustar00rootroot00000000000000"""Implementation of Rule LT01.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT01(BaseRule): """Inappropriate Spacing. This rule checks for and enforces the spacing as configured in :ref:`layoutconfig`. This includes excessive whitespace, trailing whitespace at the end of a line and also the wrong spacing between elements on the line. Because of this wide reach you may find that you wish to add specific configuration in your project to tweak how specific elements are treated. Rather than configuring this specific rule, use the `sqlfluff.layout` section of your configuration file to customise how this rule operates. The ``•`` character represents a space in the examples below. **Anti-pattern** .. code-block:: sql :force: SELECT a, b(c) as d•• FROM foo•••• JOIN bar USING(a) **Best practice** * Unless an indent or preceding a comment, whitespace should be a single space. * There should also be no trailing whitespace at the ends of lines. * There should be a space after :code:`USING` so that it's not confused with a function. ..
code-block:: sql SELECT a, b(c) as d FROM foo JOIN bar USING (a) """ name = "layout.spacing" # NOTE: This rule combines the following legacy rules: # - L001: Trailing Whitespace # - L005 & L008: Space around commas # - L006: Space around operators # - L023: Space after AS in WITH clause # - L024: Space immediately after USING # - L039: Unnecessary Whitespace # - L048: Spacing around quoted literals # - L071: Spacing around brackets aliases = ("L001", "L005", "L006", "L008", "L023", "L024", "L039", "L048", "L071") groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Unnecessary whitespace.""" sequence = ReflowSequence.from_root(context.segment, config=context.config) return sequence.respace().get_results() sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT02.py000066400000000000000000000034311503426445100214620ustar00rootroot00000000000000"""Implementation of Rule LT02.""" from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT02(BaseRule): """Incorrect Indentation. **Anti-pattern** The ``•`` character represents a space and the ``→`` character represents a tab. In this example, the third line contains five spaces instead of four and the second line contains two spaces and one tab. .. code-block:: sql :force: SELECT ••→a, •••••b FROM foo **Best practice** Change the indentation to use a multiple of four spaces. This example also assumes that the ``indent_unit`` config value is set to ``space``. If it had instead been set to ``tab``, then the indents would be tabs instead. .. code-block:: sql :force: SELECT ••••a, ••••b FROM foo """ name = "layout.indent" # NOTE: We're combining three legacy rules here into one. aliases = ("L002", "L003", "L004") groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True targets_templated = True template_safe_fixes = True _adjust_anchors = True def _eval(self, context: RuleContext) -> list[LintResult]: """Indentation not consistent with previous lines. To set the default tab size, set the `tab_space_size` value in the appropriate configuration. To correct indents to tabs use the `indent_unit` value set to `tab`. """ return ( ReflowSequence.from_root(context.segment, context.config) .reindent() .get_results() ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT03.py000066400000000000000000000127121503426445100214650ustar00rootroot00000000000000"""Implementation of Rule LT03.""" from collections.abc import Sequence from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.reflow import ReflowSequence class Rule_LT03(BaseRule): """Operators should follow a standard for being before/after newlines. The configuration for whether operators should be ``trailing`` or ``leading`` is part of :ref:`layoutconfig`. The default configuration is: .. code-block:: cfg [sqlfluff:layout:type:binary_operator] line_position = leading [sqlfluff:layout:type:comparison_operator] line_position = leading **Anti-pattern** In this example, if ``line_position = leading`` (or unspecified, as is the default), then the operator ``+`` should not be at the end of the second line. .. 
code-block:: sql SELECT a + b FROM foo **Best practice** If ``line_position = leading`` (or unspecified, as this is the default), place the operator after the newline. .. code-block:: sql SELECT a + b FROM foo If ``line_position = trailing``, place the operator before the newline. .. code-block:: sql SELECT a + b FROM foo """ name = "layout.operators" aliases = ("L007",) groups = ("all", "layout") crawl_behaviour = SegmentSeekerCrawler({"binary_operator", "comparison_operator"}) is_fix_compatible = True def _seek_newline( self, segments: Sequence[BaseSegment], idx: int, dir: int ) -> bool: """Seek in a direction, looking for newlines. Args: segments: A sequence of segments to seek within. idx: The index of the "current" segment. dir: The direction to seek in (+1 for forward, -1 for backward) """ assert dir in (1, -1) for segment in segments[idx + dir :: dir]: if segment.is_type("newline"): # It's definitely leading. No problems. self.logger.debug( "Shortcut (dir = %s) OK. Found newline: %s", dir, segment ) return True elif not segment.is_type("whitespace", "indent", "comment"): # We found something before it which suggests it's not leading. # We should run the full reflow routine to check. break return False def _check_trail_lead_shortcut( self, segment: BaseSegment, parent: BaseSegment, line_position: str ) -> bool: """Check to see whether we should pass the rule and shortcut. Args: segment: The target segment. parent: The parent segment (must contain `segment`). line_position: The `line_position` config for the segment. """ idx = parent.segments.index(segment) # Shortcut #1: Leading. if line_position == "leading": if self._seek_newline(parent.segments, idx, dir=-1): return True # If we didn't find a newline before, if there's _also_ not a newline # after, then we can also shortcut. i.e. it's a comma "mid line". if not self._seek_newline(parent.segments, idx, dir=1): return True # Shortcut #2: Trailing. elif line_position == "trailing": if self._seek_newline(parent.segments, idx, dir=1): return True # If we didn't find a newline after, if there's _also_ not a newline # before, then we can also shortcut. i.e. it's a comma "mid line". if not self._seek_newline(parent.segments, idx, dir=-1): return True return False def _eval(self, context: RuleContext) -> list[LintResult]: """Operators should follow a standard for being before/after newlines. For the fixing routines we delegate to the reflow utils. However for performance reasons we have some initial shortcuts to quickly identify situations which are _ok_ to avoid the overhead of the full reflow path. """ # NOTE: These shortcuts assume that any newlines will be direct # siblings of the operator in question. This isn't _always_ the case # but is true often enough to have meaningful upside from early # detection. 
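# Illustrative note (comment added): with the default ``leading``
# positioning, an operator written as ``a <newline> + b`` is cleared by
# the shortcut, while ``a + <newline> b`` falls through to the full
# reflow path below.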
if context.segment.is_type("comparison_operator"): comparison_positioning = context.config.get( "line_position", ["layout", "type", "comparison_operator"] ) if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], comparison_positioning ): return [LintResult()] elif context.segment.is_type("binary_operator"): binary_positioning = context.config.get( "line_position", ["layout", "type", "binary_operator"] ) if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], binary_positioning ): return [LintResult()] return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT04.py000066400000000000000000000050351503426445100214660ustar00rootroot00000000000000"""Implementation of Rule LT04.""" from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.layout.LT03 import Rule_LT03 from sqlfluff.utils.reflow import ReflowSequence class Rule_LT04(Rule_LT03): """Leading/Trailing comma enforcement. The configuration for whether operators should be ``trailing`` or ``leading`` is part of :ref:`layoutconfig`. The default configuration is: .. code-block:: cfg [sqlfluff:layout:type:comma] line_position = trailing **Anti-pattern** There is a mixture of leading and trailing commas. .. code-block:: sql SELECT a , b, c FROM foo **Best practice** By default, `SQLFluff` prefers trailing commas. However it is configurable for leading commas. The chosen style must be used consistently throughout your SQL. .. code-block:: sql SELECT a, b, c FROM foo -- Alternatively, set the configuration file to 'leading' -- and then the following would be acceptable: SELECT a , b , c FROM foo """ name = "layout.commas" aliases = ("L019",) groups = ("all", "layout") crawl_behaviour = SegmentSeekerCrawler({"comma"}) _adjust_anchors = True is_fix_compatible = True def _eval(self, context: RuleContext) -> list[LintResult]: """Enforce comma placement. For the fixing routines we delegate to the reflow utils. However for performance reasons we have some initial shortcuts to quickly identify situations which are _ok_ to avoid the overhead of the full reflow path. """ comma_positioning = context.config.get( "line_position", ["layout", "type", "comma"] ) # NOTE: These shortcuts assume that any newlines will be direct # siblings of the comma in question. This isn't _always_ the case # but is true often enough to have meaningful upside from early # detection. 
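# Illustrative note (comment added): with the default ``trailing``
# positioning, ``a, <newline> b`` passes the shortcut here, while
# ``a <newline> , b`` drops through to the reflow-based rebreak below.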
if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], comma_positioning ): return [LintResult()] return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT05.py000066400000000000000000000145561503426445100214770ustar00rootroot00000000000000"""Implementation of Rule LT05.""" from typing import cast from sqlfluff.core.parser.segments import RawSegment, TemplateSegment from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.base import BaseRule from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT05(BaseRule): """Line is too long.""" name = "layout.long_lines" aliases = ("L016",) groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() targets_templated = True template_safe_fixes = True _adjust_anchors = True _check_docstring = False is_fix_compatible = True config_keywords = [ "ignore_comment_lines", "ignore_comment_clauses", ] def _eval(self, context: RuleContext) -> list[LintResult]: """Line is too long.""" self.ignore_comment_lines: bool self.ignore_comment_clauses: bool # Reflow and generate fixes. results = ( ReflowSequence.from_root(context.segment, context.config) .break_long_lines() .get_results() ) # Ignore any comment line if appropriate. if self.ignore_comment_lines: raw_segments = context.segment.raw_segments for res in results[:]: # First handle the easy case that the anchor (i.e. the start # of the line is a comment). assert res.anchor assert res.anchor.pos_marker assert isinstance(res.anchor, RawSegment), ( "Expected fixes generated by `break_long_lines` to only have " "`RawSegment` anchors." ) if res.anchor.is_type("comment"): self.logger.debug( "Purging result on long line starting with comment: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) continue # Then look for comments on the rest of the line: assert res.anchor.pos_marker raw_idx = raw_segments.index(res.anchor) for seg in raw_segments[raw_idx:]: if ( seg.pos_marker.working_line_no != res.anchor.pos_marker.working_line_no ): # We've gone past the end of the line. Stop looking. break # pragma: no cover # Is it a comment? if seg.is_type("comment"): self.logger.debug( "Purging result on long line containing comment: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break # Is it a template comment? elif ( seg.is_type("placeholder") and cast(TemplateSegment, seg).block_type == "comment" ): self.logger.debug( "Purging result with template comment line: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break # Ignore any comment clauses if present. if self.ignore_comment_clauses: raw_segments = context.segment.raw_segments for res in results[:]: # The anchor should be the first raw on the line. Work forward # until we're not on the line. Check if any have a parent which # is a comment_clause. assert res.anchor assert res.anchor.pos_marker assert isinstance(res.anchor, RawSegment), ( "Expected fixes generated by `break_long_lines` to only have " "`RawSegment` anchors." ) raw_idx = raw_segments.index(res.anchor) for seg in raw_segments[raw_idx:]: if ( seg.pos_marker.working_line_no != res.anchor.pos_marker.working_line_no ): # We've gone past the end of the line. Stop looking. 
break # Look to see if any are in comment clauses for ps in context.segment.path_to(seg): if ps.segment.is_type( "comment_clause", "comment_equals_clause" ): # It IS! Ok, purge this result from results, unless # the line is already too long without the comment. # We'll know that based on the line position of # the comment. # We can fairly confidently assert that the segment # will have a position marker at this stage. assert ps.segment.pos_marker line_pos = ps.segment.pos_marker.working_line_pos if line_pos < context.config.get("max_line_length"): # OK purge it. self.logger.debug( "Purging result on long line with comment " "clause: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break self.logger.debug( "Keeping result on long line with comment clause. " "Still too long without comment: %s", res.anchor.pos_marker.working_line_no, ) # If we finish the loop without breaking, we didn't find a # comment. Keep looking. else: continue # If we did finish with a break, we should break the outer # loop too. break return results sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT06.py000066400000000000000000000045541503426445100214750ustar00rootroot00000000000000"""Implementation of Rule LT06.""" from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT06(BaseRule): """Function name not immediately followed by parenthesis. **Anti-pattern** In this example, there is a space between the function and the parenthesis. .. code-block:: sql SELECT sum (a) FROM foo **Best practice** Remove the space between the function and the parenthesis. .. code-block:: sql SELECT sum(a) FROM foo """ name = "layout.functions" aliases = ("L017",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Function name not immediately followed by bracket. Look for Function Segment with anything other than the function name before brackets NOTE: This hasn't been combined with LT01 because it has some special treatment for comments. That might be something we revisit at a later point if duplicate errors become problematic. """ segment = FunctionalContext(context).segment # We only trigger on function contents assert segment.all(sp.is_type("function")) children = segment.children() function_name = children.first(sp.is_type("function_name"))[0] function_contents = children.first(sp.is_type("function_contents"))[0] intermediate_segments = children.select( start_seg=function_name, stop_seg=function_contents ) if intermediate_segments: # It's only safe to fix if there is only whitespace # or newlines in the intervening section. if intermediate_segments.all(sp.is_type("whitespace", "newline")): return LintResult( anchor=intermediate_segments[0], fixes=[LintFix.delete(seg) for seg in intermediate_segments], ) else: # It's not all whitespace, just report the error. 
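# (Comment added: e.g. ``sum /* note */ (a)``, where deleting the
# intervening comment could lose information, so only a warning is
# raised and no fix is offered.)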
return LintResult( anchor=intermediate_segments[0], ) return LintResult() sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT07.py000066400000000000000000000110611503426445100214650ustar00rootroot00000000000000"""Implementation of Rule LT07.""" from typing import Optional, cast from sqlfluff.core.parser import NewlineSegment, RawSegment from sqlfluff.core.parser.segments import TemplateSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT07(BaseRule): """``WITH`` clause closing bracket should be on a new line. **Anti-pattern** In this example, the closing bracket is on the same line as the CTE. .. code-block:: sql :force: WITH zoo AS ( SELECT a FROM foo) SELECT * FROM zoo **Best practice** Move the closing bracket onto a new line. .. code-block:: sql WITH zoo AS ( SELECT a FROM foo ) SELECT * FROM zoo """ name = "layout.cte_bracket" aliases = ("L018",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler( {"with_compound_statement"}, provide_raw_stack=True ) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """WITH clause closing bracket should be on a new line. Look for a with clause and evaluate the position of closing brackets. """ # We only trigger on with_compound_statement segments. assert context.segment.is_type("with_compound_statement") # Find the end brackets for the CTE *query* (i.e. ignore optional # list of CTE columns). cte_end_brackets: set[RawSegment] = set() for cte in ( FunctionalContext(context) .segment.children(sp.is_type("common_table_expression")) .iterate_segments() ): cte_start_bracket = ( cte.children() .last(sp.is_type("bracketed")) .children() .first(sp.is_type("start_bracket")) ) cte_end_bracket = ( cte.children() .last(sp.is_type("bracketed")) .children() .last(sp.is_type("end_bracket")) ) if cte_start_bracket and cte_end_bracket: self.logger.debug( "Found CTE with brackets: %s & %s", cte_start_bracket, cte_end_bracket, ) # Are they on the same line? # NOTE: This assertion should be fairly safe because # there aren't many reasons for a bracket to not yet # be positioned. assert cte_start_bracket[0].pos_marker assert cte_end_bracket[0].pos_marker if ( cte_start_bracket[0].pos_marker.line_no == cte_end_bracket[0].pos_marker.line_no ): # Same line self.logger.debug("Skipping because on same line.") continue # Otherwise add to the ones to check. cte_end_brackets.add(cast(RawSegment, cte_end_bracket[0])) for seg in cte_end_brackets: contains_non_whitespace = False idx = context.segment.raw_segments.index(seg) self.logger.debug("End bracket %s has idx %s", seg, idx) # Search backward through the raw segments from just before # the location of the bracket. for elem in context.segment.raw_segments[idx - 1 :: -1]: # If there's a literal newline, stop. if elem.is_type("newline"): break # ...or a consumed newline in a placeholder.
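# (Comment added: i.e. a templated slice whose *source* contained a
# newline which no longer appears literally in the rendered SQL.)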
elif elem.is_type("placeholder"): placeholder = cast(TemplateSegment, elem) if placeholder.source_str == "\n": break elif not elem.is_type("indent", "whitespace"): self.logger.debug("Found non-whitespace: %s", elem) contains_non_whitespace = True break if contains_non_whitespace: # We have to move it to a newline return LintResult( anchor=seg, fixes=[ LintFix.create_before( seg, [ NewlineSegment(), ], ) ], ) return None sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT08.py000066400000000000000000000201211503426445100214630ustar00rootroot00000000000000"""Implementation of Rule LT08.""" from typing import Optional from sqlfluff.core.parser import NewlineSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_LT08(BaseRule): """Blank line expected but not found after CTE closing bracket. **Anti-pattern** There is no blank line after the CTE closing bracket. In queries with many CTEs, this hinders readability. .. code-block:: sql WITH plop AS ( SELECT * FROM foo ) SELECT a FROM plop **Best practice** Add a blank line. .. code-block:: sql WITH plop AS ( SELECT * FROM foo ) SELECT a FROM plop """ name = "layout.cte_newline" aliases = ("L022",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"with_compound_statement"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Blank line expected but not found after CTE definition.""" error_buffer = [] global_comma_style = context.config.get( "line_position", ["layout", "type", "comma"] ) assert context.segment.is_type("with_compound_statement") # First we need to find all the commas, the end brackets, the # things that come after that and the blank lines in between. # Find all the closing brackets. They are our anchor points. bracket_indices = [] expanded_segments = list( context.segment.iter_segments(expanding=["common_table_expression"]) ) for idx, seg in enumerate(expanded_segments): if seg.is_type("bracketed"): bracket_indices.append(idx) # Work through each point and deal with it individually for bracket_idx in bracket_indices: forward_slice = expanded_segments[bracket_idx:] seg_idx = 1 line_idx = 0 comma_seg_idx = 0 blank_lines = 0 comma_line_idx = None line_blank = False comma_style: str line_starts = {} comment_lines = [] self.logger.info( "## CTE closing bracket found at %s, idx: %s. Forward slice: %.20r", forward_slice[0].pos_marker, bracket_idx, "".join(elem.raw for elem in forward_slice), ) # Work forward to map out the following segments. while ( forward_slice[seg_idx].is_type("comma") or not forward_slice[seg_idx].is_code ): if forward_slice[seg_idx].is_type("newline"): if line_blank: # It's a blank line! blank_lines += 1 line_blank = True line_idx += 1 line_starts[line_idx] = seg_idx + 1 elif forward_slice[seg_idx].is_type("comment"): # Lines with comments aren't blank line_blank = False comment_lines.append(line_idx) elif forward_slice[seg_idx].is_type("comma"): # Keep track of where the comma is. # We'll evaluate it later. comma_line_idx = line_idx comma_seg_idx = seg_idx seg_idx += 1 # Infer the comma style (NB this could be different for each case!) if comma_line_idx is None: comma_style = "final" elif line_idx == 0: comma_style = "oneline" elif comma_line_idx == 0: comma_style = "trailing" elif comma_line_idx == line_idx: comma_style = "leading" else: comma_style = "floating" # Readout of findings self.logger.info( "blank_lines: %s, comma_line_idx: %s. 
final_line_idx: %s, " "final_seg_idx: %s", blank_lines, comma_line_idx, line_idx, seg_idx, ) self.logger.info( "comma_style: %r, line_starts: %r, comment_lines: %r", comma_style, line_starts, comment_lines, ) # If we've got blank lines. We're good. if blank_lines >= 1: continue # We've got an issue self.logger.info("!! Found CTE without enough blank lines.") # Based on the current location of the comma we insert newlines # to correct the issue. # First handle the potential simple case of a current one line fix_type = "create_before" # In most cases we just insert newlines. if comma_style == "oneline": # Here we respect the target comma style to insert at the # relevant point. if global_comma_style == "trailing": # Add a blank line after the comma fix_point = forward_slice[comma_seg_idx + 1] # Optionally here, if the segment we've landed on is # whitespace then we REPLACE it rather than inserting. if forward_slice[comma_seg_idx + 1].is_type("whitespace"): fix_type = "replace" elif global_comma_style == "leading": # Add a blank line before the comma fix_point = forward_slice[comma_seg_idx] else: # pragma: no cover raise NotImplementedError( f"Unexpected global comma style {global_comma_style!r}" ) # In both cases it's a double newline. num_newlines = 2 else: # In the following cases we only care which one we're in # when comments don't get in the way. If they *do*, then # we just work around them. if not comment_lines or line_idx - 1 not in comment_lines: self.logger.info("Comment routines not applicable") if comma_style in ("trailing", "final", "floating"): # Detected an existing trailing comma or it's a final # CTE, OR the comma isn't leading or trailing. # If the preceding segment is whitespace, replace it if forward_slice[seg_idx - 1].is_type("whitespace"): fix_point = forward_slice[seg_idx - 1] fix_type = "replace" else: # Otherwise add a single newline before the end # content. fix_point = forward_slice[seg_idx] elif comma_style == "leading": # Detected an existing leading comma. fix_point = forward_slice[comma_seg_idx] else: self.logger.info("Handling preceding comments") offset = 1 while line_idx - offset in comment_lines: offset += 1 # If the offset - 1 equals the line_idx then there aren't # really any comment-only lines (ref #2945). # Reset to line_idx fix_point = forward_slice[ line_starts[line_idx - (offset - 1) or line_idx] ] num_newlines = 1 fixes = [ LintFix( fix_type, fix_point, [NewlineSegment()] * num_newlines, ) ] # Create a result, anchored on the start of the next content. error_buffer.append(LintResult(anchor=forward_slice[seg_idx], fixes=fixes)) # Return the buffer if we have one. 
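# (Comment added: an empty buffer is normalised to ``None`` so that the
# statement is treated as clean.)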
return error_buffer or None sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT09.py000066400000000000000000000411571503426445100215000ustar00rootroot00000000000000"""Implementation of Rule LT09.""" from collections.abc import Sequence from typing import NamedTuple, Optional from sqlfluff.core.parser import BaseSegment, NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class SelectTargetsInfo(NamedTuple): """Info about select targets and nearby whitespace.""" select_idx: int first_new_line_idx: int first_select_target_idx: int first_whitespace_idx: int comment_after_select_idx: int select_targets: Sequence[BaseSegment] from_segment: Optional[BaseSegment] pre_from_whitespace: list[BaseSegment] class Rule_LT09(BaseRule): """Select targets should be on a new line unless there is only one select target. .. note:: By default, a wildcard (e.g. ``SELECT *``) is considered a single select target. If you want it to be treated as multiple select targets, configure ``wildcard_policy = multiple``. **Anti-pattern** Multiple select targets on the same line. .. code-block:: sql select a, b from foo; -- Single select target on its own line. SELECT a FROM foo; **Best practice** Multiple select targets each on their own line. .. code-block:: sql select a, b from foo; -- Single select target on the same line as the ``SELECT`` -- keyword. SELECT a FROM foo; -- When select targets span multiple lines, however they -- can still be on a new line. SELECT SUM( 1 + SUM( 2 + 3 ) ) AS col FROM test_table; """ name = "layout.select_targets" aliases = ("L036",) groups = ("all", "layout") config_keywords = ["wildcard_policy"] crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: self.wildcard_policy: str assert context.segment.is_type("select_clause") select_targets_info = self._get_indexes(context) select_clause = FunctionalContext(context).segment wildcards = select_clause.children( sp.is_type("select_clause_element") ).children(sp.is_type("wildcard_expression")) has_wildcard = bool(wildcards) if len(select_targets_info.select_targets) == 1 and ( not has_wildcard or self.wildcard_policy == "single" ): return self._eval_single_select_target_element( select_targets_info, context, ) elif len(select_targets_info.select_targets): return self._eval_multiple_select_target_elements( select_targets_info, context.segment ) return None @staticmethod def _get_indexes(context: RuleContext) -> SelectTargetsInfo: children = FunctionalContext(context).segment.children() select_targets = children.select(sp.is_type("select_clause_element")) first_select_target_idx = children.find(select_targets.get()) selects = children.select(sp.is_keyword("select")) select_idx = children.find(selects.get()) if selects else -1 newlines = children.select(sp.is_type("newline")) first_new_line_idx = children.find(newlines.get()) if newlines else -1 comment_after_select_idx = -1 if newlines: comment_after_select = children.select( sp.is_type("comment"), start_seg=selects.get(), stop_seg=newlines.get(), loop_while=sp.or_( sp.is_type("comment"), sp.is_type("whitespace"), sp.is_meta() ), ) if comment_after_select: comment_after_select_idx = ( children.find(comment_after_select.get()) if comment_after_select else -1 ) first_whitespace_idx = -1 if first_new_line_idx != -1: # TRICKY: Ignore 
whitespace prior to the first newline, e.g. if # the line with "SELECT" (before any select targets) has trailing # whitespace. segments_after_first_line = children.select( sp.is_type("whitespace"), start_seg=children[first_new_line_idx] ) first_whitespace_idx = children.find(segments_after_first_line.get()) siblings_post = FunctionalContext(context).siblings_post from_segment = siblings_post.first(sp.is_type("from_clause")).first().get() pre_from_whitespace = siblings_post.select( sp.is_type("whitespace"), stop_seg=from_segment ) return SelectTargetsInfo( select_idx, first_new_line_idx, first_select_target_idx, first_whitespace_idx, comment_after_select_idx, select_targets, from_segment, list(pre_from_whitespace), ) def _eval_multiple_select_target_elements( self, select_targets_info, segment ) -> Optional[LintResult]: """Multiple select targets. Ensure each is on a separate line.""" fixes = [] previous_code = None select_clause_raws = Segments(segment).raw_segments for i, select_target in enumerate(select_targets_info.select_targets): assert select_target.pos_marker target_start_line = select_target.pos_marker.working_line_no target_initial_code = ( Segments(select_target).raw_segments.first(sp.is_code()).get() ) assert target_initial_code previous_code = ( select_clause_raws.select( # Get the first code that isn't a comma. select_if=sp.and_(sp.is_code(), sp.not_(sp.raw_is(","))), start_seg=previous_code, stop_seg=target_initial_code, ) .last() .get() ) assert previous_code assert previous_code.pos_marker previous_end_line = previous_code.pos_marker.working_line_no self.logger.debug( "- Evaluating %s [%s, %s]: Prev ends with: %s", select_target, previous_end_line, target_start_line, previous_code, ) # Check whether this target *starts* on the same line that the # previous one *ends* on. If they are on the same line, insert a newline. if target_start_line == previous_end_line: # Find and delete any whitespace before the select target. start_seg = select_targets_info.select_idx # If any select modifier (e.g. distinct ) is present, start # there rather than at the beginning. modifier = segment.get_child("select_clause_modifier") if modifier: start_seg = segment.segments.index(modifier) ws_to_delete = segment.select_children( start_seg=( segment.segments[start_seg] if not i else select_targets_info.select_targets[i - 1] ), select_if=lambda s: s.is_type("whitespace"), loop_while=lambda s: s.is_type("whitespace", "comma") or s.is_meta, ) fixes += [LintFix.delete(ws) for ws in ws_to_delete] fixes.append(LintFix.create_before(select_target, [NewlineSegment()])) # If we are at the last select target check if the FROM clause # is on the same line, and if so move it to its own line. if select_targets_info.from_segment: if (i + 1 == len(select_targets_info.select_targets)) and ( select_target.pos_marker.working_line_no == select_targets_info.from_segment.pos_marker.working_line_no ): fixes.extend( [ LintFix.delete(ws) for ws in select_targets_info.pre_from_whitespace ] ) fixes.append( LintFix.create_before( select_targets_info.from_segment, [NewlineSegment()], ) ) if fixes: return LintResult(anchor=segment, fixes=fixes) return None def _eval_single_select_target_element( self, select_targets_info, context: RuleContext ): select_clause = FunctionalContext(context).segment parent_stack = context.parent_stack target_idx = select_targets_info.first_select_target_idx select_children = select_clause.children() target_seg = select_children[target_idx] # If it's all on one line, then there's no issue. 
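# (Comment added: a violation requires the SELECT keyword, then a
# newline, then the target, in that source order, hence the chained
# index comparison below.)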
if not ( select_targets_info.select_idx < select_targets_info.first_new_line_idx < target_idx ): self.logger.info( "Target at index %s is already on a single line.", target_idx, ) return None # Does the target contain a newline? # i.e. even if it's a single element, does it already span more than # one line? if "newline" in target_seg.descendant_type_set: self.logger.info( "Target at index %s spans multiple lines so ignoring.", target_idx, ) return None if select_targets_info.comment_after_select_idx != -1: # The SELECT is followed by a comment on the same line. In order # to autofix this, we'd need to move the select target between # SELECT and the comment and potentially delete the entire line # where the select target was (if it is now empty). This is # *fairly tricky and complex*, in part because the newline on # the select target's line is several levels higher in the # parser tree. Hence, we currently don't autofix this. Could be # autofixed in the future if/when we have the time. return LintResult(anchor=select_clause.get()) # Prepare the select clause which will be inserted insert_buff = [WhitespaceSegment(), target_seg] # Delete the first select target from its original location. # We'll add it to the right section at the end, once we know # what to add. initial_deletes = [target_seg] # If there's whitespace before it, delete that too. if select_children[target_idx - 1].is_type("whitespace"): initial_deletes.append(select_children[target_idx - 1]) # Do we have a modifier? modifier: Optional[Segments] modifier = select_children.first(sp.is_type("select_clause_modifier")) if ( # Check if the modifier is one we care about modifier # We only care if it's not already on the first line. and select_children.index(modifier.get()) >= select_targets_info.first_new_line_idx ): # Prepend it to the insert buffer insert_buff = [WhitespaceSegment(), modifier[0]] + insert_buff modifier_idx = select_children.index(modifier.get()) # Delete the whitespace after it (which is two after, thanks to indent) if ( len(select_children) > modifier_idx + 1 and select_children[modifier_idx + 2].is_whitespace ): initial_deletes.append(select_children[modifier_idx + 2]) # Delete the modifier itself initial_deletes.append(modifier[0]) # Set the position marker for removing the preceding # whitespace and newline, which we'll use below. start_idx = modifier_idx start_seg = modifier[0] else: # Set the position marker for removing the preceding # whitespace and newline, which we'll use below. start_idx = target_idx start_seg = select_children[select_targets_info.first_new_line_idx] fixes = [ # Insert the select_clause in place of the first newline in the # Select statement LintFix.replace( select_children[select_targets_info.first_new_line_idx], insert_buff, ), # Materialise any deletes so far... *(LintFix.delete(seg) for seg in initial_deletes), ] if parent_stack and parent_stack[-1].is_type("select_statement"): select_stmt = parent_stack[-1] select_clause_idx = select_stmt.segments.index(select_clause.get()) after_select_clause_idx = select_clause_idx + 1 if len(select_stmt.segments) > after_select_clause_idx: add_newline = True to_delete: Sequence[BaseSegment] = [target_seg] next_segment = select_stmt.segments[after_select_clause_idx] if next_segment.is_type("newline"): # Since we're deleting the newline, we should also delete all # whitespace before it or it will add random whitespace to # following statements. 
So walk back through the segment # deleting whitespace until you get the previous newline, or # something else. to_delete = select_children.reversed().select( loop_while=sp.is_type("whitespace"), start_seg=select_children[start_idx], ) if to_delete: # The select_clause is immediately followed by a # newline. Delete the newline in order to avoid leaving # behind an empty line after fix, *unless* we stopped # due to something other than a newline. delete_last_newline = select_children[ start_idx - len(to_delete) - 1 ].is_type("newline") # Delete the newline if we decided to. if delete_last_newline: fixes.append(LintFix.delete(next_segment)) elif next_segment.is_type("whitespace"): # The select_clause has stuff after (most likely a comment) # Delete the whitespace immediately after the select clause # so the other stuff aligns nicely based on where the select # clause started. fixes.append(LintFix.delete(next_segment)) if to_delete: # Clean up by moving leftover select_clause segments. # Context: Some of the other fixes we make in # _eval_single_select_target_element() leave leftover # child segments that need to be moved to become # *siblings* of the select_clause. move_after_select_clause = select_children.select( start_seg=start_seg, stop_seg=to_delete[-1], ) # :TRICKY: Below, we have a couple places where we # filter to guard against deleting the same segment # multiple times -- this is illegal. all_deletes = { fix.anchor for fix in fixes if fix.edit_type == "delete" } for seg in (*to_delete, *move_after_select_clause): if seg not in all_deletes: fixes.append(LintFix.delete(seg)) all_deletes.add(seg) if move_after_select_clause or add_newline: fixes.append( LintFix.create_after( select_clause[0], ([NewlineSegment()] if add_newline else []) + list(move_after_select_clause), ) ) return LintResult( anchor=select_clause.get(), fixes=fixes, ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT10.py000066400000000000000000000115651503426445100214700ustar00rootroot00000000000000"""Implementation of Rule LT10.""" from typing import Optional from sqlfluff.core.parser import NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT10(BaseRule): """``SELECT`` modifiers (e.g. ``DISTINCT``) must be on the same line as ``SELECT``. **Anti-pattern** .. code-block:: sql select distinct a, b from x **Best practice** .. code-block:: sql select distinct a, b from x """ name = "layout.select_modifiers" aliases = ("L041",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Select clause modifiers must appear on same line as SELECT.""" # We only care about select_clause. assert context.segment.is_type("select_clause") # Get children of select_clause and the corresponding select keyword. child_segments = FunctionalContext(context).segment.children() select_keyword = child_segments[0] # See if we have a select_clause_modifier. select_clause_modifier_seg = child_segments.first( sp.is_type("select_clause_modifier") ) # Rule doesn't apply if there's no select clause modifier. if not select_clause_modifier_seg: return None select_clause_modifier = select_clause_modifier_seg[0] # Are there any newlines between the select keyword # and the select clause modifier. 
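        # e.g. "select\ndistinct a" has a newline between "select" and
        # "distinct", so the selection below is non-empty and the rule
        # fires; for "select distinct a" it is empty and we return early.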
leading_newline_segments = child_segments.select( select_if=sp.is_type("newline"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_keyword, ) # Rule doesn't apply if select clause modifier # is already on the same line as the select keyword. if not leading_newline_segments: return None # We should check if there is whitespace before the select clause modifier # and remove this during the lint fix. leading_whitespace_segments = child_segments.select( select_if=sp.is_type("whitespace"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_keyword, ) # We should also check if the following select clause element # is on the same line as the select clause modifier. trailing_newline_segments = child_segments.select( select_if=sp.is_type("newline"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_clause_modifier, ) # We will insert these segments directly after the select keyword. edit_segments = [ WhitespaceSegment(), select_clause_modifier, ] if not trailing_newline_segments: # if the first select clause element is on the same line # as the select clause modifier then also insert a newline. edit_segments.append(NewlineSegment()) fixes = [] # Move select clause modifier after select keyword. fixes.append( LintFix.create_after( anchor_segment=select_keyword, edit_segments=edit_segments, ) ) # Delete original newlines and whitespace between select keyword # and select clause modifier. # If there is not a newline after the select clause modifier then delete # newlines between the select keyword and select clause modifier. if not trailing_newline_segments: fixes.extend(LintFix.delete(s) for s in leading_newline_segments) # If there is a newline after the select clause modifier then delete both the # newlines and whitespace between the select keyword and select clause modifier. else: fixes.extend( LintFix.delete(s) for s in leading_newline_segments + leading_whitespace_segments ) # Delete the original select clause modifier. fixes.append(LintFix.delete(select_clause_modifier)) # If there is whitespace (on the same line) after the select clause modifier # then also delete this. trailing_whitespace_segments = child_segments.select( select_if=sp.is_whitespace(), loop_while=sp.or_(sp.is_type("whitespace"), sp.is_meta()), start_seg=select_clause_modifier, ) if trailing_whitespace_segments: fixes.extend(LintFix.delete(s) for s in trailing_whitespace_segments) return LintResult( anchor=context.segment, fixes=fixes, ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT11.py000066400000000000000000000025701503426445100214650ustar00rootroot00000000000000"""Implementation of Rule LT11.""" from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT11(BaseRule): """Set operators should be surrounded by newlines. **Anti-pattern** In this example, `UNION ALL` is not on a line itself. .. code-block:: sql SELECT 'a' AS col UNION ALL SELECT 'b' AS col **Best practice** .. code-block:: sql SELECT 'a' AS col UNION ALL SELECT 'b' AS col """ name = "layout.set_operators" aliases = ("L065",) groups = ("all", "core", "layout") is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler({"set_operator"}) def _eval(self, context: RuleContext) -> list[LintResult]: """Set operators should be surrounded by newlines. For any set operator we check if there is any NewLineSegment in the non-code segments preceding or following it. 
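        For example, in ``SELECT 'a' AS col UNION ALL SELECT 'b' AS col``
        (all on one line) there is no newline on either side of the
        operator, so both sides are rebroken.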
In particular, as part of this rule we allow multiple NewLineSegments. """ return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT12.py000066400000000000000000000155111503426445100214650ustar00rootroot00000000000000"""Implementation of Rule LT12.""" from typing import Optional, cast from sqlfluff.core.helpers.string import get_trailing_whitespace_from_string from sqlfluff.core.parser import BaseSegment, NewlineSegment from sqlfluff.core.parser.segments import SourceFix, TemplateSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp, tsp def get_trailing_newlines(segment: BaseSegment) -> list[BaseSegment]: """Returns list of trailing newlines in the tree.""" result = [] for seg in segment.recursive_crawl_all(reverse=True): if seg.is_type("newline"): result.append(seg) if not seg.is_whitespace and not seg.is_type("dedent", "end_of_file"): break return result def get_last_segment(segment: Segments) -> tuple[list[BaseSegment], Segments]: """Returns rightmost & lowest descendant and its "parent stack".""" parent_stack: list[BaseSegment] = [] while True: children = segment.children() if children: parent_stack.append(segment[0]) segment = children.last(predicate=sp.not_(sp.is_type("end_of_file"))) else: return parent_stack, segment class Rule_LT12(BaseRule): """Files must end with a single trailing newline. **Anti-pattern** The content in file does not end with a single trailing newline. The ``$`` represents end of file. .. code-block:: sql :force: SELECT a FROM foo$ -- Ending on an indented line means there is no newline -- at the end of the file, the • represents space. SELECT ••••a FROM ••••foo ••••$ -- Ending on a semi-colon means the last line is not a -- newline. SELECT a FROM foo ;$ -- Ending with multiple newlines. SELECT a FROM foo $ **Best practice** Add trailing newline to the end. The ``$`` character represents end of file. .. code-block:: sql :force: SELECT a FROM foo $ -- Ensuring the last line is not indented so is just a -- newline. SELECT ••••a FROM ••••foo $ -- Even when ending on a semi-colon, ensure there is a -- newline after. SELECT a FROM foo ; $ """ name = "layout.end_of_file" # Between 2.0.0 and 2.0.4 we supported had a kebab-case name for this rule # so the old name remains here as an alias to enable backward compatibility. aliases = ("L009", "layout.end-of-file") groups = ("all", "core", "layout") targets_templated = True # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment. crawl_behaviour = RootOnlyCrawler() lint_phase = "post" is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Files must end with a single trailing newline. We only care about the segment and the siblings which come after it for this rule, we discard the others into the kwargs argument. """ # We only care about the final segment of the parse tree. parent_stack, segment = get_last_segment(FunctionalContext(context).segment) self.logger.debug("Found last segment as: %s", segment) if not segment: # NOTE: Edge case. If the file is totally empty, we won't find a final # segment. In this case return without error. return None # Check whether the final segment is a placeholder. If it is, we need # to proceed more carefully. 
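        # e.g. a file ending in a templated placeholder such as
        # "{% endif %}": any trailing-newline fix has to be made against the
        # placeholder's source string (via SourceFix below) rather than by
        # editing templated segments directly.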
_trailing_segment = segment.get() assert _trailing_segment if _trailing_segment.is_type("placeholder"): self.logger.debug("Trailing placeholder detected.") _template_segment = cast(TemplateSegment, _trailing_segment) _trailing_whitespace = get_trailing_whitespace_from_string( _template_segment.source_str ) # Does it already end with a single whitespace? if _trailing_whitespace == "\n": return None # If this segment already has fixes applied, don't try and do it # again. if _template_segment.source_fixes: self.logger.debug("Fixes already applied. Aborting...") return None # It's not a single newline. Return a fix to make it one. _current_stop = _template_segment.pos_marker.source_slice.stop source_fix = SourceFix( "\n", slice( _current_stop - len(_trailing_whitespace), _current_stop, ), _template_segment.pos_marker.templated_slice, ) return LintResult( anchor=_template_segment, fixes=[ LintFix.replace( _template_segment, [_template_segment.edit(source_fixes=[source_fix])], ) ], ) trailing_newlines = Segments(*get_trailing_newlines(context.segment)) trailing_literal_newlines = trailing_newlines self.logger.debug( "Untemplated trailing newlines: %s", trailing_literal_newlines ) if context.templated_file: trailing_literal_newlines = trailing_newlines.select( loop_while=lambda seg: sp.templated_slices( seg, context.templated_file ).all(tsp.is_slice_type("literal")) ) self.logger.debug("Templated trailing newlines: %s", trailing_literal_newlines) if not trailing_literal_newlines: # We make an edit to create this segment after the child of the FileSegment. if len(parent_stack) == 1: fix_anchor_segment = segment[0] else: fix_anchor_segment = parent_stack[1] self.logger.debug("Anchor on: %s", fix_anchor_segment) return LintResult( anchor=segment[0], fixes=[ LintFix.create_after( fix_anchor_segment, [NewlineSegment()], ) ], ) elif len(trailing_literal_newlines) > 1: # Delete extra newlines. return LintResult( anchor=segment[0], fixes=[LintFix.delete(d) for d in trailing_literal_newlines[1:]], ) else: # Single newline, no need for fix. return None sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT13.py000066400000000000000000000061371503426445100214720ustar00rootroot00000000000000"""Implementation of Rule LT13.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, rsp, sp class Rule_LT13(BaseRule): """Files must not begin with newlines or whitespace. **Anti-pattern** The file begins with newlines or whitespace. The ``^`` represents the beginning of the file. .. code-block:: sql :force: ^ SELECT a FROM foo -- Beginning on an indented line is also forbidden, -- (the • represents space). ••••SELECT ••••a FROM ••••foo **Best practice** Start file on either code or comment. (The ``^`` represents the beginning of the file.) .. code-block:: sql :force: ^SELECT a FROM foo -- Including an initial block comment. ^/* This is a description of my SQL code. */ SELECT a FROM foo -- Including an initial inline comment. ^--This is a description of my SQL code. SELECT a FROM foo """ name = "layout.start_of_file" aliases = ("L050",) groups = ("all", "layout") targets_templated = True # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment. 
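    # A per-segment crawl would be wasted work here: this rule only ever
    # inspects the very start of the file.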
crawl_behaviour = RootOnlyCrawler() lint_phase = "post" is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Files must not begin with newlines or whitespace.""" # Only check raw segments. This ensures we don't try and delete the same # whitespace multiple times (i.e. for non-raw segments higher in the # tree). raw_segments = [] whitespace_types = {"newline", "whitespace", "indent", "dedent"} for seg in context.segment.recursive_crawl_all(): if not seg.is_raw(): continue if seg.is_type(*whitespace_types): raw_segments.append(seg) continue raw_stack = Segments(*raw_segments, templated_file=context.templated_file) # Non-whitespace segment. if ( not raw_stack.all(sp.is_meta()) # It is possible that a template segment (e.g. # {{ config(materialized='view') }}) renders to an empty string # and as such is omitted from the parsed tree. We therefore # should flag if a templated raw slice intersects with the # source slices in the raw stack and skip this rule to avoid # risking collisions with template objects. and not raw_stack.raw_slices.any(rsp.is_slice_type("templated")) ): return LintResult( anchor=context.segment, fixes=[LintFix.delete(d) for d in raw_stack], ) else: break return None sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT14.py000066400000000000000000000024421503426445100214660ustar00rootroot00000000000000"""Implementation of Rule LT14.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT14(BaseRule): """Keyword clauses should follow a standard for being before/after newlines. **Anti-pattern** In this example, the keyword are not at the beginning of or alone on the line. .. code-block:: sql SELECT 'a' AS col FROM tab WHERE x = 4 ORDER BY y LIMIT 5 **Best practice** .. code-block:: sql SELECT 'a' AS col FROM tab WHERE x = 4 ORDER BY y LIMIT 5 .. code-block:: sql SELECT 'a' AS col FROM tab WHERE x = 4 ORDER BY y LIMIT 5 """ name = "layout.keyword_newline" groups = ("all", "layout") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[list[LintResult]]: """Keyword clauses should begin on a newline.""" return ( ReflowSequence.from_root(context.segment, config=context.config) .rebreak("keywords") .get_results() ) sqlfluff-3.4.2/src/sqlfluff/rules/layout/LT15.py000066400000000000000000000036271503426445100214750ustar00rootroot00000000000000"""Implementation of Rule LT15.""" from typing import List, Optional from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_LT15(BaseRule): """Too many consecutive blank lines. **Anti-pattern** In this example, the maximum number of empty lines inside a statement is set to 0. .. code-block:: sql SELECT 'a' AS col FROM tab WHERE x = 4 ORDER BY y LIMIT 5 ; **Best practice** .. 
code-block:: sql SELECT 'a' AS col FROM tab WHERE x = 4 ORDER BY y LIMIT 5 ; """ name = "layout.newlines" groups = ("all", "layout") config_keywords = [ "maximum_empty_lines_between_statements", "maximum_empty_lines_inside_statements", ] crawl_behaviour = SegmentSeekerCrawler(types={"newline"}, provide_raw_stack=True) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """There should be a maximum number of empty lines.""" self.maximum_empty_lines_between_statements: int self.maximum_empty_lines_inside_statements: int context_seg = context.segment maximum_empty_lines = ( self.maximum_empty_lines_inside_statements if any(seg.is_type("statement") for seg in context.parent_stack) else self.maximum_empty_lines_between_statements ) if len(context.raw_stack) < maximum_empty_lines: return None if all( raw_seg.is_type("newline") for raw_seg in context.raw_stack[-maximum_empty_lines - 1 :] ): return [ LintResult( anchor=context_seg, fixes=[LintFix.delete(context_seg)], ) ] return None sqlfluff-3.4.2/src/sqlfluff/rules/layout/__init__.py000066400000000000000000000054041503426445100225420ustar00rootroot00000000000000"""The layout plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "ignore_comment_lines": { "validation": [True, False], "definition": ( "Should lines that contain only whitespace and comments" " be ignored when linting line lengths?" ), }, "ignore_comment_clauses": { "validation": [True, False], "definition": ( "Should comment clauses (e.g. column comments) be ignored" " when linting line lengths?" ), }, "maximum_empty_lines_between_statements": { "validation": range(1000), "definition": ( "The maximum number of empty lines allowed between statements. " "Note that currently, the gap _before_ and _after_ the semicolon " "is considered 'between' statements." ), }, "maximum_empty_lines_inside_statements": { "validation": range(1000), "definition": ( "The maximum number of empty lines allowed inside statements." ), }, "wildcard_policy": { "validation": ["single", "multiple"], "definition": "Treatment of wildcards. Defaults to ``single``.", }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
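
    Keeping the imports inside this function means that importing the
    plugin module itself stays cheap; the rule modules are only loaded
    when the plugin manager first calls this hook.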
""" from sqlfluff.rules.layout.LT01 import Rule_LT01 from sqlfluff.rules.layout.LT02 import Rule_LT02 from sqlfluff.rules.layout.LT03 import Rule_LT03 from sqlfluff.rules.layout.LT04 import Rule_LT04 from sqlfluff.rules.layout.LT05 import Rule_LT05 from sqlfluff.rules.layout.LT06 import Rule_LT06 from sqlfluff.rules.layout.LT07 import Rule_LT07 from sqlfluff.rules.layout.LT08 import Rule_LT08 from sqlfluff.rules.layout.LT09 import Rule_LT09 from sqlfluff.rules.layout.LT10 import Rule_LT10 from sqlfluff.rules.layout.LT11 import Rule_LT11 from sqlfluff.rules.layout.LT12 import Rule_LT12 from sqlfluff.rules.layout.LT13 import Rule_LT13 from sqlfluff.rules.layout.LT14 import Rule_LT14 from sqlfluff.rules.layout.LT15 import Rule_LT15 return [ Rule_LT01, Rule_LT02, Rule_LT03, Rule_LT04, Rule_LT05, Rule_LT06, Rule_LT07, Rule_LT08, Rule_LT09, Rule_LT10, Rule_LT11, Rule_LT12, Rule_LT13, Rule_LT14, Rule_LT15, ] sqlfluff-3.4.2/src/sqlfluff/rules/references/000077500000000000000000000000001503426445100212325ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/references/RF01.py000066400000000000000000000302011503426445100222500ustar00rootroot00000000000000"""Implementation of Rule RF01.""" from dataclasses import dataclass, field from typing import Optional, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.core.rules.reference import object_ref_matches_table from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.query import Query, Selectable _START_TYPES = [ "delete_statement", "merge_statement", "select_statement", "update_statement", ] @dataclass class RF01Query(Query): """Query with custom RF01 info.""" aliases: list[AliasInfo] = field(default_factory=list) standalone_aliases: list[BaseSegment] = field(default_factory=list) parent_stack: tuple[BaseSegment, ...] = field(default_factory=tuple) class Rule_RF01(BaseRule): """References cannot reference objects not present in ``FROM`` clause. .. note:: This rule is disabled by default for Athena, BigQuery, Databricks, DuckDB, Hive, Redshift, SOQL and SparkSQL due to the support of things like structs and lateral views which trigger false positives. It can be enabled with the ``force_enable = True`` flag. **Anti-pattern** In this example, the reference ``vee`` has not been declared. .. code-block:: sql SELECT vee.a FROM foo **Best practice** Remove the reference. .. code-block:: sql SELECT a FROM foo """ name = "references.from" aliases = ("L026",) groups = ("all", "core", "references") config_keywords = ["force_enable"] # If any of the parents would have also triggered the rule, don't fire # because they will more accurately process any internal references. crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) def _eval(self, context: RuleContext) -> list[LintResult]: violations: list[LintResult] = [] dml_target_table: Optional[list[tuple[str, ...]]] = None self.logger.debug("Trigger on: %s", context.segment) if not context.segment.is_type("select_statement"): # Extract first table reference. This will be the target # table in a DML statement. 
table_reference = next( context.segment.recursive_crawl("table_reference"), None ) if table_reference: dml_target_table = self._table_ref_as_tuple( cast(ObjectReferenceSegment, table_reference) ) self.logger.debug("DML Reference Table: %s", dml_target_table) # Verify table references in any SELECT statements found in or # below context.segment in the parser tree. query: RF01Query = RF01Query.from_segment(context.segment, context.dialect) query.parent_stack = context.parent_stack self._analyze_table_references( query, dml_target_table, context.dialect, violations ) return violations def _alias_info_as_tuples(self, alias_info: AliasInfo) -> list[tuple[str, ...]]: result: list[tuple[str, ...]] = [] if alias_info.aliased: result.append((alias_info.ref_str,)) if alias_info.object_reference: result += self._table_ref_as_tuple( cast(ObjectReferenceSegment, alias_info.object_reference) ) return result def _table_ref_as_tuple( self, table_reference: ObjectReferenceSegment, ) -> list[tuple[str, ...]]: return [ tuple(ref.part for ref in table_reference.iter_raw_references()), tuple( ref.segments[0].normalize(ref.part) for ref in table_reference.iter_raw_references() ), ] def _analyze_table_references( self, query: RF01Query, dml_target_table: Optional[list[tuple[str, ...]]], dialect: Dialect, violations: list[LintResult], ) -> None: # For each query... for selectable in query.selectables: select_info = selectable.select_info self.logger.debug( "Selectable: %s", selectable, ) if select_info: # Record the available tables. query.aliases += select_info.table_aliases query.standalone_aliases += select_info.standalone_aliases self.logger.debug( "Aliases: %s %s", [alias.ref_str for alias in select_info.table_aliases], [standalone.raw for standalone in select_info.standalone_aliases], ) # Try and resolve each reference to a value in query.aliases (or # in an ancestor query). for r in select_info.reference_buffer: if not self._should_ignore_reference(r, selectable): # This function walks up the query's parent stack if necessary. violation = self._resolve_reference( r, self._get_table_refs(r, dialect), dml_target_table, query ) if violation: violations.append(violation) # Visit children. for child in query.children: self._analyze_table_references( cast(RF01Query, child), dml_target_table, dialect, violations ) def _should_ignore_reference( self, reference: ObjectReferenceSegment, selectable: Selectable ) -> bool: ref_path = selectable.selectable.path_to(reference) # Ignore references occurring in an "INTO" clause: # - They are table references, not column references. # - They are the target table, similar to an INSERT or UPDATE # statement, thus not expected to match a table in the FROM # clause. if ref_path: return any(ps.segment.is_type("into_table_clause") for ps in ref_path) else: return False # pragma: no cover def _get_table_refs( self, ref: ObjectReferenceSegment, dialect: Dialect ) -> list[tuple[ObjectReferenceSegment.ObjectReferencePart, tuple[str, ...]]]: """Given ObjectReferenceSegment, determine possible table references.""" tbl_refs: list[ tuple[ObjectReferenceSegment.ObjectReferencePart, tuple[str, ...]] ] = [] # First, handle any schema.table references. for sr, tr in ref.extract_possible_multipart_references( levels=[ ref.ObjectReferenceLevel.SCHEMA, ref.ObjectReferenceLevel.TABLE, ] ): tbl_refs.append((tr, (sr.part, tr.part))) tbl_refs.append( ( tr, ( sr.segments[0].normalize(sr.part), tr.segments[0].normalize(tr.part), ), ) ) # Maybe check for simple table references. 
Two cases: # - For most dialects, skip this if it's a schema+table reference -- the # reference was specific, so we shouldn't ignore that by looking # elsewhere.) # - Always do this in BigQuery. BigQuery table references are frequently # ambiguous because BigQuery SQL supports structures, making some # multi-level "." references impossible to interpret with certainty. # We may need to genericize this code someday to support other # dialects. If so, this check should probably align somehow with # whether the dialect overrides # ObjectReferenceSegment.extract_possible_references(). if not tbl_refs or dialect.name in ["bigquery"]: for tr in ref.extract_possible_references( level=ref.ObjectReferenceLevel.TABLE ): tbl_refs.append((tr, (tr.part,))) tbl_refs.append((tr, (tr.segments[0].normalize(tr.part),))) return tbl_refs def _resolve_reference( self, r: ObjectReferenceSegment, tbl_refs: list[ tuple[ObjectReferenceSegment.ObjectReferencePart, tuple[str, ...]] ], dml_target_table: Optional[list[tuple[str, ...]]], query: RF01Query, ) -> Optional[LintResult]: # Does this query define the referenced table? possible_references = [tbl_ref[1] for tbl_ref in tbl_refs] targets: list[tuple[str, ...]] = [] for alias in query.aliases: targets += self._alias_info_as_tuples(alias) for standalone_alias in query.standalone_aliases: targets.append((standalone_alias.raw,)) targets.append((standalone_alias.raw_normalized(False),)) distinct_targets = set(tuple(s.upper() for s in t) for t in targets) if len(distinct_targets) == 1 and self._dialect_supports_dot_access( query.dialect ): self.force_enable: bool if self.force_enable: # Backwards compatibility. # Nowadays "force_enable" is more of "strict" mode, # for dialects with dot access. pass else: return None targets += self._get_implicit_targets(query) if not object_ref_matches_table(possible_references, targets): # No. Check the parent query, if there is one. if query.parent: return self._resolve_reference( r, tbl_refs, dml_target_table, cast(RF01Query, query.parent) ) # No parent query. If there's a DML statement at the root, check its # target table or alias. 
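            # e.g. "UPDATE t SET x = (SELECT t.y FROM u)": "t" matches no
            # alias in the subquery or a parent query, but it does match the
            # DML target table, so no violation is raised.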
elif not dml_target_table or not object_ref_matches_table( possible_references, dml_target_table ): return LintResult( # Return the first segment rather than the string anchor=tbl_refs[0][0].segments[0], description=f"Reference {r.raw!r} refers to table/view " "not found in the FROM clause or found in ancestor " "statement.", ) return None def _get_implicit_targets(self, query: RF01Query) -> list[tuple[str, ...]]: if query.dialect.name == "sqlite": maybe_create_trigger: Optional[BaseSegment] = next( ( seg for seg in reversed(query.parent_stack) if seg.is_type("create_trigger") ), None, ) if not maybe_create_trigger: return [] for seg in maybe_create_trigger.segments: if seg.is_type("keyword") and seg.raw_upper == "INSERT": return [("new",)] elif seg.is_type("keyword") and seg.raw_upper == "UPDATE": return [("new",), ("old",)] elif seg.is_type("keyword") and seg.raw_upper == "DELETE": return [("old",)] else: pass # pragma: no cover return [] def _dialect_supports_dot_access(self, dialect: Dialect) -> bool: # Athena: # https://docs.aws.amazon.com/athena/latest/ug/filtering-with-dot.html # BigQuery: # https://cloud.google.com/bigquery/docs/reference/standard-sql/operators#field_access_operator # Databricks: # https://docs.databricks.com/en/sql/language-manual/functions/dotsign.html # DuckDB: # https://duckdb.org/docs/sql/data_types/struct#retrieving-from-structs # Redshift: # https://docs.aws.amazon.com/redshift/latest/dg/query-super.html # TODO: all doc links to all referenced dialects return dialect.name in ( "athena", "bigquery", "databricks", "duckdb", "hive", "redshift", "soql", "sparksql", ) sqlfluff-3.4.2/src/sqlfluff/rules/references/RF02.py000066400000000000000000000155521503426445100222650ustar00rootroot00000000000000"""Implementation of Rule RF02.""" from typing import Optional import regex from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.rules.aliasing.AL04 import Rule_AL04 from sqlfluff.utils.analysis.select import get_select_statement_info class Rule_RF02(Rule_AL04): """References should be qualified if select has more than one referenced table/view. .. note:: Except if they're present in a ``USING`` clause. **Anti-pattern** In this example, the reference ``vee`` has not been declared, and the variables ``a`` and ``b`` are potentially ambiguous. .. code-block:: sql SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a **Best practice** Add the references. .. 
code-block:: sql SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a """ name = "references.qualification" aliases = ("L027",) groups = ("all", "references") # Crawl behaviour is defined in AL04 config_keywords = [ "subqueries_ignore_external_references", ] # Config type hints ignore_words_regex: str ignore_words_list: list[str] subqueries_ignore_external_references: bool def _lint_references_and_aliases( self, table_aliases: list[AliasInfo], standalone_aliases: list[BaseSegment], references, col_aliases: list[ColumnAliasInfo], using_cols: list[BaseSegment], parent_select: Optional[BaseSegment], rule_context: RuleContext, ) -> Optional[list[LintResult]]: if parent_select: parent_select_info = get_select_statement_info( parent_select, rule_context.dialect ) if parent_select_info: # If we are looking at a subquery, include any table references for table_alias in parent_select_info.table_aliases: is_from = self._is_root_from_clause(rule_context) if ( table_alias.from_expression_element.path_to( rule_context.segment ) or is_from or self.subqueries_ignore_external_references ): # Skip the subquery alias itself or if the subquery is inside # of a `from` or `join`` clause that isn't a nested where clause continue table_aliases.append(table_alias) # Do we have more than one? If so, all references should be qualified. if len(table_aliases) <= 1: return None # Get the ignore_words_list configuration. try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. This is # very slow. ignore_words_list = self._init_ignore_words_list() sql_variables = self._find_sql_variables(rule_context) # A buffer to keep any violations. violation_buff = [] # Check all the references that we have. for r in references: # Skip if in ignore list if ignore_words_list and r.raw.lower() in ignore_words_list: continue # Skip if a sql variable name inside the file if r.raw.lower() in sql_variables: continue # Skip if matches ignore regex if self.ignore_words_regex and regex.search(self.ignore_words_regex, r.raw): continue this_ref_type = r.qualification() # Discard column aliases that # refer to the current column reference. col_alias_names = [ c.alias_identifier_name for c in col_aliases if r not in c.column_reference_segments ] if ( this_ref_type == "unqualified" # Allow unqualified columns that # are actually aliases defined # in a different select clause element. and r.raw not in col_alias_names # Allow columns defined in a USING expression. and r.raw not in [using_col.raw for using_col in using_cols] # Allow columns defined as standalone aliases # (e.g. value table functions from bigquery) and r.raw not in [a.raw for a in standalone_aliases] ): violation_buff.append( LintResult( anchor=r, description=f"Unqualified reference {r.raw!r} found in " "select with more than one referenced table/view.", ) ) return violation_buff or None def _is_root_from_clause(self, rule_context: RuleContext) -> bool: """This is to determine if a subquery is part of the from clause. Any subqueries in the `from_clause` should be ignore, unless they are a nested correlated query. 
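
        e.g. walking up from a subquery in "FROM (...)" reaches the
        from_clause first (returns True), whereas a correlated subquery in
        a WHERE clause reaches the where_clause first and breaks out
        (returns False).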
""" is_from = False for x in reversed(rule_context.parent_stack): if x.is_type("from_clause"): is_from = True break elif x.is_type("where_clause"): break return is_from def _init_ignore_words_list(self) -> list[str]: """Called first time rule is evaluated to fetch & cache the policy.""" ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] return self.ignore_words_list def _find_sql_variables(self, rule_context: RuleContext) -> set[str]: """Get any `DECLARE`d variables in the whole of the linted file. This assumes that the declare statement is going to be used before any reference """ sql_variables: set[str] = set() # Check for bigquery declared variables. These may only exists at the top of # the file or at the beginning of a `BEGIN` block. The risk of collision # _should_ be low and no `IF` chain searching should be required. if rule_context.dialect.name == "bigquery": sql_variables |= { identifier.raw.lower() for declare in rule_context.parent_stack[0].recursive_crawl( "declare_segment" ) for identifier in declare.get_children("identifier") } # TODO: Add any additional dialect specific variable names return sql_variables sqlfluff-3.4.2/src/sqlfluff/rules/references/RF03.py000066400000000000000000000300761503426445100222640ustar00rootroot00000000000000"""Implementation of Rule RF03.""" from collections.abc import Iterator from typing import Optional from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import IdentifierSegment from sqlfluff.core.parser.segments import BaseSegment, SymbolSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.query import Query from sqlfluff.utils.analysis.select import SelectStatementColumnsAndTables _START_TYPES = ["select_statement", "set_expression", "with_compound_statement"] class Rule_RF03(BaseRule): """Column references should be qualified consistently in single table statements. .. note:: For BigQuery, Hive and Redshift this rule is disabled by default. This is due to historical false positives associated with STRUCT data types. This default behaviour may be changed in the future. The rule can be enabled with the ``force_enable = True`` flag. "consistent" will be fixed to "qualified" if inconsistency is found. **Anti-pattern** In this example, only the reference to ``b`` is qualified. .. code-block:: sql SELECT a, foo.b FROM foo **Best practice** Either all column references should be qualified, or all unqualified. .. code-block:: sql SELECT a, b FROM foo -- Also good SELECT foo.a, foo.b FROM foo """ name = "references.consistent" aliases = ("L028",) groups = ("all", "references") config_keywords = [ "single_table_references", "force_enable", ] # If any of the parents would have also triggered the rule, don't fire # because they will more accurately process any internal references. 
crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) _is_struct_dialect = False _dialects_with_structs = ["bigquery", "hive", "redshift"] # This could be turned into an option _fix_inconsistent_to = "qualified" is_fix_compatible = True single_table_references: str def _eval(self, context: RuleContext) -> EvalResultType: """Override base class for dialects that use structs, or SELECT aliases.""" # Config type hints self.force_enable: bool # Some dialects use structs (e.g. column.field) which look like # table references and so incorrectly trigger this rule. if ( context.dialect.name in self._dialects_with_structs and not self.force_enable ): return LintResult() if context.dialect.name in self._dialects_with_structs: self._is_struct_dialect = True query: Query = Query.from_segment(context.segment, dialect=context.dialect) visited: set = set() # Recursively visit and check each query in the tree. return list(self._visit_queries(query, visited)) def _iter_available_targets( self, query: Query, subquery: Optional[Query] = None ) -> Iterator[AliasInfo]: """Iterate along a list of valid alias targets.""" for selectable in query.selectables: select_info = selectable.select_info if select_info: for alias in select_info.table_aliases: if subquery and alias.from_expression_element.path_to( subquery.selectables[0].selectable ): # Skip the subquery alias itself continue if (subquery and not alias.object_reference) or alias.ref_str: yield alias def _visit_queries(self, query: Query, visited: set) -> Iterator[LintResult]: select_info: Optional[SelectStatementColumnsAndTables] = None if query.selectables: select_info = query.selectables[0].select_info # How many table names are visible from here? If more than one then do # nothing. if select_info and len(select_info.table_aliases) == 1: fixable = True # :TRICKY: Subqueries in the column list of a SELECT can see tables # in the FROM list of the containing query. Thus, count tables at # the *parent* query level. Only check if it is a subquery of the # parent. possible_ref_tables = list(self._iter_available_targets(query)) if query.parent and query.is_subquery: possible_ref_tables += list( self._iter_available_targets(query.parent, query) ) if len(possible_ref_tables) > 1: # If more than one table name is visible, check for and report # potential lint warnings, but don't generate fixes, because # fixes are unsafe if there's more than one table visible. fixable = False yield from _check_references( select_info.table_aliases, select_info.standalone_aliases, select_info.reference_buffer, select_info.col_aliases, self.single_table_references, self._is_struct_dialect, self._fix_inconsistent_to, fixable, ) children = list(query.children) # 'query.children' includes CTEs and "main" queries, but not queries in # the "FROM" list. We want to visit those as well. 
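        # e.g. in "SELECT * FROM (SELECT a FROM t) AS sub" the inner select
        # is only reachable through crawl_sources() on the FROM expression,
        # so it gets appended to children below.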
if select_info: for a in select_info.table_aliases: for q in query.crawl_sources(a.from_expression_element, True): if not isinstance(q, Query): continue # Check for previously visited selectables to avoid possible # infinite recursion, e.g.: # WITH test1 AS (SELECT i + 1, j + 1 FROM test1) # SELECT * FROM test1; if any(s.selectable in visited for s in q.selectables): continue visited.update(s.selectable for s in q.selectables) children.append(q) for child in children: yield from self._visit_queries(child, visited) def _check_references( table_aliases: list[AliasInfo], standalone_aliases: list[BaseSegment], references: list[ObjectReferenceSegment], col_aliases: list[ColumnAliasInfo], single_table_references: str, is_struct_dialect: bool, fix_inconsistent_to: Optional[str], fixable: bool, ) -> Iterator[LintResult]: """Iterate through references and check consistency.""" # A buffer to keep any violations. col_alias_names: list[str] = [c.alias_identifier_name for c in col_aliases] table_ref_str: str = table_aliases[0].ref_str table_ref_str_source = table_aliases[0].segment # Check all the references that we have. seen_ref_types: set[str] = set() for ref in references: this_ref_type: str = ref.qualification() if this_ref_type == "qualified" and is_struct_dialect: # If this col appears "qualified" check if it is more logically a struct. if next(ref.iter_raw_references()).part != table_ref_str: this_ref_type = "unqualified" lint_res = _validate_one_reference( single_table_references, ref, this_ref_type, standalone_aliases, table_ref_str, table_ref_str_source, col_alias_names, seen_ref_types, fixable, ) seen_ref_types.add(this_ref_type) if not lint_res: continue if fix_inconsistent_to and single_table_references == "consistent": # If we found a "consistent" error but we have a fix directive, # recurse with a different single_table_references value yield from _check_references( table_aliases, standalone_aliases, references, col_aliases, # NB vars are passed in a different order here single_table_references=fix_inconsistent_to, is_struct_dialect=is_struct_dialect, fix_inconsistent_to=None, fixable=fixable, ) yield lint_res def _validate_one_reference( single_table_references: str, ref: ObjectReferenceSegment, this_ref_type: str, standalone_aliases: list[BaseSegment], table_ref_str: str, table_ref_str_source: Optional[BaseSegment], col_alias_names: list[str], seen_ref_types: set[str], fixable: bool, ) -> Optional[LintResult]: # We skip any unqualified wildcard references (i.e. *). They shouldn't # count. if not ref.is_qualified() and ref.is_type("wildcard_identifier"): return None # Oddball case: Column aliases provided via function calls in by # FROM or JOIN. References to these don't need to be qualified. # Note there could be a table with a column by the same name as # this alias, so avoid bogus warnings by just skipping them # entirely rather than trying to enforce anything. if ref.raw in [a.raw for a in standalone_aliases]: return None # If the reference is qualified, see that the table is not in the standalone_aliases # namely for lambda expressions. if ref.is_qualified(): for part in ref.extract_possible_references( level=ref.ObjectReferenceLevel.TABLE ): if part.segments[0].raw in [a.raw for a in standalone_aliases]: return None # Oddball case: tsql table variables can't be used to qualify references. # This appears here as an empty string for table_ref_str. 
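    # e.g. "SELECT c FROM @tablevar" in T-SQL: with no usable alias string
    # there is no qualification to enforce, so we bail out.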
if not table_ref_str: return None # Certain dialects allow use of SELECT alias in WHERE clauses if ref.raw in col_alias_names: return None # Check first for consistency if single_table_references == "consistent": if seen_ref_types and this_ref_type not in seen_ref_types: return LintResult( anchor=ref, description=f"{this_ref_type.capitalize()} reference " f"{ref.raw!r} found in single table select which is " "inconsistent with previous references.", ) # Config is consistent, and this reference matches types so far. return None # Otherwise check for a specified type of referencing. # If it's the right kind already, just return. if single_table_references == this_ref_type: return None # If not, it's the wrong type and we should handle it. if single_table_references == "unqualified": # If unqualified and not fixable, there is no error. if not fixable: return None # If this is qualified we must have a "table", "."" at least return LintResult( anchor=ref, fixes=[LintFix.delete(el) for el in ref.segments[:2]], description="{} reference {!r} found in single table select.".format( this_ref_type.capitalize(), ref.raw ), ) fixes = None if fixable: fixes = [ LintFix.create_before( ref.segments[0] if len(ref.segments) else ref, source=[table_ref_str_source] if table_ref_str_source else None, edit_segments=[ IdentifierSegment( raw=table_ref_str, type="naked_identifier", ), SymbolSegment(raw=".", type="symbol"), ], ) ] return LintResult( anchor=ref, fixes=fixes, description="{} reference {!r} found in single table select.".format( this_ref_type.capitalize(), ref.raw ), ) sqlfluff-3.4.2/src/sqlfluff/rules/references/RF04.py000066400000000000000000000101351503426445100222570ustar00rootroot00000000000000"""Implementation of Rule RF04.""" from typing import Optional import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_RF04(BaseRule): """Keywords should not be used as identifiers. Although `unreserved` keywords `can` be used as identifiers, and `reserved words` can be used as quoted identifiers, best practice is to avoid where possible, to avoid any misunderstandings as to what the alias represents. .. note:: Note that `reserved` keywords cannot be used as unquoted identifiers and will cause parsing errors and so are not covered by this rule. **Anti-pattern** In this example, ``SUM`` (built-in function) is used as an alias. .. code-block:: sql SELECT sum.a FROM foo AS sum **Best practice** Avoid keywords as the name of an alias. .. code-block:: sql SELECT vee.a FROM foo AS vee """ name = "references.keywords" aliases = ("L029",) groups = ("all", "references") crawl_behaviour = SegmentSeekerCrawler({"naked_identifier", "quoted_identifier"}) config_keywords = [ "unquoted_identifiers_policy", "quoted_identifiers_policy", "ignore_words", "ignore_words_regex", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Keywords should not be used as identifiers.""" # Config type hints self.ignore_words_regex: str # Skip 1 letter identifiers. These can be datepart keywords # (e.g. "d" for Snowflake) but most people expect to be able to use them. if len(context.segment.raw) == 1: return LintResult(memory=context.memory) # Get the ignore list configuration and cache it try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. # So we can cache them for next time for speed. 
ignore_words_list = self._init_ignore_string() # Skip if in ignore list if ignore_words_list and context.segment.raw.lower() in ignore_words_list: return LintResult(memory=context.memory) # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, context.segment.raw ): return LintResult(memory=context.memory) if ( context.segment.is_type("naked_identifier") and identifiers_policy_applicable( self.unquoted_identifiers_policy, # type: ignore context.parent_stack, ) and ( context.segment.raw.upper() in context.dialect.sets("unreserved_keywords") ) ) or ( context.segment.is_type("quoted_identifier") and identifiers_policy_applicable( self.quoted_identifiers_policy, # type: ignore context.parent_stack, ) and ( context.segment.raw.upper()[1:-1] in context.dialect.sets("unreserved_keywords") or context.segment.raw.upper()[1:-1] in context.dialect.sets("reserved_keywords") ) ): return LintResult(anchor=context.segment) else: return None def _init_ignore_string(self) -> list[str]: """Called first time rule is evaluated to fetch & cache the ignore_words.""" # Use str() in case bools are passed which might otherwise be read as bool ignore_words_config = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] ignore_words_list = self.ignore_words_list return ignore_words_list sqlfluff-3.4.2/src/sqlfluff/rules/references/RF05.py000066400000000000000000000225661503426445100222730ustar00rootroot00000000000000"""Implementation of Rule RF05.""" from typing import Optional import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_RF05(BaseRule): """Do not use special characters in identifiers. **Anti-pattern** Using special characters within identifiers when creating or aliasing objects. .. code-block:: sql CREATE TABLE DBO.ColumnNames ( [Internal Space] INT, [Greater>Than] INT, [Less str: """Returns additional allowed characters, with adjustments for dialect.""" result: set[str] = set() if self.additional_allowed_characters: result.update(self.additional_allowed_characters) if dialect_name == "bigquery": # In BigQuery, also allow hyphens. result.update("-") if dialect_name == "snowflake": # In Snowflake, external stage metadata uses $. result.update("$") return "".join(result) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Do not use special characters in object names.""" # Config type hints self.quoted_identifiers_policy: str self.unquoted_identifiers_policy: str self.allow_space_in_identifier: bool self.additional_allowed_characters: str self.ignore_words: str self.ignore_words_regex: str # Confirm it's a single identifier. assert context.segment.is_type("naked_identifier", "quoted_identifier") # Get the ignore_words_list configuration. try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. This is # very slow. 
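            # (Subsequent evaluations find self.ignore_words_list already
            # set on the rule and skip this slow path entirely.)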
ignore_words_list = self._init_ignore_words_list() # Assume unquoted (we'll update if quoted) policy = self.unquoted_identifiers_policy identifier = context.segment.raw # Skip if in ignore list if ignore_words_list and identifier.lower() in ignore_words_list: return None # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier ): return LintResult(memory=context.memory) if self._is_aliased_select_clause_element(context): # If selects are aliased, ignore unaliased column reference return None # Do some extra processing for quoted identifiers. if context.segment.is_type("quoted_identifier"): # Update the default policy to quoted policy = self.quoted_identifiers_policy # Strip the quotes first identifier = context.segment.raw_normalized(casefold=False) # Skip if in ignore list - repeat check now we've strip the quotes if ignore_words_list and identifier.lower() in ignore_words_list: return None # Skip if matches ignore regex - repeat check now we've strip the quotes if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier ): return LintResult(memory=context.memory) # PostgreSQL Extension allows the use of extensions. # # These extensions are often qutoed identifiers. # (https://www.postgresql.org/docs/current/contrib.html) # # Allow quoted identifiers in extension references if ( context.dialect.name in ["postgres"] and context.parent_stack and context.parent_stack[-1].is_type("extension_reference") ): return None # BigQuery table references are quoted in back ticks so allow dots # # It also allows a star at the end of table_references for wildcards # (https://cloud.google.com/bigquery/docs/querying-wildcard-tables) # # Strip both out before testing the identifier if ( context.dialect.name in ["bigquery"] and context.parent_stack and context.parent_stack[-1].is_type("table_reference") ): if identifier and identifier[-1] == "*": identifier = identifier[:-1] identifier = identifier.replace(".", "") # Databricks & SparkSQL file references for direct file query # are quoted in back ticks to allow for identifiers common # in file paths and regex patterns for path globbing # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # # Path Glob Filters (done inline for SQL direct file query) # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter # if ( context.dialect.name in ["databricks", "sparksql"] and context.parent_stack ): # Databricks & SparkSQL file references for direct file query # are quoted in back ticks to allow for identifiers common # in file paths and regex patterns for path globbing # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # # Path Glob Filters (done inline for SQL direct file query) # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter # if context.parent_stack[-1].is_type("file_reference"): return None # Databricks & SparkSQL properties keys # used for setting table and runtime # configurations denote namespace using dots, so these are # removed before testing L057 to not trigger false positives # Runtime configurations: # https://spark.apache.org/docs/latest/configuration.html#application-properties # Example configurations for table: # https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#configuration # if context.parent_stack[-1].is_type("property_name_identifier"): identifier = identifier.replace(".", "") # Strip spaces if allowed (note a separate 
config, as it is only valid for quoted # identifiers) if self.allow_space_in_identifier: identifier = identifier.replace(" ", "") # We always allow underscores so strip them out identifier = identifier.replace("_", "") # Redshift allows a # at the beginning of temporary table names if ( context.dialect.name == "redshift" and identifier[0] == "#" and context.parent_stack and context.parent_stack[-1].is_type("table_reference") ): identifier = identifier[1:] # Strip any additional allowed characters from the identifier additional_allowed_characters = self._get_additional_allowed_characters( context.dialect.name ) if additional_allowed_characters: identifier = identifier.translate( str.maketrans("", "", additional_allowed_characters) ) # Finally test if the remaining identifier is only made up of alphanumerics if identifiers_policy_applicable(policy, context.parent_stack) and not ( identifier.isalnum() ): return LintResult(anchor=context.segment) return None def _init_ignore_words_list(self) -> list[str]: """Called the first time the rule is evaluated, to fetch & cache the ignore_words.""" ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] return self.ignore_words_list @staticmethod def _is_aliased_select_clause_element(context: RuleContext) -> bool: for seg in reversed(context.parent_stack): if seg.is_type("alias_expression"): return False if seg.is_type("select_clause_element"): return seg.get_child("alias_expression") is not None return False sqlfluff-3.4.2/src/sqlfluff/rules/references/RF06.py """Implementation of Rule RF06.""" from functools import cached_property from typing import TYPE_CHECKING, Optional, cast import regex from sqlfluff.core.parser import CodeSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.parsers import RegexParser class Rule_RF06(BaseRule): """Unnecessary quoted identifier. This rule will fail if the quotes used to quote an identifier are (un)necessary depending on the ``force_quote_identifier`` configuration. This rule applies to both column *references* and their *aliases*. The *default* (safe) behaviour is designed not to unexpectedly corrupt SQL. That means the circumstances in which quotes can be safely removed depend on how the current dialect would resolve the unquoted variant of the identifier (see below for examples). Additionally this rule may be configured to a more aggressive setting by setting :code:`case_sensitive` to :code:`False`, in which case quotes will be removed regardless of the casing of the contained identifier. Any identifiers which contain special characters, spaces or keywords will still be left quoted. This setting is more appropriate for projects or teams where there is more control over the inputs and outputs of queries, and where it's more viable to institute rules such as enforcing that all identifiers are in the default casing (meaning that using quotes to change the case of identifiers is unnecessary). .. list-table:: :widths: 26 26 48 :header-rows: 1 * - Dialect group - ✅ Example where quotes are safe to remove. - ⚠️ Examples where quotes are not safe to remove.
* - Natively :code:`UPPERCASE` dialects e.g. Snowflake, BigQuery, TSQL & Oracle. - Identifiers which, without quotes, would resolve to the default casing of :code:`FOO` i.e. :code:`"FOO"`. - Identifiers where the quotes are necessary to preserve case (e.g. :code:`"Foo"` or :code:`"foo"`), or where the identifier contains something invalid without the quotes such as keywords or special characters e.g. :code:`"SELECT"`, :code:`"With Space"` or :code:`"Special&Characters"`. * - Natively :code:`lowercase` dialects e.g. Athena, Hive & Postgres - Identifiers which, without quotes, would resolve to the default casing of :code:`foo` i.e. :code:`"foo"`. - Identifiers where the quotes are necessary to preserve case (e.g. :code:`"Foo"` or :code:`"foo"`), or where the identifier contains something invalid without the quotes such as keywords or special characters e.g. :code:`"SELECT"`, :code:`"With Space"` or :code:`"Special&Characters"`. * - Case insensitive dialects e.g. :ref:`duckdb_dialect_ref` or :ref:`sparksql_dialect_ref` - Any identifiers which are valid without quotes: e.g. :code:`"FOO"`, :code:`"foo"`, :code:`"Foo"`, :code:`"fOo"`, :code:`FOO` and :code:`foo` would all resolve to the same object. - Identifiers which contain something invalid without the quotes such as keywords or special characters e.g. :code:`"SELECT"`, :code:`"With Space"` or :code:`"Special&Characters"`. This rule is closely associated with (and constrained by the same factors as) :sqlfluff:ref:`aliasing.self_alias.column` (:sqlfluff:ref:`AL09`). When ``prefer_quoted_identifiers = False`` (default behaviour), the quotes are unnecessary, except for reserved keywords and special characters in identifiers. **Anti-pattern** In this example, valid unquoted identifiers that are not also reserved keywords are needlessly quoted. .. code-block:: sql SELECT "foo" as "bar"; -- For lowercase dialects like Postgres SELECT "FOO" as "BAR"; -- For uppercase dialects like Snowflake **Best practice** Use unquoted identifiers where possible. .. code-block:: sql SELECT foo as bar; -- For lowercase dialects like Postgres SELECT FOO as BAR; -- For uppercase dialects like Snowflake -- Note that where the case of the quoted identifier requires -- the quotes to remain, or where the identifier cannot be -- unquoted because it would be invalid to do so, the quotes -- may remain. For example: SELECT "Case_Sensitive_Identifier" as is_allowed, "Identifier with spaces or speci@l characters" as this_too, "SELECT" as also_reserved_words FROM "My Table With Spaces" When ``prefer_quoted_identifiers = True``, the quotes are always necessary, regardless of whether the identifier is valid, is a reserved keyword, or contains special characters. .. note:: Because different quotes are used by the different dialects supported by `SQLFluff`, and those quotes mean different things in different contexts, this mode is not ``sqlfluff fix`` compatible. **Anti-pattern** In this example, a valid unquoted identifier that is also not a reserved keyword is required to be quoted. .. code-block:: sql SELECT 123 as foo **Best practice** Use quoted identifiers. .. code-block:: sql SELECT 123 as "foo" -- For ANSI, ... -- or SELECT 123 as `foo` -- For BigQuery, MySQL, ...
""" name = "references.quoting" aliases = ("L059",) groups = ("all", "references") config_keywords = [ "prefer_quoted_identifiers", "prefer_quoted_keywords", "ignore_words", "ignore_words_regex", "case_sensitive", ] crawl_behaviour = SegmentSeekerCrawler({"quoted_identifier", "naked_identifier"}) is_fix_compatible = True # Ignore "password_auth" type to allow quotes around passwords within # `CREATE USER` statements in Exasol dialect. # `EXECUTE AS` clauses in TSQL also require quotes. _ignore_types: list[str] = ["password_auth", "execute_as_clause"] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Unnecessary quoted identifier.""" # Config type hints self.prefer_quoted_identifiers: bool self.prefer_quoted_keywords: bool self.ignore_words: str self.ignore_words_regex: str self.case_sensitive: bool # Ignore some segment types if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)): return None identifier_is_quoted = not regex.search( r'^[^"\'[].+[^"\'\]]$', context.segment.raw ) identifier_contents = context.segment.raw if identifier_is_quoted: identifier_contents = identifier_contents[1:-1] identifier_is_keyword = identifier_contents.upper() in context.dialect.sets( "reserved_keywords" ) or identifier_contents.upper() in context.dialect.sets("unreserved_keywords") if self.prefer_quoted_identifiers: context_policy = "naked_identifier" else: context_policy = "quoted_identifier" # Get the ignore_words_list configuration. ignore_words_list = self.ignore_words_list # Skip if in ignore list if ignore_words_list and identifier_contents.lower() in ignore_words_list: return None # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier_contents ): return LintResult(memory=context.memory) if self.prefer_quoted_keywords and identifier_is_keyword: if not identifier_is_quoted: return LintResult( context.segment, description=( f"Missing quoted keyword identifier {identifier_contents}." ), ) return None # Ignore the segments that are not of the same type as the defined policy above. # Also TSQL has a keyword called QUOTED_IDENTIFIER which maps to the name so # need to explicitly check for that. if not context.segment.is_type( context_policy ) or context.segment.raw.lower() in ( "quoted_identifier", "naked_identifier", ): return None # Manage cases of identifiers must be quoted first. # Naked identifiers are _de facto_ making this rule fail as configuration forces # them to be quoted. # In this case, it cannot be fixed as which quote to use is dialect dependent if self.prefer_quoted_identifiers: return LintResult( context.segment, description=f"Missing quoted identifier {identifier_contents}.", ) # Now we only deal with NOT forced quoted identifiers configuration # (meaning prefer_quoted_identifiers=False). # Retrieve NakedIdentifierSegment RegexParser for the dialect. naked_identifier_parser = cast( "RegexParser", context.dialect._library["NakedIdentifierSegment"] ) anti_template = cast(str, naked_identifier_parser.anti_template) NakedIdentifierSegment = cast( type[CodeSegment], context.dialect.get_segment("IdentifierSegment") ) # For this to be a candidate for unquoting, it must: # - Casefold to it's current exact case. i.e. already be in the default # casing of the dialect *unless case_sensitive mode is False*. # - be a valid naked identifier. # - not be a reserved keyword. # NOTE: If the identifier parser has no casefold defined, we assume that # there is no casefolding (i.e. 
that the dialect is case sensitive, even when # unquoted, and therefore we should never unquote). # EXCEPT: if we're in a totally case insensitive dialect like DuckDB. is_case_insensitive_dialect = context.dialect.name in ("duckdb", "sparksql") if ( not is_case_insensitive_dialect and self.case_sensitive and naked_identifier_parser.casefold and identifier_contents != naked_identifier_parser.casefold(identifier_contents) ): return None if not regex.fullmatch( naked_identifier_parser.template, identifier_contents, regex.IGNORECASE, ): return None if regex.fullmatch( anti_template, identifier_contents, regex.IGNORECASE, ): return None return LintResult( context.segment, fixes=[ LintFix.replace( context.segment, [ NakedIdentifierSegment( raw=identifier_contents, **naked_identifier_parser.segment_kwargs(), ) ], ) ], description=f"Unnecessary quoted identifier {context.segment.raw}.", ) @cached_property def ignore_words_list(self) -> list[str]: """Words that the rule should ignore. Cached so that it's only evaluated on the first pass. """ ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": return self.split_comma_separated_string(ignore_words_config.lower()) return [] sqlfluff-3.4.2/src/sqlfluff/rules/references/__init__.py """The references plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "subqueries_ignore_external_references": { "validation": [True, False], "definition": "If ``True``, parent query references are not included as " "potentially ambiguous in subqueries. Defaults to ``False``.", }, "single_table_references": { "validation": ["consistent", "qualified", "unqualified"], "definition": "The expectation for references in single-table select.", }, "unquoted_identifiers_policy": { "validation": ["all", "aliases", "column_aliases", "table_aliases"], "definition": "Types of unquoted identifiers to flag violations for.", }, "quoted_identifiers_policy": { "validation": ["all", "aliases", "column_aliases", "table_aliases", "none"], "definition": "Types of quoted identifiers to flag violations for.", }, "allow_space_in_identifier": { "validation": [True, False], "definition": ("Should spaces in identifiers be allowed?"), }, "additional_allowed_characters": { "definition": ( "Optional list of extra allowed characters, " "in addition to alphanumerics (A-Z, a-z, 0-9) and underscores." ), }, "prefer_quoted_identifiers": { "validation": [True, False], "definition": ( "If ``True``, requires every identifier to be quoted. " "Defaults to ``False``." ), }, "prefer_quoted_keywords": { "validation": [True, False], "definition": ( "If ``True``, requires every keyword used as an identifier to be " "quoted. Defaults to ``False``." ), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used.
""" from sqlfluff.rules.references.RF01 import Rule_RF01 from sqlfluff.rules.references.RF02 import Rule_RF02 from sqlfluff.rules.references.RF03 import Rule_RF03 from sqlfluff.rules.references.RF04 import Rule_RF04 from sqlfluff.rules.references.RF05 import Rule_RF05 from sqlfluff.rules.references.RF06 import Rule_RF06 return [Rule_RF01, Rule_RF02, Rule_RF03, Rule_RF04, Rule_RF05, Rule_RF06] sqlfluff-3.4.2/src/sqlfluff/rules/structure/000077500000000000000000000000001503426445100211515ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST01.py000066400000000000000000000052101503426445100222100ustar00rootroot00000000000000"""Implementation of Rule ST01.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_ST01(BaseRule): """Do not specify ``else null`` in a case when statement (redundant). **Anti-pattern** .. code-block:: sql select case when name like '%cat%' then 'meow' when name like '%dog%' then 'woof' else null end from x **Best practice** Omit ``else null`` .. code-block:: sql select case when name like '%cat%' then 'meow' when name like '%dog%' then 'woof' end from x """ name = "structure.else_null" aliases = ("L035",) groups: tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes. 0. Look for a case expression 1. Look for "ELSE" 2. Mark "ELSE" for deletion (populate "fixes") 3. Backtrack and mark all newlines/whitespaces for deletion 4. Look for a raw "NULL" segment 5.a. The raw "NULL" segment is found, we mark it for deletion and return 5.b. We reach the end of case when without matching "NULL": the rule passes """ assert context.segment.is_type("case_expression") children = FunctionalContext(context).segment.children() else_clause = children.first(sp.is_type("else_clause")) # Does the "ELSE" have a "NULL"? NOTE: Here, it's safe to look for # "NULL", as an expression would *contain* NULL but not be == NULL. if else_clause and else_clause.children( lambda child: child.raw_upper == "NULL" ): # Found ELSE with NULL. Delete the whole else clause as well as # indents/whitespaces/meta preceding the ELSE. :TRICKY: Note # the use of reversed() to make select() effectively search in # reverse. before_else = children.reversed().select( start_seg=else_clause[0], loop_while=sp.or_(sp.is_type("whitespace", "newline"), sp.is_meta()), ) return LintResult( anchor=context.segment, fixes=[LintFix.delete(else_clause[0])] + [LintFix.delete(seg) for seg in before_else], ) return None sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST02.py000066400000000000000000000246451503426445100222260ustar00rootroot00000000000000"""Implementation of Rule ST02.""" from typing import Optional from sqlfluff.core.parser import ( KeywordSegment, SymbolSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema. NOTE: This is a minimal segment definition to allow appropriate fixing. 
""" type = "function_name" class FunctionContentsSegment(BaseSegment): """Function Contents. NOTE: This is a minimal segment definition to allow appropriate fixing. """ type = "function_contents" class Rule_ST02(BaseRule): """Unnecessary ``CASE`` statement. **Anti-pattern** ``CASE`` statement returns booleans. .. code-block:: sql :force: select case when fab > 0 then true else false end as is_fab from fancy_table -- This rule can also simplify CASE statements -- that aim to fill NULL values. select case when fab is null then 0 else fab end as fab_clean from fancy_table -- This also covers where the case statement -- replaces NULL values with NULL values. select case when fab is null then null else fab end as fab_clean from fancy_table **Best practice** Reduce to ``WHEN`` condition within ``COALESCE`` function. .. code-block:: sql :force: select coalesce(fab > 0, false) as is_fab from fancy_table -- To fill NULL values. select coalesce(fab, 0) as fab_clean from fancy_table -- NULL filling NULL. select fab as fab_clean from fancy_table """ name = "structure.simple_case" aliases = ("L043",) groups: tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True @staticmethod def _coalesce_fix_list( context: RuleContext, coalesce_arg_1: BaseSegment, coalesce_arg_2: BaseSegment, preceding_not: bool = False, ) -> list[LintFix]: """Generate list of fixes to convert CASE statement to COALESCE function.""" # Add coalesce and opening parenthesis. edits = [ FunctionNameSegment( segments=(WordSegment("coalesce", type="function_name_identifier"),) ), FunctionContentsSegment( segments=( SymbolSegment("(", type="start_bracket"), coalesce_arg_1, SymbolSegment(",", type="comma"), WhitespaceSegment(), coalesce_arg_2, SymbolSegment(")", type="end_bracket"), ) ), ] if preceding_not: not_edits: list[BaseSegment] = [ KeywordSegment("not"), WhitespaceSegment(), ] edits = not_edits + edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _column_only_fix_list( context: RuleContext, column_reference_segment: BaseSegment, ) -> list[LintFix]: """Generate list of fixes to reduce CASE statement to a single column.""" fixes = [ LintFix.replace( context.segment, [column_reference_segment], ) ] return fixes def _eval(self, context: RuleContext) -> Optional[LintResult]: """Unnecessary CASE statement.""" # Look for CASE expression. if context.segment.segments[0].raw_upper == "CASE": # Find all 'WHEN' clauses and the optional 'ELSE' clause. children = FunctionalContext(context).segment.children() when_clauses = children.select(sp.is_type("when_clause")) else_clauses = children.select(sp.is_type("else_clause")) # Can't fix if multiple WHEN clauses. if len(when_clauses) > 1: return None # Find condition and then expressions. condition_expression = when_clauses.children(sp.is_type("expression"))[0] then_expression = when_clauses.children(sp.is_type("expression"))[1] # Method 1: Check if THEN/ELSE expressions are both Boolean and can # therefore be reduced. 
if else_clauses: else_expression = else_clauses.children(sp.is_type("expression"))[0] upper_bools = ["TRUE", "FALSE"] if ( (then_expression.raw_upper in upper_bools) and (else_expression.raw_upper in upper_bools) and (then_expression.raw_upper != else_expression.raw_upper) ): coalesce_arg_1: BaseSegment = condition_expression coalesce_arg_2: BaseSegment = KeywordSegment("false") preceding_not = then_expression.raw_upper == "FALSE" fixes = self._coalesce_fix_list( context, coalesce_arg_1, coalesce_arg_2, preceding_not, ) return LintResult( anchor=condition_expression, fixes=fixes, description="Unnecessary CASE statement. " "Use COALESCE function instead.", ) # Method 2: Check if the condition expression is comparing a column # reference to NULL and whether that column reference is also in either the # THEN/ELSE expression. We can only apply this method when there is only # one condition in the condition expression. condition_expression_segments_raw = { segment.raw_upper for segment in condition_expression.segments } if {"IS", "NULL"}.issubset(condition_expression_segments_raw) and ( not condition_expression_segments_raw.intersection({"AND", "OR"}) ): # Check if the comparison is to NULL or NOT NULL. is_not_prefix = "NOT" in condition_expression_segments_raw # Locate column reference in condition expression. column_reference_segment = ( Segments(condition_expression) .children(sp.is_type("column_reference")) .get() ) array_accessor_segment = ( Segments(condition_expression) .children(sp.is_type("array_accessor")) .get() ) # Return None if none found (this condition does not apply to functions) if not column_reference_segment: return None if array_accessor_segment: column_reference_segment_raw_upper = ( column_reference_segment.raw_upper + array_accessor_segment.raw_upper ) else: column_reference_segment_raw_upper = ( column_reference_segment.raw_upper ) if else_clauses: else_expression = else_clauses.children(sp.is_type("expression"))[0] # Check if we can reduce the CASE expression to a single coalesce # function. if ( not is_not_prefix and column_reference_segment_raw_upper == else_expression.raw_upper ): coalesce_arg_1 = else_expression coalesce_arg_2 = then_expression elif ( is_not_prefix and column_reference_segment_raw_upper == then_expression.raw_upper ): coalesce_arg_1 = then_expression coalesce_arg_2 = else_expression else: return None if coalesce_arg_2.raw_upper == "NULL": # Can just specify the column on its own # rather than using a COALESCE function. return LintResult( anchor=condition_expression, fixes=self._column_only_fix_list( context, column_reference_segment, ), description="Unnecessary CASE statement. " f"Just use column '{column_reference_segment.raw}'.", ) return LintResult( anchor=condition_expression, fixes=self._coalesce_fix_list( context, coalesce_arg_1, coalesce_arg_2, ), description="Unnecessary CASE statement. " "Use COALESCE function instead.", ) elif column_reference_segment.raw_upper == then_expression.raw_upper: # Can just specify the column on its own # rather than using a COALESCE function. # In this case, having no ELSE clause is equivalent to ELSE NULL. return LintResult( anchor=condition_expression, fixes=self._column_only_fix_list( context, column_reference_segment, ), description="Unnecessary CASE statement. 
" f"Just use column '{column_reference_segment.raw}'.", ) return None sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST03.py000066400000000000000000000040211503426445100222110ustar00rootroot00000000000000"""Implementation of Rule ST03.""" from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query class Rule_ST03(BaseRule): """Query defines a CTE (common-table expression) but does not use it. **Anti-pattern** Defining a CTE that is not used by the query is harmless, but it means the code is unnecessary and could be removed. .. code-block:: sql WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 **Best practice** Remove unused CTEs. .. code-block:: sql WITH cte1 AS ( SELECT a FROM t ) SELECT * FROM cte1 """ name = "structure.unused_cte" aliases = ("L045",) groups = ("all", "core", "structure") crawl_behaviour = SegmentSeekerCrawler({"with_compound_statement"}) def _eval(self, context: RuleContext) -> EvalResultType: result = [] query: Query = Query.from_root(context.segment, dialect=context.dialect) # Build up a dict of remaining CTEs (uppercased as not case sensitive). remaining_ctes = {k.upper(): k for k in query.ctes} # Work through all the references in the file, checking off CTES as the # are referenced. for reference in context.segment.recursive_crawl("table_reference"): remaining_ctes.pop(reference.raw_normalized(False).upper(), None) # For any left un-referenced at the end. Raise an issue about them. for name in remaining_ctes.values(): cte = query.ctes[name] result += [ LintResult( anchor=cte.cte_name_segment, description=f"Query defines CTE " f'"{cte.cte_name_segment.raw}" ' f"but does not use it.", ) ] return result sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST04.py000066400000000000000000000227301503426445100222210ustar00rootroot00000000000000"""Implementation of Rule ST04.""" from sqlfluff.core.parser import BaseSegment, Indent, NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp from sqlfluff.utils.reflow.reindent import construct_single_indent class Rule_ST04(BaseRule): """Nested ``CASE`` statement in ``ELSE`` clause could be flattened. **Anti-pattern** In this example, the outer ``CASE``'s ``ELSE`` is an unnecessary, nested ``CASE``. .. code-block:: sql SELECT CASE WHEN species = 'Cat' THEN 'Meow' ELSE CASE WHEN species = 'Dog' THEN 'Woof' END END as sound FROM mytable **Best practice** Move the body of the inner ``CASE`` to the end of the outer one. .. 
code-block:: sql SELECT CASE WHEN species = 'Cat' THEN 'Meow' WHEN species = 'Dog' THEN 'Woof' END AS sound FROM mytable """ name = "structure.nested_case" aliases = ("L058",) groups = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Nested CASE statement in ELSE clause could be flattened.""" segment = FunctionalContext(context).segment assert segment.select(sp.is_type("case_expression")) case1_children = segment.children() case1_first_case = case1_children.first(sp.is_keyword("CASE")).get() assert case1_first_case case1_first_when = case1_children.first( sp.is_type("when_clause", "else_clause") ).get() case1_last_when = case1_children.last(sp.is_type("when_clause")).get() case1_else_clause = case1_children.select(sp.is_type("else_clause")) case1_else_expressions = case1_else_clause.children(sp.is_type("expression")) expression_children = case1_else_expressions.children() case2 = expression_children.select(sp.is_type("case_expression")) case2_children = case2.children() case2_first_case = case2_children.first(sp.is_keyword("CASE")).get() case2_first_when = case2_children.first( sp.is_type("when_clause", "else_clause") ).get() # The len() checks below are for safety, to ensure the CASE inside # the ELSE is not part of a larger expression. In that case, it's # not safe to simplify in this way -- we'd be deleting other code. if ( not case1_last_when or len(case1_else_expressions) > 1 or len(expression_children) > 1 or not case2 ): return LintResult() # Determine if we can combine the else case statement: the first and # second case expressions should be the same. If they aren't, that # case currently isn't handled. if [ x.raw_upper for x in segment.children(sp.is_code()) .select(start_seg=case1_first_case, stop_seg=case1_first_when) .raw_segments ] != [ x.raw_upper for x in case2.children(sp.is_code()) .select(start_seg=case2_first_case, stop_seg=case2_first_when) .raw_segments ]: return LintResult() # We can assert that this exists because of the previous check. assert case1_last_when # We can also assert that we'll have an else clause because # otherwise the case2 check above would fail. case1_else_clause_seg = case1_else_clause.get() assert case1_else_clause_seg # Delete everything between the last "WHEN" clause and the "ELSE" clause. case1_to_delete = case1_children.select( start_seg=case1_last_when, stop_seg=case1_else_clause_seg ) # Restore any comments that were deleted after_last_comment_index = ( case1_to_delete.find(case1_to_delete.last(sp.is_comment()).get()) + 1 ) case1_comments_to_restore = case1_to_delete.select( stop_seg=case1_to_delete.get(after_last_comment_index) ) after_else_comment = case1_else_clause.children().select( select_if=sp.is_type("newline", "comment", "whitespace"), stop_seg=case1_else_expressions.get(), ) # Delete the nested "CASE" expression. fixes = case1_to_delete.apply(LintFix.delete) tab_space_size: int = context.config.get("tab_space_size", ["indentation"]) indent_unit: str = context.config.get("indent_unit", ["indentation"]) # Determine the indentation to use when we move the nested "WHEN" # and "ELSE" clauses, based on the indentation of case1_last_when. # If no whitespace segments found, use default indent. when_indent_str = self._get_indentation( case1_children, case1_last_when, tab_space_size, indent_unit ) # Again determine indentation, but matching the "CASE"/"END" level.
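        # For example (illustrative): with indent_unit="space" and
        # tab_space_size=4, an indent level of 2 yields
        # construct_single_indent(...) * 2 == 8 spaces, unless suitable
        # leading whitespace is found and reused instead.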
end_indent_str = self._get_indentation( case1_children, case1_first_case, tab_space_size, indent_unit ) # Move the nested "when" and "else" clauses after the last outer # "when". nested_clauses = case2.children( sp.is_type("when_clause", "else_clause", "newline", "comment", "whitespace") ) # Rebuild the nested case statement. # Any comments after the last outer "WHEN" that were deleted segments = list(case1_comments_to_restore) # Any comments between the "ELSE" and nested "CASE" segments += self._rebuild_spacing(when_indent_str, after_else_comment) # The nested "WHEN", "ELSE" or "comments", with logical spacing segments += self._rebuild_spacing(when_indent_str, nested_clauses) fixes.append(LintFix.create_after(case1_last_when, segments, source=segments)) # Delete the outer "else" clause. fixes.append(LintFix.delete(case1_else_clause_seg)) # Add spacing for any comments that may exist after the nested `END` # but only on that same line. fixes += self._nested_end_trailing_comment( case1_children, case1_else_clause_seg, end_indent_str ) return LintResult(case2[0], fixes=fixes) def _get_indentation( self, parent_segments: Segments, segment: BaseSegment, tab_space_size: int, indent_unit: str, ) -> str: """Calculate the indentation level for rebuilding the nested structure. This is only a best-effort attempt as the input may not be equally indented. The layout rules, if run, would resolve this. """ leading_whitespace = ( parent_segments.select(stop_seg=segment) .reversed() .first(sp.is_type("whitespace")) ) seg_indent = parent_segments.select(stop_seg=segment).last(sp.is_type("indent")) indent_level = 1 if ( seg_indent and (segment_indent := seg_indent.get()) and isinstance(segment_indent, Indent) ): indent_level = segment_indent.indent_val + 1 indent_str = ( "".join(seg.raw for seg in leading_whitespace) if leading_whitespace and (whitespace_seg := leading_whitespace.get()) and len(whitespace_seg.raw) > 1 else construct_single_indent(indent_unit, tab_space_size) * indent_level ) return indent_str def _nested_end_trailing_comment( self, case1_children: Segments, case1_else_clause_seg: BaseSegment, end_indent_str: str, ) -> list[LintFix]: """Prepend newline spacing to comments on the final nested `END` line.""" trailing_end = case1_children.select( start_seg=case1_else_clause_seg, loop_while=sp.not_(sp.is_type("newline")), ) fixes = trailing_end.select( sp.is_whitespace(), loop_while=sp.not_(sp.is_comment()) ).apply(LintFix.delete) first_comment = trailing_end.first(sp.is_comment()).get() if first_comment: segments = [NewlineSegment(), WhitespaceSegment(end_indent_str)] fixes.append(LintFix.create_before(first_comment, segments, segments)) return fixes def _rebuild_spacing( self, indent_str: str, nested_clauses: Segments ) -> list[BaseSegment]: buff = [] # If the first segment is a comment, add a newline prior_newline = nested_clauses.first(sp.not_(sp.is_whitespace())).any( sp.is_comment() ) prior_whitespace = "" for seg in nested_clauses: if seg.is_type("when_clause", "else_clause") or ( prior_newline and seg.is_comment ): buff += [NewlineSegment(), WhitespaceSegment(indent_str), seg] prior_newline = False prior_whitespace = "" elif seg.is_type("newline"): prior_newline = True prior_whitespace = "" elif not prior_newline and seg.is_comment: buff += [WhitespaceSegment(prior_whitespace), seg] prior_newline = False prior_whitespace = "" elif seg.is_whitespace: # Don't reset newline prior_whitespace = seg.raw return buff
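The flattening above can be exercised end-to-end through SQLFluff's simple Python API. A minimal sketch (treat the exact ``fix()`` signature as an assumption and check the API docs for your version):

    import sqlfluff

    nested = (
        "SELECT CASE WHEN species = 'Cat' THEN 'Meow' "
        "ELSE CASE WHEN species = 'Dog' THEN 'Woof' END END AS sound FROM mytable"
    )
    # Expect the inner CASE to be flattened into an additional WHEN branch.
    print(sqlfluff.fix(nested, rules=["ST04"]))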
sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST05.py """Implementation of Rule ST05.""" from collections.abc import Iterator from functools import partial from typing import NamedTuple, Optional, TypeVar, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser import ( BaseSegment, CodeSegment, KeywordSegment, NewlineSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ( CTEDefinitionSegment, TableExpressionSegment, TableReferenceSegment, WithCompoundStatementSegment, ) from sqlfluff.utils.analysis.query import Query, Selectable from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import FunctionalContext, Segments from sqlfluff.utils.functional.segment_predicates import ( is_keyword, is_type, is_whitespace, ) _SELECT_TYPES = [ "with_compound_statement", "set_expression", "select_statement", ] class _NestedSubQuerySummary(NamedTuple): query: Query selectable: Selectable table_alias: AliasInfo select_source_names: set[str] class Rule_ST05(BaseRule): """Join/From clauses should not contain subqueries. Use CTEs instead. By default this rule is configured to allow subqueries within ``FROM`` clauses but not within ``JOIN`` clauses. If you prefer a stricter lint then this is configurable. .. note:: Some dialects don't allow CTEs, and for those dialects this rule makes no sense and should be disabled. **Anti-pattern** .. code-block:: sql select a.x, a.y, b.z from a join ( select x, z from b ) using(x) **Best practice** .. code-block:: sql with c as ( select x, z from b ) select a.x, a.y, c.z from a join c using(x) """ name = "structure.subquery" aliases = ("L042",) groups = ("all", "structure") config_keywords = ["forbid_subquery_in"] crawl_behaviour = SegmentSeekerCrawler(set(_SELECT_TYPES)) _config_mapping = { "join": ["join_clause"], "from": ["from_expression_element"], "both": ["join_clause", "from_expression_element"], } is_fix_compatible = True # These are dialects that support WITH ... INSERT ... SELECT instead of # INSERT ... WITH ... SELECT # NOTE: this may be incomplete # NOTE: postgres supports both ways, so I've not included it here. _with_before_insert = {"tsql"} def _eval(self, context: RuleContext) -> EvalResultType: """Join/From clauses should not contain subqueries. Use CTEs instead.""" self.forbid_subquery_in: str functional_context = FunctionalContext(context) segment = functional_context.segment parent_stack = functional_context.parent_stack is_select = segment.all(is_type(*_SELECT_TYPES)) is_select_child = parent_stack.any(is_type(*_SELECT_TYPES)) insert_parent = parent_stack.last(is_type("insert_statement")) if not is_select or is_select_child: # Nothing to do.
return None query: Query = Query.from_segment(context.segment, context.dialect) # Generate an instance which will track and shape our output CTE ctes = _CTEBuilder() # Init the output/final select & # populate existing CTEs for cte in query.ctes.values(): ctes.insert_cte(cte.cte_definition_segment) is_with = segment.all(is_type("with_compound_statement")) # TODO: consider if we can fix recursive CTEs is_recursive = is_with and len(segment.children(is_keyword("recursive"))) > 0 case_preference = _get_case_preference(segment) output_select = segment if is_with: output_select = segment.children( is_type( "set_expression", "select_statement", "insert_statement", ) ) elif insert_parent and context.dialect.name in self._with_before_insert: # Here we select the parent `insert_statement` because it should be where # we place the new CTE. output_select = insert_parent segment = insert_parent # Issue 3617: In T-SQL (and possibly other dialects) the automated fix # leaves parentheses in a location that causes a syntax error. This is an # unusual corner case. For simplicity, we still generate the lint warning # but don't try to generate a fix. Someone could look at this later (a # correct fix would involve removing the parentheses.) bracketed_ctas = [seg.type for seg in parent_stack[-2:]] == [ "create_table_statement", "bracketed", ] # If there are offending elements, calculate fixes clone_map = SegmentCloneMap(segment[0]) results = self._lint_query( dialect=context.dialect, query=query, ctes=ctes, case_preference=case_preference, clone_map=clone_map, ) results_list: list[tuple[LintResult, BaseSegment, str, BaseSegment, bool]] = [] for result in results: ( lint_result, from_expression, alias_name, subquery_parent, is_fixable, ) = result assert any( from_expression is seg for seg in subquery_parent.recursive_crawl_all() ) results_list.append(result) if not is_fixable: continue this_seg_clone = clone_map[from_expression] new_table_ref = _create_table_ref(alias_name, context.dialect) # Add positions to the new table reference; other rules may need a position # but the clone is not a typical "fix". assert this_seg_clone.pos_marker this_seg_clone.segments = this_seg_clone._position_segments( (new_table_ref,), this_seg_clone.pos_marker ) ctes.replace_with_clone(subquery_parent, clone_map) for ( lint_result, from_expression, alias_name, subquery_parent, is_fixable, ) in results_list: if bracketed_ctas or is_recursive or not is_fixable: continue # Compute fix. output_select_clone = clone_map[output_select[0]] fixes = ctes.ensure_space_after_from( output_select[0], output_select_clone, subquery_parent ) new_select = ctes.compose_select( output_select_clone, case_preference=case_preference ) lint_result.fixes = [ LintFix.replace( segment[0], edit_segments=[new_select], ) ] lint_result.fixes += fixes return [lint_result[0] for lint_result in results_list] def _nested_subqueries( self, query: Query, dialect: Dialect ) -> Iterator[_NestedSubQuerySummary]: parent_types = self._config_mapping[self.forbid_subquery_in] for i, q in enumerate([query] + list(query.ctes.values())): for selectable in q.selectables: if not selectable.select_info: continue # pragma: no cover select_source_names = set() for a in selectable.select_info.table_aliases: # For each table in FROM, return table name and any alias.
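                    # For example (illustrative): for "FROM my_table AS t" this
                    # records both the alias "t" (a.ref_str) and the object name
                    # "my_table" (a.object_reference.raw) as possible source names.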
if a.ref_str: select_source_names.add(a.ref_str) if a.object_reference: select_source_names.add(a.object_reference.raw) for table_alias in selectable.select_info.table_aliases: try: query = Query.from_root( table_alias.from_expression_element, dialect ) except AssertionError: # Couldn't find a selectable, carry on. continue path_to = selectable.selectable.path_to( table_alias.from_expression_element ) if not ( # The from_expression_element table_alias.from_expression_element.is_type(*parent_types) # Or any of its parents up to the selectable or any(ps.segment.is_type(*parent_types) for ps in path_to) ): continue if _is_correlated_subquery( Segments(query.selectables[0].selectable), select_source_names, dialect, ): continue yield _NestedSubQuerySummary( q, selectable, table_alias, select_source_names ) # Recursively find nested queries in CTEs if i > 0: yield from self._nested_subqueries(query, dialect) def _lint_query( self, dialect: Dialect, query: Query, ctes: "_CTEBuilder", case_preference: str, clone_map, ) -> Iterator[tuple[LintResult, BaseSegment, str, BaseSegment, bool]]: """Given the root query, compute lint warnings.""" nsq: _NestedSubQuerySummary for nsq in self._nested_subqueries(query, dialect): alias_name, _ = ctes.create_cte_alias(nsq.table_alias) # 'anchor' is the TableExpressionSegment we fix/replace w/CTE name. anchor = nsq.table_alias.from_expression_element.segments[0] # If we have duplicate CTE names just don't fix anything # Return the lint warnings anyway is_fixable = alias_name not in ctes.list_used_names() # If the subquery is a table_expression, get the bracketed child instead. if anchor.is_type("table_expression"): bracket_anchor = anchor.get_child("bracketed") # If the table_expression isn't bracketed, assume it isn't a subquery. if not bracket_anchor: continue else: bracket_anchor = anchor # We can't create a CTE from a nested subquery here, so ignore it. if not bracket_anchor.is_type("bracketed") or bracket_anchor.get_child( "table_expression" ): is_fixable = False if is_fixable: new_cte = _create_cte_seg( # 'prep_1 as (select ...)' alias_name=alias_name, subquery=clone_map[bracket_anchor], case_preference=case_preference, dialect=dialect, ) ctes.insert_cte(new_cte) # Grab the first keyword or symbol in the subquery to # use as the anchor. This makes the lint warning less # likely to be filtered out if a bit of the subquery # happens to be templated. anchor = next(anchor.recursive_crawl("keyword", "symbol")) res = LintResult( anchor=anchor, description=f"{nsq.selectable.selectable.type} clauses " "should not contain subqueries. Use CTEs instead", fixes=[], ) yield ( res, # FromExpressionElementSegment, parent of original "anchor" segment nsq.table_alias.from_expression_element, alias_name, # Name of CTE we're creating from the nested query # Query with the subquery: 'select a from (select x from b)' nsq.selectable.selectable, is_fixable, ) def _get_first_select_statement_descendant( segment: BaseSegment, ) -> Optional[BaseSegment]: """Find first SELECT statement segment (if any) in descendants of 'segment'.""" for select_statement in segment.recursive_crawl( "select_statement", recurse_into=False ): # We only want the first one. return select_statement return None # pragma: no cover def _is_correlated_subquery( nested_select: Segments, select_source_names: set[str], dialect: Dialect ) -> bool: """Given a nested select and the sources of its parent, determine if correlated. 
https://en.wikipedia.org/wiki/Correlated_subquery """ select_statement = _get_first_select_statement_descendant(nested_select[0]) if not select_statement: return False # pragma: no cover nested_select_info = get_select_statement_info(select_statement, dialect) if nested_select_info: for r in nested_select_info.reference_buffer: for tr in r.extract_possible_references(level=r.ObjectReferenceLevel.TABLE): # Check for correlated subquery, as indicated by use of a # parent reference. if tr.part in select_source_names: return True return False class _CTEBuilder: """Gather CTE parts, maintain order and track naming/aliasing.""" def __init__(self) -> None: self.ctes: list[CTEDefinitionSegment] = [] self.name_idx = 0 def list_used_names(self) -> list[str]: """Check CTEs and return used aliases.""" used_names: list[str] = [] for cte in self.ctes: id_seg = cte.get_identifier() cte_name = id_seg.raw if id_seg.is_type("quoted_identifier"): cte_name = cte_name[1:-1] used_names.append(cte_name) return used_names def insert_cte(self, cte: CTEDefinitionSegment) -> None: """Add a new CTE to the list as late as possible but before all its parents.""" # This should still have the position markers of its true position inbound_subquery = ( Segments(cte).children().last(lambda seg: bool(seg.pos_marker)) ) insert_position = next( ( i for i, el in enumerate(self.ctes) if _is_child(Segments(el).children().last(), inbound_subquery) ), len(self.ctes), ) self.ctes.insert(insert_position, cte) def create_cte_alias(self, alias: Optional[AliasInfo]) -> tuple[str, bool]: """Find or create the name for the next CTE.""" if alias and alias.aliased and alias.ref_str: # If we know the name use it return alias.ref_str, False self.name_idx = self.name_idx + 1 name = f"prep_{self.name_idx}" if name in self.list_used_names(): # Corner case where prep_x already exists in the original query return self.create_cte_alias(None) return name, True def get_cte_segments(self) -> list[BaseSegment]: """Return a valid list of CTEs with required padding segments.""" cte_segments: list[BaseSegment] = [] for cte in self.ctes: cte_segments += [ cte, SymbolSegment(",", type="comma"), NewlineSegment(), ] return cte_segments[:-2] def compose_select( self, output_select_clone: BaseSegment, case_preference: str ) -> BaseSegment: """Compose our final new CTE.""" # Compose the CTE. new_select = WithCompoundStatementSegment( segments=tuple( [ _segmentify("WITH", case_preference), WhitespaceSegment(), *self.get_cte_segments(), NewlineSegment(), output_select_clone, ] ) ) return new_select def ensure_space_after_from( self, output_select: BaseSegment, output_select_clone: BaseSegment, subquery_parent: BaseSegment, ) -> list[LintFix]: """Ensure there's whitespace between "FROM" and the CTE table name.""" fixes = [] if subquery_parent is output_select: ( missing_space_after_from, from_clause, from_clause_children, from_segment, ) = self._missing_space_after_from(output_select_clone) if missing_space_after_from: # Case 1: from_clause is a child of the cloned "output_select_clone" # that will be inserted by a fix. We can directly manipulate the # "segments" list to insert whitespace between "FROM" and the # CTE table name.
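                # Sketch of the splice below (segment names are illustrative):
                #     before: [..., <FROM>, <table_expression>, ...]
                #     after:  [..., <FROM>, <whitespace>, <table_expression>, ...]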
idx_from = from_clause_children.index(from_segment[0]) from_clause.segments = list( from_clause_children[: idx_from + 1] + (WhitespaceSegment(),) + from_clause_children[idx_from + 1 :] ) else: ( missing_space_after_from, from_clause, from_clause_children, from_segment, ) = self._missing_space_after_from(subquery_parent) if missing_space_after_from: # Case 2. from_segment is in the current parse tree, so we can't # modify it directly. Create a LintFix to do it. fixes.append( LintFix.create_after(from_segment[0], [WhitespaceSegment()]) ) return fixes @staticmethod def _missing_space_after_from(segment: BaseSegment): missing_space_after_from = False from_clause_children = None from_segment = None from_clause = segment.get_child("from_clause") if from_clause is not None: from_clause_children = Segments(*from_clause.segments) from_segment = from_clause_children.first(is_keyword("from")) if from_segment and not from_clause_children.select( start_seg=from_segment[0], loop_while=is_whitespace() ): missing_space_after_from = True return missing_space_after_from, from_clause, from_clause_children, from_segment def replace_with_clone(self, segment, clone_map) -> None: for idx, cte in enumerate(self.ctes): if any(segment is seg for seg in cte.recursive_crawl_all()): self.ctes[idx] = clone_map[self.ctes[idx]] return None def _is_child(maybe_parent: Segments, maybe_child: Segments) -> bool: """Is the child actually between the start and end markers of the parent.""" assert ( len(maybe_child) == 1 ), "Cannot assess child relationship of multiple segments" assert ( len(maybe_parent) == 1 ), "Cannot assess child relationship of multiple parents" child_markers = maybe_child[0].pos_marker parent_pos = maybe_parent[0].pos_marker assert parent_pos and child_markers if child_markers.start_point_marker() < parent_pos.start_point_marker(): return False # pragma: no cover if child_markers.end_point_marker() > parent_pos.end_point_marker(): return False return True S = TypeVar("S", bound=type[BaseSegment]) def _get_seg(class_def: S, dialect: Dialect) -> S: return cast(S, dialect.get_segment(class_def.__name__)) def _create_cte_seg( alias_name: str, subquery: BaseSegment, case_preference: str, dialect: Dialect ) -> CTEDefinitionSegment: CTESegment = _get_seg(CTEDefinitionSegment, dialect) IdentifierSegment = cast( type[CodeSegment], dialect.get_segment("IdentifierSegment") ) element: CTEDefinitionSegment = CTESegment( segments=( IdentifierSegment( raw=alias_name, type="naked_identifier", ), WhitespaceSegment(), _segmentify("AS", casing=case_preference), WhitespaceSegment(), # Return the bracketed segment instead of the table expression subquery, ) ) return element def _create_table_ref(table_name: str, dialect: Dialect) -> TableExpressionSegment: Seg = partial(_get_seg, dialect=dialect) TableExpressionSeg = Seg(TableExpressionSegment) TableReferenceSeg = Seg(TableReferenceSegment) IdentifierSegment = cast( type[CodeSegment], dialect.get_segment("IdentifierSegment") ) return TableExpressionSeg( segments=( TableReferenceSeg( segments=( IdentifierSegment( raw=table_name, type="naked_identifier", ), ), ), ), ) def _get_case_preference(root_select: Segments): # First get the segment itself so we have access to the generator root_segment = root_select.get() assert root_segment, "Root SELECT not found." # Get the first item of the recursive crawl. first_keyword = next( root_segment.recursive_crawl( "keyword", recurse_into=False, ), None, ) assert first_keyword, "Keyword not found." 
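    # For example (illustrative): a statement starting "select ..." yields
    # "LOWER", while one starting "SELECT ..." yields "UPPER", so the inserted
    # "WITH" and "AS" keywords match the query's existing casing.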
# Get case preference based on the case of that keyword. if first_keyword.raw.islower(): return "LOWER" return "UPPER" def _segmentify(input_el: str, casing: str) -> BaseSegment: """Apply casing and convert strings to Keywords.""" input_el = input_el.lower() if casing == "UPPER": input_el = input_el.upper() return KeywordSegment(raw=input_el) class SegmentCloneMap: """Clones a segment tree, maps from original segments to their clones.""" def __init__(self, segment: BaseSegment): segment_copy = segment.copy() self.segment_map = {} for old_segment, new_segment in zip( segment.recursive_crawl_all(), segment_copy.recursive_crawl_all(), ): new_segment.pos_marker = old_segment.pos_marker self.segment_map[id(old_segment)] = new_segment def __getitem__(self, old_segment: BaseSegment) -> BaseSegment: return self.segment_map[id(old_segment)] sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST06.py """Implementation of Rule ST06.""" from collections.abc import Iterator from typing import Optional, Union from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_ST06(BaseRule): """Select wildcards then simple targets before calculations and aggregates. **Anti-pattern** .. code-block:: sql select a, *, row_number() over (partition by id order by date) as y, b from x **Best practice** Order ``select`` targets in ascending complexity. .. code-block:: sql select *, a, b, row_number() over (partition by id order by date) as y from x """ name = "structure.column_order" aliases = ("L034",) groups = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _validate(self, i: int, segment: BaseSegment) -> None: # Check if we've seen a more complex select target element already if self.seen_band_elements[i + 1 : :] != [[]] * len( self.seen_band_elements[i + 1 : :] ): # Found a violation (i.e. a simpler element that *follows* a more # complex element). self.violation_exists = True self.current_element_band: Optional[int] = i self.seen_band_elements[i].append(segment) def _eval(self, context: RuleContext) -> EvalResultType: self.violation_exists = False # Bands of select targets in order to be enforced select_element_order_preference: tuple[ tuple[Union[str, tuple[str, ...]], ...], ... ] = ( ("wildcard_expression",), ( "object_reference", "literal", "cast_expression", ("function", "cast"), ("expression", "cast_expression"), ), ) # Track which bands have been seen, with an additional empty list for the # non-matching elements. If we find a matching target element, we append the # element to the corresponding index.
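        # For example (illustrative): for "SELECT *, a, row_number() over () FROM x"
        # the tracking lists might end up as [[<*>], [<a>], [<row_number() over ()>]],
        # where the final list is the catch-all band for "complex" targets.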
self.seen_band_elements: list[list[BaseSegment]] = [ [] for _ in select_element_order_preference ] + [ [] ] # type: ignore assert context.segment.is_type("select_clause") # insert, merge, create table, union are order-sensitive for seg in reversed(context.parent_stack): if seg.is_type( "insert_statement", "set_expression", "create_table_statement", "merge_statement", ): return None # CTE is order-sensitive only if CTE is referenced as SELECT * in set expression for seg in reversed(context.parent_stack): if seg.is_type("common_table_expression"): cte_identifier = seg.get_child("identifier") assert cte_identifier is not None maybe_with_compound_statement = seg.get_parent() if maybe_with_compound_statement is None: break # pragma: no cover with_compound_statement, _ = maybe_with_compound_statement for ref in with_compound_statement.recursive_crawl("table_reference"): if ref.raw_upper == cte_identifier.raw_upper: path = with_compound_statement.path_to(ref) if any( path_step.segment.is_type("set_expression") for path_step in path ): select_statements = [ path_step.segment for path_step in path if path_step.segment.is_type( "select_statement", "unordered_select_statement_segment", ) ] if any( "wildcard_expression" in select_statement.descendant_type_set for select_statement in select_statements ): return None select_clause_segment = context.segment select_target_elements = context.segment.get_children("select_clause_element") if not select_target_elements: return None # Iterate through all the select targets to find any order violations for segment in select_target_elements: # The band index of the current segment in # select_element_order_preference self.current_element_band = None # Compare the segment to the bands in select_element_order_preference for i, band in enumerate(select_element_order_preference): for e in band: # Identify simple select target if isinstance(e, str) and segment.get_child(e): self._validate(i, segment) # Identify function elif isinstance(e, tuple) and e[0] == "function": try: _function = segment.get_child("function") assert _function _function_name = _function.get_child("function_name") assert _function_name if _function_name.raw == e[1]: self._validate(i, segment) except (AttributeError, AssertionError): # If the segment doesn't match pass # Identify simple expression elif isinstance(e, tuple) and e[0] == "expression": try: _expression = segment.get_child("expression") assert _expression if ( _expression.get_child(e[1]) and _expression.segments[0].type in ( "column_reference", "object_reference", "literal", "cast_expression", ) # len == 2 to ensure the expression is 'simple' and ( len(_expression.segments) == 2 # cast_expression is one length or len(_expression.segments) == 1 ) ): self._validate(i, segment) except (AttributeError, AssertionError): # If the segment doesn't match pass # If the target doesn't exist in select_element_order_preference then it # is 'complex' and must go last if self.current_element_band is None: self.seen_band_elements[-1].append(segment) if self.violation_exists: if len(context.parent_stack) and any( self._implicit_column_references(context.parent_stack[-1]) ): # If there are implicit column references (i.e. column # numbers), warn but don't fix, because it's much more # complicated to autofix. return LintResult(anchor=select_clause_segment) # Create a list of all the edit fixes # We have to do this at the end of iterating through all the # select_target_elements to get the order correct. 
This means we can't # add a lint fix to each individual LintResult as we go ordered_select_target_elements = [ segment for band in self.seen_band_elements for segment in band ] # TODO: The "if" in the loop below compares corresponding items # to avoid creating "do-nothing" edits. A potentially better # approach would leverage difflib.SequenceMatcher.get_opcodes(), # which generates a list of edit actions (similar to the # command-line "diff" tool in Linux). This is more complex to # implement, but minimizing the number of LintFixes makes the # final application of patches (in "sqlfluff fix") more robust. fixes = [ LintFix.replace( initial_select_target_element, [replace_select_target_element], ) for initial_select_target_element, replace_select_target_element in zip( # noqa: E501 select_target_elements, ordered_select_target_elements ) if initial_select_target_element is not replace_select_target_element ] # Anchoring on the select statement segment ensures that # select statements which include macro targets are ignored # when ignore_templated_areas is set return LintResult(anchor=select_clause_segment, fixes=fixes) return None @classmethod def _implicit_column_references(cls, segment: BaseSegment) -> Iterator[BaseSegment]: """Yield any implicit ORDER BY or GROUP BY column references. This function was adapted from similar code in AM06. """ _ignore_types: list[str] = ["withingroup_clause", "window_specification"] if not segment.is_type(*_ignore_types): # Ignore Windowing clauses if segment.is_type("groupby_clause", "orderby_clause"): for seg in segment.segments: if seg.is_type("numeric_literal"): yield segment else: for seg in segment.segments: yield from cls._implicit_column_references(seg) sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST07.py """Implementation of Rule ST07.""" from typing import Optional from sqlfluff.core.parser import ( BaseSegment, IdentifierSegment, KeywordSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ColumnReferenceSegment from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_ST07(BaseRule): """Prefer specifying join keys instead of using ``USING``. .. note:: This rule was originally taken from the `dbt Style Guide `_ which notes that: Certain warehouses have inconsistencies in ``USING`` results (specifically Snowflake). In fact `dbt removed it from their style guide in February 2022 `_. However, some like the rule, so for now we will keep it in SQLFluff, but encourage those that do not find value in the rule to turn it off. .. note:: This rule is disabled for ClickHouse as it supports ``USING`` without brackets, which this rule cannot handle. **Anti-pattern** .. code-block:: sql SELECT table_a.field_1, table_b.field_2 FROM table_a INNER JOIN table_b USING (id) **Best practice** Specify the keys directly. .. code-block:: sql SELECT table_a.field_1, table_b.field_2 FROM table_a INNER JOIN table_b ON table_a.id = table_b.id """ name = "structure.using" aliases = ("L032",) groups: tuple[str, ...]
= ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) is_fix_compatible = True _dialects_disabled_by_default = [ "clickhouse", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: if context.dialect.name in self._dialects_disabled_by_default: return LintResult() """Look for USING in a join clause.""" segment = FunctionalContext(context).segment parent_stack = FunctionalContext(context).parent_stack # We are not concerned with non join clauses assert context.segment.is_type("join_clause") using_anchor = segment.children(sp.is_keyword("using")).first() # If there is no evidence of a USING then we exit if len(using_anchor) == 0: return None anchor = using_anchor.get() description = "Found USING statement. Expected only ON statements." # All returns from here out will be some form of linting error. # we prepare the variable here unfixable_result = LintResult( anchor=anchor, description=description, ) tables_in_join = parent_stack.last().children( sp.is_type("join_clause", "from_expression_element") ) # We can only safely fix the first join clause if segment.get(0) != tables_in_join.get(1): return unfixable_result parent_select = parent_stack.last(sp.is_type("select_statement")).get() if not parent_select: # pragma: no cover return unfixable_result select_info = get_select_statement_info(parent_select, context.dialect) table_aliases = [ ta for ta in (select_info.table_aliases if select_info else []) if ta.ref_str ] if len(table_aliases) < 2: return unfixable_result to_delete, insert_after_anchor = _extract_deletion_sequence_and_anchor(segment) table_a, table_b = table_aliases[:2] edit_segments = [ KeywordSegment(raw="ON"), WhitespaceSegment(raw=" "), ] + _generate_join_conditions( table_a.ref_str, table_b.ref_str, _extract_cols_from_using(segment, using_anchor), ) assert table_a.segment assert table_b.segment fixes = [ LintFix.create_before( anchor_segment=insert_after_anchor, source=[table_a.segment, table_b.segment], edit_segments=edit_segments, ), *[LintFix.delete(seg) for seg in to_delete], ] return LintResult( anchor=anchor, description=description, fixes=fixes, ) def _extract_cols_from_using(join_clause: Segments, using_segs: Segments) -> list[str]: # First bracket after the USING keyword, then find ids using_cols: list[str] = ( join_clause.children() .select(start_seg=using_segs[0], select_if=sp.is_type("bracketed")) .first() .children(sp.is_type("identifier")) .apply(lambda el: el.raw) ) return using_cols def _generate_join_conditions( table_a_ref: str, table_b_ref: str, columns: list[str] ) -> list[BaseSegment]: edit_segments: list[BaseSegment] = [] for col in columns: edit_segments = edit_segments + [ _create_col_reference( table_a_ref, col, ), WhitespaceSegment(raw=" "), SymbolSegment(raw="="), WhitespaceSegment(raw=" "), _create_col_reference( table_b_ref, col, ), WhitespaceSegment(raw=" "), KeywordSegment(raw="AND"), WhitespaceSegment(raw=" "), ] # Trim the " " "AND" " " at the end return edit_segments[:-3] SequenceAndAnchorRes = tuple[list[BaseSegment], BaseSegment] def _extract_deletion_sequence_and_anchor( join_clause: Segments, ) -> SequenceAndAnchorRes: insert_anchor: Optional[BaseSegment] = None to_delete: list[BaseSegment] = [] for seg in join_clause.children(): if seg.raw_upper == "USING": # Start collecting once we hit USING to_delete.append(seg) continue if len(to_delete) == 0: # Skip if we haven't started collecting continue if to_delete[-1].is_type("bracketed"): # terminate when we hit the brackets insert_anchor = seg break 
to_delete.append(seg) assert insert_anchor, "Insert Anchor must be present at this point" return to_delete, insert_anchor def _create_col_reference(table_ref: str, column_name: str) -> ColumnReferenceSegment: segments = ( IdentifierSegment(raw=table_ref, type="naked_identifier"), SymbolSegment(raw=".", type="symbol"), IdentifierSegment(raw=column_name, type="naked_identifier"), ) return ColumnReferenceSegment(segments=segments, pos_marker=None) sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST08.py000066400000000000000000000123401503426445100222210ustar00rootroot00000000000000"""Implementation of Rule ST08.""" from typing import Optional from sqlfluff.core.parser import BaseSegment, KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_ST08(BaseRule): """``DISTINCT`` used with parentheses. **Anti-pattern** In this example, parentheses are not needed and confuse ``DISTINCT`` with a function. The parentheses can also be misleading about which columns are affected by the ``DISTINCT`` (all the columns!). .. code-block:: sql SELECT DISTINCT(a), b FROM foo **Best practice** Remove parentheses to be clear that the ``DISTINCT`` applies to both columns. .. code-block:: sql SELECT DISTINCT a, b FROM foo """ name = "structure.distinct" aliases = ("L015",) groups = ("all", "structure", "core") crawl_behaviour = SegmentSeekerCrawler({"select_clause", "function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Looking for DISTINCT before a bracket. Look for DISTINCT keyword immediately followed by open parenthesis. """ seq = None anchor = None children = FunctionalContext(context).segment.children() if context.segment.is_type("select_clause"): # Look for `select_clause_modifier` modifier = children.select(sp.is_type("select_clause_modifier")) first_element = children.select(sp.is_type("select_clause_element")).first() expression = ( first_element.children(sp.is_type("expression")).first() or first_element ) bracketed = expression.children(sp.is_type("bracketed")).first() # is the first element only an expression with only brackets? if modifier and bracketed: # If there's nothing else in the expression, remove the brackets. if len(expression[0].segments) == 1: anchor, seq = self._remove_unneeded_brackets(context, bracketed) # Otherwise, still make sure there's a space after the DISTINCT. else: anchor = modifier[0] seq = ReflowSequence.from_around_target( modifier[0], context.parent_stack[0], config=context.config, sides="after", ) elif context.segment.is_type("function"): # Look for a function call DISTINCT() whose parent is an expression # with a single child. 
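# For illustration (a hedged sketch of the parse shapes involved, based on
# the test case mentioned further below): in
#   SELECT COUNT(DISTINCT(a)) FROM foo
# the inner ``DISTINCT(a)`` parses as a function whose parent expression
# contains nothing else, so the branch below applies; if the parent
# expression contained anything more than the single function call, we
# would bail out instead.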
anchor = context.parent_stack[-1] if not anchor.is_type("expression") or len(anchor.segments) != 1: return None function_name = children.select(sp.is_type("function_name")).first() bracketed = children.first(sp.is_type("function_contents")) if ( not function_name or function_name[0].raw_upper != "DISTINCT" or not bracketed # If the DISTINCT has a subquery, don't remove the brackets or bracketed.recursive_crawl("select_statement", recurse_into=False) ): return None # Using ReflowSequence here creates an unneeded space between CONCAT # and "(" in the test case test_fail_distinct_concat_inside_count: # SELECT COUNT(DISTINCT(CONCAT(col1, '-', col2, '-', col3))) # # seq = ReflowSequence.from_around_target( # anchor, # context.parent_stack[0], # config=context.config, # ).replace( # anchor, # (KeywordSegment("DISTINCT"), WhitespaceSegment()) # + self.filter_meta(bracketed[0].segments)[1:-1], # ) # Do this until we have a fix for the above. return LintResult( anchor=anchor, fixes=[ LintFix.replace( anchor, (KeywordSegment("DISTINCT"), WhitespaceSegment()) + self.filter_meta(bracketed.children()[0].segments)[1:-1], ) ], ) if seq and anchor: # Get modifications. fixes = seq.respace().get_fixes() if fixes: return LintResult( anchor=anchor, fixes=fixes, ) return None def _remove_unneeded_brackets( self, context: RuleContext, bracketed: Segments ) -> tuple[BaseSegment, ReflowSequence]: # Remove the brackets and strip any meta segments. anchor = bracketed.get() assert anchor seq = ReflowSequence.from_around_target( anchor, context.parent_stack[0], config=context.config, sides="before", ).replace(anchor, self.filter_meta(anchor.segments)[1:-1]) return anchor, seq sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST09.py000066400000000000000000000265611503426445100222340ustar00rootroot00000000000000"""Implementation of Rule ST09.""" from typing import Optional, cast from sqlfluff.core.parser import BaseSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ( FromExpressionElementSegment, JoinClauseSegment, ) from sqlfluff.utils.functional import FunctionalContext, Segments class Rule_ST09(BaseRule): """Joins should list the table referenced earlier/later first. This rule will break conditions from join clauses down into subconditions using the "and" and "or" binary operators. Subconditions that are made up of a qualified column reference, a comparison operator and another qualified column reference are then evaluated to check whether they list the table that was referenced earlier - or later, depending on the ``preferred_first_table_in_join_clause`` configuration. Subconditions that do not follow that pattern are ignored by this rule. .. note:: Joins in ``WHERE`` clauses are currently not supported by this rule. **Anti-pattern** In this example, the tables that were referenced later are listed first and the ``preferred_first_table_in_join_clause`` configuration is set to ``earlier``. .. code-block:: sql select foo.a, foo.b, bar.c from foo left join bar -- This subcondition does not list -- the table referenced earlier first: on bar.a = foo.a -- Neither does this subcondition: and bar.b = foo.b **Best practice** List the tables that were referenced earlier first. .. code-block:: sql select foo.a, foo.b, bar.c from foo left join bar on foo.a = bar.a and foo.b = bar.b """ name = "structure.join_condition_order" aliases = () groups: tuple[str, ...] 
= ("all", "structure") config_keywords = ["preferred_first_table_in_join_clause"] crawl_behaviour = SegmentSeekerCrawler({"from_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes. 0. Grab all table aliases into a table_aliases list. 1. Grab all conditions from the different join_on_condition segments. 2. Break conditions down into subconditions using the "and" and "or" binary operators. 3. Keep subconditions that are made up of a qualified column_reference, a comparison_operator and another qualified column_reference segments. 4. Check whether the table associated with the first column_reference segment has a greater index in table_aliases than the second column_reference segment. If so, populate the fixes list (lower index instead of greater index if preferred_first_table_in_join_clause == "later"). 5.a. If fixes is empty the rule passes. 5.b. If fixes isn't empty we return a LintResult object with fixable violations. """ self.preferred_first_table_in_join_clause: str assert context.segment.is_type("from_expression") # STEP 0. table_aliases: list[str] = [] children = FunctionalContext(context).segment.children() # we use recursive_crawl to deal with brackets join_clauses = children.recursive_crawl("join_clause") join_on_conditions = join_clauses.children().recursive_crawl( "join_on_condition" ) # we only care about join_on_condition segments if len(join_on_conditions) == 0: return None # the first alias comes from the from clause from_expression_alias: str = next( cast( FromExpressionElementSegment, children.recursive_crawl("from_expression_element")[0], ).get_eventual_alias() ).ref_str table_aliases.append(from_expression_alias) # the rest of the aliases come from the different join clauses join_clause_aliases: list[str] = [ cast(JoinClauseSegment, join_clause).get_eventual_aliases()[0][1].ref_str for join_clause in [clause for clause in join_clauses] ] table_aliases = table_aliases + join_clause_aliases table_aliases = [alias.upper() for alias in table_aliases] # STEP 1. conditions: list[list[BaseSegment]] = [] join_on_condition__expressions = join_on_conditions.children().recursive_crawl( "expression" ) for expression in join_on_condition__expressions: expression_group = [] for element in Segments(expression).children(): if element.type not in ("whitespace", "newline"): expression_group.append(element) conditions.append(expression_group) # STEP 2. subconditions: list[list[list[BaseSegment]]] = [] for expression_group in conditions: subconditions.append( self._split_list_by_segment_type( segment_list=expression_group, delimiter_type="binary_operator", delimiters=["and", "or"], ) ) subconditions_flattened: list[list[BaseSegment]] = [ item for sublist in subconditions for item in sublist ] # STEP 3. column_operator_column_subconditions: list[list[BaseSegment]] = [ subcondition for subcondition in subconditions_flattened if self._is_qualified_column_operator_qualified_column_sequence( subcondition ) ] # STEP 4. 
fixes: list[LintFix] = [] anchor_segment = context.segment # Default anchor for subcondition in column_operator_column_subconditions: comparison_operator = subcondition[1] first_column_reference = subcondition[0] second_column_reference = subcondition[2] raw_comparison_operators = comparison_operator.get_children( "raw_comparison_operator" ) first_table_seg = first_column_reference.get_child( "naked_identifier", "quoted_identifier" ) second_table_seg = second_column_reference.get_child( "naked_identifier", "quoted_identifier" ) assert first_table_seg and second_table_seg first_table = first_table_seg.raw_upper second_table = second_table_seg.raw_upper # if we swap the two column references around the comparison operator # we might have to replace the comparison operator with a different one raw_comparison_operator_opposites = {"<": ">", ">": "<"} # there seem to be edge cases where either the first table or the second # table is not in table_aliases, in which case we cannot provide any fix if first_table not in table_aliases or second_table not in table_aliases: continue if ( table_aliases.index(first_table) > table_aliases.index(second_table) and self.preferred_first_table_in_join_clause == "earlier" ) or ( table_aliases.index(first_table) < table_aliases.index(second_table) and self.preferred_first_table_in_join_clause == "later" ): # Use the first column reference as anchor if it has a literal # position marker. This ensures the violation is anchored to # a literal segment which won't be filtered out in templated # code. if ( not fixes and first_column_reference.pos_marker and first_column_reference.pos_marker.is_literal() ): anchor_segment = first_column_reference fixes = ( fixes + [ LintFix.replace( first_column_reference, [second_column_reference], ) ] + [ LintFix.replace( second_column_reference, [first_column_reference], ) ] + ( [ LintFix.replace( raw_comparison_operators[0], [ SymbolSegment( raw=raw_comparison_operator_opposites[ raw_comparison_operators[0].raw ], type="raw_comparison_operator", ) ], ) ] if raw_comparison_operators and raw_comparison_operators[0].raw in raw_comparison_operator_opposites and [r.raw for r in raw_comparison_operators] != ["<", ">"] else [] ) ) # STEP 5.a. if not fixes: return None # STEP 5.b. else: return LintResult( anchor=anchor_segment, fixes=fixes, description=( "Joins should list the table referenced " f"{self.preferred_first_table_in_join_clause} first." 
), ) @staticmethod def _split_list_by_segment_type( segment_list: list[BaseSegment], delimiter_type: str, delimiters: list[str] ) -> list: # Break down a list into multiple sub-lists using a set of delimiters delimiters = [delimiter.upper() for delimiter in delimiters] new_list = [] sub_list = [] for i in range(len(segment_list)): if i == len(segment_list) - 1: sub_list.append(segment_list[i]) new_list.append(sub_list) elif ( segment_list[i].type == delimiter_type and segment_list[i].raw_upper in delimiters ): new_list.append(sub_list) sub_list = [] else: sub_list.append(segment_list[i]) return new_list @staticmethod def _is_qualified_column_operator_qualified_column_sequence( segment_list: list[BaseSegment], ) -> bool: # Check if list is made up of a qualified column_reference segment, # a comparison_operator segment and another qualified column_reference segment if len(segment_list) != 3: return False if ( segment_list[0].type == "column_reference" and "dot" in segment_list[0].direct_descendant_type_set and segment_list[1].type == "comparison_operator" and segment_list[2].type == "column_reference" and "dot" in segment_list[2].direct_descendant_type_set ): return True return False

sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST10.py

"""Implementation of Rule ST10.""" from collections.abc import Iterator from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_ST10(BaseRule): """Redundant constant expression. Including an expression that always evaluates to either ``TRUE`` or ``FALSE`` regardless of the input columns is unnecessary and makes statements harder to read and understand. Constant conditions are sometimes mistakes (e.g. by mistyping the intended column name), and sometimes stem from the mistaken belief that they are necessary in some circumstances. In the former case, they can sometimes result in a cartesian join if the condition was supposed to be a join condition. Given the ambiguity of intent, this rule does not suggest an automatic fix, and instead invites the user to resolve the problem manually. **Anti-pattern** .. code-block:: sql SELECT * FROM my_table -- The following WHERE clause is redundant. WHERE my_table.col = my_table.col **Best practice** .. code-block:: sql SELECT * FROM my_table -- Replace with a condition that includes meaningful logic, -- or remove the condition entirely. WHERE my_table.col > 3 """ name = "structure.constant_expression" aliases = () groups: tuple[str, ...] = ("all", "structure") config_keywords = [] crawl_behaviour = SegmentSeekerCrawler({"expression"}) is_fix_compatible = False def _eval(self, context: RuleContext) -> EvalResultType: return [lint_result for lint_result in self._eval_gen(context)] def _eval_gen(self, context: RuleContext) -> Iterator[LintResult]: assert context.segment.is_type("expression") subsegments = context.segment.segments count_subsegments = len(subsegments) # The following literal expressions are allowable because they're # often included in auto-generated code. # NOTE: In future this could become a configuration option.
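# For illustration: ``WHERE 1 = 1`` is a common idiom in generated SQL
# because further predicates can then be appended uniformly as
# ``AND <condition>``, so it (and ``1 = 0``) is exempted here, while a
# hand-written constant comparison such as
# ``WHERE my_table.col = my_table.col`` is still flagged below.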
allowable_literal_expressions = {"1 = 1", "1 = 0"} for idx, seg in enumerate(context.segment.segments): if seg.is_type("comparison_operator"): if seg.raw not in ("=", "!=", "<>"): continue has_other_operators_on_lhs = any( subsegments[i] for i in range(idx - 1, -1, -1) if subsegments[i].is_type("comparison_operator", "binary_operator") ) has_other_operators_on_rhs = any( subsegments[i] for i in range(idx + 1, count_subsegments, 1) if subsegments[i].is_type("comparison_operator", "binary_operator") ) if has_other_operators_on_lhs or has_other_operators_on_rhs: # Figuring out the precedence of different operators is outside of scope continue lhs = next( ( subsegments[i] for i in range(idx - 1, -1, -1) if not subsegments[i].is_whitespace ), None, ) rhs = next( ( subsegments[i] for i in range(idx + 1, count_subsegments, 1) if not subsegments[i].is_whitespace ), None, ) if not lhs or not rhs: # Should be unreachable with correctly parsed tree continue # pragma: no cover if lhs.is_templated or rhs.is_templated: continue # literals need explicit handling (due to well-defined allow-list) if lhs.is_type("literal") and rhs.is_type("literal"): expr_s = f"{lhs.raw_normalized()} {seg.raw} {rhs.raw_normalized()}" if expr_s in allowable_literal_expressions: # ignore based on allowlist continue else: if lhs.type != rhs.type: continue if lhs.raw_normalized() != rhs.raw_normalized(): continue # attach violation to eq/ne operator in expression yield LintResult(seg)

sqlfluff-3.4.2/src/sqlfluff/rules/structure/ST11.py

"""Implementation of Rule ST11.""" from collections.abc import Iterator from typing import cast from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.query import Query class UnqualifiedReferenceError(ValueError): """Custom exception for signalling when a reference is unqualified.""" class Rule_ST11(BaseRule): """Joined table not referenced in query. This rule will check if there are any tables that are referenced in the ``FROM`` or ``JOIN`` clause of a ``SELECT`` statement, but where no columns from that table are referenced in any of the other clauses. Because some types of join are often used as filters, or to otherwise control granularity without being referenced (e.g. ``INNER`` and ``CROSS``), this rule only applies to explicit ``OUTER`` joins (i.e. ``LEFT``, ``RIGHT`` and ``FULL`` joins). This rule relies on all of the column references in the ``SELECT`` statement being qualified with at least the table name, and so is designed to work alongside :sqlfluff:ref:`references.qualification` (:sqlfluff:ref:`RF02`). This is because without the knowledge of what columns exist in each upstream table, the rule is unable to resolve which table an unqualified column reference is pulled from. This rule does not propose a fix, because it assumes that an unused table is a mistake, but doesn't know whether the mistake was the join, or the mistake was not using it. **Anti-pattern** In this example, the table ``bar`` is included in the ``JOIN`` clause but no columns from it are referenced elsewhere in the query. .. code-block:: sql SELECT foo.a, foo.b FROM foo LEFT JOIN bar ON foo.a = bar.a **Best practice** Remove the join, or use the table. ..
code-block:: sql SELECT foo.a, foo.b FROM foo; SELECT foo.a, foo.b, bar.c FROM foo LEFT JOIN bar ON foo.a = bar.a In the (*very rare*) situations that it is logically necessary to include a table in a join clause, but not otherwise refer to it (likely for granularity reasons, or as a stepping stone to another table), we recommend ignoring this rule for that specific line by using ``-- noqa: ST11`` at the end of the line. .. note:: To avoid sticky situations with casing and quoting in different dialects this rule uses case-insensitive comparison. That means if you have two tables with the same name, but different cases (and you're really sure that's a good idea!), then this rule may not detect if one of them is unused. """ name = "structure.unused_join" aliases = () groups: tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) is_fix_compatible = False def _extract_references_from_expression(self, segment: BaseSegment) -> str: assert segment.is_type("from_expression_element") # If there's an alias, we care more about that. alias_expression = segment.get_child("alias_expression") if alias_expression: alias_identifier = alias_expression.get_child("identifier") if alias_identifier: # Return the raw representation of the alias. return alias_identifier.raw_normalized(casefold=False).upper() # Otherwise if no alias, we need the name of the object we're # referencing. for table_reference in segment.recursive_crawl( "table_reference", no_recursive_seg_type="select_statement" ): return table_reference.segments[-1].raw_normalized(casefold=False).upper() # If we can't find a reference, just return an empty string # to signal that there isn't one. This could be a case of a # VALUES clause, or anything else selectable which hasn't # been given an alias. return "" def _extract_referenced_tables( self, segment: BaseSegment, allow_unqualified: bool = False ) -> Iterator[str]: # NOTE: Here we _may_ recurse into subqueries to find references. for ref in segment.recursive_crawl("column_reference"): obj_ref = cast(ObjectReferenceSegment, ref) parts = list(obj_ref.iter_raw_references()) if len(parts) < 2: if allow_unqualified: continue else: raise UnqualifiedReferenceError(ref.raw) # Remove any quoting characters when returning. yield parts[-2].part.upper().strip("\"'`[]") def _extract_references_from_select( self, segment: BaseSegment ) -> list[tuple[str, BaseSegment]]: assert segment.is_type("select_statement") # Tables which exist in the query joined_tables = [] # Tables which are referred to elsewhere. # NOTE: We populate this here if a table is referred to in the # join clause for a *different* table. referenced_tables = [] # Extract the information from any FROM clauses. from_clause = segment.get_child("from_clause") if not from_clause: # No from, no joins, no worries return [] for from_expression in from_clause.get_children("from_expression"): # Handle the main FROM expression. for from_expression_elem in from_expression.get_children( "from_expression_element" ): ref = self._extract_references_from_expression(from_expression_elem) if ref: joined_tables.append((ref, from_expression_elem)) if len(joined_tables) > 1: # We had an implicit cross join, don't add any FROM tables to check. joined_tables.clear() break # Then handle any JOIN clauses. for join_clause in from_expression.get_children("join_clause"): # Extract the join keywords used so we can exclude any which are # configured.
For example, INNER joins are often used as filters # without being referenced. join_keywords = { keyword.raw_upper for keyword in join_clause.get_children("keyword") } _this_clause_refs = [] for from_expression_elem in join_clause.get_children( "from_expression_element" ): ref = self._extract_references_from_expression(from_expression_elem) # Only mark it as a possible issue if it's an explicit LEFT, RIGHT # or FULL join. if ref and join_keywords.intersection({"FULL", "LEFT", "RIGHT"}): joined_tables.append((ref, from_expression_elem)) _this_clause_refs.append(ref) # If we have functions in the table_expression, we referenced them, # add them to the list. for tbl_ref in self._extract_referenced_tables( from_expression_elem, allow_unqualified=True ): if tbl_ref not in _this_clause_refs: referenced_tables.append(tbl_ref) # Look for any references in the ON clause to other tables. for join_on_condition in join_clause.get_children("join_on_condition"): # We can tolerate some unqualified references here, so no need # to raise exceptions. for tbl_ref in self._extract_referenced_tables( join_on_condition, allow_unqualified=True ): if tbl_ref not in _this_clause_refs: referenced_tables.append(tbl_ref) # NOTE: For the following debug message, it's important to note that if tables # are brought in with join type which isn't covered - (e.g. an INNER JOIN), then # they won't be shown as "in scope". self.logger.debug( f"Processed SELECT statement.\nJoined tables in scope: {joined_tables}\n" f"...of which referenced in non-self join clauses: {referenced_tables}" ) # If there's only a single table in this SELECT, we don't return # *ANY*. That's to shortcut this rule to not consider single table # selects. if len(joined_tables) <= 1: return [] # If a table is referenced elsewhere in the join, we shouldn't consider # it as a potential issue later. So purge them from the list now. return [ (ref, seg) for (ref, seg) in joined_tables if ref not in referenced_tables ] def _eval(self, context: RuleContext) -> list[LintResult]: """Implement the logic to detect unused tables in joins. First we fetch all the tables brought *into* the query via either FROM or JOIN clauses. We then search for all the tables referenced in all the other clauses and look for mismatches. NOTE: If *any* references aren't appropriately qualified, this rule will abort (because it won't know how to resolve the ambiguous references). That means it relies on RF02 having been already applied. """ reference_clause_types = [ "select_clause", "where_clause", "groupby_clause", "orderby_clause", "having_clause", "qualify_clause", ] joined_tables = self._extract_references_from_select(context.segment) if not joined_tables: # No from, no joins, no worries self.logger.debug("No tables found in scope.") return [] # We should now have a list of joined tables (or aliases) which # aren't otherwise referred to in the FROM clause. Now we work # through all the other clauses. table_references = set() for other_clause in context.segment.get_children(*reference_clause_types): try: for tbl_ref in self._extract_referenced_tables( other_clause, allow_unqualified=False ): self.logger.debug(f" {tbl_ref!r} referenced in {other_clause}") table_references.add(tbl_ref) except UnqualifiedReferenceError as err: self.logger.debug( f"Found an unqualified ref '{err}'. Aborting for this SELECT." 
) return [] query: Query = Query.from_segment(context.segment, context.dialect) for selectable in query.selectables: for wcinfo in selectable.get_wildcard_info(): table_references |= {t.upper() for t in wcinfo.tables} results: list[LintResult] = [] self.logger.debug( f"Select statement {context.segment} references " f"tables: {table_references}.\n" f"Joined tables to assess: {joined_tables}" ) for tbl_ref, segment in joined_tables: if tbl_ref not in table_references: results.append( LintResult( anchor=segment, description=( f"Joined table '{segment.raw}' not referenced " "elsewhere in query" ), ) ) return results

sqlfluff-3.4.2/src/sqlfluff/rules/structure/__init__.py

"""The structure plugin bundle.""" from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule, ConfigInfo @hookimpl def get_configs_info() -> dict[str, ConfigInfo]: """Get additional rule config validations and descriptions.""" return { "forbid_subquery_in": { "validation": ["join", "from", "both"], "definition": "Which clauses should be linted for subqueries?", }, "preferred_first_table_in_join_clause": { "validation": ["earlier", "later"], "definition": ( "Which table to list first when joining two tables. " "Defaults to ``earlier``." ), }, } @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.structure.ST01 import Rule_ST01 from sqlfluff.rules.structure.ST02 import Rule_ST02 from sqlfluff.rules.structure.ST03 import Rule_ST03 from sqlfluff.rules.structure.ST04 import Rule_ST04 from sqlfluff.rules.structure.ST05 import Rule_ST05 from sqlfluff.rules.structure.ST06 import Rule_ST06 from sqlfluff.rules.structure.ST07 import Rule_ST07 from sqlfluff.rules.structure.ST08 import Rule_ST08 from sqlfluff.rules.structure.ST09 import Rule_ST09 from sqlfluff.rules.structure.ST10 import Rule_ST10 from sqlfluff.rules.structure.ST11 import Rule_ST11 return [ Rule_ST01, Rule_ST02, Rule_ST03, Rule_ST04, Rule_ST05, Rule_ST06, Rule_ST07, Rule_ST08, Rule_ST09, Rule_ST10, Rule_ST11, ]

sqlfluff-3.4.2/src/sqlfluff/rules/tsql/TQ01.py

"""Implementation of Rule TQ01.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_TQ01(BaseRule): r"""``SP_`` prefix should not be used for user-defined stored procedures in T-SQL. **Anti-pattern** The ``SP_`` prefix is used to identify system procedures and can adversely affect performance of the user-defined stored procedure. It can also break system procedures if there is a naming conflict. .. code-block:: sql :force: CREATE PROCEDURE dbo.sp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 **Best practice** Use a different name for the stored procedure. .. code-block:: sql :force: CREATE PROCEDURE dbo.pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 -- Alternatively prefix with USP_ to -- indicate a user-defined stored procedure.
CREATE PROCEDURE dbo.usp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 """ name = "tsql.sp_prefix" aliases = ("L056",) groups = ("all", "tsql") crawl_behaviour = SegmentSeekerCrawler({"create_procedure_statement"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: r"""``SP_`` prefix should not be used for user-defined stored procedures.""" # Rule only applies to T-SQL syntax. if context.dialect.name != "tsql": return None # pragma: no cover # We are only interested in CREATE PROCEDURE statements. assert context.segment.is_type("create_procedure_statement") # Find the object reference for the stored procedure. object_reference_segment = next( s for s in context.segment.segments if s.type == "object_reference" ) # We only want to check the stored procedure name. procedure_segment = object_reference_segment.segments[-1] # If the stored procedure name starts with 'SP_' then raise a lint error. if procedure_segment.raw_upper.lstrip('["').startswith("SP_"): return LintResult( procedure_segment, description="'SP_' prefix should not be used for user-defined stored " "procedures.", ) return None

sqlfluff-3.4.2/src/sqlfluff/rules/tsql/__init__.py

"""The tsql rules plugin bundle. This plugin bundles linting rules which apply exclusively to TSQL. At some point in the future it might be useful to spin this off into a separate installable python package, but so long as the number of rules remains low, it makes sense to keep it bundled with SQLFluff core. """ from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> list[type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.tsql.TQ01 import Rule_TQ01 return [Rule_TQ01]

sqlfluff-3.4.2/src/sqlfluff/utils/__init__.py

"""Utilities which are usable by the cli, api or rules."""

sqlfluff-3.4.2/src/sqlfluff/utils/analysis/__init__.py

"""Code analysis tools to support development of more complex rules."""

sqlfluff-3.4.2/src/sqlfluff/utils/analysis/query.py

"""Tools for more complex analysis of SELECT statements.""" import logging from collections.abc import Iterator from dataclasses import dataclass, field from enum import Enum from functools import cached_property from typing import Generic, NamedTuple, Optional, TypeVar, Union, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.select import ( SelectStatementColumnsAndTables, get_select_statement_info, ) from sqlfluff.utils.functional import Segments, sp analysis_logger = logging.getLogger("sqlfluff.rules.analysis") # Segment types which directly are or contain selectables.
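# For illustration (a hedged summary): a bare ``SELECT ...``, a
# ``WITH ... SELECT ...`` and a ``SELECT ... UNION SELECT ...`` all count
# as selectable here, and each is analysed as a `Query` wrapping one or
# more `Selectable` objects.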
SELECTABLE_TYPES = ( "with_compound_statement", "set_expression", "select_statement", ) # Segment types which are likely to contain a subselect. SUBSELECT_TYPES = ( "merge_statement", "update_statement", "delete_statement", # NOTE: Values clauses won't have sub selects, but it's # also harmless to look, and they may appear in similar # locations. We include them here because they come through # the same code paths - although are likely to return nothing. "values_clause", ) class QueryType(Enum): """Query type: Simple is just a query; WithCompound has CTE(s).""" Simple = 1 WithCompound = 2 class WildcardInfo(NamedTuple): """Structure returned by Selectable.get_wildcard_info().""" segment: BaseSegment tables: list[str] @dataclass class Selectable: """A "SELECT" query segment.""" selectable: BaseSegment dialect: Dialect def as_str(self) -> str: """String representation for logging/testing.""" return self.selectable.raw @cached_property def select_info(self) -> Optional[SelectStatementColumnsAndTables]: """Returns SelectStatementColumnsAndTables on the SELECT.""" if self.selectable.is_type("select_statement"): return get_select_statement_info( self.selectable, self.dialect, early_exit=False ) else: # DML or values_clause # This is a bit dodgy, but a very useful abstraction. Here, we # interpret a DML or values_clause segment as if it were a SELECT. # Someday, we may need to tweak this, e.g. perhaps add a separate # QueryType for this (depending on the needs of the rules that use # it. # # For more info on the syntax and behavior of VALUES and its # similarity to a SELECT statement with literal values (no table # source), see the "Examples" section of the Postgres docs page: # (https://www.postgresql.org/docs/8.2/sql-values.html). values = Segments(self.selectable) alias_expression = values.children().first(sp.is_type("alias_expression")) name = alias_expression.children().first( sp.is_type("naked_identifier", "quoted_identifier") ) alias_info = AliasInfo( name[0].raw if name else "", name[0] if name else None, bool(name), self.selectable, alias_expression[0] if alias_expression else None, None, ) return SelectStatementColumnsAndTables( select_statement=self.selectable, table_aliases=[alias_info], standalone_aliases=[], reference_buffer=[], select_targets=[], col_aliases=[], using_cols=[], table_reference_buffer=[], ) def get_wildcard_info(self) -> list[WildcardInfo]: """Find wildcard (*) targets in the SELECT.""" buff: list[WildcardInfo] = [] # Some select-like statements don't have select_info # (e.g. test_exasol_invalid_foreign_key_from) if not self.select_info: # pragma: no cover # TODO: Review whether to remove this. # Restructure of Exasol dialect means it no longer applies. return buff for seg in self.select_info.select_targets: if seg.get_child("wildcard_expression"): if "." in seg.raw: # The wildcard specifies a target table. table = seg.raw.rsplit(".", 1)[0] buff.append(WildcardInfo(seg, [table])) else: # The wildcard is unqualified (i.e. does not specify a # table). This means to include all columns from all the # tables in the query. 
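# For illustration: given ``SELECT * FROM foo JOIN bar USING (a)``, the
# wildcard carries no qualifier, so (assuming both table aliases resolve)
# the resulting WildcardInfo lists both ``foo`` and ``bar`` as candidate
# source tables.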
buff.append( WildcardInfo( seg, [ ( alias_info.ref_str if alias_info.aliased else alias_info.from_expression_element.raw ) for alias_info in self.select_info.table_aliases if alias_info.ref_str ], ) ) return buff def find_alias(self, table: str) -> Optional[AliasInfo]: """Find corresponding table_aliases entry (if any) matching "table".""" alias_info = [ t for t in (self.select_info.table_aliases if self.select_info else []) if t.aliased and t.ref_str == table ] assert len(alias_info) <= 1 return alias_info[0] if alias_info else None T = TypeVar("T", bound="Query") @dataclass class Query(Generic[T]): """A main SELECT query plus possible CTEs.""" query_type: QueryType dialect: Dialect selectables: list[Selectable] = field(default_factory=list) ctes: dict[str, T] = field(default_factory=dict) # Parent scope. This query can "see" CTEs defined by parents. parent: Optional[T] = field(default=None) # subqueries are subselects in either the SELECT or FROM clause. subqueries: list[T] = field(default_factory=list) cte_definition_segment: Optional[BaseSegment] = field(default=None) cte_name_segment: Optional[BaseSegment] = field(default=None) is_subquery: Optional[bool] = None def __post_init__(self) -> None: # Once instantiated, set the `parent` attribute of any # subqueries and ctes. Some might already be set - but # we'll reset them anyway here. for subquery in self.subqueries: subquery.parent = self # We set this here to prevent a potential recursion error in RF03. subquery.is_subquery = True # NOTE: In normal operation, CTEs are typically set after # instantiation, and so for this method there aren't normally # any present. It is included here for completeness but not # covered in the test suite. # See `.from_segment()` for the way `parent` is set for CTEs. for cte in self.ctes.values(): # pragma: no cover cte.parent = self @property def children(self: T) -> list[T]: """Children could be CTEs, subselects or Others.""" return list(self.ctes.values()) + self.subqueries def as_dict(self: T) -> dict: """Dict representation for logging/testing.""" result: dict[str, Union[str, list[str], dict, list[dict]]] = {} if self.query_type != QueryType.Simple: result["query_type"] = self.query_type.name if self.selectables: result["selectables"] = [s.as_str() for s in self.selectables] if self.ctes: result["ctes"] = {k: v.as_dict() for k, v in self.ctes.items()} if self.subqueries: result["subqueries"] = [q.as_dict() for q in self.subqueries] return result def lookup_cte(self: T, name: str, pop: bool = True) -> Optional[T]: """Look up a CTE by name, in the current or any parent scope.""" cte = self.ctes.get(name.upper()) if cte: if pop: del self.ctes[name.upper()] return cte if self.parent: return self.parent.lookup_cte(name, pop) else: return None def crawl_sources( self: T, segment: BaseSegment, recurse_into: bool = True, pop: bool = False, lookup_cte: bool = True, ) -> Iterator[Union[str, T]]: """Find SELECTs, table refs, or value table function calls in segment. For each SELECT, yield a list of Query objects. As we find table references or function call strings, yield those. """ found_nested_select = False for seg in segment.recursive_crawl( "table_reference", "set_expression", "select_statement", "values_clause", recurse_into=False, allow_self=False, ): # Crawl efficiently, don't recurse here. We do that later. # What do we have? # 1. If it's a table reference, work out whether it's to a CTE # or to an external table. 
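# For illustration (a hedged sketch): in
#   WITH cte AS (SELECT 1) SELECT * FROM cte, other_tbl
# crawling the outer select should yield the `Query` built for ``cte``
# (resolved via `lookup_cte`) plus the plain string ``"other_tbl"`` for
# the external table.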
if seg.is_type("table_reference"): _seg = cast(ObjectReferenceSegment, seg) if not _seg.is_qualified() and lookup_cte: cte = self.lookup_cte(_seg.raw, pop=pop) if cte: # It's a CTE. yield cte # It's an external table reference. yield _seg.raw # 2. If it's some kind of more complex expression which is still # valid in this position, generate an appropriate sub-select. else: assert seg.is_type( "set_expression", "select_statement", "values_clause" ) found_nested_select = True # Generate a subquery, referencing the current query # as the parent. yield self.__class__.from_segment(seg, self.dialect, parent=self) if not found_nested_select: # If we reach here, the SELECT may be querying from a value table # function, e.g. UNNEST(). For our purposes, this is basically the # same as an external table. Return the "table" part as a string. table_expr = segment.get_child("table_expression") if table_expr: yield table_expr.raw @classmethod def _extract_subqueries( cls: type[T], selectable: Selectable, dialect: Dialect ) -> Iterator[T]: """Given a Selectable, extract subqueries.""" assert selectable.selectable.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES, ), f"Found unexpected {selectable.selectable}" # For MERGE, UPDATE & DELETE, we should expect to find a sub select. for subselect in selectable.selectable.recursive_crawl( *SELECTABLE_TYPES, recurse_into=False, allow_self=False, ): # NOTE: We don't need to set the parent here, because it will # be set when attached to the parent later. yield cls.from_segment(subselect, dialect=dialect) @classmethod def from_root(cls: type[T], root_segment: BaseSegment, dialect: Dialect) -> T: """Given a root segment, find the first appropriate selectable and analyse.""" selectable_segment = next( # Could be a Selectable or a MERGE root_segment.recursive_crawl(*SELECTABLE_TYPES, "merge_statement"), None, ) assert selectable_segment, f"No selectable found in {root_segment.raw!r}." return cls.from_segment(selectable_segment, dialect=dialect) @classmethod def from_segment( cls: type[T], segment: BaseSegment, dialect: Dialect, parent: Optional[T] = None, ) -> T: """Recursively generate a query from an appropriate segment.""" assert segment.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES ), f"Invalid segment for `from_segment`: {segment}" selectables = [] subqueries = [] cte_defs: list[BaseSegment] = [] query_type = QueryType.Simple if segment.is_type("select_statement", *SUBSELECT_TYPES): # It's a select. Instantiate a Query. selectables = [Selectable(segment, dialect=dialect)] elif segment.is_type("set_expression"): # It's a set expression. There may be multiple selectables. for _seg in segment.recursive_crawl("select_statement", recurse_into=False): selectables.append(Selectable(_seg, dialect=dialect)) else: # Otherwise it's a WITH statement. assert segment.is_type("with_compound_statement") query_type = QueryType.WithCompound for _seg in segment.recursive_crawl( # NOTE: We don't _specify_ set expressions here, because # all set expressions are made of selects, and we # want to look straight through to those child # expressions. "select_statement", recurse_into=False, no_recursive_seg_type="common_table_expression", ): selectables.append(Selectable(_seg, dialect=dialect)) # We also need to handle CTEs for _seg in segment.recursive_crawl( "common_table_expression", recurse_into=False, # Don't recurse into any other WITH statements. no_recursive_seg_type="with_compound_statement", ): # Just store the segments for now. 
cte_defs.append(_seg) # Extract subqueries from any selectables. for selectable in selectables: # NOTE: If any VALUES clauses are present, they pass through here # safely without Exception. They won't yield any subqueries. subqueries += list(cls._extract_subqueries(selectable, dialect)) # Instantiate the query outer_query = cls( query_type, dialect, selectables, parent=parent, subqueries=subqueries, ) # If we don't have any CTEs, we can stop now. if not cte_defs: return outer_query # Otherwise build up the CTE map. ctes = {} for cte in cte_defs: # NOTE: This feels a little risky to just assume the first segment # is the name, but it's the same functionality we've run with for # a while. name_seg = cte.segments[0] name = name_seg.raw_normalized(False).upper() # Get the query out of it, just stop on the first one we find. try: inner_qry = next( cte.recursive_crawl( *SELECTABLE_TYPES, "values_clause", # Very rarely, we might find things like update # clauses in here, handle them accordingly. *SUBSELECT_TYPES, ), ) # If this fails it's because we didn't find anything "selectable" # in the CTE. Flag this up, but then carry on. It's likely something # strange (e.g. a Clickhouse WITH clause setting a variable). except StopIteration: # pragma: no cover # Log it as an issue, but otherwise skip this one. analysis_logger.info(f"Skipping unexpected CTE structure: {cte.raw!r}") continue qry = cls.from_segment(inner_qry, dialect=dialect, parent=outer_query) assert qry # Populate the CTE specific args. qry.cte_definition_segment = cte qry.cte_name_segment = name_seg # File it in the dictionary. ctes[name] = qry # Set the CTEs attribute on the outer. # NOTE: Because we're setting this after instantiation, it's important # that we've already set the `parent` value of the cte queries. outer_query.ctes = ctes return outer_query

sqlfluff-3.4.2/src/sqlfluff/utils/analysis/select.py

"""Basic code analysis tools for SELECT statements.""" from typing import NamedTuple, Optional, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.dialects.dialect_ansi import ( FromClauseSegment, JoinClauseSegment, ObjectReferenceSegment, SelectClauseElementSegment, ) class SelectStatementColumnsAndTables(NamedTuple): """Structure returned by get_select_statement_info().""" select_statement: BaseSegment table_aliases: list[AliasInfo] standalone_aliases: list[BaseSegment] # value table function aliases reference_buffer: list[ObjectReferenceSegment] select_targets: list[SelectClauseElementSegment] col_aliases: list[ColumnAliasInfo] using_cols: list[BaseSegment] table_reference_buffer: list[ObjectReferenceSegment] def _get_object_references(segment: BaseSegment) -> list[ObjectReferenceSegment]: return list( cast(ObjectReferenceSegment, _seg) for _seg in segment.recursive_crawl( "object_reference", no_recursive_seg_type=["select_statement", "merge_statement"], ) ) def get_select_statement_info( segment: BaseSegment, dialect: Optional[Dialect], early_exit: bool = True ) -> Optional[SelectStatementColumnsAndTables]: """Analyze a select statement: targets, aliases, etc.
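A hedged usage sketch (``seg`` is assumed to be a parsed
``select_statement`` segment and ``dialect`` the resolved dialect
object):

    info = get_select_statement_info(seg, dialect, early_exit=False)
    if info:
        aliases = [a.ref_str for a in info.table_aliases]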
Return info.""" assert segment.is_type("select_statement") table_aliases, standalone_aliases = get_aliases_from_select(segment, dialect) if early_exit and not table_aliases and not standalone_aliases: return None # Iterate through all the references, both in the select clause, but also # potential others. sc = segment.get_child("select_clause") # Sometimes there is no select clause (e.g. "SELECT *" is a select_clause_element) if not sc: # pragma: no cover # TODO: Review whether this clause should be removed. It might only # have existed for an old way of structuring the Exasol dialect. return None # NOTE: In this first crawl, don't crawl inside any sub-selects, that's very # important for both isolation and performance reasons. reference_buffer = _get_object_references(sc) table_reference_buffer = [] for potential_clause in ( "where_clause", "groupby_clause", "having_clause", "orderby_clause", "qualify_clause", ): clause = segment.get_child(potential_clause) if clause: reference_buffer += _get_object_references(clause) # Get all select targets. _select_clause = segment.get_child("select_clause") assert _select_clause, "Select statement found without select clause." select_targets = cast( list[SelectClauseElementSegment], _select_clause.get_children("select_clause_element"), ) # Get all column aliases. NOTE: In two steps so mypy can follow. _pre_aliases = [s.get_alias() for s in select_targets] col_aliases = [_alias for _alias in _pre_aliases if _alias is not None] # Get any columns referred to in a using clause, and extract anything # from ON clauses. using_cols = [] fc = segment.get_child("from_clause") if fc: for table_expression in fc.recursive_crawl( "table_expression", no_recursive_seg_type="select_statement" ): for seg in table_expression.iter_segments(): # table references can get tricky with what is a schema, table, # project, or column. It may be best for now to use the redshift # unnest logic for dialects that support arrays or objects/structs # in AL05. However, this solves finding other types of references # in functions such as LATERAL FLATTEN. if not seg.is_type("table_reference"): reference_buffer += _get_object_references(seg) elif cast(ObjectReferenceSegment, seg).is_qualified(): table_reference_buffer += _get_object_references(seg) for join_clause in fc.recursive_crawl( "join_clause", no_recursive_seg_type="select_statement" ): seen_using = False for seg in join_clause.iter_segments(): if seg.is_type("keyword") and seg.raw_upper == "USING": seen_using = True elif seg.is_type("join_on_condition"): for on_seg in seg.segments: if on_seg.is_type("bracketed", "expression"): # Deal with expressions reference_buffer += _get_object_references(seg) elif seen_using and seg.is_type("bracketed"): for subseg in seg.segments: if subseg.is_type("identifier"): using_cols.append(subseg) seen_using = False return SelectStatementColumnsAndTables( select_statement=segment, table_aliases=table_aliases or [], standalone_aliases=standalone_aliases or [], reference_buffer=reference_buffer, select_targets=select_targets, col_aliases=col_aliases, using_cols=using_cols, table_reference_buffer=table_reference_buffer, ) def get_aliases_from_select( segment: BaseSegment, dialect: Optional[Dialect] = None ) -> tuple[Optional[list[AliasInfo]], Optional[list[BaseSegment]]]: """Gets the aliases referred to in the FROM clause. Returns a tuple of two lists: - Table aliases - Value table function aliases """ fc = segment.get_child("from_clause") if not fc: # If there's no from clause then just abort. 
return None, None assert isinstance(fc, (FromClauseSegment, JoinClauseSegment)) aliases = fc.get_eventual_aliases() # We only want table aliases, so filter out aliases for value table # functions, lambda parameters and pivot columns. standalone_aliases: list[BaseSegment] = [] standalone_aliases += _get_pivot_table_aliases(segment, dialect) standalone_aliases += _get_lambda_argument_columns(segment, dialect) table_aliases = [] for table_expr, alias_info in aliases: if _has_value_table_function(table_expr, dialect): if alias_info.segment and alias_info.segment not in standalone_aliases: standalone_aliases.append(alias_info.segment) elif alias_info not in table_aliases: table_aliases.append(alias_info) return table_aliases, standalone_aliases def _has_value_table_function( table_expr: BaseSegment, dialect: Optional[Dialect] ) -> bool: if not dialect: # We need the dialect to get the value table function names. If # we don't have it, assume the clause does not have a value table # function. return False # pragma: no cover for function_name in table_expr.recursive_crawl("function_name"): # Other rules can increase whitespace in the function name, so use strip to # remove # See: https://github.com/sqlfluff/sqlfluff/issues/1304 if function_name.raw.upper().strip() in dialect.sets("value_table_functions"): return True return False def _get_pivot_table_aliases( segment: BaseSegment, dialect: Optional[Dialect] ) -> list[BaseSegment]: if not dialect: # We need the dialect to get the pivot table column names. If # we don't have it, assume the clause does not have a pivot table return [] # pragma: no cover pivot_table_aliases: list[BaseSegment] = [] for fc in segment.recursive_crawl("from_pivot_expression"): for pivot_table_alias in fc.recursive_crawl( "pivot_column_reference", "table_reference" ): if pivot_table_alias.raw not in [a.raw for a in pivot_table_aliases]: pivot_table_aliases.append(pivot_table_alias) return pivot_table_aliases # Lambda arguments, # e.g. `x` and `y` in `x -> x is not null` and `(x, y) -> x + y` # are declared in-place, and are as such standalone – i.e. they do not reference # identifiers or columns that we should expect to be declared somewhere else. # These columns are interesting to identify since they can get special # treatment in some rules. def _get_lambda_argument_columns( segment: BaseSegment, dialect: Optional[Dialect] ) -> list[BaseSegment]: if not dialect or dialect.name not in [ "athena", "sparksql", "duckdb", "trino", "databricks", "snowflake", ]: # Only athena and sparksql are known to have lambda expressions, # so all other dialects will have zero lambda columns return [] lambda_argument_columns: list[BaseSegment] = [] for potential_lambda in segment.recursive_crawl("expression", "lambda_function"): potential_arrow = potential_lambda.get_child("binary_operator", "lambda_arrow") if potential_arrow and potential_arrow.raw == "->": arrow_operator = potential_arrow # The arguments will be before the arrow operator, so we get anything # that is a column reference or a set of bracketed column references before # the arrow. 
There should be exactly one segment matching this, if there are # more, this doesn't cleanly match a lambda expression argument_segments = potential_lambda.select_children( stop_seg=arrow_operator, select_if=( lambda x: x.is_type("bracketed", "column_reference", "parameter") ), ) assert len(argument_segments) == 1 child_segment = argument_segments[0] if child_segment.is_type("bracketed"): start_bracket = child_segment.get_child("start_bracket") # There will be a start bracket if it's bracketed. assert start_bracket if start_bracket.raw == "(": bracketed_arguments = child_segment.get_children( "column_reference", "parameter" ) raw_arguments = [argument for argument in bracketed_arguments] lambda_argument_columns += raw_arguments elif child_segment.is_type("column_reference", "parameter"): lambda_argument_columns.append(child_segment) return lambda_argument_columns sqlfluff-3.4.2/src/sqlfluff/utils/functional/000077500000000000000000000000001503426445100212615ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/utils/functional/__init__.py000066400000000000000000000020711503426445100233720ustar00rootroot00000000000000"""Modules in this directory provide a "functional" API for rule writing. Wikipedia defines functional programming (https://en.wikipedia.org/wiki/Functional_programming) as a declarative programming paradigm where code is built by applying and composing functions. The modules in this API provide classes and predicates for working with segments and slices. The API is loosely inspired by packages such as Pandas and Numpy. These classes provide a simpler, higher-level API for writing rules, resulting in shorter, simpler, easier-to-read code. Rules can use these classes, the lower-level classes, or a mix, but it is suggested that each rule primarily use one or the other for readability. 
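For example (a hedged sketch of typical rule code, where ``context`` is
the ``RuleContext`` passed into a rule's ``_eval`` method):

    children = FunctionalContext(context).segment.children()
    commas = children.select(select_if=sp.is_type("comma"))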
""" __all__ = ("Segments", "rsp", "sp", "tsp", "FunctionalContext") import sqlfluff.utils.functional.raw_file_slice_predicates as rsp import sqlfluff.utils.functional.segment_predicates as sp import sqlfluff.utils.functional.templated_file_slice_predicates as tsp from sqlfluff.utils.functional.context import FunctionalContext from sqlfluff.utils.functional.segments import Segments sqlfluff-3.4.2/src/sqlfluff/utils/functional/context.py000066400000000000000000000035751503426445100233310ustar00rootroot00000000000000"""Define FunctionalContext class.""" from sqlfluff.core.rules import RuleContext from sqlfluff.utils.functional.segments import Segments class FunctionalContext: """RuleContext written in a "functional" style; simplifies writing rules.""" def __init__(self, context: RuleContext): self.context = context @property def segment(self) -> "Segments": """Returns a Segments object for context.segment.""" return Segments( self.context.segment, templated_file=self.context.templated_file ) @property def parent_stack(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.parent_stack.""" return Segments( *self.context.parent_stack, templated_file=self.context.templated_file ) @property def siblings_pre(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.siblings_pre.""" return Segments( *self.context.siblings_pre, templated_file=self.context.templated_file ) @property def siblings_post(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.siblings_post.""" return Segments( *self.context.siblings_post, templated_file=self.context.templated_file ) @property def raw_stack(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.raw_stack.""" return Segments( *self.context.raw_stack, templated_file=self.context.templated_file ) @property def raw_segments(self) -> Segments: # pragma: no cover """Returns a Segments object for all the raw segments in the file.""" file_segment = self.context.parent_stack[0] return Segments( *file_segment.get_raw_segments(), templated_file=self.context.templated_file ) sqlfluff-3.4.2/src/sqlfluff/utils/functional/raw_file_slice_predicates.py000066400000000000000000000015501503426445100270060ustar00rootroot00000000000000"""Defines commonly used raw file slice predicates for rule writers. For consistency, all the predicates in this module are implemented as functions returning functions. This avoids rule writers having to remember the distinction between normal functions and functions returning functions. This is not necessarily a complete set of predicates covering all possible requirements. Rule authors can define their own predicates as needed, either as regular functions, `lambda`, etc. """ from typing import Callable from sqlfluff.core.templaters.base import RawFileSlice def is_slice_type( *slice_types: str, ) -> Callable[[RawFileSlice], bool]: """Returns a function that determines if segment is one of the types.""" def _(raw_slice: RawFileSlice) -> bool: return any(raw_slice.slice_type == slice_type for slice_type in slice_types) return _ sqlfluff-3.4.2/src/sqlfluff/utils/functional/raw_file_slices.py000066400000000000000000000043701503426445100247710ustar00rootroot00000000000000"""Surrogate class for working with RawFileSlice collections.""" from typing import Callable, Optional from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFile class RawFileSlices(tuple[RawFileSlice, ...]): """Encapsulates a sequence of one or more RawFileSlice. 
The slices may or may not be contiguous in a file. Provides useful operations on a sequence of slices to simplify rule creation. """ def __new__( cls, *raw_slices: RawFileSlice, templated_file: Optional[TemplatedFile] = None ) -> "RawFileSlices": """Override new operator.""" return super().__new__(cls, raw_slices) def __init__(self, *_: RawFileSlice, templated_file: TemplatedFile): self.templated_file = templated_file def all(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool: """Do all the raw slices match?""" for s in self: if predicate is not None and not predicate(s): return False return True def any(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool: """Do any of the raw slices match?""" for s in self: if predicate is None or predicate(s): return True return False def select( self, select_if: Optional[Callable[[RawFileSlice], bool]] = None, loop_while: Optional[Callable[[RawFileSlice], bool]] = None, start_slice: Optional[RawFileSlice] = None, stop_slice: Optional[RawFileSlice] = None, ) -> "RawFileSlices": """Retrieve range/subset. NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those slices are not included in the loop. """ start_index = self.index(start_slice) if start_slice else -1 stop_index = self.index(stop_slice) if stop_slice else len(self) buff = [] for slice_ in self[start_index + 1 : stop_index]: if loop_while is not None and not loop_while(slice_): # NOTE: This likely needs more tests. break # pragma: no cover if select_if is None or select_if(slice_): buff.append(slice_) return RawFileSlices(*buff, templated_file=self.templated_file) sqlfluff-3.4.2/src/sqlfluff/utils/functional/segment_predicates.py000066400000000000000000000132351503426445100255040ustar00rootroot00000000000000"""Defines commonly used segment predicates for rule writers. For consistency, all the predicates in this module are implemented as functions returning functions. This avoids rule writers having to remember the distinction between normal functions and functions returning functions. This is not necessarily a complete set of predicates covering all possible requirements. Rule authors can define their own predicates as needed, either as regular functions, `lambda`, etc. 
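For example (an illustrative sketch), predicates compose with ``and_`` and
can then be passed anywhere a single predicate is accepted::

    is_select_keyword = and_(is_type("keyword"), raw_upper_is("SELECT"))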
""" from typing import Callable, Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.utils.functional.raw_file_slices import RawFileSlices from sqlfluff.utils.functional.templated_file_slices import TemplatedFileSlices def raw_is(*raws: str) -> Callable[[BaseSegment], bool]: # pragma: no cover """Returns a function that determines if segment matches one of the raw inputs.""" def _(segment: BaseSegment) -> bool: return segment.raw in raws return _ def raw_upper_is(*raws: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if segment matches one of the raw inputs.""" def _(segment: BaseSegment) -> bool: return segment.raw_upper in raws return _ def is_type(*seg_type: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if segment is one of the types.""" def _(segment: BaseSegment) -> bool: return segment.is_type(*seg_type) return _ def is_keyword(*keyword_name: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if it's a matching keyword.""" return and_( is_type("keyword"), raw_upper_is(*[raw.upper() for raw in keyword_name]) ) def is_code() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is code.""" def _(segment: BaseSegment) -> bool: return segment.is_code return _ def is_comment() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is comment.""" def _(segment: BaseSegment) -> bool: return segment.is_comment return _ def is_meta() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is meta.""" def _(segment: BaseSegment) -> bool: return segment.is_meta return _ def is_raw() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is raw.""" def _(segment: BaseSegment) -> bool: return segment.is_raw() return _ def is_whitespace() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is whitespace.""" def _(segment: BaseSegment) -> bool: return segment.is_whitespace return _ def is_templated() -> Callable[[BaseSegment], bool]: # pragma: no cover """Returns a function that checks if segment is templated.""" def _(segment: BaseSegment) -> bool: return segment.is_templated return _ def get_type() -> Callable[[BaseSegment], str]: """Returns a function that gets segment type.""" def _(segment: BaseSegment) -> str: return segment.get_type() return _ def and_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes the functions and-ed together.""" def _(segment: BaseSegment) -> bool: return all(function(segment) for function in functions) return _ def or_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes the functions or-ed together.""" def _(segment: BaseSegment) -> bool: return any(function(segment) for function in functions) return _ def not_(fn: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes: not fn().""" def _(segment: BaseSegment) -> bool: return not fn(segment) return _ def raw_slices( segment: BaseSegment, templated_file: Optional[TemplatedFile], ) -> RawFileSlices: # pragma: no cover """Returns raw slices for a segment.""" if not templated_file: raise ValueError( 'raw_slices: "templated_file" parameter is required.' ) # pragma: no cover if not segment.pos_marker: raise ValueError( 'raw_slices: "segment" parameter must have pos_marker set.' 
        )  # pragma: no cover
    return RawFileSlices(
        *templated_file.raw_slices_spanning_source_slice(
            segment.pos_marker.source_slice
        ),
        templated_file=templated_file,
    )


def templated_slices(
    segment: BaseSegment,
    templated_file: Optional[TemplatedFile],
) -> TemplatedFileSlices:
    """Returns templated slices for a segment."""
    if not templated_file:
        raise ValueError(
            'templated_slices: "templated_file" parameter is required.'
        )  # pragma: no cover
    if not segment.pos_marker:
        raise ValueError(
            'templated_slices: "segment" parameter must have pos_marker set.'
        )  # pragma: no cover
    # :TRICKY: We don't use _find_slice_indices_of_templated_pos() here because
    # it treats TemplatedFileSlice.templated_slice.stop as inclusive, not
    # exclusive. Other parts of SQLFluff rely on this behaviour, but we don't
    # want it. It's easy enough to do this ourselves.
    start = segment.pos_marker.templated_slice.start
    stop = segment.pos_marker.templated_slice.stop
    templated_slices = [
        slice_
        for slice_ in templated_file.sliced_file
        if (
            stop > slice_.templated_slice.start
            and start < slice_.templated_slice.stop
        )
    ]
    return TemplatedFileSlices(*templated_slices, templated_file=templated_file)
sqlfluff-3.4.2/src/sqlfluff/utils/functional/segments.py000066400000000000000000000202341503426445100234610ustar00rootroot00000000000000
"""Surrogate class for working with Segment collections."""

from collections.abc import Iterable, Iterator
from typing import (
    Any,
    Callable,
    Optional,
    SupportsIndex,
    Union,
    overload,
)

from sqlfluff.core.parser import BaseSegment
from sqlfluff.core.templaters.base import TemplatedFile
from sqlfluff.utils.functional.raw_file_slices import RawFileSlices

PredicateType = Callable[[BaseSegment], bool]


class Segments(tuple[BaseSegment, ...]):
    """Encapsulates a sequence of one or more BaseSegments.

    The segments may or may not be contiguous in a parse tree.
    Provides useful operations on a sequence of segments to
    simplify rule creation.
    """

    def __new__(
        cls, *segments: BaseSegment, templated_file: Optional[TemplatedFile] = None
    ) -> "Segments":
        """Override new operator."""
        return super().__new__(cls, segments)

    def __init__(
        self, *_: BaseSegment, templated_file: Optional[TemplatedFile] = None
    ) -> None:
        self.templated_file = templated_file

    def __add__(self, segments_) -> "Segments":
        return Segments(
            *tuple(self).__add__(tuple(segments_)), templated_file=self.templated_file
        )

    def __radd__(self, segments_) -> "Segments":
        return Segments(
            *tuple(segments_).__add__(tuple(self)), templated_file=self.templated_file
        )

    def find(self, segment: Optional[BaseSegment]) -> int:
        """Returns index if found, -1 if not found."""
        try:
            return self.index(segment)
        except ValueError:
            return -1

    def all(self, predicate: Optional[PredicateType] = None) -> bool:
        """Do all the segments match?"""
        for s in self:
            if predicate is not None and not predicate(s):
                return False
        return True

    def any(self, predicate: Optional[PredicateType] = None) -> bool:
        """Do any of the segments match?"""
        for s in self:
            if predicate is None or predicate(s):
                return True
        return False

    def reversed(self) -> "Segments":  # pragma: no cover
        """Return the same segments in reverse order."""
        return Segments(*reversed(self), templated_file=self.templated_file)

    @property
    def raw_slices(self) -> RawFileSlices:
        """Raw slices of the segments, sorted in source file order."""
        if not self.templated_file:
            raise ValueError(
                'Segments.raw_slices: "templated_file" property is required.'
) raw_slices = set() for s in self: if s.pos_marker is None: raise ValueError( "Segments include a positionless segment" ) # pragma: no cover source_slice = s.pos_marker.source_slice raw_slices.update( self.templated_file.raw_slices_spanning_source_slice(source_slice) ) return RawFileSlices( *sorted(raw_slices, key=lambda slice_: slice_.source_idx), templated_file=self.templated_file, ) # TODO:This method isn't used as at 2022-08-10. Consider removing in future. @property def raw_segments(self) -> "Segments": # pragma: no cover """Get raw segments underlying the segments.""" raw_segments_list = [] for s in self: raw_segments_list.extend(s.raw_segments) return Segments(*raw_segments_list, templated_file=self.templated_file) def recursive_crawl_all(self) -> "Segments": # pragma: no cover """Recursively crawl all descendant segments.""" segments: list[BaseSegment] = [] for s in self: for i in s.recursive_crawl_all(): segments.append(i) return Segments(*segments, templated_file=self.templated_file) def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> "Segments": """Recursively crawl for segments of a given type.""" segments: list[BaseSegment] = [] for s in self: for i in s.recursive_crawl(*seg_type, recurse_into=recurse_into): segments.append(i) return Segments(*segments, templated_file=self.templated_file) def children( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns an object with children of the segments in this object.""" child_segments: list[BaseSegment] = [] for s in self: for child in s.segments: if predicate is None or predicate(child): child_segments.append(child) return Segments(*child_segments, templated_file=self.templated_file) def first( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns the first segment (if any) that satisfies the predicates.""" for s in self: if predicate is None or predicate(s): return Segments(s, templated_file=self.templated_file) # If no segment satisfies "predicates", return empty Segments. return Segments(templated_file=self.templated_file) def last( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns the last segment (if any) that satisfies the predicates.""" for s in reversed(self): if predicate is None or predicate(s): return Segments(s, templated_file=self.templated_file) # If no segment satisfies "predicates", return empty Segments. return Segments(templated_file=self.templated_file) def __iter__(self) -> Iterator[BaseSegment]: # pragma: no cover # Typing understand we are looping BaseSegment return super().__iter__() @overload def __getitem__(self, item: SupportsIndex) -> BaseSegment: """Individual "getting" returns a single segment. NOTE: Using `SupportsIndex` rather than `int` is to ensure type compatibility with the parent `tuple` implementation. """ @overload def __getitem__(self, item: slice) -> "Segments": """Getting a slice returns another `Segments` object.""" def __getitem__( self, item: Union[SupportsIndex, slice] ) -> Union[BaseSegment, "Segments"]: result = super().__getitem__(item) if isinstance(result, tuple): return Segments(*result, templated_file=self.templated_file) else: return result def get( self, index: int = 0, *, default: Optional[BaseSegment] = None ) -> Optional[BaseSegment]: """Return specified item. 
        Returns default if index out of range.
        """
        try:
            return self[index]
        except IndexError:
            return default

    def apply(self, fn: Callable[[BaseSegment], Any]) -> list[Any]:
        """Apply function to every item."""
        return [fn(s) for s in self]

    def select(
        self,
        select_if: Optional[PredicateType] = None,
        loop_while: Optional[PredicateType] = None,
        start_seg: Optional[BaseSegment] = None,
        stop_seg: Optional[BaseSegment] = None,
    ) -> "Segments":
        """Retrieve range/subset.

        NOTE: Iterates the segments BETWEEN start_seg and stop_seg, i.e. those
        segments are not included in the loop.
        """
        start_index = self.index(start_seg) if start_seg else -1
        stop_index = self.index(stop_seg) if stop_seg else len(self)
        buff = []
        for seg in self[start_index + 1 : stop_index]:
            if loop_while is not None and not loop_while(seg):
                break
            if select_if is None or select_if(seg):
                buff.append(seg)
        return Segments(*buff, templated_file=self.templated_file)

    def iterate_segments(
        self,
        predicate: Optional[PredicateType] = None,
    ) -> Iterable["Segments"]:
        """Loop over each element as a fresh Segments."""
        # Looping over Segments returns BaseEls,
        # which is sometimes what we want and sometimes not.
        for base_el in self:
            if predicate and not predicate(base_el):  # pragma: no cover
                continue
            yield Segments(base_el, templated_file=self.templated_file)
sqlfluff-3.4.2/src/sqlfluff/utils/functional/templated_file_slice_predicates.py000066400000000000000000000015751503426445100302030ustar00rootroot00000000000000
"""Defines commonly used templated file slice predicates for rule writers.

For consistency, all the predicates in this module are implemented as
functions returning functions. This avoids rule writers having to remember
the distinction between normal functions and functions returning functions.

This is not necessarily a complete set of predicates covering all possible
requirements. Rule authors can define their own predicates as needed, either
as regular functions, `lambda`, etc.
"""

from typing import Callable

from sqlfluff.core.templaters.base import TemplatedFileSlice


def is_slice_type(
    *slice_types: str,
) -> Callable[[TemplatedFileSlice], bool]:
    """Returns a function that determines if segment is one of the types."""

    def _(raw_slice: TemplatedFileSlice) -> bool:
        return any(raw_slice.slice_type == slice_type for slice_type in slice_types)

    return _
sqlfluff-3.4.2/src/sqlfluff/utils/functional/templated_file_slices.py000066400000000000000000000046031503426445100261560ustar00rootroot00000000000000
"""Surrogate class for working with TemplatedFileSlice collections."""

from typing import Callable, Optional

from sqlfluff.core.templaters.base import TemplatedFile, TemplatedFileSlice


class TemplatedFileSlices(tuple[TemplatedFileSlice, ...]):
    """Encapsulates a sequence of one or more TemplatedFileSlice.

    The slices may or may not be contiguous in a file.
    Provides useful operations on a sequence of slices to simplify rule
    creation.
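    For example (illustrative; assuming the conventional ``tsp`` alias for
    the templated file slice predicates), a rule might check whether any
    slice is templated::

        has_templated = slices.any(tsp.is_slice_type("templated"))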
""" def __new__( cls, *templated_slices: TemplatedFileSlice, templated_file: Optional[TemplatedFile] = None, ) -> "TemplatedFileSlices": """Override new operator.""" return super().__new__(cls, templated_slices) def __init__(self, *_: TemplatedFileSlice, templated_file: TemplatedFile) -> None: self.templated_file = templated_file def all( self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None ) -> bool: """Do all the templated slices match?""" for s in self: if predicate is not None and not predicate(s): return False return True def any( self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None ) -> bool: # pragma: no cover """Do any of the templated slices match?""" for s in self: if predicate is None or predicate(s): return True return False def select( self, select_if: Optional[Callable[[TemplatedFileSlice], bool]] = None, loop_while: Optional[Callable[[TemplatedFileSlice], bool]] = None, start_slice: Optional[TemplatedFileSlice] = None, stop_slice: Optional[TemplatedFileSlice] = None, ) -> "TemplatedFileSlices": # pragma: no cover """Retrieve range/subset. NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those slices are not included in the loop. """ start_index = self.index(start_slice) if start_slice else -1 stop_index = self.index(stop_slice) if stop_slice else len(self) buff = [] for slice_ in self[start_index + 1 : stop_index]: if loop_while is not None and not loop_while(slice_): break if select_if is None or select_if(slice_): buff.append(slice_) return TemplatedFileSlices(*buff, templated_file=self.templated_file) sqlfluff-3.4.2/src/sqlfluff/utils/identifers.py000066400000000000000000000017601503426445100216310ustar00rootroot00000000000000"""Helper utilities for identifiers. These are primarily common functions used by multiple rule bundles. Defined here to avoid duplication, but also avoid circular imports. """ from sqlfluff.core.parser import BaseSegment def identifiers_policy_applicable( policy: str, parent_stack: tuple[BaseSegment, ...] ) -> bool: """Does `(un)quoted_identifiers_policy` apply to this segment? This method is used in CP02, RF04 and RF05. """ if policy == "all": return True if policy == "none": return False is_alias = parent_stack and parent_stack[-1].is_type( "alias_expression", "column_definition", "with_compound_statement" ) if policy == "aliases" and is_alias: return True is_inside_from = any(p.is_type("from_clause") for p in parent_stack) if policy == "column_aliases" and is_alias and not is_inside_from: return True if policy == "table_aliases" and is_alias and is_inside_from: return True return False sqlfluff-3.4.2/src/sqlfluff/utils/reflow/000077500000000000000000000000001503426445100204155ustar00rootroot00000000000000sqlfluff-3.4.2/src/sqlfluff/utils/reflow/__init__.py000066400000000000000000000002051503426445100225230ustar00rootroot00000000000000"""Reflow utilities for sqlfluff rules.""" from sqlfluff.utils.reflow.sequence import ReflowSequence __all__ = ("ReflowSequence",) sqlfluff-3.4.2/src/sqlfluff/utils/reflow/config.py000066400000000000000000000174231503426445100222430ustar00rootroot00000000000000"""Methods to set up appropriate reflow config from file.""" # Until we have a proper structure this will work. # TODO: Migrate this to the config file. 
from dataclasses import dataclass from typing import AbstractSet, Any, Optional, Union from sqlfluff.core.config import FluffConfig from sqlfluff.core.helpers.string import split_comma_separated_string from sqlfluff.utils.reflow.depthmap import DepthInfo ConfigElementType = dict[str, str] ConfigDictType = dict[str, ConfigElementType] @dataclass() class BlockConfig: """Holds spacing config for a block and allows easy manipulation.""" spacing_before: str = "single" spacing_after: str = "single" spacing_within: Optional[str] = None line_position: Optional[str] = None keyword_line_position: Optional[str] = None keyword_line_position_exclusions: Union[str, list[str], None] = None def incorporate( self, before: Optional[str] = None, after: Optional[str] = None, within: Optional[str] = None, line_position: Optional[str] = None, config: Optional[ConfigElementType] = None, keyword_line_position: Optional[str] = None, keyword_line_position_exclusions: Union[str, list[str], None] = None, ) -> None: """Mutate the config based on additional information.""" config = config or {} self.spacing_before = ( before or config.get("spacing_before", None) or self.spacing_before ) self.spacing_after = ( after or config.get("spacing_after", None) or self.spacing_after ) self.spacing_within = ( within or config.get("spacing_within", None) or self.spacing_within ) self.line_position = ( line_position or config.get("line_position", None) or self.line_position ) self.keyword_line_position = ( keyword_line_position or config.get("keyword_line_position", None) or self.keyword_line_position ) self.keyword_line_position_exclusions = split_comma_separated_string( keyword_line_position_exclusions or config.get("keyword_line_position_exclusions", None) or self.keyword_line_position_exclusions or [] ) @dataclass(frozen=True) class ReflowConfig: """An interface onto the configuration of how segments should reflow. This acts as the primary translation engine between configuration held either in dicts for testing, or in the FluffConfig in live usage, and the configuration used during reflow operations. """ _config_dict: ConfigDictType config_types: set[str] # In production, these values are almost _always_ set because we # use `.from_fluff_config`, but the defaults are here to aid in # testing. tab_space_size: int = 4 indent_unit: str = " " max_line_length: int = 80 hanging_indents: bool = False skip_indentation_in: frozenset[str] = frozenset() allow_implicit_indents: bool = False trailing_comments: str = "before" ignore_comment_lines: bool = False @classmethod def from_dict(cls, config_dict: ConfigDictType, **kwargs: Any) -> "ReflowConfig": """Construct a ReflowConfig from a dict.""" config_types = set(config_dict.keys()) # Enrich any of the "align" keys with what they're aligning with. for seg_type in config_dict: for key in ("spacing_before", "spacing_after"): if config_dict[seg_type].get(key, None) == "align": new_key = "align:" + seg_type # Is there a limiter or boundary? # NOTE: A `boundary` is only applicable if `within` is present. 
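                    # For example (illustrative, mirroring the default layout
                    # config): an "alias_expression" configured with
                    # `spacing_before = align`, `align_within = select_clause`
                    # and `align_scope = bracketed` yields the key
                    # "align:alias_expression:select_clause:bracketed".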
if config_dict[seg_type].get("align_within", None): new_key += ":" + config_dict[seg_type]["align_within"] if config_dict[seg_type].get("align_scope", None): new_key += ":" + config_dict[seg_type]["align_scope"] config_dict[seg_type][key] = new_key return cls(_config_dict=config_dict, config_types=config_types, **kwargs) @classmethod def from_fluff_config(cls, config: FluffConfig) -> "ReflowConfig": """Constructs a ReflowConfig from a FluffConfig.""" return cls.from_dict( config.get_section(["layout", "type"]), indent_unit=config.get("indent_unit", ["indentation"]), tab_space_size=config.get("tab_space_size", ["indentation"]), hanging_indents=config.get("hanging_indents", ["indentation"]), max_line_length=config.get("max_line_length"), skip_indentation_in=frozenset( config.get("skip_indentation_in", ["indentation"]).split(",") ), allow_implicit_indents=config.get( "allow_implicit_indents", ["indentation"] ), trailing_comments=config.get("trailing_comments", ["indentation"]), ignore_comment_lines=config.get("ignore_comment_lines", ["indentation"]), ) def get_block_config( self, block_class_types: AbstractSet[str], depth_info: Optional[DepthInfo] = None, ) -> BlockConfig: """Given the class types of a ReflowBlock return spacing config. When fetching the config for a single class type for a simple block we should just get an appropriate simple config back. >>> cfg = ReflowConfig.from_dict({"comma": {"spacing_before": "touch"}}) >>> cfg.get_block_config({"comma"}) # doctest: +ELLIPSIS BlockConfig(spacing_before='touch', spacing_after='single', ...) """ # set intersection to get the class types which matter configured_types = self.config_types.intersection(block_class_types) # Start with a default config. block_config = BlockConfig() # Update with the config from any specific classes. # First: With the types of any parent segments where # we're at one end (if depth info provided). if depth_info: parent_start, parent_end = True, True for idx, key in enumerate(depth_info.stack_hashes[::-1]): # Work out if we're allowed to claim the parent. if depth_info.stack_positions[key].type not in ("solo", "start"): parent_start = False if depth_info.stack_positions[key].type not in ("solo", "end"): parent_end = False if not (parent_start or parent_end): break # Get corresponding classes. parent_classes = depth_info.stack_class_types[-1 - idx] configured_parent_types = self.config_types.intersection(parent_classes) # Claim the _before_ config if at the start. if parent_start: for seg_type in configured_parent_types: block_config.incorporate( before=self._config_dict[seg_type].get("spacing_before") ) # Claim the _after_ config if at the end. if parent_end: for seg_type in configured_parent_types: block_config.incorporate( after=self._config_dict[seg_type].get("spacing_after") ) # Second: With the types of the raw segment itself. # Unless someone is doing something complicated with their configuration # there should only be one. # TODO: Extend (or at least harden) this code to handle multiple # configured (and matched) types much better. 
for seg_type in configured_types: block_config.incorporate(config=self._config_dict[seg_type]) return block_config sqlfluff-3.4.2/src/sqlfluff/utils/reflow/depthmap.py000066400000000000000000000156641503426445100226050ustar00rootroot00000000000000"""The DepthMap class is an enriched sequence of raw segments.""" import logging from collections.abc import Sequence from dataclasses import dataclass from sqlfluff.core.parser import BaseSegment from sqlfluff.core.parser.segments.base import PathStep from sqlfluff.core.parser.segments.raw import RawSegment reflow_logger = logging.getLogger("sqlfluff.rules.reflow") @dataclass(frozen=True) class StackPosition: """An element of the stack_positions property of DepthInfo.""" idx: int len: int type: str @staticmethod def _stack_pos_interpreter(path_step: PathStep) -> str: """Interpret a path step for stack_positions.""" # If no code, then no. if not path_step.code_idxs: return "" # If there's only one code element, this must be it. elif len(path_step.code_idxs) == 1: return "solo" # Check for whether first or last code element. # NOTE: code_idxs is always sorted because of how it's constructed. # That means the lowest is always as the start and the highest at the end. elif path_step.idx == path_step.code_idxs[0]: return "start" elif path_step.idx == path_step.code_idxs[-1]: return "end" else: return "" # NOTE: Empty string evaluates as falsy. @classmethod def from_path_step(cls, path_step: PathStep) -> "StackPosition": """Interpret a PathStep to construct a StackPosition. The reason we don't just use the same object is partly to interpret it a little more, but also to drop the reference to a specific segment which could induce bugs at a later stage if used. """ return cls(path_step.idx, path_step.len, cls._stack_pos_interpreter(path_step)) @dataclass(frozen=True) class DepthInfo: """An object to hold the depth information for a specific raw segment.""" stack_depth: int stack_hashes: tuple[int, ...] # This is a convenience cache to speed up operations. stack_hash_set: frozenset[int] stack_class_types: tuple[frozenset[str], ...] stack_positions: dict[int, StackPosition] @classmethod def from_raw_and_stack( cls, raw: RawSegment, stack: Sequence[PathStep] ) -> "DepthInfo": """Construct from a raw and its stack.""" stack_hashes = tuple(hash(ps.segment) for ps in stack) return cls( stack_depth=len(stack), stack_hashes=stack_hashes, stack_hash_set=frozenset(stack_hashes), stack_class_types=tuple(ps.segment.class_types for ps in stack), stack_positions={ # Reuse the hash first calculated above. stack_hashes[idx]: StackPosition.from_path_step(ps) for idx, ps in enumerate(stack) }, ) def common_with(self, other: "DepthInfo") -> tuple[int, ...]: """Get the common depth and hashes with the other.""" # We use set intersection because it's faster and hashes should be unique. common_hashes = self.stack_hash_set.intersection(other.stack_hashes) # We should expect there to be _at least_ one common ancestor, because # they should share the same file segment. If that's not the case we # we should error because it's likely a bug or programming error. assert common_hashes, "DepthInfo comparison shares no common ancestor!" common_depth = len(common_hashes) return self.stack_hashes[:common_depth] def trim(self, amount: int) -> "DepthInfo": """Return a DepthInfo object with some amount trimmed.""" # Excluded from coverage: no longer triggered since AL01 rule was refactored if amount == 0: # pragma: no cover # The trivial case. 
            return self
        new_hash_set = self.stack_hash_set.difference(self.stack_hashes[-amount:])
        return self.__class__(
            stack_depth=self.stack_depth - amount,
            stack_hashes=self.stack_hashes[:-amount],
            stack_hash_set=new_hash_set,
            stack_class_types=self.stack_class_types[:-amount],
            stack_positions={
                k: v for k, v in self.stack_positions.items() if k in new_hash_set
            },
        )


class DepthMap:
    """A mapping of raw segments to depth and parent information.

    This class addresses two needs:

    - To understand the configuration of segments which contain no whitespace,
      so that config can be related to the parent rather than the segment
      itself.
    - To map the depth of indent points, to apply some precedence for where
      to insert line breaks.

    The internals are structured around a list to do lookups and a dict
    (keyed with the raw segment UUID) to hold the rest.
    """

    def __init__(self, raws_with_stack: Sequence[tuple[RawSegment, list[PathStep]]]):
        self.depth_info = {}
        for raw, stack in raws_with_stack:
            self.depth_info[raw.uuid] = DepthInfo.from_raw_and_stack(raw, stack)

    @classmethod
    def from_parent(cls: type["DepthMap"], parent: BaseSegment) -> "DepthMap":
        """Generate a DepthMap from all the children of a segment.

        NOTE: This is the most efficient way to construct a DepthMap
        due to caching in the BaseSegment.
        """
        return cls(raws_with_stack=parent.raw_segments_with_ancestors)

    @classmethod
    def from_raws_and_root(
        cls: type["DepthMap"],
        raw_segments: Sequence[RawSegment],
        root_segment: BaseSegment,
    ) -> "DepthMap":
        """Generate a DepthMap from a sequence of raws and a root.

        NOTE: This is the less efficient way to construct a DepthMap as it
        doesn't take advantage of caching in the same way as `from_parent`.
        """
        buff = []
        for raw in raw_segments:
            stack = root_segment.path_to(raw)
            buff.append((raw, stack))
        return cls(raws_with_stack=buff)

    def get_depth_info(self, raw: RawSegment) -> DepthInfo:
        """Get the depth info for a given segment."""
        try:
            return self.depth_info[raw.uuid]
        except KeyError as err:  # pragma: no cover
            reflow_logger.exception("Available UUIDS: %s", self.depth_info.keys())
            raise KeyError(
                "Tried to get depth info for unknown "
                f"segment {raw} with UUID {raw.uuid}"
            ) from err

    def copy_depth_info(
        self, anchor: RawSegment, new_segment: RawSegment, trim: int = 0
    ) -> None:
        """Copy the depth info for one segment and apply to another.

        This mutates the existing depth map. That's ok because it's an
        idempotent operation and uuids should be unique.

        This is used in edits to a reflow sequence when new segments are
        inserted and can't infer their own depth info.

        NOTE: we don't remove the old one because it causes no harm.
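        For example (illustrative usage), after constructing a replacement
        segment for an existing ``anchor``::

            depth_map.copy_depth_info(anchor, new_seg)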
""" self.depth_info[new_segment.uuid] = self.get_depth_info(anchor).trim(trim) sqlfluff-3.4.2/src/sqlfluff/utils/reflow/elements.py000066400000000000000000001050541503426445100226100ustar00rootroot00000000000000"""Dataclasses for reflow work.""" import logging from collections.abc import Sequence from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union, cast from sqlfluff.core.helpers.slice import slice_overlaps from sqlfluff.core.parser import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, Indent, NewlineSegment, RawSegment, SourceFix, TemplateSegment, WhitespaceSegment, ) from sqlfluff.core.rules import LintFix, LintResult from sqlfluff.utils.reflow.config import ReflowConfig from sqlfluff.utils.reflow.depthmap import DepthInfo # Respace Algorithms from sqlfluff.utils.reflow.respace import ( determine_constraints, handle_respace__inline_with_space, handle_respace__inline_without_space, process_spacing, ) # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def get_consumed_whitespace(segment: Optional[RawSegment]) -> Optional[str]: """A helper function to extract possible consumed whitespace. Args: segment (:obj:`RawSegment`, optional): A segment to test for suitability and extract the source representation of if appropriate. If passed None, then returns None. Returns: Returns the :code:`source_str` if the segment is of type :code:`placeholder` and has a :code:`block_type` of :code:`literal`. Otherwise None. """ if not segment or not segment.is_type("placeholder"): return None placeholder = cast(TemplateSegment, segment) if placeholder.block_type != "literal": return None return placeholder.source_str @dataclass(frozen=True) class ReflowElement: """Base reflow element class.""" segments: tuple[RawSegment, ...] @staticmethod def _class_types(segments: Sequence[RawSegment]) -> set[str]: return set(chain.from_iterable(seg.class_types for seg in segments)) @property def class_types(self) -> set[str]: """Get the set of contained class types. Parallel to `BaseSegment.class_types` """ return self._class_types(self.segments) @property def raw(self) -> str: """Get the current raw representation.""" return "".join(seg.raw for seg in self.segments) @property def pos_marker(self) -> Optional[PositionMarker]: """Get the first position marker of the element.""" for seg in self.segments: if seg.pos_marker: return seg.pos_marker return None def num_newlines(self) -> int: """Return the number of newlines in this element. These newlines are either newline segments or contained within consumed sections of whitespace. This counts both. """ return sum( bool("newline" in seg.class_types) + (get_consumed_whitespace(seg) or "").count("\n") for seg in self.segments ) def is_all_unrendered(self) -> bool: """Return whether this element is all unrendered. Returns True if contains only whitespace, indents, template loops or placeholders. Note: * ReflowBlocks will contain the placeholders and loops * ReflowPoints will contain whitespace, indents and newlines. """ for seg in self.segments: if not seg.is_type( "whitespace", "placeholder", "newline", "indent", "template_loop" ): return False return True @dataclass(frozen=True) class ReflowBlock(ReflowElement): """Class for keeping track of elements to reflow. 
    This class, and its sibling :obj:`ReflowPoint`, should not normally
    be manipulated directly by rules, but instead should be manipulated
    using :obj:`ReflowSequence`.

    It holds segments to reflow and also exposes configuration regarding
    how they are expected to reflow around others. Typically it holds only
    a single element, which is usually code or a templated element.

    Because reflow operations control spacing, it would be very unusual
    for this object to be modified; as such it exposes relatively few
    methods.

    The attributes exposed are designed to be "post configuration", i.e.
    they should reflect configuration appropriately.
    """

    #: Desired spacing before this block.
    #: See :ref:`layoutspacingconfig`
    spacing_before: str
    #: Desired spacing after this block.
    #: See :ref:`layoutspacingconfig`
    spacing_after: str
    #: Desired line position for this block.
    #: See :ref:`layoutspacingconfig`
    line_position: Optional[str]
    #: Metadata on the depth of this segment within the parse tree
    #: which is used in inferring how and where line breaks should
    #: exist.
    depth_info: DepthInfo
    #: Desired spacing configurations for parent segments
    #: of the segment in this block.
    #: See :ref:`layoutspacingconfig`
    stack_spacing_configs: dict[int, str]
    #: Desired line position configurations for parent segments
    #: of the segment in this block.
    #: See :ref:`layoutspacingconfig`
    line_position_configs: dict[int, str]
    #: Desired line position for this block's keywords.
    #: See :ref:`layoutspacingconfig`
    keyword_line_position: Optional[str]
    #: Desired keyword line position configurations for parent segments
    #: of the segment in this block.
    #: See :ref:`layoutspacingconfig`
    keyword_line_position_configs: dict[int, str]
    #: Parent segment types to which this block's keyword line
    #: positioning should not apply.
    #: See :ref:`layoutspacingconfig`
    keyword_line_position_exclusions: Union[str, list[str], None]
    #: Exclusion configurations from parent segments of the segment in
    #: this block, i.e. parent types to which keyword line positioning
    #: should not apply.
    #: See :ref:`layoutspacingconfig`
    keyword_line_position_exclusions_configs: dict[int, Union[str, list[str]]]

    @classmethod
    def from_config(
        cls: type["ReflowBlock"],
        segments: tuple[RawSegment, ...],
        config: ReflowConfig,
        depth_info: DepthInfo,
    ) -> "ReflowBlock":
        """Construct a ReflowBlock while extracting relevant configuration.

        This is the primary route to construct a ReflowBlock, as it allows
        all of the inference of the spacing and position configuration from
        the segments it contains and the appropriate config objects.
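        As a rough sketch (the variable names here are illustrative
        assumptions rather than fixed API), construction typically looks
        like::

            depth_map = DepthMap.from_parent(root_segment)
            block = ReflowBlock.from_config(
                segments=(raw_seg,),
                config=ReflowConfig.from_fluff_config(fluff_config),
                depth_info=depth_map.get_depth_info(raw_seg),
            )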
""" block_config = config.get_block_config(cls._class_types(segments), depth_info) stack_spacing_configs = {} line_position_configs = {} keyword_line_position_configs = {} keyword_line_position_exclusions_configs = {} for hash, class_types in zip( depth_info.stack_hashes, depth_info.stack_class_types ): cfg = config.get_block_config(class_types) if cfg.spacing_within: stack_spacing_configs[hash] = cfg.spacing_within if cfg.line_position: line_position_configs[hash] = cfg.line_position if cfg.keyword_line_position: keyword_line_position_configs[hash] = cfg.keyword_line_position if cfg.keyword_line_position_exclusions: keyword_line_position_exclusions_configs[hash] = ( cfg.keyword_line_position_exclusions ) return cls( segments=segments, spacing_before=block_config.spacing_before, spacing_after=block_config.spacing_after, line_position=block_config.line_position, depth_info=depth_info, stack_spacing_configs=stack_spacing_configs, line_position_configs=line_position_configs, keyword_line_position=block_config.keyword_line_position, keyword_line_position_configs=keyword_line_position_configs, keyword_line_position_exclusions=( block_config.keyword_line_position_exclusions ), keyword_line_position_exclusions_configs=( keyword_line_position_exclusions_configs ), ) def _indent_description(indent: str) -> str: """Construct a human readable description of the indent. NOTE: We operate assuming that the "correct" indent is never a mix of tabs and spaces. That means if the provided indent *does* contain both that this description is likely a case where we are matching a pre-existing indent, and can assume that the *description* of that indent is non-critical. To handle that situation gracefully we just return "Mixed Indent". See: https://github.com/sqlfluff/sqlfluff/issues/4255 """ if indent == "": return "no indent" elif " " in indent and "\t" in indent: return "mixed indent" elif indent[0] == " ": assert all(c == " " for c in indent) return f"indent of {len(indent)} spaces" elif indent[0] == "\t": # pragma: no cover assert all(c == "\t" for c in indent) return f"indent of {len(indent)} tabs" else: # pragma: no cover raise NotImplementedError(f"Invalid indent construction: {indent!r}") @dataclass(frozen=True) class IndentStats: """Dataclass to hold summary of indents in a point. Attributes: impulse (int): The net change when summing the impulses of all the consecutive indent or dedent segments in a point. trough (int): The lowest point reached when summing the impulses (in order) of all the consecutive indent or dedent segments in a point. implicit_indents (tuple of int): The indent balance corresponding to any detected (and enabled) implicit indents. This follows the usual convention that indents are identified by their "uphill" side. A positive indent is identified by the indent balance _after_ and a negative indent is identified by the indent balance _before_. """ impulse: int trough: int # Defaults to an empty tuple if unset. implicit_indents: tuple[int, ...] = () @classmethod def from_combination( cls, first: Optional["IndentStats"], second: "IndentStats" ) -> "IndentStats": """Create IndentStats from two consecutive IndentStats. This is mostly used for combining the effects of indent and dedent tokens either side of a comment. NOTE: The *first* is considered optional, because if we're calling this function, we're assuming that there's always a second. """ # First check for the trivial case that we only have one. if not first: return second # Otherwise, combine the two into one. 
return cls( first.impulse + second.impulse, min(first.trough, first.impulse + second.trough), second.implicit_indents, ) @dataclass(frozen=True, init=False) class ReflowPoint(ReflowElement): """Class for keeping track of editable elements in reflow. This class, and its sibling :obj:`ReflowBlock`, should not normally be manipulated directly by rules, but instead should be manipulated using :obj:`ReflowSequence`. It holds segments which can be changed during a reflow operation such as whitespace and newlines.It may also contain :obj:`Indent` and :obj:`Dedent` elements. It holds no configuration and is influenced by the blocks on either side, so that any operations on it usually have that configuration passed in as required. """ _stats: IndentStats = field(init=False) def __init__(self, segments: tuple[RawSegment, ...]): """Override the init method to calculate indent stats.""" object.__setattr__(self, "segments", segments) object.__setattr__(self, "_stats", self._generate_indent_stats(segments)) def _get_indent_segment(self) -> Optional[RawSegment]: """Get the current indent segment (if there). NOTE: This only returns _untemplated_ indents. If templated newline or whitespace segments are found they are skipped. """ indent: Optional[RawSegment] = None for seg in reversed(self.segments): if seg.pos_marker and not seg.pos_marker.is_literal(): # Skip any templated elements. # NOTE: It must _have_ a position marker at this # point however to take this route. A segment # without a position marker at all, is an edit # or insertion, and so should still be considered. continue elif seg.is_type("newline"): return indent elif seg.is_type("whitespace"): indent = seg elif "\n" in (get_consumed_whitespace(seg) or ""): # Consumed whitespace case. # NOTE: In this situation, we're not looking for # separate newline and indent segments, we're # making the assumption that they'll be together # which I think is a safe one for now. return seg # i.e. if we never find a newline, it's not an indent. return None def get_indent(self) -> Optional[str]: """Get the current indent (if there).""" # If no newlines, it's not an indent. Return None. if not self.num_newlines(): return None # If there are newlines but no indent segment. Return "". seg = self._get_indent_segment() consumed_whitespace = get_consumed_whitespace(seg) if consumed_whitespace: # pragma: no cover # Return last bit after newline. # NOTE: Not tested, because usually this would happen # directly via _get_indent_segment. return consumed_whitespace.split("\n")[-1] return seg.raw if seg else "" def get_indent_segment_vals(self, exclude_block_indents=False) -> list[int]: """Iterate through any indent segments and extract their values.""" values = [] for seg in self.segments: if seg.is_type("indent"): indent_seg = cast(Indent, seg) if exclude_block_indents and indent_seg.block_uuid: continue values.append(indent_seg.indent_val) return values @staticmethod def _generate_indent_stats( segments: Sequence[RawSegment], ) -> IndentStats: """Generate the change in intended indent balance. This is the main logic which powers .get_indent_impulse() """ trough = 0 running_sum = 0 implicit_indents = [] for seg in segments: if seg.is_type("indent"): indent_seg = cast(Indent, seg) running_sum += indent_seg.indent_val # Do we need to add a new implicit indent? if indent_seg.is_implicit: implicit_indents.append(running_sum) # NOTE: We don't check for removal of implicit indents # because it's unlikely that one would be opened, and then # closed within the same point. 
That would probably be the # sign of a bug in the dialect. if running_sum < trough: trough = running_sum return IndentStats(running_sum, trough, tuple(implicit_indents)) def get_indent_impulse(self) -> IndentStats: """Get the change in intended indent balance from this point.""" return self._stats def indent_to( self, desired_indent: str, after: Optional[BaseSegment] = None, before: Optional[BaseSegment] = None, description: Optional[str] = None, source: Optional[str] = None, ) -> tuple[list[LintResult], "ReflowPoint"]: """Coerce a point to have a particular indent. If the point currently contains no newlines, one will be introduced and any trailing whitespace will be effectively removed. More specifically, the newline is *inserted before* the existing whitespace, with the new indent being a *replacement* for that same whitespace. For placeholder newlines or indents we generate appropriate source fixes. """ assert "\n" not in desired_indent, "Newline found in desired indent." # Get the indent (or in the case of no newline, the last whitespace) indent_seg = self._get_indent_segment() reflow_logger.debug( "Coercing indent %s to %r. (newlines: %s)", indent_seg, desired_indent, self.num_newlines(), ) if indent_seg and indent_seg.is_type("placeholder"): # Handle the placeholder case. indent_seg = cast(TemplateSegment, indent_seg) # There should always be a newline, so assert that. assert "\n" in indent_seg.source_str # We should always replace the section _containing_ the # newline, rather than just bluntly inserting. This # makes slicing later easier. current_indent = indent_seg.source_str.split("\n")[-1] source_slice = slice( indent_seg.pos_marker.source_slice.stop - len(current_indent), indent_seg.pos_marker.source_slice.stop, ) for existing_source_fix in indent_seg.source_fixes: # pragma: no cover if slice_overlaps(existing_source_fix.source_slice, source_slice): reflow_logger.warning( "Creating overlapping source fix. Results may be " "unpredictable and this might be a sign of a bug. " "Please report this along with your query.\n" f"({existing_source_fix.source_slice} overlaps " f"{source_slice})" ) new_source_fix = SourceFix( desired_indent, source_slice, # The templated slice is going to be a zero slice _anyway_. indent_seg.pos_marker.templated_slice, ) if new_source_fix in indent_seg.source_fixes: # pragma: no cover # NOTE: If we're trying to reapply the same fix, don't. # Just return an error without the fixes. This is probably # a bug if we're taking this route, but this clause will help # catch bugs faster if they occur. reflow_logger.warning( "Attempted to apply a duplicate source fix to %r. " "Returning this time without fix.", indent_seg.pos_marker.source_str(), ) fixes = [] new_segments = self.segments else: if current_indent: new_source_str = ( indent_seg.source_str[: -len(current_indent)] + desired_indent ) else: new_source_str = indent_seg.source_str + desired_indent assert "\n" in new_source_str new_placeholder = indent_seg.edit( source_fixes=[new_source_fix], source_str=new_source_str, ) fixes = [LintFix.replace(indent_seg, [new_placeholder])] new_segments = tuple( new_placeholder if seg is indent_seg else seg for seg in self.segments ) return [ LintResult( indent_seg, fixes, description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint(new_segments) elif self.num_newlines(): # There is already a newline. Is there an indent? if indent_seg: # Coerce existing indent to desired. 
if indent_seg.raw == desired_indent: # Trivial case. Indent already correct return [], self elif desired_indent == "": idx = self.segments.index(indent_seg) return [ LintResult( indent_seg, # Coerce to no indent. We don't want the indent. Delete it. [LintFix.delete(indent_seg)], description=description or "Line should not be indented.", source=source, ) ], ReflowPoint(self.segments[:idx] + self.segments[idx + 1 :]) # Standard case of an indent change. new_indent = indent_seg.edit(desired_indent) idx = self.segments.index(indent_seg) return [ LintResult( indent_seg, [LintFix.replace(indent_seg, [new_indent])], description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint( self.segments[:idx] + (new_indent,) + self.segments[idx + 1 :] ) else: # There is a newline, but no indent. Make one after the newline # Find the index of the last newline (there _will_ be one because # we checked self.num_newlines() above). # Before going further, check we have a non-zero indent. if not desired_indent: # We're trying to coerce a non-existent indent to zero. This # means we're already ok. return [], self for idx in range(len(self.segments) - 1, -1, -1): # NOTE: Must be a _literal_ newline, not a templated one. # https://github.com/sqlfluff/sqlfluff/issues/4367 if self.segments[idx].is_type("newline"): if self.segments[idx].pos_marker.is_literal(): break new_indent = WhitespaceSegment(desired_indent) return [ LintResult( # The anchor for the *result* should be the segment # *after* the newline, otherwise the location of the fix # is confusing. # For this method, `before` is optional, but normally # passed. If it is there, use that as the anchor # instead. We fall back to the last newline if not. before if before else self.segments[idx], # Rather than doing a `create_after` here, we're # going to do a replace. This is effectively to give a hint # to the linter that this is safe to do before a templated # placeholder. This solves some potential bugs - although # it feels a bit like a workaround. [ LintFix.replace( self.segments[idx], [self.segments[idx], new_indent] ) ], description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint( self.segments[: idx + 1] + (new_indent,) + self.segments[idx + 1 :] ) else: # There isn't currently a newline. new_newline = NewlineSegment() new_segs: list[RawSegment] # Check for whitespace ws_seg = None for seg in self.segments[::-1]: if seg.is_type("whitespace"): ws_seg = seg if not ws_seg: # Work out the new segments. Always a newline, only whitespace if # there's a non zero indent. new_segs = [new_newline] + ( [WhitespaceSegment(desired_indent)] if desired_indent else [] ) # There isn't a whitespace segment either. We need to insert one. # Do we have an anchor? if not before and not after: # pragma: no cover raise NotImplementedError( "Not set up to handle empty points in this " "scenario without provided before/after " f"anchor: {self.segments}" ) # Otherwise make a new indent, attached to the relevant anchor. # Prefer anchoring before because it makes the labelling better. elif before: before_raw = ( cast(TemplateSegment, before).source_str if before.is_type("placeholder") else before.raw ) fix = LintFix.create_before(before, new_segs) description = description or ( "Expected line break and " f"{_indent_description(desired_indent)} " f"before {before_raw!r}." 
) else: assert after # mypy hint after_raw = ( cast(TemplateSegment, after).source_str if after.is_type("placeholder") else after.raw ) fix = LintFix.create_after(after, new_segs) description = description or ( "Expected line break and " f"{_indent_description(desired_indent)} " f"after {after_raw!r}." ) new_point = ReflowPoint(tuple(new_segs)) anchor = before else: # There is whitespace. Coerce it to the right indent and add # a newline _before_. In the edge case that we're coercing to # _no indent_, edit existing indent to be the newline and leave # it there. if desired_indent == "": new_segs = [new_newline] else: new_segs = [new_newline, ws_seg.edit(desired_indent)] idx = self.segments.index(ws_seg) if not description: # Prefer before, because it makes the anchoring better. if before: description = ( "Expected line break and " f"{_indent_description(desired_indent)} " f"before {before.raw!r}." ) elif after: description = ( "Expected line break and " f"{_indent_description(desired_indent)} " f"after {after.raw!r}." ) else: # pragma: no cover # NOTE: Doesn't have test coverage because there's # normally an `after` or `before` value, so this # clause is unused. description = ( "Expected line break and " f"{_indent_description(desired_indent)}." ) fix = LintFix.replace(ws_seg, new_segs) new_point = ReflowPoint( self.segments[:idx] + tuple(new_segs) + self.segments[idx + 1 :] ) anchor = ws_seg return [ LintResult(anchor, fixes=[fix], description=description, source=source) ], new_point def respace_point( self, prev_block: Optional[ReflowBlock], next_block: Optional[ReflowBlock], root_segment: BaseSegment, lint_results: list[LintResult], strip_newlines: bool = False, anchor_on: str = "before", ) -> tuple[list[LintResult], "ReflowPoint"]: """Respace a point based on given constraints. NB: This effectively includes trailing whitespace fixes. Deletion and edit fixes are generated immediately, but creations are paused to the end and done in bulk so as not to generate conflicts. Note that the `strip_newlines` functionality exists here as a slight exception to pure respacing, but as a very simple case of positioning line breaks. The default operation of `respace` does not enable it, however it exists as a convenience for rules which wish to use it. """ existing_results = lint_results[:] pre_constraint, post_constraint, strip_newlines = determine_constraints( prev_block, next_block, strip_newlines ) reflow_logger.debug("* Respacing: %r @ %s", self.raw, self.pos_marker) # The buffer is used to create the new reflow point to return segment_buffer, last_whitespace, new_results = process_spacing( list(self.segments), strip_newlines ) # Check for final trailing whitespace (which otherwise looks like an indent). if next_block and "end_of_file" in next_block.class_types and last_whitespace: new_results.append( LintResult( last_whitespace, [LintFix.delete(last_whitespace)], description="Unnecessary trailing whitespace at end of file.", ) ) segment_buffer.remove(last_whitespace) last_whitespace = None # Is there a newline? # NOTE: We do this based on the segment buffer rather than self.class_types # because we may have just removed any present newlines in the buffer. if ( any(seg.is_type("newline") for seg in segment_buffer) and not strip_newlines ) or (next_block and "end_of_file" in next_block.class_types): # Most of this section should be handled as _Indentation_. # BUT: There is one case we should handle here. 
# If we find that the last whitespace has a newline # before it, and the position markers imply there was # a removal between them, then remove the whitespace. # This ensures a consistent indent. if last_whitespace: ws_idx = self.segments.index(last_whitespace) if ws_idx > 0: # NOTE: Iterate by index so that we don't slice the full range. for prev_seg_idx in range(ws_idx - 1, -1, -1): prev_seg = self.segments[prev_seg_idx] # Skip past any indents if not prev_seg.is_type("indent"): break if ( prev_seg.is_type("newline") # Not just unequal. Must be actively _before_. # NOTE: Based on working locations and prev_seg.get_end_loc() < last_whitespace.get_start_loc() ): # pragma: no cover # Excluded from coverage: no longer triggered since AL01 rule # was refactored reflow_logger.debug( " Removing non-contiguous whitespace post removal." ) segment_buffer.remove(last_whitespace) # Ideally we should attach to an existing result. # To do that effectively, we should look for the removed # segment in the existing results. temp_idx = last_whitespace.pos_marker.templated_slice.start for res in existing_results: if ( res.anchor and res.anchor.pos_marker and res.anchor.pos_marker.templated_slice.stop == temp_idx ): break else: # pragma: no cover raise NotImplementedError("Could not find removal result.") existing_results.remove(res) new_results.append( LintResult( res.anchor, fixes=res.fixes + [LintFix("delete", last_whitespace)], description=res.description, ) ) # Return the results. return existing_results + new_results, ReflowPoint(tuple(segment_buffer)) # Otherwise is this an inline case? (i.e. no newline) reflow_logger.debug( " Inline case. Constraints: %s <-> %s.", pre_constraint, post_constraint, ) # Do we at least have _some_ whitespace? if last_whitespace: # We do - is it the right size? segment_buffer, results = handle_respace__inline_with_space( pre_constraint, post_constraint, prev_block, next_block, root_segment, segment_buffer, last_whitespace, ) new_results.extend(results) else: # No. Should we insert some? # NOTE: This method operates on the existing fix buffer. segment_buffer, new_results, edited = handle_respace__inline_without_space( pre_constraint, post_constraint, prev_block, next_block, segment_buffer, existing_results + new_results, anchor_on=anchor_on, ) existing_results = [] if edited: reflow_logger.debug(" Modified result buffer: %s", new_results) # Only log if we actually made a change. if new_results: reflow_logger.debug(" New Results: %s", new_results) return existing_results + new_results, ReflowPoint(tuple(segment_buffer)) ReflowSequenceType = list[Union[ReflowBlock, ReflowPoint]] sqlfluff-3.4.2/src/sqlfluff/utils/reflow/helpers.py000066400000000000000000000036751503426445100224440ustar00rootroot00000000000000"""Helper utilities for reflow.""" import logging from collections.abc import Iterable from itertools import chain from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.rules import LintFix, LintResult # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. 
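#
# As a minimal sketch of how a user might surface these logs (illustrative
# only, not part of the library itself):
#
#     import logging
#     logging.basicConfig()
#     logging.getLogger("sqlfluff.rules.reflow").setLevel(logging.DEBUG)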
reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def fixes_from_results(results: Iterable[LintResult]) -> list[LintFix]: """Return a list of fixes from an iterable of LintResult.""" return list(chain.from_iterable(result.fixes for result in results)) def pretty_segment_name(segment: BaseSegment) -> str: """Get a nicely formatted name of the segment.""" if segment.is_type("symbol"): # In a symbol reference, show the raw value and type. # (With underscores as spaces) return segment.get_type().replace("_", " ") + f" {segment.raw!r}" elif segment.is_type("keyword"): # Reference keywords as keywords. return f"{segment.raw!r} keyword" else: # Reference other segments just by their type. # (With underscores as spaces) return segment.get_type().replace("_", " ") def deduce_line_indent(raw_segment: RawSegment, root_segment: BaseSegment) -> str: """Given a raw segment, deduce the indent of its line.""" seg_idx = root_segment.raw_segments.index(raw_segment) indent_seg = None # Use range and a lookup here because it's more efficient than slicing # as we only need a subset of the long series. for idx in range(seg_idx, -1, -1): seg = root_segment.raw_segments[idx] if seg.is_code: indent_seg = None elif seg.is_type("whitespace"): indent_seg = seg elif seg.is_type("newline"): break reflow_logger.debug("Deduced indent for %s as %s", raw_segment, indent_seg) return indent_seg.raw if indent_seg else "" sqlfluff-3.4.2/src/sqlfluff/utils/reflow/rebreak.py000066400000000000000000001101661503426445100224070ustar00rootroot00000000000000"""Static methods to support ReflowSequence.rebreak().""" import logging from dataclasses import dataclass from typing import cast from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.rules import LintFix, LintResult from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint, ReflowSequenceType from sqlfluff.utils.reflow.helpers import ( deduce_line_indent, fixes_from_results, pretty_segment_name, ) # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") @dataclass(frozen=True) class _RebreakSpan: """A location within a sequence to consider rebreaking.""" target: BaseSegment start_idx: int end_idx: int line_position: str strict: bool @dataclass(frozen=True) class _RebreakIndices: """Indices of points for a _RebreakLocation.""" dir: int adj_pt_idx: int newline_pt_idx: int pre_code_pt_idx: int @classmethod def from_elements( cls: type["_RebreakIndices"], elements: ReflowSequenceType, start_idx: int, dir: int, ) -> "_RebreakIndices": """Iterate through the elements to deduce important point indices.""" assert dir in (1, -1), "Direction must be a unit direction (i.e. 1 or -1)." # Limit depends on the direction limit = 0 if dir == -1 else len(elements) # The adjacent point is just the next one. adj_point_idx = start_idx + dir # The newline point is next. We hop in 2s because we're checking # only points, which alternate with blocks. for newline_point_idx in range(adj_point_idx, limit, 2 * dir): if "newline" in elements[newline_point_idx].class_types or any( seg.is_code for seg in elements[newline_point_idx + dir].segments ): break # Finally we look for the point preceding the next code element. 
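        # (Illustrative note on the layout assumed here: a reflow sequence
        # alternates points and blocks,
        #     [..., point, block, point, block, ...]
        # so stepping by 2 from a point index only ever visits points, and
        # `idx + dir` from a point index always lands on a block.)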
for pre_code_point_idx in range(newline_point_idx, limit, 2 * dir): if any(seg.is_code for seg in elements[pre_code_point_idx + dir].segments): break return cls(dir, adj_point_idx, newline_point_idx, pre_code_point_idx) @dataclass(frozen=True) class _RebreakLocation: """A location within a sequence to rebreak, with metadata.""" target: BaseSegment prev: _RebreakIndices next: _RebreakIndices line_position: str strict: bool @classmethod def from_span( cls: type["_RebreakLocation"], span: _RebreakSpan, elements: ReflowSequenceType ) -> "_RebreakLocation": """Expand a span to a location.""" return cls( span.target, _RebreakIndices.from_elements(elements, span.start_idx, -1), _RebreakIndices.from_elements(elements, span.end_idx, 1), span.line_position, span.strict, ) def pretty_target_name(self) -> str: """Get a nicely formatted name of the target.""" return pretty_segment_name(self.target) def has_templated_newline(self, elements: ReflowSequenceType) -> bool: """Is either side a templated newline? If either side has a templated newline, then that's ok too. The intent here is that if the next newline is a _templated_ one, then in the source there will be a tag ({{ tag }}), which acts like _not having a newline_. """ # Check the _last_ newline of the previous point. # Slice backward to search in reverse. for seg in elements[self.prev.newline_pt_idx].segments[::-1]: if seg.is_type("newline"): if not seg.pos_marker.is_literal(): return True break # Check the _first_ newline of the next point. for seg in elements[self.next.newline_pt_idx].segments: if seg.is_type("newline"): if not seg.pos_marker.is_literal(): return True break return False def has_inappropriate_newlines( self, elements: ReflowSequenceType, strict: bool = False ) -> bool: """Is the span surrounded by one (but not two) line breaks? Args: elements: The elements of the ReflowSequence this element is taken from to allow comparison. strict (:obj:`bool`): If set to true, this will not allow the case where there aren't newlines on either side. """ # Here we use the newline index, not # just the adjacent point, so that we can see past comments. n_prev_newlines = elements[self.prev.newline_pt_idx].num_newlines() n_next_newlines = elements[self.next.newline_pt_idx].num_newlines() newlines_on_neither_side = n_prev_newlines + n_next_newlines == 0 newlines_on_both_sides = n_prev_newlines > 0 and n_next_newlines > 0 return ( # If there isn't a newline on either side then carry # on, unless it's strict. (newlines_on_neither_side and not strict) # If there is a newline on BOTH sides. That's ok. or newlines_on_both_sides ) def first_create_anchor( elem_buff: ReflowSequenceType, loc_range: range ) -> tuple[RawSegment, ...]: """Handle the potential case of an empty point with the next point with segments. While a reflow element's segments are empty, search for the next available element with segments to anchor new element creation. """ # https://github.com/sqlfluff/sqlfluff/issues/4184 try: create_anchor = next( elem_buff[i].segments for i in loc_range if elem_buff[i].segments ) except StopIteration as exc: # pragma: no cover # NOTE: We don't test this because we *should* always find # _something_ to anchor the creation on, even if we're # unlucky enough not to find it on the first pass. raise NotImplementedError("Could not find anchor for creation.") from exc return create_anchor def identify_rebreak_spans( element_buffer: ReflowSequenceType, root_segment: BaseSegment ) -> list[_RebreakSpan]: """Identify areas in file to rebreak. 
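
    As a purely illustrative example (the indices and target here are
    hypothetical), a comma configured with `line_position = leading` would
    be captured as a single-block span:

        _RebreakSpan(
            target=comma_segment,
            start_idx=6,
            end_idx=6,
            line_position="leading",
            strict=False,
        )
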
A span here is a block, or group of blocks which have explicit configs for their line position, either directly as raw segments themselves or by virtue of one of their parent segments. """ spans: list[_RebreakSpan] = [] # We'll need at least two elements each side, so constrain # our range accordingly. for idx in range(2, len(element_buffer) - 2): # Only evaluate blocks: elem = element_buffer[idx] # Only evaluate blocks if not isinstance(elem, ReflowBlock): continue # Does the element itself have config? (The easy case) if elem.line_position: # We should check whether this is a valid place to break based # on whether it's in a templated tag. If it's not a literal, then skip # it. # TODO: We probably only care if the side of the element that we would # break at (i.e. the start if it's `leading` or the end if it's # `trailing`), but we'll go with the blunt logic for simplicity first. if not elem.segments[0].pos_marker.is_literal(): reflow_logger.debug( " ! Skipping rebreak span on %s because " "non-literal location.", elem.segments[0], ) continue # Blocks should only have one segment so it's easy to pick it. spans.append( _RebreakSpan( elem.segments[0], idx, idx, # NOTE: this isn't pretty but until it needs to be more # complex, this works. elem.line_position.split(":")[0], elem.line_position.endswith("strict"), ) ) # Do any of its parents have config, and are we at the start # of them? for key in elem.line_position_configs.keys(): # If we're not at the start of the segment, then pass. if elem.depth_info.stack_positions[key].idx != 0: continue # Can we find the end? # NOTE: It's safe to look right to the end here rather than up to # -2 because we're going to end up stepping back by two in the # complicated cases. for end_idx in range(idx, len(element_buffer)): end_elem = element_buffer[end_idx] final_idx = None if not isinstance(end_elem, ReflowBlock): continue elif key not in end_elem.depth_info.stack_positions: # If we get here, it means the last block was the end. # NOTE: This feels a little hacky, but it's because of a limitation # in detecting the "end" and "solo" markers effectively in larger # sections. final_idx = end_idx - 2 # pragma: no cover elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"): final_idx = end_idx if final_idx is not None: # Found the end. Add it to the stack. # We reference the appropriate element from the parent stack. target_depth = elem.depth_info.stack_hashes.index(key) target = root_segment.path_to(element_buffer[idx].segments[0])[ target_depth ].segment spans.append( _RebreakSpan( target, idx, final_idx, # NOTE: this isn't pretty but until it needs to be more # complex, this works. elem.line_position_configs[key].split(":")[0], elem.line_position_configs[key].endswith("strict"), ) ) break # If we find the start, but not the end, it's not a problem, but # we won't be rebreaking this span. This is important so that we # don't rebreak part of something without the context of what's # in the rest of it. We continue without adding it to the buffer. return spans def identify_keyword_rebreak_spans( element_buffer: ReflowSequenceType, ) -> list[_RebreakSpan]: """Identify keyword areas in file to rebreak. A span here is a block, or group of blocks which have explicit configs for their keyword's line position. """ spans: list[_RebreakSpan] = [] # We'll need at least two elements each side, so constrain # our range accordingly. 
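    # As a hypothetical configuration example (the exact section name is
    # illustrative), a user might opt a clause's opening keyword in with:
    #
    #     [sqlfluff:layout:type:from_clause]
    #     keyword_line_position = leading
    #
    # which is what populates `keyword_line_position_configs` below.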
for idx in range(2, len(element_buffer) - 2): elem = element_buffer[idx] # Only evaluate blocks if not isinstance(elem, ReflowBlock): continue # Do any of its parents have config, and are we at the start # of them? for key in elem.keyword_line_position_configs.keys(): # If the element has been unset by using "None" then we want to skip adding # it here. if elem.keyword_line_position_configs[key].lower() == "none": continue # If we're not at the start of the segment, then pass. Some keywords might # be at an index of 1 due to a leading indent so check for both 0 and 1. if elem.depth_info.stack_positions[key].idx > 1: continue # Next check how deep the current element is with respect to the # element which is configured. If we're operating at a deeper depth than # the configuration is applied to, then this keyword cannot be the leading # keyword for that segment. In that case continue, because we're not # looking at the trigger keyword. configured_depth = elem.depth_info.stack_hashes.index(key) if elem.depth_info.stack_depth > configured_depth + 1: continue # Then make sure it's actually a keyword. if not element_buffer[idx].segments or not element_buffer[idx].segments[ 0 ].is_type("keyword"): continue # Can we find the end? # NOTE: It's safe to look right to the end here rather than up to # -2 because we're going to end up stepping back by two in the # complicated cases. for end_idx in range(idx, len(element_buffer)): end_elem = element_buffer[end_idx] final_idx = None if not isinstance(end_elem, ReflowBlock): if any(seg.is_type("indent") for seg in end_elem.segments): final_idx = end_idx - 1 else: continue elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"): final_idx = end_idx if final_idx is not None: # Found the end. Add it to the stack. # We reference the appropriate element from the parent stack. target = element_buffer[idx].segments[0] parent_exclusion = ( elem.keyword_line_position_exclusions_configs.get(key, []) ) if any( t.intersection(parent_exclusion) for t in elem.depth_info.stack_class_types ): # If the keyword is excluded, skip it. reflow_logger.debug( "# Skipping rebreak span on %s because " "excluded by keyword_line_position_exclusions.", elem.segments[0], ) break spans.append( _RebreakSpan( target, idx, final_idx, # NOTE: this isn't pretty but until it needs to be more # complex, this works. elem.keyword_line_position_configs[key].split(":")[0], elem.keyword_line_position_configs[key].endswith("strict"), ) ) break # If we find the start, but not the end, it's not a problem, but # we won't be rebreaking this span. This is important so that we # don't rebreak part of something without the context of what's # in the rest of it. We continue without adding it to the buffer. return spans def rebreak_sequence( elements: ReflowSequenceType, root_segment: BaseSegment, ) -> tuple[ReflowSequenceType, list[LintResult]]: """Reflow line breaks within a sequence. Initially this only _moves_ existing segments around line breaks (e.g. for operators and commas), but eventually this method should also handle line length considerations too. This intentionally does *not* handle indentation, as the existing indents are assumed to be correct. """ lint_results: list[LintResult] = [] fixes: list[LintFix] = [] elem_buff: ReflowSequenceType = elements.copy() # Given a sequence we should identify the objects which # make sense to rebreak. That includes any raws with config, # but also and parent segments which have config and we can # find both ends for. 
    # Given those spans, we then need to find
    # the points either side of them and then the blocks either
    # side to respace them at the same time.

    # 1. First find appropriate spans.
    spans = identify_rebreak_spans(elem_buff, root_segment)
    # The spans give us the edges of operators, but for line positioning we need
    # to handle comments differently. There are two other important points:
    # 1. The next newline outward before code (but passing over comments).
    # 2. The point before the next _code_ segment (ditto comments).
    locations: list[_RebreakLocation] = []
    for span in spans:
        try:
            locations.append(_RebreakLocation.from_span(span, elem_buff))
        # If we try and create a location from an incomplete span (i.e. one
        # where we're unable to find the next newline effectively), then
        # we'll get an exception. If we do - skip that one - we won't be
        # able to effectively work with it even if we could construct it.
        except UnboundLocalError:
            pass

    # Handle each span:
    for loc in locations:
        reflow_logger.debug(
            "Handling Rebreak Span (%r: %s): %r",
            loc.line_position,
            loc.target,
            "".join(
                elem.raw
                for elem in elem_buff[
                    loc.prev.pre_code_pt_idx - 1 : loc.next.pre_code_pt_idx + 2
                ]
            ),
        )

        if loc.has_inappropriate_newlines(elem_buff, strict=loc.strict):
            continue

        if loc.has_templated_newline(elem_buff):
            continue

        # Points and blocks either side are just offsets from the indices.
        prev_point = cast(ReflowPoint, elem_buff[loc.prev.adj_pt_idx])
        next_point = cast(ReflowPoint, elem_buff[loc.next.adj_pt_idx])

        # So we know we have a preference, is it ok?
        if loc.line_position == "leading":
            if elem_buff[loc.prev.newline_pt_idx].num_newlines():
                # We're good. It's already leading.
                continue

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            if loc.strict:  # pragma: no cover
                # TODO: The 'strict' option isn't widely tested yet.
                desc = f"{pretty_name.capitalize()} should always start a new line."
            else:
                desc = (
                    f"Found trailing {pretty_name}. Expected only leading "
                    "near line breaks."
                )

            # Is it the simple case with no comments between the
            # old and new desired locations and only a single following
            # whitespace?
            if (
                loc.next.adj_pt_idx == loc.next.pre_code_pt_idx
                and elem_buff[loc.next.newline_pt_idx].num_newlines() == 1
            ):
                reflow_logger.debug(" Trailing Easy Case")
                # Simple case. No comments.
                # Strip newlines from the next point. Apply the indent to
                # the previous point.
                new_results, prev_point = prev_point.indent_to(
                    next_point.get_indent() or "", before=loc.target
                )
                new_results, next_point = next_point.respace_point(
                    cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]),
                    cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx + 1]),
                    root_segment=root_segment,
                    lint_results=new_results,
                    strip_newlines=True,
                )
                # Update the points in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point
                elem_buff[loc.next.adj_pt_idx] = next_point
            else:
                reflow_logger.debug(" Trailing Tricky Case")
                # Otherwise we've got a tricky scenario where there are comments
                # to negotiate around. In this case, we _move the target_
                # rather than just adjusting the whitespace.

                # Delete the existing position of the target, and
                # the _preceding_ point.
                fixes.append(LintFix.delete(loc.target))
                for seg in elem_buff[loc.prev.adj_pt_idx].segments:
                    if not seg.is_type("dedent"):
                        fixes.append(LintFix.delete(seg))

                # We always reinsert after the first point, but respace
                # the inserted point to ensure it's the right size given
                # configs.
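                # Schematically (an illustrative fragment, not taken from a
                # real fixture), the move for a trailing comma blocked by a
                # comment looks like:
                #
                #     "a,  -- note\n  b"   ->   "a  -- note\n  , b"
                #
                # i.e. the comma is deleted from before the comment and
                # recreated next to the following code.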
new_results, new_point = ReflowPoint(()).respace_point( cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.next.pre_code_pt_idx + 1]), root_segment=root_segment, lint_results=[], anchor_on="after", ) create_anchor = first_create_anchor( elem_buff, range(loc.next.pre_code_pt_idx, loc.next.adj_pt_idx - 1, -1), )[-1] fixes.append( LintFix.create_after( create_anchor, [loc.target], ) ) elem_buff = ( elem_buff[: loc.prev.adj_pt_idx] + elem_buff[loc.next.adj_pt_idx : loc.next.pre_code_pt_idx + 1] + elem_buff[ loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx ] # the target + [new_point] + elem_buff[loc.next.pre_code_pt_idx + 1 :] ) elif loc.line_position == "trailing": if elem_buff[loc.next.newline_pt_idx].num_newlines(): # We're good, it's already trailing. continue # Generate the text for any issues. pretty_name = loc.pretty_target_name() if loc.strict: # pragma: no cover # TODO: The 'strict' option isn't widely tested yet. desc = ( f"{pretty_name.capitalize()} should always be at the end of a line." ) else: desc = ( f"Found leading {pretty_name}. Expected only trailing " "near line breaks." ) # Is it the simple case with no comments between the # old and new desired locations and only one previous newline? if ( loc.prev.adj_pt_idx == loc.prev.pre_code_pt_idx and elem_buff[loc.prev.newline_pt_idx].num_newlines() == 1 ): reflow_logger.debug(" Leading Easy Case") # Simple case. No comments. # Strip newlines from the previous point. Apply the indent # to the next point. new_results, next_point = next_point.indent_to( prev_point.get_indent() or "", after=loc.target ) new_results, prev_point = prev_point.respace_point( cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]), root_segment=root_segment, lint_results=new_results, strip_newlines=True, ) # Update the points in the buffer elem_buff[loc.prev.adj_pt_idx] = prev_point elem_buff[loc.next.adj_pt_idx] = next_point else: reflow_logger.debug(" Leading Tricky Case") # Otherwise we've got a tricky scenario where there are comments # to negotiate around. In this case, we _move the target_ # rather than just adjusting the whitespace. # Delete the existing position of the target, and # the _following_ point. fixes.append(LintFix.delete(loc.target)) for seg in elem_buff[loc.next.adj_pt_idx].segments: fixes.append(LintFix.delete(seg)) # We always reinsert before the first point, but respace # the inserted point to ensure it's the right size given # configs. new_results, new_point = ReflowPoint(()).respace_point( cast(ReflowBlock, elem_buff[loc.prev.pre_code_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]), root_segment=root_segment, lint_results=[], anchor_on="before", ) lead_create_anchor = first_create_anchor( elem_buff, range(loc.prev.pre_code_pt_idx, loc.prev.adj_pt_idx + 1) ) # Attempt to skip dedent elements on reinsertion. These are typically # found at the end of segments, but we don't want to include the # reinserted segment as part of prior code segment's parent segment. prev_code_anchor = next( ( prev_code_segment for prev_code_segment in lead_create_anchor if not prev_code_segment.is_type("dedent") ), None, ) if prev_code_anchor: fixes.append( LintFix.create_before( prev_code_anchor, [loc.target], ) ) else: # All segments were dedents, append to the end instead. 
fixes.append( LintFix.create_after( lead_create_anchor[-1], [loc.target], ) ) elem_buff = ( elem_buff[: loc.prev.pre_code_pt_idx] + [new_point] + elem_buff[ loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx ] # the target + elem_buff[loc.prev.pre_code_pt_idx : loc.prev.adj_pt_idx + 1] + elem_buff[loc.next.adj_pt_idx + 1 :] ) elif loc.line_position == "alone": # If we get here we can assume that the element is currently # either leading or trailing and needs to be moved onto its # own line. # Generate the text for any issues. pretty_name = loc.pretty_target_name() desc = ( f"{pretty_name.capitalize()}s should always have a line break " "both before and after." ) # First handle the following newlines first (easy). if not elem_buff[loc.next.newline_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline after in alone case") new_results, next_point = next_point.indent_to( deduce_line_indent(loc.target.raw_segments[-1], root_segment), after=loc.target, ) # Update the point in the buffer elem_buff[loc.next.adj_pt_idx] = next_point # Then handle newlines before. (hoisting past comments if needed). if not elem_buff[loc.prev.adj_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline before in alone case") # NOTE: In the case that there are comments _after_ the # target, they will be moved with it. This might break things # but there isn't an unambiguous way to do this, because we # can't be sure what the comments are referring to. # Given that, we take the simple option. new_results, prev_point = prev_point.indent_to( deduce_line_indent(loc.target.raw_segments[0], root_segment), before=loc.target, ) # Update the point in the buffer elem_buff[loc.prev.adj_pt_idx] = prev_point else: raise NotImplementedError( # pragma: no cover f"Unexpected line_position config: {loc.line_position}" ) # Consolidate results and consume fix buffer lint_results.append( LintResult( loc.target, fixes=fixes_from_results(new_results) + fixes, description=desc, ) ) fixes = [] return elem_buff, lint_results def rebreak_keywords_sequence( elements: ReflowSequenceType, root_segment: BaseSegment, ) -> tuple[ReflowSequenceType, list[LintResult]]: """Reflow line breaks within a sequence. Initially this only _moves_ existing segments around line breaks (e.g. for operators and commas), but eventually this method should also handle line length considerations too. This intentionally does *not* handle indentation, as the existing indents are assumed to be correct. """ lint_results: list[LintResult] = [] fixes: list[LintFix] = [] elem_buff: ReflowSequenceType = elements.copy() # Given a sequence we should identify the objects which # make sense to rebreak. That includes any raws with config, # but also and parent segments which have config and we can # find both ends for. Given those spans, we then need to find # the points either side of them and then the blocks either # side to respace them at the same time. # 1. First find appropriate spans. spans = identify_keyword_rebreak_spans(elem_buff) # The spans give us the edges of operators, but for line positioning we need # to handle comments differently. There are two other important points: # 1. The next newline outward before code (but passing over comments). # 2. The point before the next _code_ segment (ditto comments). locations: list[_RebreakLocation] = [] for span in spans: try: locations.append(_RebreakLocation.from_span(span, elem_buff)) # If we try and create a location from an incomplete span (i.e. 
        # one where we're unable to find the next newline effectively), then
        # we'll get an exception. If we do - skip that one - we won't be
        # able to effectively work with it even if we could construct it.
        # This would be unlikely to happen when breaking on only keywords,
        # but is left in place for that unlikely event.
        except UnboundLocalError:  # pragma: no cover
            pass

    # Handle each span:
    for loc in locations:
        reflow_logger.debug(
            "Handling Rebreak Span (%r: %s): %r",
            loc.line_position,
            loc.target,
            "".join(
                elem.raw
                for elem in elem_buff[
                    loc.prev.pre_code_pt_idx - 1 : loc.next.pre_code_pt_idx + 2
                ]
            ),
        )

        if loc.has_inappropriate_newlines(elem_buff, True):
            continue

        if loc.has_templated_newline(elem_buff):
            continue

        # Points and blocks either side are just offsets from the indices.
        prev_point = cast(ReflowPoint, elem_buff[loc.prev.adj_pt_idx])
        next_point = cast(ReflowPoint, elem_buff[loc.next.adj_pt_idx])

        # So we know we have a preference, is it ok?
        if loc.line_position == "leading":
            if elem_buff[loc.prev.newline_pt_idx].num_newlines():
                # We're good. It's already leading.
                continue

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = f"The {pretty_name} should always start a new line."

            # Is it the simple case with no comments between the
            # old and new desired locations and only a single following
            # whitespace?
            reflow_logger.debug(" Trailing Easy Case")
            # Strip newlines from the next point. Apply the indent to
            # the previous point.
            new_results, prev_point = prev_point.indent_to(
                next_point.get_indent() or "",
                before=elem_buff[loc.prev.adj_pt_idx + 1].segments[0],
            )
            new_results, next_point = next_point.respace_point(
                cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]),
                cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx + 1]),
                root_segment=root_segment,
                lint_results=new_results,
                strip_newlines=True,
            )
            # Update the points in the buffer
            elem_buff[loc.prev.adj_pt_idx] = prev_point
            elem_buff[loc.next.adj_pt_idx] = next_point

        elif loc.line_position == "trailing":
            if elem_buff[loc.next.newline_pt_idx].num_newlines():
                # We're good, it's already trailing.
                continue

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = f"The {pretty_name} should always be at the end of a line."

            # Is it the simple case with no comments between the
            # old and new desired locations and only one previous newline?
            reflow_logger.debug(" Leading Easy Case")
            # Simple case. No comments.
            # Strip newlines from the previous point. Apply the indent
            # to the next point.
            new_results, next_point = next_point.indent_to(
                prev_point.get_indent() or "",
                after=elem_buff[loc.next.adj_pt_idx - 1].segments[-1],
            )
            new_results, prev_point = prev_point.respace_point(
                cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx - 1]),
                cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]),
                root_segment=root_segment,
                lint_results=new_results,
                strip_newlines=True,
            )
            # Update the points in the buffer
            elem_buff[loc.prev.adj_pt_idx] = prev_point
            elem_buff[loc.next.adj_pt_idx] = next_point

        elif loc.line_position == "alone":
            # If we get here we can assume that the element is currently
            # either leading or trailing and needs to be moved onto its
            # own line.

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = (
                f"The {pretty_name} should always have a line break "
                "both before and after."
            )

            # First handle the following newlines (easy).
if not elem_buff[loc.next.newline_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline after in alone case") new_results, next_point = next_point.indent_to( prev_point.get_indent() or "", after=elem_buff[loc.next.adj_pt_idx - 1].segments[-1], ) # Update the point in the buffer elem_buff[loc.next.adj_pt_idx] = next_point # Then handle newlines before. (hoisting past comments if needed). if not elem_buff[loc.prev.adj_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline before in alone case") # NOTE: In the case that there are comments _after_ the # target, they will be moved with it. This might break things # but there isn't an unambiguous way to do this, because we # can't be sure what the comments are referring to. # Given that, we take the simple option. new_results, prev_point = prev_point.indent_to( next_point.get_indent() or "", before=elem_buff[loc.prev.adj_pt_idx + 1].segments[0], ) # Update the point in the buffer elem_buff[loc.prev.adj_pt_idx] = prev_point else: raise NotImplementedError( # pragma: no cover f"Unexpected line_position config: {loc.line_position}" ) # Consolidate results and consume fix buffer lint_results.append( LintResult( loc.target, fixes=fixes_from_results(new_results) + fixes, description=desc, ) ) fixes = [] return elem_buff, lint_results sqlfluff-3.4.2/src/sqlfluff/utils/reflow/reindent.py000066400000000000000000003074621503426445100226130ustar00rootroot00000000000000"""Methods for deducing and understanding indents.""" import logging from collections import defaultdict from collections.abc import Iterator from dataclasses import dataclass from itertools import chain from typing import DefaultDict, Optional, Union, cast from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.slice import slice_length from sqlfluff.core.parser import ( BaseSegment, NewlineSegment, RawSegment, WhitespaceSegment, ) from sqlfluff.core.parser.segments import SourceFix from sqlfluff.core.parser.segments.meta import MetaSegment, TemplateSegment from sqlfluff.core.rules import LintFix, LintResult from sqlfluff.utils.reflow.elements import ( IndentStats, ReflowBlock, ReflowPoint, ReflowSequenceType, ) from sqlfluff.utils.reflow.helpers import fixes_from_results from sqlfluff.utils.reflow.rebreak import _RebreakSpan, identify_rebreak_spans # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def has_untemplated_newline(point: ReflowPoint) -> bool: """Determine whether a point contains any literal newlines. NOTE: We check for standard literal newlines, but also potential placeholder newlines which have been consumed. """ # If there are no newlines (or placeholders) at all - then False. if not point.class_types.intersection({"newline", "placeholder"}): return False for seg in point.segments: # Make sure it's not templated. # NOTE: An insertion won't have a pos_marker. But that # also means it's not templated. if seg.is_type("newline") and ( not seg.pos_marker or seg.pos_marker.is_literal() ): return True if seg.is_type("placeholder"): seg = cast(TemplateSegment, seg) assert ( seg.block_type == "literal" ), "Expected only literal placeholders in ReflowPoint." if "\n" in seg.source_str: return True return False @dataclass(frozen=True) class _IndentPoint: """Temporary structure for holding metadata about an indented ReflowPoint. 
    We only evaluate points which either *are* line breaks
    or contain Indent/Dedent segments.
    """

    idx: int
    indent_impulse: int
    indent_trough: int
    initial_indent_balance: int
    last_line_break_idx: Optional[int]
    is_line_break: bool
    # NOTE: an "untaken indent" is referenced by the value we go *up* to.
    # i.e. An Indent segment which takes the balance from 1 to 2 but with
    # no newline is an untaken indent of value 2.
    # It also only covers untaken indents _before_ this point. If this point
    # is _also_ an untaken indent, we should be able to infer that ourselves.
    untaken_indents: tuple[int, ...]

    @property
    def closing_indent_balance(self) -> int:
        return self.initial_indent_balance + self.indent_impulse


@dataclass
class _IndentLine:
    """Temporary structure for handling a line of indent points.

    Mutable so that we can adjust the initial
    indent balance for things like comments and templated elements,
    after constructing all the metadata for the points
    on the line.
    """

    initial_indent_balance: int
    indent_points: list[_IndentPoint]

    def __repr__(self) -> str:
        """Compressed repr method to ease logging."""
        return (
            f"IndentLine(iib={self.initial_indent_balance}, ipts=["
            + ", ".join(
                f"iPt@{ip.idx}({ip.indent_impulse}, {ip.indent_trough}, "
                f"{ip.initial_indent_balance}, {ip.last_line_break_idx}, "
                f"{ip.is_line_break}, {ip.untaken_indents})"
                for ip in self.indent_points
            )
            + "])"
        )

    @classmethod
    def from_points(cls, indent_points: list[_IndentPoint]) -> "_IndentLine":
        # Catch edge case for first line where we'll start with a
        # block if no initial indent.
        if indent_points[-1].last_line_break_idx:
            starting_balance = indent_points[0].closing_indent_balance
        else:
            starting_balance = 0
        return cls(starting_balance, indent_points)

    def iter_elements(
        self, elements: ReflowSequenceType
    ) -> Iterator[Union[ReflowPoint, ReflowBlock]]:
        # Edge case for initial lines (i.e. where last_line_break is None)
        if self.indent_points[-1].last_line_break_idx is None:
            range_slice = slice(None, self.indent_points[-1].idx)
        else:
            range_slice = slice(self.indent_points[0].idx, self.indent_points[-1].idx)
        yield from elements[range_slice]

    def iter_blocks(self, elements: ReflowSequenceType) -> Iterator[ReflowBlock]:
        for element in self.iter_elements(elements):
            if isinstance(element, ReflowBlock):
                yield element

    def iter_block_segments(self, elements: ReflowSequenceType) -> Iterator[RawSegment]:
        for block in self.iter_blocks(elements):
            yield from block.segments

    def is_all_comments(self, elements: ReflowSequenceType) -> bool:
        """Is this line made up of just comments?"""
        block_segments = list(self.iter_block_segments(elements))
        return bool(block_segments) and all(
            seg.is_type("comment") for seg in block_segments
        )

    def is_all_templates(self, elements: ReflowSequenceType) -> bool:
        """Is this line made up of just template elements?"""
        return all(block.is_all_unrendered() for block in self.iter_blocks(elements))

    def desired_indent_units(self, forced_indents: list[int]) -> int:
        """Calculate the desired indent units.

        This is the heart of the indentation calculations.

        First we work out how many previous indents are untaken.
        In the easy case, we just use the number of untaken indents
        from previous points. The more complicated example is where
        *this point* has both dedents *and* indents. In this case we
        use the `indent_trough` to prune any previous untaken indents
        which were above the trough at this point.
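
        As a worked example (values chosen purely for illustration): with
        `initial_indent_balance=3`, `untaken_indents=(1, 3)`, and a first
        point with `indent_impulse=0` and `indent_trough=-1`, the pruning
        threshold is `3 - (0 - -1) = 2`. Only the untaken indent `1` stays
        relevant, so with no forced indents the result is `3 - 1 + 0 = 2`.
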
After that we calculate the indent from the incoming balance, minus any relevant untaken events *plus* any previously untaken indents which have been forced (i.e. inserted by the same operation). """ if self.indent_points[0].indent_trough: # This says - purge any untaken indents which happened before # the trough (or at least only _keep_ any which would have remained). # NOTE: Minus signs are really hard to get wrong here. relevant_untaken_indents = [ i for i in self.indent_points[0].untaken_indents if i <= self.initial_indent_balance - ( self.indent_points[0].indent_impulse - self.indent_points[0].indent_trough ) ] else: relevant_untaken_indents = list(self.indent_points[0].untaken_indents) desired_indent = ( self.initial_indent_balance - len(relevant_untaken_indents) + len(forced_indents) ) reflow_logger.debug( " Desired Indent Calculation: IB: %s, RUI: %s, UIL: %s, " "iII: %s, iIT: %s. = %s", self.initial_indent_balance, relevant_untaken_indents, self.indent_points[0].untaken_indents, self.indent_points[0].indent_impulse, self.indent_points[0].indent_trough, desired_indent, ) return desired_indent def closing_balance(self) -> int: """The closing indent balance of the line.""" return self.indent_points[-1].closing_indent_balance def opening_balance(self) -> int: """The opening indent balance of the line. NOTE: We use the first point for the starting balance rather than the line starting balance because we're using this to detect missing lines and if the line has been corrected then we don't want to do that. """ # Edge case for first line of a file (where starting indent must be zero). if self.indent_points[-1].last_line_break_idx is None: return 0 return self.indent_points[0].closing_indent_balance def _revise_templated_lines( lines: list[_IndentLine], elements: ReflowSequenceType ) -> None: """Given an initial set of individual lines. Revise templated ones. NOTE: This mutates the `lines` argument. We do this to ensure that templated lines are _somewhat_ consistent. Total consistency is very hard, given templated elements can be used in a wide range of places. What we do here is to try and take a somewhat rules based approach, but also one which should fit mostly with user expectations. To do this we have three scenarios: 1. Template tags are already on the same indent. 2. Template tags aren't, but can be hoisted without effectively crossing code to be on the same indent. This effectively does the same as "reshuffling" placeholders, whitespace and indent segments but does so without requiring intervention on the parsed file. 3. Template tags which actively cut across the tree (i.e. start and end tags aren't at the same level and can't be hoisted). In this case the tags should be indented at the lowest indent of the matching set. In doing this we have to attempt to match up template tags. This might fail. As we battle-test this feature there may be some interesting bugs which come up! In addition to properly indenting block tags, we also filter out any jinja tags which contain newlines because if we try and fix them, we'll only fix the *initial* part of it. The rest won't be seen because it's within the tag. TODO: This could be an interesting way to extend the indentation algorithm to also cover indentation within jinja tags. """ reflow_logger.debug("# Revise templated lines.") # Because we want to modify the original lines, we're going # to use their list index to keep track of them. 
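    # For example (hypothetical contents), after the scan below we might hold:
    #
    #     grouped = {if_uuid: [3, 7]}  # line indices of {% if %}/{% endif %}
    #     depths = {if_uuid: [1, 2]}   # their initial indent balances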
depths = defaultdict(list) grouped = defaultdict(list) for idx, line in enumerate(lines): if not line.is_all_templates(elements): continue # We can't assume they're all a single block. # So handle all blocks on the line. for block in line.iter_blocks(elements): # We already checked that it's all templates. segment = cast(MetaSegment, block.segments[0]) assert segment.is_type("placeholder", "template_loop") # If it's not got a block uuid, it's not a block, so it # should just be indented as usual. No need to revise. # e.g. comments or variables if segment.block_uuid: grouped[segment.block_uuid].append(idx) depths[segment.block_uuid].append(line.initial_indent_balance) reflow_logger.debug( " UUID: %s @ %s = %r", segment.block_uuid, idx, segment.pos_marker.source_str(), ) # Sort through the lines, so we do to *most* indented first. sorted_group_indices = sorted( grouped.keys(), key=lambda x: max(depths[x]), reverse=True ) reflow_logger.debug(" Sorted Group UUIDs: %s", sorted_group_indices) for group_idx, group_uuid in enumerate(sorted_group_indices): reflow_logger.debug(" Evaluating Group UUID: %s", group_uuid) group_lines = grouped[group_uuid] # Check for case 1. if len({lines[idx].initial_indent_balance for idx in group_lines}) == 1: reflow_logger.debug(" Case 1: All the same") continue # Check for case 2. # In this scenario, we only need to check the adjacent points. # If there's any wiggle room, we pick the lowest option. options: list[set[int]] = [] for idx in group_lines: line = lines[idx] steps: set[int] = {line.initial_indent_balance} # Run backward through the pre point. indent_balance = line.initial_indent_balance first_point_idx = line.indent_points[0].idx first_block = elements[first_point_idx + 1] assert first_block.segments first_segment = first_block.segments[0] if first_segment.is_type("template_loop"): # For template loops, don't count the line. They behave # strangely. continue for i in range(first_point_idx, 0, -1): _element = elements[i] if isinstance(_element, ReflowPoint): # If it's the one straight away, after a block_end or # block_mid, skip it. We know this because it will have # block_uuid. for indent_val in _element.get_indent_segment_vals( exclude_block_indents=True )[::-1]: # Minus because we're going backward. indent_balance -= indent_val reflow_logger.debug( " Backward look. Adding Step: %s", indent_balance, ) steps.add(indent_balance) # if it's anything other than a blank placeholder, break. # NOTE: We still need the forward version of this. elif not _element.segments[0].is_type("placeholder"): break elif cast(TemplateSegment, _element.segments[0]).block_type not in ( "block_start", "block_end", "skipped_source", "block_mid", ): # Recreating this condition is hard, but we shouldn't allow any # rendered content here. break # pragma: no cover # Run forward through the post point. indent_balance = line.initial_indent_balance last_point_idx = line.indent_points[-1].idx last_point = cast(ReflowPoint, elements[last_point_idx]) for indent_val in last_point.get_indent_segment_vals( exclude_block_indents=True ): # Positive because we're going forward. indent_balance += indent_val reflow_logger.debug( " Forward look. Adding Step: %s", indent_balance, ) steps.add(indent_balance) # NOTE: Edge case for consecutive blocks of the same type. # If we're next to another block which is "inner" (i.e.) has # already been handled. We can assume all options up to it's # new indent are open for use. 
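            # An illustrative template shape for that edge case (assumed,
            # not taken from a real fixture):
            #
            #     {% for x in y %}
            #         {% for z in x %}
            #             ...
            #         {% endfor %}
            #     {% endfor %}
            #
            # Once the inner pair has been handled, the outer tags may sit
            # at any step up to the inner block's new indent.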
_case_type = None if first_segment.is_type("placeholder"): _case_type = cast(TemplateSegment, first_segment).block_type if _case_type in ("block_start", "block_mid"): # Search forward until we actually find something rendered. # Indents can usually be shuffled a bit around unrendered # elements. # NOTE: We should only be counting non-template indents, i.e. # ones that don't have a block associated with them. # NOTE: We're starting with the current line. _forward_indent_balance = line.initial_indent_balance for elem in elements[line.indent_points[0].idx :]: if isinstance(elem, ReflowBlock): if not elem.is_all_unrendered(): break continue # Otherwise it's a point. for indent_val in elem.get_indent_segment_vals( exclude_block_indents=True ): _forward_indent_balance += indent_val reflow_logger.debug( " Precedes block. Adding Step: %s", _forward_indent_balance, ) steps.add(_forward_indent_balance) if _case_type in ("block_end", "block_mid"): # Is preceding _line_ AND element also a block? # i.e. nothing else between. if first_point_idx - 1 == lines[idx - 1].indent_points[0].idx + 1: seg = elements[first_point_idx - 1].segments[0] if seg.is_type("placeholder"): if cast(TemplateSegment, seg).block_type == "block_end": _inter_steps = list( range( line.initial_indent_balance, lines[idx - 1].initial_indent_balance, ) ) reflow_logger.debug( " Follows block. Adding Steps: %s", _inter_steps ) steps.update(_inter_steps) reflow_logger.debug( " Rendered Line %s (Source %s): Initial Balance: %s Options: %s", idx, first_block.segments[0].pos_marker.source_position()[0], lines[idx].initial_indent_balance, steps, ) options.append(steps) # We should also work out what all the indents are _between_ # these options and make sure we don't go above that. # Because there might be _outer_ loops, we look for spans # between blocks in this group which don't contain any blocks # from _outer_ loops. i.e. we can't just take all the lines from # first to last. last_group_line: Optional[int] = group_lines[0] # last = previous. net_balance = 0 balance_trough: Optional[int] = None temp_balance_trough: Optional[int] = None inner_lines = [] reflow_logger.debug(" Intermediate lines:") # NOTE: +1 on the last range to make sure we _do_ process the last one. for idx in range(group_lines[0] + 1, group_lines[-1] + 1): for grp in sorted_group_indices[group_idx + 1 :]: # found an "outer" group line, reset tracker. if idx in grouped[grp]: last_group_line = None net_balance = 0 temp_balance_trough = None # Unset the buffer break # Is it in this group? if idx in group_lines: # Stash the line indices of the inner lines. if last_group_line: _inner_lines = list(range(last_group_line + 1, idx)) reflow_logger.debug( " Extending Intermediates with rendered indices %s", _inner_lines, ) inner_lines.extend(_inner_lines) # if we have a temp balance - crystallise it if temp_balance_trough is not None: balance_trough = ( temp_balance_trough if balance_trough is None else min(balance_trough, temp_balance_trough) ) reflow_logger.debug( " + Save Trough: %s (min = %s)", temp_balance_trough, balance_trough, ) temp_balance_trough = None last_group_line = idx net_balance = 0 elif last_group_line: # It's not a group line, but we're still tracking. Update with impulses. is_subgroup_line = any( idx in grouped[grp] for grp in sorted_group_indices[:group_idx] ) for ip in lines[idx].indent_points[:-1]: # Don't count the trough on group lines we've already covered. 
if "placeholder" in elements[ip.idx + 1].class_types: _block_type = cast( TemplateSegment, elements[ip.idx + 1].segments[0] ).block_type if _block_type in ("block_end", "block_mid"): reflow_logger.debug( " Skipping trough before %r", _block_type ) continue if ip.indent_trough < 0 and not is_subgroup_line: # NOTE: We set it temporarily here, because if we're going # to pass an outer template loop then we should discard it. # i.e. only count intervals within inner loops. # Is there anything rendered between here and the next # group line? next_group_line = min(n for n in group_lines if n > idx) next_group_line_start_point = ( lines[next_group_line].indent_points[0].idx ) for i in range(ip.idx, next_group_line_start_point): if isinstance(elements[i], ReflowBlock): if not elements[i].is_all_unrendered(): break else: # no. skip this trough continue _this_through = net_balance + ip.indent_trough temp_balance_trough = ( _this_through if temp_balance_trough is None else min(temp_balance_trough, _this_through) ) reflow_logger.debug( " Stash Trough: %s (min = %s) @ %s", _this_through, temp_balance_trough, idx, ) # NOTE: We update net_balance _after_ the clause above. net_balance += ip.indent_impulse # Evaluate options. reflow_logger.debug(" Options: %s", options) overlap = set.intersection(*options) reflow_logger.debug(" Simple Overlap: %s", overlap) # Remove any options above the limit option. # We minus one from the limit, because if it comes into effect # we'll effectively remove the effects of the indents between the elements. # Is there a mutually agreeable option? reflow_logger.debug(" Balance Trough: %s", balance_trough) if not overlap or (balance_trough is not None and balance_trough <= 0): # Set the indent to the minimum of the existing ones. best_indent = min(lines[idx].initial_indent_balance for idx in group_lines) reflow_logger.debug( " Case 3: Best: %s. Inner Lines: %s", best_indent, inner_lines ) # Remove one indent from all intermediate lines. # This is because we're effectively saying that these # placeholders shouldn't impact the indentation within them. for idx in inner_lines: # MUTATION lines[idx].initial_indent_balance -= 1 else: if len(overlap) > 1: reflow_logger.debug( " Case 2 (precheck): Overlap: %s. Checking lines on the " "immediate inside to check nesting.", overlap, ) # We've got more than one option. To help narrow down, see whether # we we can net outside the lines immediately inside. check_lines = [group_lines[0] + 1, group_lines[-1] - 1] fallback = max(lines[idx].initial_indent_balance for idx in check_lines) for idx in check_lines: # NOTE: It's important here that we've already called # _revise_skipped_source_lines. We don't want to take # them into account here as that will throw us off. reflow_logger.debug( " Discarding %s.", lines[idx].initial_indent_balance, ) overlap.discard(lines[idx].initial_indent_balance) if not overlap: best_indent = fallback reflow_logger.debug( " Using fallback since all overlaps were discarded: %s.", fallback, ) else: best_indent = max(overlap) reflow_logger.debug( " Case 2: Best: %s, Overlap: %s", best_indent, overlap ) # Set all the lines to this indent for idx in group_lines: # MUTATION lines[idx].initial_indent_balance = best_indent # Finally, look for any of the lines which contain newlines # inside the placeholders. We use a slice to make sure # we're iterating through a copy so that we can safely # modify the underlying list. for idx, line in enumerate(lines[:]): # Get the first segment. 
first_seg = elements[line.indent_points[0].idx + 1].segments[0] src_str = first_seg.pos_marker.source_str() if src_str != first_seg.raw and "\n" in src_str: reflow_logger.debug( " Removing line %s from linting as placeholder contains newlines.", first_seg.pos_marker.working_line_no, ) lines.remove(line) def _revise_skipped_source_lines( lines: list[_IndentLine], elements: ReflowSequenceType, ) -> None: """Given an initial set of individual lines, revise any with skipped source. NOTE: This mutates the `lines` argument. In the cases of {% if ... %} statements, there can be strange effects if we try and lint both rendered and unrendered locations. In particular when there's one at the end of a loop. In all of these cases, if we find an unrendered {% if %} block, which is rendered elsewhere in the template we skip that line. """ reflow_logger.debug("# Revise skipped source lines.") if_locs = defaultdict(list) skipped_source_blocks = [] # Slice to avoid copying for idx, line in enumerate(lines[:]): has_skipped_source = False # Find lines which _start_ with a placeholder for idx, seg in enumerate(line.iter_block_segments(elements)): if not seg.is_type("placeholder"): break template_seg = cast(TemplateSegment, seg) # For now only deal with lines that that start with a block_start. if idx == 0: # If we start with anything else, ignore this line for now. if template_seg.block_type != "block_start": break template_loc = template_seg.pos_marker.templated_position() source_loc = template_seg.pos_marker.source_position() reflow_logger.debug( f" Found block start: {seg} {template_seg.source_str!r} " f"{template_loc} {source_loc}" ) if_locs[source_loc].append(template_loc) # Search forward, and see whether it's all skipped. # NOTE: Just on the same line for now. elif template_seg.block_type == "skipped_source": has_skipped_source = True elif template_seg.block_type == "block_end": # If we get here, we've only had placeholders on this line. # If it's also had skipped source. Make a note of the location # in both the source and template. if has_skipped_source: reflow_logger.debug(f" Skipped line found: {template_loc}") skipped_source_blocks.append((source_loc, template_loc)) ignore_locs = [] # Now iterate through each of the potentially skipped blocks, and work out # if they were otherwise rendered in a different location. for source_loc, template_loc in skipped_source_blocks: # Is there at least once location of this source which isn't also # skipped. for other_template_loc in if_locs[source_loc]: if (source_loc, other_template_loc) not in skipped_source_blocks: reflow_logger.debug( " Skipped element rendered elsewhere " f"{(source_loc, template_loc)} at {other_template_loc}" ) ignore_locs.append(template_loc) # Now go back through the lines, and remove any which we can ignore. # Slice to avoid copying for idx, line in enumerate(lines[:]): # Find lines which _start_ with a placeholder try: seg = next(line.iter_block_segments(elements)) except StopIteration: continue if not seg.is_type("placeholder"): continue template_seg = cast(TemplateSegment, seg) if template_seg.block_type != "block_start": continue template_loc = template_seg.pos_marker.templated_position() if template_loc in ignore_locs: reflow_logger.debug(" Removing line from buffer...") lines.remove(line) def _revise_comment_lines( lines: list[_IndentLine], elements: ReflowSequenceType, ignore_comment_lines: bool ) -> None: """Given an initial set of individual lines. Revise comment ones. NOTE: This mutates the `lines` argument. 
We do this to ensure that lines with comments are aligned to the following non-comment element. """ reflow_logger.debug("# Revise comment lines.") comment_line_buffer: list[int] = [] # Slice to avoid copying for idx, line in enumerate(lines[:]): if line.is_all_comments(elements): if ignore_comment_lines: # If we're removing comment lines, purge this line from the buffer. reflow_logger.debug("Ignoring comment line idx: %s", idx) lines.remove(line) else: comment_line_buffer.append(idx) else: # Not a comment only line, if there's a buffer anchor # to this one. for comment_line_idx in comment_line_buffer: reflow_logger.debug( " Comment Only Line: %s. Anchoring to %s", comment_line_idx, idx ) # Mutate reference lines to match this one. comment_line = lines[comment_line_idx] comment_line.initial_indent_balance = line.initial_indent_balance # Reset the buffer comment_line_buffer = [] # Any trailing comments should be anchored to the baseline. for comment_line_idx in comment_line_buffer: # Mutate reference lines to match this one. lines[comment_line_idx].initial_indent_balance = 0 reflow_logger.debug( " Comment Only Line: %s. Anchoring to baseline", comment_line_idx ) def construct_single_indent(indent_unit: str, tab_space_size: int) -> str: """Construct a single indent unit.""" if indent_unit == "tab": return "\t" elif indent_unit == "space": return " " * tab_space_size else: # pragma: no cover raise SQLFluffUserError( f"Expected indent_unit of 'tab' or 'space', instead got {indent_unit}" ) def _prune_untaken_indents( untaken_indents: tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> tuple[int, ...]: """Update the tracking of untaken indents. This is an internal helper function for `_crawl_indent_points`. We use the `trough` of the given indent stats to remove any untaken indents which are now no longer relevant after balances are taken into account. """ # Strip any untaken indents above the new balance. # NOTE: We strip back to the trough, not just the end point # if the trough was lower than the impulse. ui = tuple( x for x in untaken_indents if x <= ( incoming_balance + indent_stats.impulse + indent_stats.trough if indent_stats.trough < indent_stats.impulse else incoming_balance + indent_stats.impulse ) ) # After stripping, we may have to add them back in. # NOTE: all the values in the indent_stats are relative to the incoming # indent, so we correct both of them here by using the incoming_balance. if indent_stats.impulse > indent_stats.trough and not has_newline: for i in range(indent_stats.trough, indent_stats.impulse): indent_val = incoming_balance + i + 1 if indent_val - incoming_balance not in indent_stats.implicit_indents: ui += (indent_val,) return ui def _update_crawl_balances( untaken_indents: tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> tuple[int, tuple[int, ...]]: """Update the tracking of untaken indents and balances. This is an internal helper function for `_crawl_indent_points`. """ new_untaken_indents = _prune_untaken_indents( untaken_indents, incoming_balance, indent_stats, has_newline ) new_balance = incoming_balance + indent_stats.impulse return new_balance, new_untaken_indents def _crawl_indent_points( elements: ReflowSequenceType, allow_implicit_indents: bool = False ) -> Iterator[_IndentPoint]: """Crawl through a reflow sequence, mapping existing indents. This is where *most* of the logic for smart indentation happens. 
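
    As an illustrative sketch (hypothetical snippet, simplified numbers):
    crawling `SELECT\n    a\nFROM tbl` yields one point for the line break
    after `SELECT` (impulse +1) and one for the break before `FROM`
    (impulse -1), each annotated with the running indent balance.
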
The values returned here have a large impact on exactly how indentation is treated. NOTE: If a line ends with a comment, indent impulses are pushed to the point _after_ the comment rather than before to aid with indentation. This saves searching for them later. TODO: Once this function *works*, there's definitely headroom for simplification and optimisation. We should do that. """ last_line_break_idx: int | None = None indent_balance = 0 untaken_indents: tuple[int, ...] = () cached_indent_stats: Optional[IndentStats] = None cached_point: Optional[_IndentPoint] = None for idx, elem in enumerate(elements): if isinstance(elem, ReflowPoint): # NOTE: The following line should never lead to an index error # because files should always have a trailing IndentBlock containing # an "end_of_file" marker, and so the final IndentPoint should always # have _something_ after it. indent_stats = IndentStats.from_combination( cached_indent_stats, elem.get_indent_impulse(), ) # If we don't allow implicit indents, we should remove them here. # Also, if we do, we should check for brackets. # NOTE: The reason we check following class_types is because # bracketed expressions behave a little differently and are an # exception to the normal implicit indent rules. For implicit # indents which precede bracketed expressions, the implicit indent # is treated as a normal indent. In this case the start_bracket # must be the start of the bracketed section which isn't closed # on the same line - if it _is_ closed then we keep the implicit # indents. if indent_stats.implicit_indents: unclosed_bracket = False if ( allow_implicit_indents and "start_bracket" in elements[idx + 1].class_types ): # Is it closed in the line? Iterate forward to find out. # Get the stack depth. next_elem = cast(ReflowBlock, elements[idx + 1]) depth = next_elem.depth_info.stack_depth for elem_j in elements[idx + 1 :]: if isinstance(elem_j, ReflowPoint): if elem_j.num_newlines() > 0: unclosed_bracket = True break elif ( "end_bracket" in elem_j.class_types and elem_j.depth_info.stack_depth == depth ): break else: # pragma: no cover unclosed_bracket = True if unclosed_bracket or not allow_implicit_indents: # Blank indent stats if not using them indent_stats = IndentStats( indent_stats.impulse, indent_stats.trough, () ) # Was there a cache? if cached_indent_stats: # If there was we can safely assume there is a cached point. assert cached_point # If there was, this is a signal that we need to yield two points. # The content of those points depends on the newlines that surround the # last segments (which will be comment block). # _leading_ comments (i.e. those preceded by a newline): Yield _before_ # _trailing_ comments (or rare "mid" comments): Yield _after_ # TODO: We might want to reconsider the treatment of comments in the # middle of lines eventually, but they're fairly unusual so not well # covered in tests as of writing. # We yield the first of those points here, and then manipulate the # indent_stats object to allow the following code to yield the other. # We can refer back to the cached point as a framework. In both # cases we use the combined impulse and trough, but we use the # current indent balance and untaken indents. if cached_point.is_line_break: # It's a leading comment. Yield all the info in that point. yield _IndentPoint( cached_point.idx, indent_stats.impulse, indent_stats.trough, indent_balance, cached_point.last_line_break_idx, True, untaken_indents, ) # Before zeroing, crystallise any effect on overall balances.
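# (i.e. fold the combined impulse into the running balance now - the
# stats are zeroed immediately below, so the effect would otherwise
# be lost.)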
indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, True ) # Set indent stats to zero because we've already yielded. indent_stats = IndentStats(0, 0, indent_stats.implicit_indents) else: # It's a trailing (or mid) comment. Yield it in the next. yield _IndentPoint( cached_point.idx, 0, 0, indent_balance, cached_point.last_line_break_idx, False, untaken_indents, ) # No need to reset indent stats. It's already good. # Reset caches. cached_indent_stats = None has_newline = False cached_point = None # Do we have a newline? has_newline = has_untemplated_newline(elem) and idx != last_line_break_idx # Construct the point we may yield indent_point = _IndentPoint( idx, indent_stats.impulse, indent_stats.trough, indent_balance, last_line_break_idx, has_newline, untaken_indents, ) # Update the last newline index if this is a newline. # NOTE: We used the previous value in the construction of the # _IndentPoint above and we only reset after that construction. if has_newline: last_line_break_idx = idx # Is the next element a comment? If so - delay the decision until we've # got any indents from after the comment too. # # Also, some templaters might insert custom marker slices that are of zero # source string length as a way of marking locations in the middle of # templated output. These don't correspond to real source code, so we # can't meaningfully indent before them. We can safely handle them similar # to the comment case. if "comment" in elements[idx + 1].class_types or ( "placeholder" in elements[idx + 1].class_types and cast(TemplateSegment, elements[idx + 1].segments[0]).source_str == "" ): cached_indent_stats = indent_stats # Create parts of a point to use later. cached_point = indent_point # We loop around so that we don't do the untaken indent calcs yet. continue # Is it meaningful as an indent point? # i.e. Is it a line break? AND not a templated one. # NOTE: a point at idx zero is meaningful because it's like an indent. # NOTE: Last edge case. If we haven't yielded yet, but the # next element is the end of the file. Yield. elif ( has_newline or indent_stats.impulse or indent_stats.trough or idx == 0 or elements[idx + 1].segments[0].is_type("end_of_file") ): yield indent_point # Update balances indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, has_newline ) def _map_line_buffers( elements: ReflowSequenceType, allow_implicit_indents: bool = False ) -> tuple[list[_IndentLine], list[int]]: """Map the existing elements, building up a list of _IndentLine. Returns: :obj:`tuple` of a :obj:`list` of :obj:`_IndentLine` and a :obj:`list` of :obj:`int`. The first is the main output and is designed to be used in assessing indents and their effect through a SQL file. The latter is a list of "imbalanced" indent locations, where the positive indent is untaken, but its corresponding negative indent *is* taken. """ # First build up the buffer of lines. lines = [] point_buffer = [] _previous_points = {} # Buffers to keep track of indents which are untaken on the way # up but taken on the way down. We track them explicitly so we # can force them later. #: dict of ints: maps indentation balance values to the last #: index location where they were seen. This is a working buffer #: and not directly returned by the function. 
untaken_indent_locs = {} #: list of ints: a list of element indices which contain untaken #: positive indents, that should be forced later because their #: corresponding negative indent _was_ taken. Several edge cases #: are excluded from this list and so not included. See code below. imbalanced_locs = [] for indent_point in _crawl_indent_points( elements, allow_implicit_indents=allow_implicit_indents ): # We evaluate all the points in a line at the same time, so # we first build up a buffer. point_buffer.append(indent_point) _previous_points[indent_point.idx] = indent_point if not indent_point.is_line_break: # If it's not a line break, we should still check whether it's # a positive untaken to keep track of them. # ...unless it's implicit. indent_stats = cast( ReflowPoint, elements[indent_point.idx] ).get_indent_impulse() if indent_point.indent_impulse > indent_point.indent_trough and not ( allow_implicit_indents and indent_stats.implicit_indents ): untaken_indent_locs[ indent_point.initial_indent_balance + indent_point.indent_impulse ] = indent_point.idx continue # If it *is* a line break, then store it. lines.append(_IndentLine.from_points(point_buffer)) # We should also evaluate whether this point inserts a newline at the close # of an indent which was untaken on the way up. # https://github.com/sqlfluff/sqlfluff/issues/4234 # Special case 1: # If we're at the end of the file we shouldn't interpret it as a line break # for problem indents, they're a bit of a special case. # Special case 2: # Bracketed expressions are a bit odd here. # e.g. # WHERE ( # foo = bar # ) # LIMIT 1 # # Technically there's an untaken indent before the opening bracket # but this layout is common practice so we're not going to force # one there even though there _is_ a line break after the closing # bracket. following_class_types = elements[indent_point.idx + 1].class_types if ( indent_point.indent_trough # End of file ends case. (Special case 1) and "end_of_file" not in following_class_types ): passing_indents = list( range( indent_point.initial_indent_balance, indent_point.initial_indent_balance + indent_point.indent_trough, -1, ) ) # There might be many indents at this point, but if any match, then # we should still force an indent. # NOTE: We work _inward_ to check which have been taken. for i in reversed(passing_indents): # Was this outer one untaken? if i not in untaken_indent_locs: # No? Stop the loop. If we have a corresponding indent for # this dedent, we shouldn't use the same location to force # untaken indents at inner levels. break loc = untaken_indent_locs[i] # First check for bracket special case. It's less about whether # the section _ends_ with a lone bracket, and more about whether # the _starting point_ is a bracket which closes a line. If it # is, then skip this location. (Special case 2). # NOTE: We can safely "look ahead" here because we know all files # end with an IndentBlock, and we know here that `loc` refers to # an IndentPoint. if "start_bracket" in elements[loc + 1].class_types: continue # If the location was in the line we're just closing, that's # not a problem, because it's an untaken indent which is closed # on the same line. if any(ip.idx == loc for ip in point_buffer): continue # If the only elements between the current point and the end of the # reference line are comments, then don't trigger - it's a misplaced # indent. # First find the end of the reference line.
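# (We scan forward from the untaken location until we hit the point
# recorded as a line break; that bounds the reference line.)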
for j in range(loc, indent_point.idx): _pt = _previous_points.get(j, None) if not _pt: continue if _pt.is_line_break: break assert _pt # Then check if all comments. if all( "comment" in elements[k].class_types for k in range(_pt.idx + 1, indent_point.idx, 2) ): # It is all comments. Ignore it. continue imbalanced_locs.append(loc) # Remove any which are no longer relevant from the working buffer. for k in list(untaken_indent_locs.keys()): if k > indent_point.initial_indent_balance + indent_point.indent_trough: del untaken_indent_locs[k] # Reset the buffer point_buffer = [indent_point] # Handle potential final line if len(point_buffer) > 1: lines.append(_IndentLine.from_points(point_buffer)) return lines, imbalanced_locs def _deduce_line_current_indent( elements: ReflowSequenceType, last_line_break_idx: Optional[int] = None ) -> str: """Deduce the current indent string. This method accounts for both literal indents and indents consumed from the source by potential templating tags. """ indent_seg = None if not elements[0].segments: return "" elif last_line_break_idx: indent_seg = cast( ReflowPoint, elements[last_line_break_idx] )._get_indent_segment() elif isinstance(elements[0], ReflowPoint) and elements[0].segments[ 0 ].pos_marker.working_loc == (1, 1): # No last_line_break_idx, but this is a point. It's the first line. # First check whether this is a first line with a leading # placeholder. if elements[0].segments[0].is_type("placeholder"): reflow_logger.debug(" Handling as initial leading placeholder") seg = cast(TemplateSegment, elements[0].segments[0]) # Is the placeholder a consumed whitespace? if seg.source_str.startswith((" ", "\t")): indent_seg = seg # Otherwise it's an initial leading literal whitespace. else: reflow_logger.debug(" Handling as initial leading whitespace") for indent_seg in elements[0].segments[::-1]: if indent_seg.is_type("whitespace") and not indent_seg.is_templated: break # Handle edge case of no whitespace, but with newline. if indent_seg and not indent_seg.is_type("whitespace"): indent_seg = None if not indent_seg: return "" # We have to check pos marker before checking is templated. # Insertions don't have pos_markers - so aren't templated, # but also don't support calling is_templated. if indent_seg.is_type("placeholder"): # It's a consumed indent. return cast(TemplateSegment, indent_seg).source_str.split("\n")[-1] or "" elif not indent_seg.pos_marker or not indent_seg.is_templated: # It's a literal assert "\n" not in indent_seg.raw, f"Found newline in indent: {indent_seg}" return indent_seg.raw else: # pragma: no cover # It's templated. This shouldn't happen. Segments returned by # _get_indent_segment should be valid indents (i.e. whitespace # or placeholders for consumed whitespace). This is a bug. if indent_seg.pos_marker: reflow_logger.warning( "Segment position marker: %s: [SRC: %s, TMP:%s]", indent_seg.pos_marker, indent_seg.pos_marker.source_slice, indent_seg.pos_marker.templated_slice, ) raise NotImplementedError( "Unexpected templated indent. Report this as a bug on " f"GitHub. Segment: {indent_seg}\n" "https://github.com/sqlfluff/sqlfluff/issues/new/choose" ) def _lint_line_starting_indent( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: list[int], ) -> list[LintResult]: """Lint the indent at the start of a line. NOTE: This mutates `elements` to avoid lots of copying.
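As an illustrative sketch (hypothetical values): a line which should open at an indent balance of two, with a ``single_indent`` of four spaces, is expected to start with eight spaces of whitespace; any mismatch yields a LintResult and a replacement ReflowPoint.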
""" indent_points = indent_line.indent_points # Set up the default anchor initial_point_idx = indent_points[0].idx anchor = {"before": elements[initial_point_idx + 1].segments[0]} # Find initial indent, and deduce appropriate string indent. current_indent = _deduce_line_current_indent( elements, indent_points[-1].last_line_break_idx ) desired_indent_units = indent_line.desired_indent_units(forced_indents) desired_starting_indent = desired_indent_units * single_indent initial_point = cast(ReflowPoint, elements[initial_point_idx]) if current_indent == desired_starting_indent: return [] if initial_point_idx > 0 and initial_point_idx < len(elements) - 1: # Edge case: Lone comments. Normally comments are anchored to the line # _after_ where they come. However, if the existing location _matches_ # the _preceding line_, then we will allow it. It's not the "expected" # location but it is allowable. if "comment" in elements[initial_point_idx + 1].class_types: last_indent = _deduce_line_current_indent( elements, indent_points[0].last_line_break_idx ) if len(current_indent) == len(last_indent): reflow_logger.debug(" Indent matches previous line. OK.") return [] # Edge case: Multiline comments. If the previous line was a multiline # comment and this line starts with a multiline comment, then we should # only lint the indent if it's _too small_. Otherwise we risk destroying # indentation which the logic here is not smart enough to handle. if ( "block_comment" in elements[initial_point_idx - 1].class_types and "block_comment" in elements[initial_point_idx + 1].class_types ): if len(current_indent) > len(desired_starting_indent): reflow_logger.debug(" Indent is bigger than required. OK.") return [] # NOTE: If the reindent code is flagging an indent change here that you # don't agree with for a line with templated elements, especially in a # loop, it's very likely that the fix shouldn't be here but much earlier # in the code as part of `_revise_templated_lines()`. reflow_logger.debug( " Correcting indent @ line %s. Expected: %r. Found %r", elements[initial_point_idx + 1].segments[0].pos_marker.working_line_no, desired_starting_indent, current_indent, ) # Initial point gets special handling if it has no newlines. if indent_points[0].idx == 0 and not indent_points[0].is_line_break: init_seg = elements[indent_points[0].idx].segments[0] if init_seg.is_type("placeholder"): init_seg = cast(TemplateSegment, init_seg) # If it's a placeholder initial indent, then modify the placeholder # to remove the indent from it. src_fix = SourceFix( "", source_slice=slice(0, len(current_indent) + 1), templated_slice=slice(0, 0), ) fixes = [ LintFix.replace( init_seg, [init_seg.edit(source_fixes=[src_fix], source_str="")], ) ] else: # Otherwise it's just initial whitespace. Remove it. 
fixes = [LintFix.delete(seg) for seg in initial_point.segments] new_results = [ LintResult( initial_point.segments[0], fixes, description="First line should not be indented.", source="reflow.indent.existing", ) ] new_point = ReflowPoint(()) # Placeholder indents also get special treatment else: new_results, new_point = initial_point.indent_to( desired_starting_indent, source="reflow.indent.existing", **anchor, # type: ignore ) elements[initial_point_idx] = new_point return new_results def _lint_line_untaken_positive_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, imbalanced_indent_locs: list[int], ) -> tuple[list[LintResult], list[int]]: """Check for positive indents which should have been taken.""" # First check whether this line contains any of the untaken problem points. for ip in indent_line.indent_points: if ip.idx in imbalanced_indent_locs: # Force it at the relevant position. desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) ) reflow_logger.debug( " Detected imbalanced +ve break @ line %s. Indenting to %r", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[ip.idx]) results, new_point = target_point.indent_to( desired_indent, before=elements[ip.idx + 1].segments[0], source="reflow.indent.imbalance", ) elements[ip.idx] = new_point # Keep track of the indent we forced, by returning it. return results, [ip.closing_indent_balance] # If we don't close the line higher there won't be any. starting_balance = indent_line.opening_balance() last_ip = indent_line.indent_points[-1] # Check whether it closes the opening indent. if last_ip.initial_indent_balance + last_ip.indent_trough <= starting_balance: return [], [] # It doesn't - an indent opened on this line is still open at the end of it. # NOTE: Because trailing comments should always shift any # surrounding indentation effects to _after_ their position, we # should just be able to evaluate them safely from the end of the line. indent_points = indent_line.indent_points # Account for the closing trough. closing_trough = last_ip.initial_indent_balance + ( last_ip.indent_trough or last_ip.indent_impulse ) # Edge case: adjust the closing trough for trailing indents # after comments which disrupt it. _bal = 0 for elem in elements[last_ip.idx + 1 :]: if not isinstance(elem, ReflowPoint): if "comment" not in elem.class_types: break continue # Otherwise it's a point stats = elem.get_indent_impulse() # If it's positive, stop. We likely won't find enough negative # impulse after it. if stats.impulse > 0: # pragma: no cover break closing_trough = _bal + stats.trough _bal += stats.impulse # On the way up we're looking for whether the ending balance # was an untaken indent or not. If it *was* untaken, there's # a good chance that we *should* take it. # NOTE: an implicit indent would not force a newline # because it wouldn't be in the untaken_indents. It's # considered _taken_ even if not. if closing_trough not in indent_points[-1].untaken_indents: # If the closing point doesn't correspond to an untaken # indent within the line (i.e. it _was_ taken), then # there won't be an appropriate place to force an indent. return [], [] # The closing indent balance *does* correspond to an # untaken indent on this line. We *should* force a newline # at that position.
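# Illustrative sketch (hypothetical line, not from a test case): for
# `SELECT a + (2 +` the bracket opens an indent which is never taken
# before the line ends, so we find the point whose closing balance
# matches the trough and force a newline (plus indent) there.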
for ip in indent_points: if ip.closing_indent_balance == closing_trough: target_point_idx = ip.idx desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) ) break else: # pragma: no cover raise NotImplementedError("We should always find the relevant point.") reflow_logger.debug( " Detected missing +ve line break @ line %s. Indenting to %r", elements[target_point_idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[target_point_idx]) results, new_point = target_point.indent_to( desired_indent, before=elements[target_point_idx + 1].segments[0], source="reflow.indent.positive", ) elements[target_point_idx] = new_point # Keep track of the indent we forced, by returning it. return results, [closing_trough] def _lint_line_untaken_negative_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: list[int], ) -> list[LintResult]: """Check for negative indents which should have been taken.""" # If we don't close lower than we start, there won't be any. if indent_line.closing_balance() >= indent_line.opening_balance(): return [] results: list[LintResult] = [] # On the way down we're looking for indents which *were* taken on # the way up, but currently aren't on the way down. We slice so # that the _last_ point isn't evaluated, because that's fine. for ip in indent_line.indent_points[:-1]: # Is line break, or positive indent? if ip.is_line_break or ip.indent_impulse >= 0: continue # When using implicit indents, we may find untaken negatives which # aren't shallower than the line they're on. This is because they # were implicit on the way up and so not included in `untaken_indents`. # To catch them we also check that we're shallower than the start # of the line. if ( ip.initial_indent_balance + ip.indent_trough >= indent_line.opening_balance() ): continue # It's negative - is it untaken? In the case of a multi-dedent, # they must _all_ be untaken to take this route. covered_indents = set( range( ip.initial_indent_balance, ip.initial_indent_balance + ip.indent_trough, -1, ) ) untaken_indents = set(ip.untaken_indents).difference(forced_indents) if covered_indents.issubset(untaken_indents): # Yep, untaken. continue # Edge Case: Comments. Since introducing the code to push indent effects # to the point _after_ comments, we no longer need to detect an edge case # for them here. If we change that logic again in the future, so that # indent values are allowed before comments - that code should be # reintroduced here. # Edge Case: Semicolons. For now, semicolon placement is a little # more complicated than what we do here. For now we don't (by # default) introduce missing -ve indents before semicolons. # TODO: Review whether this is a good idea, or whether this should be # more configurable. # NOTE: This could potentially lead to a weird situation if two # statements are already on the same line. That's a bug to solve later. if elements[ip.idx + 1 :] and elements[ip.idx + 1].class_types.intersection( ("statement_terminator", "comma") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "semicolon or comma. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # Edge case: template blocks. These sometimes sit in odd places # in the parse tree, so don't force newlines before them. if elements[ip.idx + 1 :] and "placeholder" in elements[ip.idx + 1].class_types: # are any of those placeholders blocks?
if any( cast(TemplateSegment, seg).block_type.startswith("block") for seg in elements[ip.idx + 1].segments if seg.is_type("placeholder") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "block placeholder. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # It's negative, not a line break and was taken on the way up. # This *should* be an indent! desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) + len(forced_indents) ) reflow_logger.debug( " Detected missing -ve line break @ line %s. Indenting to %r", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[ip.idx]) new_results, new_point = target_point.indent_to( desired_indent, before=elements[ip.idx + 1].segments[0], source="reflow.indent.negative", ) elements[ip.idx] = new_point results += new_results return results def _lint_line_buffer_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: list[int], imbalanced_indent_locs: list[int], ) -> list[LintResult]: """Evaluate a single set of indent points on one line. NOTE: This mutates the given `elements` and `forced_indents` input to avoid lots of copying. Order of operations: 1. Evaluate the starting indent for this line. 2. For points which aren't line breaks in the line, we evaluate them to see whether they *should* be. We separately address missing indents on the way *up* and then on the way *down*. - *Up* in this sense means where the indent balance goes up, but isn't closed again within the same line - e.g. :code:`SELECT a + (2 +` where the indent implied by the bracket isn't closed out before the end of the line. - *Down* in this sense means where we've dropped below the starting indent balance of the line - e.g. :code:`1 + 1) FROM foo` where the line starts within a bracket and then closes that *and* closes an apparent SELECT clause without a newline. This method returns fixes, including appropriate descriptions, to allow generation of LintResult objects directly from them. """ reflow_logger.info( # NOTE: We add a little extra ## here because it's effectively # the start of linting a single line and so the point to start # interpreting any debug logging from. "## Evaluate Rendered Line #%s [source line #%s]. idx=%s:%s.", elements[indent_line.indent_points[0].idx + 1] .segments[0] .pos_marker.working_line_no, elements[indent_line.indent_points[0].idx + 1] .segments[0] .pos_marker.source_position()[0], indent_line.indent_points[0].idx, indent_line.indent_points[-1].idx, ) reflow_logger.debug( " Line Content: %s", [ repr(elem.raw) for elem in elements[ indent_line.indent_points[0].idx : indent_line.indent_points[-1].idx ] ], ) reflow_logger.debug(" Indent Line: %s", indent_line) reflow_logger.debug(" Forced Indents: %s", forced_indents) reflow_logger.debug(" Imbalanced Indent Locs: %s", imbalanced_indent_locs) results = [] # First, handle starting indent. results += _lint_line_starting_indent( elements, indent_line, single_indent, forced_indents ) # Second, handle potential missing positive indents. new_results, new_indents = _lint_line_untaken_positive_indents( elements, indent_line, single_indent, imbalanced_indent_locs ) # If we have any, bank them and return. We don't need to check for # negatives because we know we're on the way up.
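# (The banked indents are recorded in `forced_indents`, so subsequent
# lines in the same section are assessed against the revised balance.)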
if new_results: results += new_results # Keep track of any indents we forced forced_indents.extend(new_indents) return results # Third, handle potential missing negative indents. results += _lint_line_untaken_negative_indents( elements, indent_line, single_indent, forced_indents ) # Lastly remove any forced indents above the closing balance. # Iterate through a slice so we're not editing the thing # that we're iterating through. for i in forced_indents[:]: if i > indent_line.closing_balance(): forced_indents.remove(i) return results def lint_indent_points( elements: ReflowSequenceType, single_indent: str, skip_indentation_in: frozenset[str] = frozenset(), allow_implicit_indents: bool = False, ignore_comment_lines: bool = False, ) -> tuple[ReflowSequenceType, list[LintResult]]: """Lint the indent points to check we have line breaks where we should. For linting indentation - we *first* need to make sure there are line breaks in all the places there should be. This takes an input set of indent points, and inserts additional line breaks in the necessary places to make sure indentation can be valid. Specifically we're addressing two things: 1. Any untaken indents. An untaken indent is only valid if its corresponding dedent is on the same line. If that is not the case, there should be a line break at the location of the indent and dedent. 2. The indentation of lines. Given the line breaks are in the right place, is the line indented correctly? We do these at the same time, because we can't do the second without having line breaks in the right place, but if we're inserting a line break, we need to also know how much to indent by. """ # First map the line buffers. lines: list[_IndentLine] imbalanced_indent_locs: list[int] lines, imbalanced_indent_locs = _map_line_buffers( elements, allow_implicit_indents=allow_implicit_indents ) # Revise templated indents. # NOTE: There's a small ordering dependency here: we should make sure we # remove any "skipped source" lines before revising the templated lines in # the second step. That's because those "skipped source" lines can throw # off the detection algorithm. _revise_skipped_source_lines(lines, elements) _revise_templated_lines(lines, elements) # Revise comment indents _revise_comment_lines(lines, elements, ignore_comment_lines=ignore_comment_lines) # Skip elements we're configured to not touch (i.e. scripts) for line in lines[:]: for block in line.iter_blocks(elements): if any( skip_indentation_in.intersection(types) for types in block.depth_info.stack_class_types ): reflow_logger.debug( "Skipping line %s because it is within one of %s", line, skip_indentation_in, ) lines.remove(line) break reflow_logger.debug("# Evaluate lines for indentation.") # Last: handle each of the lines. results: list[LintResult] = [] # NOTE: forced_indents is mutated by _lint_line_buffer_indents. # It's used to pass state from one call to the next. forced_indents: list[int] = [] elem_buffer = elements.copy() # Make a working copy to mutate. for line in lines: line_results = _lint_line_buffer_indents( elem_buffer, line, single_indent, forced_indents, imbalanced_indent_locs ) if line_results: reflow_logger.info(" PROBLEMS:") for res in line_results: reflow_logger.info(" %s @ %s", res.source, res.anchor) reflow_logger.info(" %s", res.description) results += line_results return elem_buffer, results def _source_char_len(elements: ReflowSequenceType) -> int: """Calculate length in the source file. NOTE: This relies heavily on the sequence already being split appropriately.
It will raise errors if not. TODO: There's a good chance that this might not play well with other fixes. If we find segments without positions then it will probably error. Those will need ironing out. TODO: This probably needs more tests. It's already the source of quite a few fiddly sections. """ char_len = 0 last_source_slice: Optional[slice] = None for seg in chain.from_iterable(elem.segments for elem in elements): # Indent tokens occasionally have strange position markers. # They also don't have length, so skip them. # TODO: This is actually caused by bugs and inconsistencies # in how the source_slice is generated for the position markers # of indent and dedent tokens. That's a job for another day # however. if seg.is_type("indent"): continue # Get the source position. If there is no source position then it's # a recent edit or modification. We shouldn't evaluate it until it's # been positioned. Without a source marker we don't know how to treat # it. if not seg.pos_marker: # pragma: no cover break source_slice = seg.pos_marker.source_slice # Is there a newline in the source string? source_str = seg.pos_marker.source_str() if "\n" in source_str: # There is. Stop here. It's probably a complicated # jinja tag, so it's safer to stop here. # TODO: In future, we should probably be a little # smarter about this, but for now this is ok. Without # an algorithm for laying out code _within_ jinja tags # we won't be able to suggest appropriate fixes. char_len += source_str.index("\n") break slice_len = slice_length(source_slice) # Only update the length if it's a new slice. if source_slice != last_source_slice: # If it's got size in the template but not in the source, it's # probably an insertion. if seg.raw and not slice_len: char_len += len(seg.raw) # NOTE: Don't update the last_source_slice. elif not slice_len: # If it's got no raw and no length, it's # irrelevant. Ignore it. It's probably a meta. continue # Otherwise if we're literal, use the raw length # because it might be an edit. elif seg.pos_marker.is_literal(): char_len += len(seg.raw) last_source_slice = source_slice # Otherwise assume it's templated code. else: char_len += slice_length(source_slice) last_source_slice = source_slice return char_len def _rebreak_priorities(spans: list[_RebreakSpan]) -> dict[int, int]: """Process rebreak spans into opportunities to split lines. The index to insert a potential indent at depends on the line_position of the span. Infer that here and store the indices in the elements. """ rebreak_priority = {} for span in spans: if span.line_position == "leading": rebreak_indices = [span.start_idx - 1] elif span.line_position == "trailing": rebreak_indices = [span.end_idx + 1] elif span.line_position == "alone": rebreak_indices = [span.start_idx - 1, span.end_idx + 1] else: # pragma: no cover raise NotImplementedError( "Unexpected line position: %s", span.line_position ) # NOTE: Operator precedence here is hard coded. It could be # moved to configuration in the layout section in the future. # Operator precedence is fairly consistent between dialects # so for now it feels ok that it's coded here - it also wouldn't # be a breaking change at that point so no pressure to release # it early. span_raw = span.target.raw_upper priority = 6 # Default to 6 for now i.e. the same as '+' # Override priority for specific precedence. if span_raw == ",": priority = 1 elif span.target.is_type("assignment_operator"): # This one is a little rarer so not covered in tests yet. # Logic is the same as others though.
priority = 2 # pragma: no cover elif span_raw == "OR": priority = 3 elif span_raw == "AND": priority = 4 elif span.target.is_type("comparison_operator"): priority = 5 elif span_raw in ("*", "/", "%"): priority = 7 for rebreak_idx in rebreak_indices: rebreak_priority[rebreak_idx] = priority return rebreak_priority MatchedIndentsType = DefaultDict[float, list[int]] def _increment_balance( input_balance: int, indent_stats: IndentStats, elem_idx: int, ) -> tuple[int, MatchedIndentsType]: """Logic for stepping through _match_indents. This is the part of that logic which is potentially fragile so is separated here into a more isolated function for better testing. It's very easy to get wrong, and it's necessary so we don't mistake empty elements - but it's potentially fragile nonetheless. Returns: A tuple where the first element is the resulting balance and the second is a :obj:`defaultdict` of the new elements to add to `matched_indents`. Positive indent example: >>> _increment_balance(0, IndentStats(1, 0), 7) (1, defaultdict(<class 'list'>, {1.0: [7]})) Negative indent example: >>> _increment_balance(3, IndentStats(-1, -1), 11) (2, defaultdict(<class 'list'>, {3.0: [11]})) Double negative indent example: >>> _increment_balance(3, IndentStats(-2, -2), 16) (1, defaultdict(<class 'list'>, {3.0: [16], 2.0: [16]})) Dip indent example: >>> _increment_balance(3, IndentStats(0, -1), 21) (3, defaultdict(<class 'list'>, {3.0: [21]})) """ balance = input_balance matched_indents: MatchedIndentsType = defaultdict(list) if indent_stats.trough < 0: # NOTE: for negative, *trough* counts. # in case of more than one indent we loop and apply to all. for b in range(0, indent_stats.trough, -1): matched_indents[(balance + b) * 1.0].append(elem_idx) # NOTE: We carry forward the impulse, not the trough. # This is important for dedent+indent pairs. balance += indent_stats.impulse elif indent_stats.impulse > 0: # NOTE: for positive, *impulse* counts. # in case of more than one indent we loop and apply to all. for b in range(0, indent_stats.impulse): matched_indents[(balance + b + 1) * 1.0].append(elem_idx) balance += indent_stats.impulse return balance, matched_indents def _match_indents( line_elements: ReflowSequenceType, rebreak_priorities: dict[int, int], newline_idx: int, allow_implicit_indents: bool = False, ) -> MatchedIndentsType: """Identify indent points, taking into account rebreak_priorities. Expect fractional keys, because of the half values for rebreak points. """ balance = 0 matched_indents: MatchedIndentsType = defaultdict(list) implicit_indents: dict[int, tuple[int, ...]] = {} for idx, e in enumerate(line_elements): # We only care about points, because only they contain indents. if not isinstance(e, ReflowPoint): continue # As usual, indents are referred to by their "uphill" side # so what number we store the point against depends on whether # it's positive or negative. # NOTE: Here we don't actually pass in the forward types because # we don't need them for the output. It doesn't make a difference. indent_stats = e.get_indent_impulse() e_idx = newline_idx - len(line_elements) + idx + 1 # Save any implicit indents. if indent_stats.implicit_indents: implicit_indents[e_idx] = indent_stats.implicit_indents balance, nmi = _increment_balance(balance, indent_stats, e_idx) # Incorporate nmi into matched_indents for b, indices in nmi.items(): matched_indents[b].extend(indices) # Something can be both an indent point AND a rebreak point. if idx in rebreak_priorities: # For potential rebreak options (i.e.
ones without an indent) # we add 0.5 so that they sit *between* the varying indent # options. That means we split them before any of their # content, but don't necessarily split them when their # container is split. # Also to spread out the breaks within an indent, we further # add hints to distinguish between them. This is where operator # precedence (as defined above) actually comes into effect. priority = rebreak_priorities[idx] # Assume `priority` is in the range 0 - 50, so divide by 100 # before adding to 0.5. matched_indents[balance + 0.5 + (priority / 100)].append(e_idx) else: continue # Before working out the lowest option, we purge any which contain # ONLY the final point. That's because adding indents there won't # actually help the line length. There's *already* a newline there. for indent_level in list(matched_indents.keys()): if matched_indents[indent_level] == [newline_idx]: matched_indents.pop(indent_level) reflow_logger.debug( " purging balance of %s, it references only the final element.", indent_level, ) # ADDITIONALLY - if implicit indents are allowed we should # only use them if they match another untaken point (which isn't # implicit, or the end of the line). # NOTE: This logic might be better sited elsewhere when (and if) # we introduce smarter choices on where to add indents. if allow_implicit_indents: for indent_level in list(matched_indents.keys()): major_points = set(matched_indents[indent_level]).difference( [newline_idx], implicit_indents.keys() ) if not major_points: matched_indents.pop(indent_level) reflow_logger.debug( " purging balance of %s, it references implicit indents " "or the final indent.", indent_level, ) return matched_indents def _fix_long_line_with_comment( line_buffer: ReflowSequenceType, elements: ReflowSequenceType, current_indent: str, line_length_limit: int, last_indent_idx: Optional[int], trailing_comments: str = "before", ) -> tuple[ReflowSequenceType, list[LintFix]]: """Fix long line by moving trailing comments if possible. This method (unlike the ones for normal lines) just returns a new `elements` argument rather than mutating it. """ # If the comment contains a noqa, don't fix it. It's unsafe. if "noqa" in line_buffer[-1].segments[-1].raw: reflow_logger.debug(" Unfixable because noqa unsafe to move.") return elements, [] # If the comment is longer than the limit _anyway_, don't move # it. It will still be too long. if len(line_buffer[-1].segments[-1].raw) + len(current_indent) > line_length_limit: reflow_logger.debug(" Unfixable because comment too long anyway.") return elements, [] comment_seg = line_buffer[-1].segments[-1] first_seg = line_buffer[0].segments[0] last_elem_idx = elements.index(line_buffer[-1]) assert trailing_comments in ( "after", "before", ), f"Unexpected value for `trailing_comments`: {trailing_comments!r}" # The simpler case is if we're moving the comment to the line # _after_. In that case we just coerce the point before it to # be an indent. if trailing_comments == "after": anchor_point = cast(ReflowPoint, line_buffer[-2]) results, new_point = anchor_point.indent_to(current_indent, before=comment_seg) elements = ( elements[: last_elem_idx - 1] + [new_point] + elements[last_elem_idx:] ) return elements, fixes_from_results(results) # Otherwise we're moving it up and _before_ the line, which is # a little more involved (but also the default). fixes = [ # Remove the comment from its current position, and any # whitespace in the previous point.
LintFix.delete(comment_seg), *[ LintFix.delete(ws) for ws in line_buffer[-2].segments if ws.is_type("whitespace") ], ] # Are we at the start of the file? If so, there's no # indent, and also no previous segments to deal with. if last_indent_idx is None: new_point = ReflowPoint((NewlineSegment(),)) prev_elems = [] anchor = first_seg else: new_segments: tuple[RawSegment, ...] = (NewlineSegment(),) if current_indent: new_segments += (WhitespaceSegment(current_indent),) new_point = ReflowPoint(new_segments) prev_elems = elements[: last_indent_idx + 1] anchor = elements[last_indent_idx + 1].segments[0] fixes.append( # NOTE: This looks a little convoluted, but we create # *before* a block here rather than *after* a point, # because the point may have been modified already by # reflow code and may not be a reliable anchor. LintFix.create_before( anchor, [ comment_seg, *new_point.segments, ], ) ) elements = ( prev_elems + [ line_buffer[-1], new_point, ] + line_buffer[:-2] + elements[last_elem_idx + 1 :] ) return elements, fixes def _fix_long_line_with_fractional_targets( elements: ReflowSequenceType, target_breaks: list[int], desired_indent: str ) -> list[LintResult]: """Work out fixes for splitting a long line at locations like operators. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length(). """ line_results = [] for e_idx in target_breaks: e = cast(ReflowPoint, elements[e_idx]) new_results, new_point = e.indent_to( desired_indent, after=elements[e_idx - 1].segments[-1], before=elements[e_idx + 1].segments[0], ) # NOTE: Mutation of elements. elements[e_idx] = new_point line_results += new_results return line_results def _fix_long_line_with_integer_targets( elements: ReflowSequenceType, target_breaks: list[int], line_length_limit: int, inner_indent: str, outer_indent: str, ) -> list[LintResult]: """Work out fixes for splitting a long line at locations like indents. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length(). """ line_results = [] # If we can get to the uphill indent of later break, and still be within # the line limit, then we can skip everything before it. purge_before = 0 for e_idx in target_breaks: # Is the following block already past the limit? # NOTE: We use the block because we know it will have segments. if not elements[e_idx + 1].segments[0].pos_marker: # If it doesn't have position - we should just bow out # now. It's too complicated. break # pragma: no cover if ( elements[e_idx + 1].segments[0].pos_marker.working_line_pos > line_length_limit ): # If we're past the line length limit, stop looking. break e = cast(ReflowPoint, elements[e_idx]) if e.get_indent_impulse().trough < 0: # It's negative. Skip onward. continue # If we get this far, then it's positive, but still within # the line limit. We can purge any pairs before this. purge_before = e_idx reflow_logger.debug(" ...breaks before %s unnecessary.", purge_before) # Only keep indices which are after the critical point. target_breaks = [e_idx for e_idx in target_breaks if e_idx >= purge_before] reflow_logger.debug(" Remaining breaks: %s.", target_breaks) for e_idx in target_breaks: e = cast(ReflowPoint, elements[e_idx]) indent_stats = e.get_indent_impulse() # NOTE: We check against the _impulse_ here rather than the # _trough_ because if we're about to step back up again then # it should still be indented. 
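# Illustrative: a point containing a Dedent immediately followed by an
# Indent has a trough of -1 but an impulse of 0, so here it still takes
# the inner indent rather than stepping out.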
if indent_stats.impulse < 0: new_indent = outer_indent # NOTE: If we're about to insert a dedent before a # comma or semicolon ... don't. They are a bit special # in being allowed to trail. if elements[e_idx + 1].class_types.intersection( ("statement_terminator", "comma") ): reflow_logger.debug(" Skipping dedent before comma or semicolon.") # We break rather than continue because this is # necessarily a step back down. break else: new_indent = inner_indent new_results, new_point = e.indent_to( new_indent, after=elements[e_idx - 1].segments[-1], before=elements[e_idx + 1].segments[0], ) # NOTE: Mutation of elements. elements[e_idx] = new_point line_results += new_results # If the balance is *also* negative, then we should also stop. # We've indented a whole section - that's enough for now. # We've already skipped over any unnecessary sections, and they shouldn't # be reassessed on the next pass. If there are later sections which *also* # need to be reindented, then we'll catch them when we come back around. if indent_stats.trough < 0: reflow_logger.debug(" Stopping as we're back down.") break return line_results def lint_line_length( elements: ReflowSequenceType, root_segment: BaseSegment, single_indent: str, line_length_limit: int, allow_implicit_indents: bool = False, trailing_comments: str = "before", ) -> tuple[ReflowSequenceType, list[LintResult]]: """Lint the sequence to lines over the configured length. NOTE: This assumes that `lint_indent_points` has already been run. The method won't necessarily *fail* but it does assume that the current indent is correct and that indents have already been inserted where they're missing. """ # First check whether we should even be running this check. if line_length_limit <= 0: reflow_logger.debug("# Line length check disabled.") return elements, [] reflow_logger.debug("# Evaluate lines for length.") # Make a working copy to mutate. elem_buffer: ReflowSequenceType = elements.copy() line_buffer: ReflowSequenceType = [] results: list[LintResult] = [] last_indent_idx: int | None = None for i, elem in enumerate(elem_buffer): # Are there newlines in the element? # If not, add it to the buffer and wait to evaluate the line. # If yes, it's time to evaluate the line. if isinstance(elem, ReflowPoint) and ( # Is it the end of the file? # NOTE: Here, we're actually looking to see whether we're # currently on the _point before the end of the file_ rather # than actually on the final block. This is important because # the following code assumes we're on a point and not a block. # We're safe from indexing errors if we're on a point, because # we know there's always a trailing block. "end_of_file" in elem_buffer[i + 1].class_types # Or is there a newline? or has_untemplated_newline(elem) ): # In either case we want to process this, so carry on. pass else: # Otherwise build up the buffer and loop around again. line_buffer.append(elem) continue # If we don't have a buffer yet, also carry on. Nothing to lint. if not line_buffer: continue # Evaluate a line # Get the current indent. if last_indent_idx is not None: current_indent = _deduce_line_current_indent(elem_buffer, last_indent_idx) else: current_indent = "" # Get the length of all the elements on the line (other than the indent). # NOTE: This is the length in the _source_, because that's the line # length that the reader is actually looking at. char_len = _source_char_len(line_buffer) # Is the line over the limit length? 
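# (The length compared below is the rendered indent plus the *source*
# length of the rest of the line, as computed by _source_char_len above.)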
line_len = len(current_indent) + char_len # NOTE: We should be able to rely on the first elements of the line having # a non-zero number of segments. If this isn't the case we may need to add # a clause to handle that scenario here. assert line_buffer[0].segments first_seg = line_buffer[0].segments[0] line_no = first_seg.pos_marker.working_line_no if line_len <= line_length_limit: reflow_logger.info( " Line #%s. Length %s <= %s. OK.", line_no, line_len, line_length_limit, ) else: reflow_logger.info( " Line #%s. Length %s > %s. PROBLEM.", line_no, line_len, line_length_limit, ) # Potential places to shorten the line are either indent locations # or segments with a defined line position (like operators). # NOTE: We make a buffer including the closing point, because we're # looking for pairs of indents and dedents. The closing dedent for one # of those pairs might be in the closing point so if we don't have it # then we'll miss any locations which have their closing dedent at # the end of the line. line_elements = line_buffer + [elem] # Type hints fixes: list[LintFix] # Identify rebreak spans first so we can work out their indentation # in the next section. # NOTE: In identifying spans, we give the method a little more than # the line, so that it can correctly identify the ends of things # accurately. It's safe to go to i+1 because there is always an # end_of_file marker at the end which we could span into. spans = identify_rebreak_spans( line_elements + [elements[i + 1]], root_segment ) reflow_logger.debug(" spans: %s", spans) rebreak_priorities = _rebreak_priorities(spans) reflow_logger.debug(" rebreak_priorities: %s", rebreak_priorities) # Identify indent points second, taking into # account rebreak_priorities. matched_indents = _match_indents( line_elements, rebreak_priorities, i, allow_implicit_indents=allow_implicit_indents, ) reflow_logger.debug(" matched_indents: %s", matched_indents) # If we don't have any matched_indents, we don't have any options. # This could be for things like comment lines. desc = f"Line is too long ({line_len} > {line_length_limit})." # Easiest option are lines ending with comments, but that aren't *all* # comments and the comment itself is shorter than the limit. # The reason for that last clause is that if the comment (plus an indent) # is already longer than the limit, then there's no point just putting it # on a new line - it will still fail - so it doesn't actually fix the issue. # Deal with them first. if ( len(line_buffer) > 1 # We can only fix _inline_ comments in this way. Others should # just be flagged as issues. and line_buffer[-1].segments[-1].is_type("inline_comment") ): reflow_logger.debug(" Handling as inline comment line.") elem_buffer, fixes = _fix_long_line_with_comment( line_buffer, elem_buffer, current_indent, line_length_limit, last_indent_idx, trailing_comments=trailing_comments, ) # Then check for cases where we have no other options. elif not matched_indents: # NOTE: In this case we have no options for shortening the line. # We'll still report a linting issue - but no fixes are provided. reflow_logger.debug(" Handling as unfixable line.") fixes = [] # Lastly deal with the "normal" case. else: # For now, the algorithm we apply isn't particularly elegant # and just finds the "outermost" opportunity to add additional # line breaks and adds them. # TODO: Make this more elegant later. 
The two obvious directions # would be to potentially add a) line breaks at multiple levels # in a single pass and b) to selectively skip levels if they're # "trivial", or if there would be a more suitable inner indent # to add first (e.g. the case of "(((((((a)))))))"). reflow_logger.debug(" Handling as normal line.") # NOTE: Double indents (or more likely dedents) will be # potentially in *multiple* sets - don't double count them # if we start doing something more clever. target_balance = min(matched_indents.keys()) desired_indent = current_indent if target_balance >= 1: desired_indent += single_indent target_breaks = matched_indents[target_balance] reflow_logger.debug( " Targeting balance of %s, indent: %r for %s", target_balance, desired_indent, target_breaks, ) # Is one of the locations the final element? If so remove it. # There's already a line break there. if i in target_breaks: target_breaks.remove(i) # Is it an "integer" indent or a fractional indent? # Integer indents (i.e. 1.0, 2.0, ...) are based on Indent and # Dedent tokens. Fractional indents (i.e. 1.5, 1.52, ...) are # based more on rebreak spans (e.g. around commas and operators). # The latter is simpler in that it doesn't change the indents, # just adds line breaks. The former is more complicated. # NOTE: Both of these methods mutate the `elem_buffer`. if target_balance % 1 == 0: line_results = _fix_long_line_with_integer_targets( elem_buffer, target_breaks, line_length_limit, desired_indent, current_indent, ) else: line_results = _fix_long_line_with_fractional_targets( elem_buffer, target_breaks, desired_indent ) # Consolidate all the results for the line into one. fixes = fixes_from_results(line_results) results.append( LintResult( # First segment on the line is the result anchor. first_seg, fixes=fixes, description=desc, source="reflow.long_line", ) ) # Regardless of whether the line was good or not, clear # the buffers ready for the next line. line_buffer = [] last_indent_idx = i return elem_buffer, results sqlfluff-3.4.2/src/sqlfluff/utils/reflow/respace.py000066400000000000000000000574441503426445100224270ustar00rootroot00000000000000"""Static methods to support ReflowPoint.respace_point().""" import logging from collections import defaultdict from typing import TYPE_CHECKING, Optional, cast from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.parser import ( BaseSegment, PositionMarker, RawSegment, WhitespaceSegment, ) from sqlfluff.core.rules import LintFix, LintResult from sqlfluff.utils.reflow.helpers import pretty_segment_name if TYPE_CHECKING: # pragma: no cover from sqlfluff.utils.reflow.elements import ReflowBlock # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def _unpack_constraint(constraint: str, strip_newlines: bool) -> tuple[str, bool]: """Unpack a spacing constraint. Used as a helper function in `determine_constraints`. """ # Check for deprecated options. if constraint == "inline": # pragma: no cover reflow_logger.warning( "Found 'inline' specified as a 'spacing_within' constraint. " "This setting is deprecated and has been replaced by the more " "explicit 'touch:inline'. Upgrade your configuration to " "remove this warning." ) constraint = "touch:inline" # Unless align, split. 
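# Illustrative examples (hypothetical inputs, matching the logic below):
#   "touch:inline" -> ("touch", strip_newlines=True)
#   "single" -> ("single", strip_newlines unchanged)
#   "align:alias_expression" -> returned whole; alignment configs are
#   unpacked separately by _extract_alignment_config().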
if constraint.startswith("align"): modifier = "" else: constraint, _, modifier = constraint.partition(":") if not modifier: pass elif modifier == "inline": strip_newlines = True else: # pragma: no cover raise SQLFluffUserError(f"Unexpected constraint modifier: {constraint!r}") return constraint, strip_newlines def determine_constraints( prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], strip_newlines: bool = False, ) -> tuple[str, str, bool]: """Given the surrounding blocks, determine appropriate constraints.""" # Start with the defaults. pre_constraint, strip_newlines = _unpack_constraint( prev_block.spacing_after if prev_block else "single", strip_newlines ) post_constraint, strip_newlines = _unpack_constraint( next_block.spacing_before if next_block else "single", strip_newlines ) # Work out the common parent segment and depth within_spacing = "" if prev_block and next_block: common = prev_block.depth_info.common_with(next_block.depth_info) # Just check the most immediate parent for now for speed. # TODO: Review whether just checking the parent is enough. # NOTE: spacing configs will be available on both sides if they're common # so it doesn't matter whether we get it from prev_block or next_block. idx = prev_block.depth_info.stack_hashes.index(common[-1]) within_constraint = prev_block.stack_spacing_configs.get(common[-1], None) if within_constraint: within_spacing, strip_newlines = _unpack_constraint( within_constraint, strip_newlines ) # Prohibit stripping newlines after comment segments if any(seg.is_type("comment") for seg in prev_block.segments): strip_newlines = False # If segments are expected to touch within, then modify # constraints accordingly. if within_spacing == "touch": # NOTE: We don't override if it's already "any" if pre_constraint != "any": pre_constraint = "touch" if post_constraint != "any": post_constraint = "touch" elif within_spacing == "any": pre_constraint = "any" post_constraint = "any" elif within_spacing == "single": pass elif within_spacing: # pragma: no cover assert prev_block raise SQLFluffUserError( f"Unexpected within constraint: {within_constraint!r} for " f"{prev_block.depth_info.stack_class_types[idx]}" ) return pre_constraint, post_constraint, strip_newlines def process_spacing( segment_buffer: list[RawSegment], strip_newlines: bool = False ) -> tuple[list[RawSegment], Optional[RawSegment], list[LintResult]]: """Given the existing spacing, extract information and do basic pruning.""" removal_buffer: list[RawSegment] = [] result_buffer: list[LintResult] = [] last_whitespace: list[RawSegment] = [] # Loop through the existing segments looking for spacing. for seg in segment_buffer: # If it's whitespace, store it. if seg.is_type("whitespace"): last_whitespace.append(seg) # If it's a newline, react accordingly. # NOTE: This should only trigger on literal newlines. elif seg.is_type("newline", "end_of_file"): if seg.pos_marker and not seg.pos_marker.is_literal(): last_whitespace = [] reflow_logger.debug(" Skipping templated newline: %s", seg) continue # Are we stripping newlines? if strip_newlines and seg.is_type("newline"): reflow_logger.debug(" Stripping newline: %s", seg) removal_buffer.append(seg) result_buffer.append( LintResult( seg, [LintFix.delete(seg)], description="Unexpected line break." ) ) # Carry on as though it wasn't here. continue # Check if we've just passed whitespace. If we have, remove it # as trailing whitespace - both purging it from the buffer and # creating a fix.
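# (Illustrative: for a point containing a space followed by a newline,
# the space is trailing whitespace and is queued for removal here.)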
            if last_whitespace:
                reflow_logger.debug("    Removing trailing whitespace.")
                for ws in last_whitespace:
                    removal_buffer.append(ws)
                    result_buffer.append(
                        LintResult(
                            ws,
                            [LintFix.delete(ws)],
                            description="Unnecessary trailing whitespace.",
                        )
                    )

            # Regardless, unset last_whitespace.
            # We either just deleted it, or it's not relevant for any future
            # segments.
            last_whitespace = []

    if len(last_whitespace) >= 2:
        reflow_logger.debug("    Removing adjoining whitespace.")
        # If we find multiple sequential whitespaces, it's a sign
        # that we've removed something. Only the first one should be
        # a valid indent (or the one we consider for constraints).
        # Remove all the following ones.
        for ws in last_whitespace[1:]:
            removal_buffer.append(ws)
            result_buffer.append(
                LintResult(
                    # NOTE: Anchor and delete the duplicate whitespace
                    # itself (`ws`), not the outer loop variable.
                    ws,
                    [LintFix.delete(ws)],
                    description="Removing duplicate whitespace.",
                )
            )

    # Return the updated segment buffer (with removals pruned), the last
    # whitespace and any associated fixes.
    return (
        [s for s in segment_buffer if s not in removal_buffer],
        # We should have removed all other whitespace by now.
        last_whitespace[0] if last_whitespace else None,
        result_buffer,
    )


def _determine_aligned_inline_spacing(
    root_segment: BaseSegment,
    whitespace_seg: RawSegment,
    next_seg: RawSegment,
    next_pos: PositionMarker,
    segment_type: str,
    align_within: Optional[str],
    align_scope: Optional[str],
) -> str:
    """Work out spacing for instance of an `align` constraint."""
    # Find the level of segment that we're aligning.
    # NOTE: Reverse slice
    parent_segment = None

    # Edge case: if next_seg has no position, we should use the position
    # of the whitespace for searching.
    if align_within:
        for ps in root_segment.path_to(
            next_seg if next_seg.pos_marker else whitespace_seg
        )[::-1]:
            if ps.segment.is_type(align_within):
                parent_segment = ps.segment
            if align_scope and ps.segment.is_type(align_scope):
                break

    if not parent_segment:
        reflow_logger.debug("    No Parent found for alignment case. Treat as single.")
        return " "

    # We've got a parent. Find some siblings.
    reflow_logger.debug("   Determining alignment within: %s", parent_segment)
    siblings = []
    for sibling in parent_segment.recursive_crawl(segment_type):
        # Purge any siblings with a boundary between them
        if not align_scope or not any(
            ps.segment.is_type(align_scope) for ps in parent_segment.path_to(sibling)
        ):
            siblings.append(sibling)
        else:
            reflow_logger.debug(
                "   Purging a sibling because they're blocked by a boundary: %s",
                sibling,
            )

    # If the segment we're aligning has position, use that position.
    # If it doesn't, then use the provided one; we can't do sibling
    # analysis without it.
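    # To make the alignment behaviour concrete, a hedged sketch (the SQL and
    # config here are illustrative): with a spacing constraint of
    # "align:alias_expression:select_clause", the spacing before each alias
    # is padded so that aliases on sibling lines line up, e.g.
    #
    #     SELECT
    #         a      AS first_col,
    #         b + c  AS second_col
    #
    # The rightmost end of the preceding code on each line sets the target
    # position (see `max_desired_line_pos` below).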
if next_seg.pos_marker: next_pos = next_seg.pos_marker # Purge any siblings which are either on the same line or on another line and # have another index siblings_by_line: dict[int, list[BaseSegment]] = defaultdict(list) for sibling in siblings: _pos = sibling.pos_marker assert _pos siblings_by_line[_pos.working_line_no].append(sibling) # Sort all segments by position to easily access index information for line_siblings in siblings_by_line.values(): line_siblings.sort( key=lambda s: cast(PositionMarker, s.pos_marker).working_line_pos ) target_index = next( idx for idx, segment in enumerate(siblings_by_line[next_pos.working_line_no]) if ( cast(PositionMarker, segment.pos_marker).working_line_pos == next_pos.working_line_pos ) ) # Now that we know the target index, we can extract the relevant segment from # all lines siblings = [ segment for segments in siblings_by_line.values() for segment in ( [segments[target_index]] if target_index < len(segments) else [] ) ] # If there's only one sibling, we have nothing to compare to. Default to a single # space. if len(siblings) <= 1: desired_space = " " reflow_logger.debug( " desired_space: %r (based on no other siblings)", desired_space, ) return desired_space # Work out the current spacing before each. last_code: Optional[RawSegment] = None max_desired_line_pos = 0 for seg in parent_segment.raw_segments: for sibling in siblings: # NOTE: We're asserting that there must have been # a last_code. Otherwise this won't work. if ( seg.pos_marker and sibling.pos_marker and seg.pos_marker.working_loc == sibling.pos_marker.working_loc and last_code ): loc = last_code.pos_marker.working_loc_after(last_code.raw) reflow_logger.debug( " loc for %s: %s from %s", sibling, loc, last_code, ) if loc[1] > max_desired_line_pos: max_desired_line_pos = loc[1] if seg.is_code: last_code = seg desired_space = " " * ( 1 + max_desired_line_pos - whitespace_seg.pos_marker.working_line_pos ) reflow_logger.debug( " desired_space: %r (based on max line pos of %s)", desired_space, max_desired_line_pos, ) return desired_space def _extract_alignment_config( constraint: str, ) -> tuple[str, Optional[str], Optional[str]]: """Helper function to break apart an alignment config. >>> _extract_alignment_config("align:alias_expression") ('alias_expression', None, None) >>> _extract_alignment_config("align:alias_expression:statement") ('alias_expression', 'statement', None) >>> _extract_alignment_config("align:alias_expression:statement:bracketed") ('alias_expression', 'statement', 'bracketed') """ assert ":" in constraint alignment_config = constraint.split(":") assert alignment_config[0] == "align" seg_type = alignment_config[1] align_within = alignment_config[2] if len(alignment_config) > 2 else None align_scope = alignment_config[3] if len(alignment_config) > 3 else None reflow_logger.debug( " Alignment Config: %s, %s, %s", seg_type, align_within, align_scope, ) return seg_type, align_within, align_scope def handle_respace__inline_with_space( pre_constraint: str, post_constraint: str, prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], root_segment: BaseSegment, segment_buffer: list[RawSegment], last_whitespace: RawSegment, ) -> tuple[list[RawSegment], list[LintResult]]: """Check inline spacing is the right size. This forms one of the cases handled by .respace_point(). This code assumes: - a ReflowPoint with no newlines. - a ReflowPoint which has _some_ whitespace. Given this we apply constraints to ensure the whitespace is of an appropriate size. 
""" # Get some indices so that we can reference around them ws_idx = segment_buffer.index(last_whitespace) # Do we have either side set to "any" if "any" in [pre_constraint, post_constraint]: # In this instance - don't change anything. # e.g. this could mean there is a comment on one side. return segment_buffer, [] # Do we have either side set to "touch"? if "touch" in [pre_constraint, post_constraint]: # In this instance - no whitespace is correct, This # means we should delete it. segment_buffer.pop(ws_idx) if next_block: description = ( "Unexpected whitespace before " f"{pretty_segment_name(next_block.segments[0])}." ) else: # pragma: no cover # This clause has no test coverage because next_block is # normally provided. description = "Unexpected whitespace" return segment_buffer, [ LintResult( last_whitespace, [LintFix.delete(last_whitespace)], # Should make description from constraints. description=description, ), ] # Handle left alignment & singles if ( post_constraint.startswith("align") and next_block ) or pre_constraint == post_constraint == "single": # Determine the desired spacing, either as alignment or as a single. if post_constraint.startswith("align") and next_block: seg_type, align_within, align_scope = _extract_alignment_config( post_constraint ) next_pos: Optional[PositionMarker] if next_block.segments[0].pos_marker: next_pos = next_block.segments[0].pos_marker # Excluded from coverage: no longer triggered since AL01 rule was refactored elif last_whitespace.pos_marker: # pragma: no cover next_pos = last_whitespace.pos_marker.end_point_marker() # These second clauses are much less likely and so are excluded from # coverage. If we find a way of covering them, that would be great # but for now they exist as backups. elif prev_block and prev_block.segments[-1].pos_marker: # pragma: no cover next_pos = prev_block.segments[-1].pos_marker.end_point_marker() else: # pragma: no cover reflow_logger.info("Unable to find position marker for alignment.") next_pos = None desired_space = " " desc = f"Expected only single space. Found {last_whitespace.raw!r}." if next_pos: desired_space = _determine_aligned_inline_spacing( root_segment, last_whitespace, next_block.segments[0], next_pos, seg_type, align_within, align_scope, ) desc = ( f"{seg_type!r} elements are expected to be aligned. Found " "incorrect whitespace before " f"{pretty_segment_name(next_block.segments[0])}: " f"{last_whitespace.raw!r}." ) else: if next_block: desc = ( "Expected only single space before " f"{pretty_segment_name(next_block.segments[0])}. Found " f"{last_whitespace.raw!r}." ) else: # pragma: no cover # This clause isn't has no test coverage because next_block is # normally provided. desc = f"Expected only single space. Found {last_whitespace.raw!r}." 
            desired_space = " "

        new_results: list[LintResult] = []

        if last_whitespace.raw != desired_space:
            new_seg = last_whitespace.edit(desired_space)
            new_results.append(
                LintResult(
                    last_whitespace,
                    [
                        LintFix(
                            "replace",
                            anchor=last_whitespace,
                            edit=[new_seg],
                        )
                    ],
                    description=desc,
                )
            )
            segment_buffer[ws_idx] = new_seg

        return segment_buffer, new_results

    raise NotImplementedError(  # pragma: no cover
        f"Unexpected Constraints: {pre_constraint}, {post_constraint}"
    )


def handle_respace__inline_without_space(
    pre_constraint: str,
    post_constraint: str,
    prev_block: Optional["ReflowBlock"],
    next_block: Optional["ReflowBlock"],
    segment_buffer: list[RawSegment],
    existing_results: list[LintResult],
    anchor_on: str = "before",
) -> tuple[list[RawSegment], list[LintResult], bool]:
    """Ensure spacing is the right size.

    This forms one of the cases handled by .respace_point().

    This code assumes:
    - a ReflowPoint with no newlines.
    - a ReflowPoint which has _no_ whitespace.

    Given this we apply constraints to either confirm no
    spacing is required or create some of the right size.
    """
    # Do we have either side set to "touch" or "any"
    if {"touch", "any"}.intersection([pre_constraint, post_constraint]):
        # In this instance - no whitespace is correct.
        # Either because there shouldn't be, or because "any"
        # means we shouldn't check.
        return segment_buffer, existing_results, False

    # Are we supposed to be aligning?
    elif post_constraint.startswith("align"):
        reflow_logger.debug("    Inserting Aligned Whitespace.")
        # TODO: We currently rely on a second pass to align
        # insertions. This is where we could devise alignment
        # in advance, but most of the alignment code relies on
        # having existing position markers for those insertions.
        # https://github.com/sqlfluff/sqlfluff/issues/4492
        desired_space = " "
        added_whitespace = WhitespaceSegment(desired_space)

    # Is it anything other than the default case?
    elif not (pre_constraint == post_constraint == "single"):  # pragma: no cover
        # TODO: This will get test coverage when configuration routines
        # are in properly.
        raise NotImplementedError(
            f"Unexpected Constraints: {pre_constraint}, {post_constraint}"
        )

    else:
        # Default to a single whitespace
        reflow_logger.debug("    Inserting Single Whitespace.")
        added_whitespace = WhitespaceSegment()

    # Add it to the buffer first (the easy bit). The hard bit
    # is to then determine how to generate the appropriate LintFix
    # objects.
    segment_buffer.append(added_whitespace)

    # So special handling here. If segments either side
    # already exist then we don't care which we anchor on,
    # but if one is already an insertion (as shown by a lack
    # of pos_marker), then we should piggyback on that
    # pre-existing fix.
    existing_fix = None
    insertion = None
    if prev_block and not prev_block.segments[-1].pos_marker:
        existing_fix = "after"
        insertion = prev_block.segments[-1]
    elif next_block and not next_block.segments[0].pos_marker:
        existing_fix = "before"
        insertion = next_block.segments[0]

    if existing_fix:
        reflow_logger.debug("    Detected existing fix %s", existing_fix)
        if not existing_results:  # pragma: no cover
            raise ValueError(
                "Fixes detected, but none passed to .respace(). "
                "This will cause conflicts."
            )
        # Find the fix
        assert insertion
        for res in existing_results:
            # Does it contain the insertion?
            # TODO: This feels ugly - eq for BaseSegment is different
            # to uuid matching for RawSegment. Perhaps this should be
            # more aligned. There might be a better way of doing this.
            for fix in res.fixes or []:
                if fix.edit and insertion.uuid in [elem.uuid for elem in fix.edit]:
                    break
            else:  # pragma: no cover
                continue
            break
        else:  # pragma: no cover
            reflow_logger.warning("Results %s", existing_results)
            raise ValueError(f"Couldn't find insertion for {insertion}")

        # Mutate the existing fix
        assert res
        assert fix
        assert fix in res.fixes
        assert fix.edit  # It's going to be an edit if we've picked it up.

        # Mutate the fix, it's still in the same result, and that result
        # is still in the existing_results.
        if existing_fix == "before":
            fix.edit = [cast(BaseSegment, added_whitespace)] + fix.edit
        elif existing_fix == "after":
            fix.edit = fix.edit + [cast(BaseSegment, added_whitespace)]

        # No need to add new results, because we mutated the existing.
        return segment_buffer, existing_results, True

    # Otherwise...
    reflow_logger.debug("    Not Detected existing fix. Creating new")
    if prev_block and next_block:
        desc = (
            "Expected single whitespace between "
            f"{pretty_segment_name(prev_block.segments[-1])} "
            f"and {pretty_segment_name(next_block.segments[0])}."
        )
    else:  # pragma: no cover
        # Something to fall back on if prev_block and next_block not provided.
        desc = "Expected single whitespace."
    # Take into account hint on where to anchor if given.
    if prev_block and anchor_on != "after":
        new_result = LintResult(
            # We do this shuffle, because for the CLI it's clearer if the
            # anchor for the error is at the point that the insertion will
            # happen which is the *start* of the next segment, even if
            # we're anchoring the fix on the previous.
            next_block.segments[0] if next_block else prev_block.segments[-1],
            fixes=[
                LintFix(
                    "create_after",
                    anchor=prev_block.segments[-1],
                    edit=[WhitespaceSegment()],
                )
            ],
            description=desc,
        )
    elif next_block:
        new_result = LintResult(
            next_block.segments[0],
            fixes=[
                LintFix(
                    "create_before",
                    anchor=next_block.segments[0],
                    edit=[WhitespaceSegment()],
                )
            ],
            description=desc,
        )
    else:  # pragma: no cover
        raise NotImplementedError(
            "Not set up to handle a missing _after_ and _before_."
        )

    return segment_buffer, existing_results + [new_result], True
sqlfluff-3.4.2/src/sqlfluff/utils/reflow/sequence.py
"""Dataclasses for reflow work."""

import logging
from collections.abc import Iterator, Sequence
from itertools import chain
from typing import Literal, Optional, cast

from sqlfluff.core.config import FluffConfig
from sqlfluff.core.parser import BaseSegment, RawSegment
from sqlfluff.core.rules import LintFix, LintResult
from sqlfluff.utils.reflow.config import ReflowConfig
from sqlfluff.utils.reflow.depthmap import DepthMap
from sqlfluff.utils.reflow.elements import (
    ReflowBlock,
    ReflowPoint,
    ReflowSequenceType,
    get_consumed_whitespace,
)
from sqlfluff.utils.reflow.helpers import fixes_from_results
from sqlfluff.utils.reflow.rebreak import rebreak_keywords_sequence, rebreak_sequence
from sqlfluff.utils.reflow.reindent import (
    construct_single_indent,
    lint_indent_points,
    lint_line_length,
)

# We're in the utils module, but users will expect reflow
# logs to appear in the context of rules. Hence it's a subset
# of the rules logger.
reflow_logger = logging.getLogger("sqlfluff.rules.reflow")


class ReflowSequence:
    """Class for keeping track of elements in a reflow operation.

    This acts as the primary route into using the reflow routines.
It acts in a way that plays nicely within a rule context in that it accepts segments and configuration, while allowing access to modified segments and a series of :obj:`LintFix` objects, which can be returned by the calling rule. Sequences are made up of alternating :obj:`ReflowBlock` and :obj:`ReflowPoint` objects (even if some points have no segments). This is validated on construction. Most operations also return :obj:`ReflowSequence` objects such that operations can be chained, and then the resultant fixes accessed at the last stage, for example: .. code-block:: py3 fixes = ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_fixes() ) """ def __init__( self, elements: ReflowSequenceType, root_segment: BaseSegment, reflow_config: ReflowConfig, depth_map: DepthMap, lint_results: Optional[list[LintResult]] = None, ): # First validate integrity self._validate_reflow_sequence(elements) # Then save self.elements = elements self.root_segment = root_segment self.reflow_config = reflow_config self.depth_map = depth_map # This keeps track of fixes generated in the chaining process. # Alternatively pictured: This is the list of fixes required # to generate this sequence. We can build on this as we edit # the sequence. # Rather than saving *fixes* directly, we package them into # LintResult objects to make it a little easier to expose them # in the CLI. self.lint_results: list[LintResult] = lint_results or [] def get_fixes(self) -> list[LintFix]: """Get the current fix buffer. We're hydrating them here directly from the LintResult objects, so for more accurate results, consider using .get_results(). This method is particularly useful when consolidating multiple results into one. """ return fixes_from_results(self.lint_results) def get_results(self) -> list[LintResult]: """Return the current result buffer.""" return self.lint_results def get_raw(self) -> str: """Get the current raw representation.""" return "".join(elem.raw for elem in self.elements) @staticmethod def _validate_reflow_sequence(elements: ReflowSequenceType) -> None: # An empty set of elements _is_ allowed as an edge case. if not elements: # Return early if so return None # Check odds and evens OddType = elements[0].__class__ EvenType = ReflowPoint if OddType is ReflowBlock else ReflowBlock try: # Check odds are all points assert all( isinstance(elem, OddType) for elem in elements[::2] ), f"Not all odd elements are {OddType.__name__}" # Check evens are all blocks assert all( isinstance(elem, EvenType) for elem in elements[1::2] ), f"Not all even elements are {EvenType.__name__}" return None except AssertionError as err: # pragma: no cover for elem in elements: reflow_logger.error(" - %s", elem) reflow_logger.exception("Assertion check on ReflowSequence failed.") raise err @staticmethod def _elements_from_raw_segments( segments: Sequence[RawSegment], reflow_config: ReflowConfig, depth_map: DepthMap ) -> ReflowSequenceType: """Construct reflow elements from raw segments. NOTE: ReflowBlock elements should only ever have one segment which simplifies iteration here. """ elem_buff: ReflowSequenceType = [] seg_buff: list[RawSegment] = [] for seg in segments: # NOTE: end_of_file is block-like rather than point-like. # This is to facilitate better evaluation of the ends of files. # NOTE: This also allows us to include literal placeholders for # whitespace only strings. 
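            # As a hedged illustration of the result: for the raw segments of
            # "SELECT  1\n" the element buffer comes out alternating roughly
            # as (segment raws shown for orientation):
            #
            #     [ReflowBlock(SELECT), ReflowPoint("  "),
            #      ReflowBlock(1), ReflowPoint("\n"), ReflowBlock(end_of_file)]
            #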
            if (
                seg.is_type("whitespace", "newline", "indent")
                or (get_consumed_whitespace(seg) or "").isspace()
            ):
                # Add to the buffer and move on.
                seg_buff.append(seg)
                continue
            elif elem_buff or seg_buff:
                # There are elements. The last will have been a block.
                # Add a point before we add the block. NOTE: It may be empty.
                elem_buff.append(ReflowPoint(segments=tuple(seg_buff)))
            # Add the block, with config info.
            elem_buff.append(
                ReflowBlock.from_config(
                    segments=(seg,),
                    config=reflow_config,
                    depth_info=depth_map.get_depth_info(seg),
                )
            )
            # Empty the buffer
            seg_buff = []

        # If we ended with a buffer, apply it.
        # TODO: Consider removing this clause?
        if seg_buff:  # pragma: no cover
            elem_buff.append(ReflowPoint(segments=tuple(seg_buff)))
        return elem_buff

    @classmethod
    def from_raw_segments(
        cls: type["ReflowSequence"],
        segments: Sequence[RawSegment],
        root_segment: BaseSegment,
        config: FluffConfig,
        depth_map: Optional[DepthMap] = None,
    ) -> "ReflowSequence":
        """Construct a ReflowSequence from a sequence of raw segments.

        This is intended as a base constructor, which others can use.
        In particular, if no `depth_map` argument is provided, this
        method will generate one in a potentially inefficient way.
        If the calling method has access to a better way of inferring
        a depth map (for example because it has access to a common root
        segment for all the content), it should do that instead and
        pass it in.
        """
        reflow_config = ReflowConfig.from_fluff_config(config)
        if depth_map is None:
            depth_map = DepthMap.from_raws_and_root(segments, root_segment)
        return cls(
            elements=cls._elements_from_raw_segments(
                segments,
                reflow_config=reflow_config,
                # NOTE: This pathway is inefficient. Ideally the depth
                # map should be constructed elsewhere and then passed in.
                depth_map=depth_map,
            ),
            root_segment=root_segment,
            reflow_config=reflow_config,
            depth_map=depth_map,
        )

    @classmethod
    def from_root(
        cls: type["ReflowSequence"], root_segment: BaseSegment, config: FluffConfig
    ) -> "ReflowSequence":
        """Generate a sequence from a root segment.

        Args:
            root_segment (:obj:`BaseSegment`): The relevant root
                segment (usually the base :obj:`FileSegment`).
            config (:obj:`FluffConfig`): A config object from which to
                load the spacing behaviours of different segments.
        """
        return cls.from_raw_segments(
            root_segment.raw_segments,
            root_segment,
            config=config,
            # This is the efficient route. We use it here because we can.
            depth_map=DepthMap.from_parent(root_segment),
        )

    @classmethod
    def from_around_target(
        cls: type["ReflowSequence"],
        target_segment: BaseSegment,
        root_segment: BaseSegment,
        config: FluffConfig,
        sides: str = "both",
    ) -> "ReflowSequence":
        """Generate a sequence around a target.

        Args:
            target_segment (:obj:`RawSegment`): The segment to center
                around when considering the sequence to construct.
            root_segment (:obj:`BaseSegment`): The relevant root
                segment (usually the base :obj:`FileSegment`).
            config (:obj:`FluffConfig`): A config object from which to
                load the spacing behaviours of different segments.
            sides (:obj:`str`): Limit the reflow sequence to just one
                side of the target. Default is two sided ("both"), but
                set to "before" or "after" to limit to either side.

        **NOTE**: We don't just expand to the first block around the
        target but to the first *code* element, which means we may
        swallow several `comment` blocks in the process.

        To evaluate reflow around a specific target, we need to
        generate a sequence which goes from the preceding raw to the
        following raw, i.e. at least:
        block - point - block - point - block
        (where the central block is the target).
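
        A hedged usage sketch (the ``context`` attributes here assume a
        rule evaluation context; substitute your own segments and config):

        .. code-block:: py3

            seq = ReflowSequence.from_around_target(
                context.segment,
                root_segment=context.parent_stack[0],
                config=context.config,
                sides="before",
            )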
""" # There's probably a more efficient way than immediately # materialising the raw_segments for the whole root, but # it works. Optimise later. all_raws = root_segment.raw_segments target_raws = target_segment.raw_segments assert target_raws pre_idx = all_raws.index(target_raws[0]) post_idx = all_raws.index(target_raws[-1]) + 1 initial_idx = (pre_idx, post_idx) if sides in ("both", "before"): # Catch at least the previous segment pre_idx -= 1 for pre_idx in range(pre_idx, -1, -1): if all_raws[pre_idx].is_code: break if sides in ("both", "after"): for post_idx in range(post_idx, len(all_raws)): if all_raws[post_idx].is_code: break # Capture one more after the whitespace. post_idx += 1 segments = all_raws[pre_idx:post_idx] reflow_logger.debug( "Generating ReflowSequence.from_around_target(). idx: %s. " "slice: %s:%s. raw: %r", initial_idx, pre_idx, post_idx, "".join(seg.raw for seg in segments), ) return cls.from_raw_segments(segments, root_segment, config=config) def _find_element_idx_with(self, target: RawSegment) -> int: # pragma: no cover """Helper method to find an element within a segment. Note: This method is currently excluded from test coverage because it is not actively used by any rule. It was previously utilized by rule AL01, but that rule now uses an alternative implementation. The method is retained for potential future reuse or reference. """ for idx, elem in enumerate(self.elements): if target in elem.segments: return idx raise ValueError(f"Target [{target}] not found in ReflowSequence.") def without(self, target: RawSegment) -> "ReflowSequence": # pragma: no cover """Returns a new :obj:`ReflowSequence` without the specified segment. This generates appropriate deletion :obj:`LintFix` objects to direct the linter to remove those elements. Note: This method is currently excluded from test coverage because it is not actively used by any rule. It was previously utilized by rule AL01, but that rule now uses an alternative implementation. The method is retained for potential future reuse or reference. """ removal_idx = self._find_element_idx_with(target) if removal_idx == 0 or removal_idx == len(self.elements) - 1: raise NotImplementedError( # pragma: no cover "Unexpected removal at one end of a ReflowSequence." ) if isinstance(self.elements[removal_idx], ReflowPoint): raise NotImplementedError( # pragma: no cover "Not expected removal of whitespace in ReflowSequence." ) merged_point = ReflowPoint( segments=self.elements[removal_idx - 1].segments + self.elements[removal_idx + 1].segments, ) return ReflowSequence( elements=self.elements[: removal_idx - 1] + [merged_point] + self.elements[removal_idx + 2 :], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[LintResult(target, [LintFix.delete(target)])], ) def insert( self, insertion: RawSegment, target: RawSegment, pos: str = "before" ) -> "ReflowSequence": # pragma: no cover """Returns a new :obj:`ReflowSequence` with the new element inserted. Insertion is always relative to an existing element. Either before or after it as specified by `pos`. This generates appropriate creation :obj:`LintFix` objects to direct the linter to insert those elements. Note: This method is currently excluded from test coverage because it is not actively used by any rule. It was previously utilized by rule AL01, but that rule now uses an alternative implementation. The method is retained for potential future reuse or reference. 
""" assert pos in ("before", "after") target_idx = self._find_element_idx_with(target) # Are we trying to insert something whitespace-like? if insertion.is_type("whitespace", "indent", "newline"): # pragma: no cover raise ValueError( "ReflowSequence.insert() does not support direct insertion of " "spacing elements such as whitespace or newlines" ) # We're inserting something blocky. That means a new block AND a new point. # It's possible we try to _split_ a point by targeting a whitespace element # inside a larger point. For now this isn't supported. # NOTE: We use the depth info of the reference anchor, with the assumption # (I think reliable) that the insertion will be applied as a sibling of # the target. self.depth_map.copy_depth_info(target, insertion) new_block = ReflowBlock.from_config( segments=(insertion,), config=self.reflow_config, depth_info=self.depth_map.get_depth_info(target), ) if isinstance(self.elements[target_idx], ReflowPoint): raise NotImplementedError( # pragma: no cover "Can't insert relative to whitespace for now." ) elif pos == "before": return ReflowSequence( elements=self.elements[:target_idx] + [new_block, ReflowPoint(())] + self.elements[target_idx:], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[ LintResult(target, [LintFix.create_before(target, [insertion])]) ], ) elif pos == "after": # pragma: no cover # TODO: This doesn't get coverage - should it even exist? # Re-evaluate whether this code path is ever taken once more rules use # this. return ReflowSequence( elements=self.elements[: target_idx + 1] + [ReflowPoint(()), new_block] + self.elements[target_idx + 1 :], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[ LintResult(target, [LintFix.create_after(target, [insertion])]) ], ) raise ValueError( f"Unexpected value for ReflowSequence.insert(pos): {pos}" ) # pragma: no cover def replace( self, target: BaseSegment, edit: Sequence[BaseSegment] ) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` with `edit` elements replaced. This generates appropriate replacement :obj:`LintFix` objects to direct the linter to modify those elements. """ target_raws = target.raw_segments assert target_raws edit_raws = list(chain.from_iterable(seg.raw_segments for seg in edit)) # Add the new segments to the depth map at the same level as the target. # First work out how much to trim by. trim_amount = len(target.path_to(target_raws[0])) reflow_logger.debug( "Replacement trim amount: %s.", trim_amount, ) for edit_raw in edit_raws: # NOTE: if target raws has more than one segment we take the depth info # of the first one. We trim to avoid including the implications of removed # "container" segments. self.depth_map.copy_depth_info(target_raws[0], edit_raw, trim=trim_amount) # It's much easier to just totally reconstruct the sequence rather # than do surgery on the elements. # TODO: The surgery is actually a good idea for long sequences now that # we have the depth map. current_raws = list( chain.from_iterable(elem.segments for elem in self.elements) ) start_idx = current_raws.index(target_raws[0]) last_idx = current_raws.index(target_raws[-1]) return ReflowSequence( self._elements_from_raw_segments( current_raws[:start_idx] + edit_raws + current_raws[last_idx + 1 :], reflow_config=self.reflow_config, # NOTE: the depth map has been mutated to include the new segments. 
                depth_map=self.depth_map,
            ),
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            lint_results=[LintResult(target, [LintFix.replace(target, edit)])],
        )

    def _iter_points_with_constraints(
        self,
    ) -> Iterator[tuple[ReflowPoint, Optional[ReflowBlock], Optional[ReflowBlock]]]:
        for idx, elem in enumerate(self.elements):
            # Only evaluate points.
            if isinstance(elem, ReflowPoint):
                pre = None
                post = None
                if idx > 0:
                    pre = cast(ReflowBlock, self.elements[idx - 1])
                if idx < len(self.elements) - 1:
                    post = cast(ReflowBlock, self.elements[idx + 1])
                yield elem, pre, post

    def respace(
        self, strip_newlines: bool = False, filter: str = "all"
    ) -> "ReflowSequence":
        """Returns a new :obj:`ReflowSequence` with points respaced.

        Args:
            strip_newlines (:obj:`bool`): Optionally strip newlines
                before respacing. This is primarily used on focused
                sequences to coerce objects onto a single line. This
                does not apply any prioritisation to which line breaks
                to remove and so is not a substitute for the full
                `reindent` or `reflow` methods.
            filter (:obj:`str`): Optionally filter which reflow points
                to respace. Default configuration is `all`. Other
                options are `newline` which only respaces points
                containing a `newline` or followed by an `end_of_file`
                marker, or `inline` which is the inverse of `newline`.
                This is most useful for filtering between trailing
                whitespace and fixes between content on a line.

        **NOTE** this method relies on the embodied results being
        correct so that we can build on them.
        """
        assert filter in (
            "all",
            "newline",
            "inline",
        ), f"Unexpected value for filter: {filter}"
        # Use the embodied fixes as a starting point.
        lint_results = self.get_results()
        new_elements: ReflowSequenceType = []
        for point, pre, post in self._iter_points_with_constraints():
            # We filter on the elements POST RESPACE. This is to allow
            # strict respacing to reclaim newlines.
            new_lint_results, new_point = point.respace_point(
                prev_block=pre,
                next_block=post,
                root_segment=self.root_segment,
                lint_results=lint_results,
                strip_newlines=strip_newlines,
            )
            # If filter has been set, optionally unset the returned values.
            if (
                filter == "inline"
                # NOTE: We test on the NEW point.
                if (
                    any(seg.is_type("newline") for seg in new_point.segments)
                    # Or if it's followed by the end of file
                    or (post and "end_of_file" in post.class_types)
                )
                else filter == "newline"
            ):
                # Reset the values
                reflow_logger.debug(
                    "    Filter %r applied. Resetting %s", filter, point
                )
                new_point = point
            # Otherwise apply the new fixes
            else:
                lint_results = new_lint_results

            if pre and (not new_elements or new_elements[-1] != pre):
                new_elements.append(pre)
            new_elements.append(new_point)
            if post:
                new_elements.append(post)
        return ReflowSequence(
            elements=new_elements,
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            lint_results=lint_results,
        )

    def rebreak(
        self, rebreak_type: Literal["lines", "keywords"] = "lines"
    ) -> "ReflowSequence":
        """Returns a new :obj:`ReflowSequence` with corrected line breaks.

        This intentionally **does not handle indentation**, as the
        existing indents are assumed to be correct.

        .. note::

            Currently this only *moves* existing segments around line
            breaks (e.g. for operators and commas), but eventually this
            method will also handle line length considerations too.
        """
        if self.lint_results:
            raise NotImplementedError(  # pragma: no cover
                "rebreak cannot currently handle pre-existing embodied fixes."
            )

        # Delegate to the rebreak algorithm
        if rebreak_type == "lines":
            elem_buff, lint_results = rebreak_sequence(self.elements, self.root_segment)
        elif rebreak_type == "keywords":
            elem_buff, lint_results = rebreak_keywords_sequence(
                self.elements, self.root_segment
            )
        else:  # pragma: no cover
            raise NotImplementedError(
                f"Rebreak type of `{rebreak_type}` is not supported."
            )

        return ReflowSequence(
            elements=elem_buff,
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            lint_results=lint_results,
        )

    def reindent(self) -> "ReflowSequence":
        """Reindent lines within a sequence."""
        if self.lint_results:
            raise NotImplementedError(  # pragma: no cover
                "reindent cannot currently handle pre-existing embodied fixes."
            )

        single_indent = construct_single_indent(
            indent_unit=self.reflow_config.indent_unit,
            tab_space_size=self.reflow_config.tab_space_size,
        )

        reflow_logger.info("# Evaluating indents.")
        elements, indent_results = lint_indent_points(
            self.elements,
            single_indent=single_indent,
            skip_indentation_in=self.reflow_config.skip_indentation_in,
            allow_implicit_indents=self.reflow_config.allow_implicit_indents,
            ignore_comment_lines=self.reflow_config.ignore_comment_lines,
        )

        return ReflowSequence(
            elements=elements,
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            lint_results=indent_results,
        )

    def break_long_lines(self) -> "ReflowSequence":
        """Rebreak any remaining long lines in a sequence.

        This assumes that reindent() has already been applied.
        """
        if self.lint_results:
            raise NotImplementedError(  # pragma: no cover
                "break_long_lines cannot currently handle pre-existing embodied fixes."
            )

        single_indent = construct_single_indent(
            indent_unit=self.reflow_config.indent_unit,
            tab_space_size=self.reflow_config.tab_space_size,
        )

        reflow_logger.info("# Evaluating line lengths.")
        elements, length_results = lint_line_length(
            self.elements,
            self.root_segment,
            single_indent=single_indent,
            line_length_limit=self.reflow_config.max_line_length,
            allow_implicit_indents=self.reflow_config.allow_implicit_indents,
            trailing_comments=self.reflow_config.trailing_comments,
        )

        return ReflowSequence(
            elements=elements,
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            lint_results=length_results,
        )
sqlfluff-3.4.2/src/sqlfluff/utils/testing/
sqlfluff-3.4.2/src/sqlfluff/utils/testing/__init__.py
"""Testing utils we want to expose for usage by plugins."""
sqlfluff-3.4.2/src/sqlfluff/utils/testing/cli.py
"""Testing utils for working with the CLIs."""

import inspect
from typing import Any, Optional

from click.testing import CliRunner, Result


def invoke_assert_code(
    ret_code: int = 0,
    args: Optional[list[Any]] = None,
    kwargs: Optional[dict[str, Any]] = None,
    cli_input: Optional[str] = None,
    assert_stdout_contains: str = "",
    assert_stderr_contains: str = "",
    raise_exceptions: bool = True,
) -> Result:
    """Invoke a command and check return code."""
    args = args or []
    kwargs = kwargs or {}
    if cli_input:
        kwargs["input"] = cli_input
    if "mix_stderr" in inspect.signature(CliRunner).parameters:  # pragma: no cover
        runner = CliRunner(mix_stderr=False)  # type: ignore[call-arg,unused-ignore]
    else:  # pragma: no cover
        runner = CliRunner()
    result = runner.invoke(*args, **kwargs)
    # Output the CLI code for debugging
    print(result.output)
    if assert_stdout_contains != "":
        # The replace command just accounts for cross platform testing.
        assert assert_stdout_contains in result.stdout.replace("\\", "/")
    if assert_stderr_contains != "":
        # The replace command just accounts for cross platform testing.
        assert assert_stderr_contains in result.stderr.replace("\\", "/")
    # Check return codes, and unless we specifically want to pass back exceptions,
    # we should raise any exceptions which aren't `SystemExit` ones (i.e. ones
    # raised by `sys.exit()`)
    if raise_exceptions and result.exception:
        if not isinstance(result.exception, SystemExit):
            raise result.exception  # pragma: no cover
    assert ret_code == result.exit_code
    return result
sqlfluff-3.4.2/src/sqlfluff/utils/testing/logging.py
"""This is a modified log capture mechanism which reliably works.

So that logs are handled appropriately by the CLI, sqlfluff
modifies the root logger in a way that can conflict with pytest.
See: https://github.com/pytest-dev/pytest/issues/3697

This module provides a context manager to handle logs better and
enable testing of them, while working around the restrictions of
setting the `propagate` attribute of the logger in each test.

Code adapted from:
https://github.com/pytest-dev/pytest/issues/3697#issuecomment-792129636
"""

import logging
from collections.abc import Iterator
from contextlib import contextmanager

from _pytest.logging import LogCaptureHandler, _remove_ansi_escape_sequences


class FluffLogHandler(LogCaptureHandler):
    """A modified LogCaptureHandler which also exposes some helper functions.

    The aim is to mimic some of the methods available on caplog.
    See:
    https://docs.pytest.org/en/7.1.x/_modules/_pytest/logging.html
    """

    @property
    def text(self) -> str:
        """The formatted log text."""
        return _remove_ansi_escape_sequences(self.stream.getvalue())


@contextmanager
def fluff_log_catcher(level: int, logger_name: str) -> Iterator[FluffLogHandler]:
    """Context manager that sets the level for capturing of logs.

    After the end of the 'with' statement the level is restored
    to its original value.

    Args:
        level (int): The lowest logging level to capture.
        logger_name (str): The name of the logger to capture.
    """
    assert logger_name.startswith(
        "sqlfluff"
    ), "This should only be used with a SQLFluff logger."
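    # A hedged usage sketch (the logger name and asserted message below are
    # illustrative only):
    #
    #     with fluff_log_catcher(logging.DEBUG, "sqlfluff.rules") as caplog:
    #         ...  # run the linting being tested
    #     assert "some expected message" in caplog.text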
logger = logging.getLogger(logger_name) handler = FluffLogHandler() orig_level = logger.level logger.setLevel(level) logger.addHandler(handler) try: yield handler finally: logger.setLevel(orig_level) logger.removeHandler(handler) sqlfluff-3.4.2/src/sqlfluff/utils/testing/rules.py000066400000000000000000000304001503426445100222750ustar00rootroot00000000000000"""Testing utils for rule plugins.""" from collections.abc import Collection from glob import glob from typing import NamedTuple, Optional, Union import pytest import yaml from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import ( SQLBaseError, SQLLintError, SQLParseError, SQLTemplaterError, ) from sqlfluff.core.helpers.string import split_comma_separated_string from sqlfluff.core.rules import BaseRule, get_ruleset from sqlfluff.core.types import ConfigMappingType FixDictType = dict[str, Union[str, int]] ViolationDictType = dict[str, Union[str, int, bool, list[FixDictType]]] class RuleTestCase(NamedTuple): """Used like a dataclass by rule tests.""" rule: str desc: Optional[str] = None pass_str: Optional[str] = None fail_str: Optional[str] = None violations: Optional[set[ViolationDictType]] = None fix_str: Optional[str] = None violations_after_fix: Optional[set[ViolationDictType]] = None configs: Optional[ConfigMappingType] = None skip: Optional[str] = None line_numbers: list[int] = [] def evaluate(self) -> None: """Evaluate the test case. NOTE: This method is designed to be run in a pytest context and will call methods such as `pytest.skip()` as part of it's execution. It may not be suitable for other testing contexts. """ rules__test_helper(self) def load_test_cases( test_cases_path: str, ) -> tuple[list[str], list[RuleTestCase]]: """Load rule test cases from YAML files. Args: test_cases_path (str): A glob string specifying the files containing test cases to load. """ ids = [] test_cases = [] for path in sorted(glob(test_cases_path)): with open(path) as f: raw = f.read() y = yaml.safe_load(raw) rule = y.pop("rule") global_config = y.pop("configs", None) if global_config: for i in y: if "configs" not in y[i].keys(): y[i].update({"configs": global_config}) # Replace any commas with underscores so we can reference specific tests. # e.g. `pytest -k AL05_CV12`. Commas break as test keys. ids.extend([rule.replace(",", "_") + "_" + t for t in y]) test_cases.extend([RuleTestCase(rule=rule, **v) for k, v in y.items()]) return ids, test_cases def get_rule_from_set(code: str, config: FluffConfig) -> BaseRule: """Fetch a rule from the rule set.""" for r in get_ruleset().get_rulepack(config=config).rules: if r.code == code: # pragma: no cover return r raise ValueError(f"{code!r} not in {get_ruleset()!r}") def _setup_config( code: str, configs: Optional[ConfigMappingType] = None ) -> FluffConfig: """Helper function to set up config consistently for pass & fail functions.""" overrides: ConfigMappingType = {"rules": code} _core_section = configs.get("core", {}) if configs else {} if not isinstance(_core_section, dict) or "dialect" not in _core_section: overrides["dialect"] = "ansi" return FluffConfig(configs=configs, overrides=overrides) def assert_rule_fail_in_sql( code: str, sql: str, configs: Optional[ConfigMappingType] = None, line_numbers: Optional[list[int]] = None, ) -> tuple[str, list[SQLBaseError]]: """Assert that a given rule does fail on the given sql. Args: code (str): The code of the rule to test. sql (str): The SQL text to check against. 
configs (:obj:`ConfigMappingType`, optional): A config dict object containing any overrides. line_numbers (list of int, optional): The line numbers which we want to test that errors occurred on. Returns: Tuple: values(fixed_sql (str), violations (list)) fixed_sql (str): The fixed string after linting. Note that for testing purposes, `.lint_string()` is always called with `fix` set to `True`. violations (list of SQLBaseError): the violations found during linting. """ print("# Asserting Rule Fail in SQL") # Set up the config to only use the rule we are testing. cfg = _setup_config(code, configs) # Lint it using the current config (while in fix mode) linted = Linter(config=cfg).lint_string(sql, fix=True) all_violations = linted.get_violations() print("Errors Found:") for e in all_violations: print(" " + repr(e)) if e.desc().startswith("Unexpected exception"): pytest.fail(f"Linter failed with {e.desc()}") # pragma: no cover parse_errors = [ v for v in all_violations if isinstance(v, (SQLParseError, SQLTemplaterError)) ] if parse_errors: pytest.fail(f"Found the following parse errors in test case: {parse_errors}") lint_errors: list[SQLLintError] = [ v for v in all_violations if isinstance(v, SQLLintError) ] if not any(v.rule.code in split_comma_separated_string(code) for v in lint_errors): assert linted.tree print(f"Parsed File:\n{linted.tree.stringify()}") pytest.fail( f"No {code} failures found in query which should fail.", pytrace=False, ) if line_numbers: actual_line_numbers = [e.line_no for e in lint_errors] if line_numbers != actual_line_numbers: # pragma: no cover pytest.fail( "Expected errors on lines {}, but got errors on lines {}".format( line_numbers, actual_line_numbers ) ) fixed_sql, _ = linted.fix_string() # Check that if it has made changes that this rule has set # `is_fix_compatible` appropriately. if fixed_sql != sql: assert any( get_rule_from_set(rule, config=cfg).is_fix_compatible for rule in split_comma_separated_string(code) ), ( f"Rule {code} returned fixes but does not specify " "'is_fix_compatible = True'." ) return fixed_sql, linted.violations def assert_rule_pass_in_sql( code: str, sql: str, configs: Optional[ConfigMappingType] = None, msg: Optional[str] = None, ) -> None: """Assert that a given rule doesn't fail on the given sql.""" # Configs allows overrides if we want to use them. print("# Asserting Rule Pass in SQL") cfg = _setup_config(code, configs) linter = Linter(config=cfg) # This section is mainly for aid in debugging. rendered = linter.render_string(sql, fname="", config=cfg, encoding="utf-8") parsed = linter.parse_rendered(rendered) tree = parsed.tree # Delegate assertions to the `.tree` property violations = parsed.violations if violations: if msg: print(msg) # pragma: no cover pytest.fail(violations[0].desc() + "\n" + tree.stringify()) print(f"Parsed:\n {tree.stringify()}") # Note that lint_string() runs the templater and parser again, in order to # test the whole linting pipeline in the same way that users do. In other # words, the "rendered" and "parsed" variables above are irrelevant to this # line of code. 
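    # For orientation, a hedged example of a typical call to this helper
    # (the rule code and SQL are illustrative):
    #
    #     assert_rule_pass_in_sql("LT01", "SELECT 1 FROM tbl\n")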
lint_result = linter.lint_string(sql, config=cfg, fname="") lint_errors = [v for v in lint_result.violations if isinstance(v, SQLLintError)] if any(v.rule.code in split_comma_separated_string(code) for v in lint_errors): print("Errors Found:") for e in lint_result.violations: print(" " + repr(e)) if msg: print(msg) # pragma: no cover pytest.fail(f"Found {code} failures in query which should pass.", pytrace=False) def assert_rule_raises_violations_in_file( rule: str, fpath: str, violations: list[tuple[int, int]], fluff_config: FluffConfig ) -> None: """Assert that a given rule raises given errors in specific positions of a file. Args: rule (str): The rule we're looking for. fpath (str): The path to the sql file to check. violations (:obj:`list` of :obj:`tuple`): A list of tuples, each with the line number and line position of the expected violation. fluff_config (:obj:`FluffConfig`): A config object to use while linting. """ lntr = Linter(config=fluff_config) lnt = lntr.lint_path(fpath) # Reformat the test data to match the format we're expecting. We use # sets because we really don't care about order and if one is missing, # we don't care about the orders of the correct ones. assert set(lnt.check_tuples()) == {(rule, v[0], v[1]) for v in violations} def prep_violations( rule: str, violations: Collection[ViolationDictType] ) -> Collection[ViolationDictType]: """Default to test rule if code is omitted.""" for v in violations: if "code" not in v: v["code"] = rule return violations def assert_violations_before_fix( test_case: RuleTestCase, violations_before_fix: list[SQLBaseError] ) -> None: """Assert that the given violations are found in the given sql.""" print("# Asserting Violations Before Fix") violation_info = [e.to_dict() for e in violations_before_fix] assert ( test_case.violations ), "Test case must have `violations` to call `assert_violations_before_fix()`" try: assert violation_info == prep_violations(test_case.rule, test_case.violations) except AssertionError: # pragma: no cover print( "Actual violations:\n", yaml.dump(violation_info, allow_unicode=True), sep="", ) raise def assert_violations_after_fix(test_case: RuleTestCase) -> None: """Assert that the given violations are found in the fixed sql.""" print("# Asserting Violations After Fix") assert ( test_case.fix_str ), "Test case must have `fix_str` to call `assert_violations_after_fix()`" assert test_case.violations_after_fix, ( "Test case must have `violations_after_fix` to call " "`assert_violations_after_fix()`" ) _, violations_after_fix = assert_rule_fail_in_sql( test_case.rule, test_case.fix_str, configs=test_case.configs, line_numbers=test_case.line_numbers, ) violation_info = [e.to_dict() for e in violations_after_fix] try: assert violation_info == prep_violations( test_case.rule, test_case.violations_after_fix ) except AssertionError: # pragma: no cover print( "Actual violations_after_fix:\n", yaml.dump(violation_info, allow_unicode=True), sep="", ) raise def rules__test_helper(test_case: RuleTestCase) -> None: """Test that a rule passes/fails on a set of test_cases. Optionally, also test the fixed string if provided in the test case. 
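
    A hedged example of the kind of test case this consumes (the rule code
    and SQL here are illustrative)::

        RuleTestCase(
            rule="CP01",
            fail_str="select 1",
            fix_str="SELECT 1",
        ).evaluate()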
""" if test_case.skip: pytest.skip(test_case.skip) if test_case.pass_str: assert_rule_pass_in_sql( test_case.rule, test_case.pass_str, configs=test_case.configs, ) if test_case.fail_str: res, violations_before_fix = assert_rule_fail_in_sql( test_case.rule, test_case.fail_str, configs=test_case.configs, line_numbers=test_case.line_numbers, ) if test_case.violations: assert_violations_before_fix(test_case, violations_before_fix) # If a `fixed` value is provided then check it matches if test_case.fix_str: assert res == test_case.fix_str if test_case.violations_after_fix: assert_violations_after_fix(test_case) else: assert_rule_pass_in_sql( test_case.rule, test_case.fix_str, configs=test_case.configs, msg="The SQL after fix is applied still contains rule violations. " "To accept a partial fix, violations_after_fix must be set " "listing the remaining, expected, violations.", ) else: # Check that tests without a fix_str do not apply any fixes. assert res == test_case.fail_str, ( "No fix_str was provided, but the rule modified the SQL. Where a fix " "can be applied by a rule, a fix_str must be supplied in the test." ) sqlfluff-3.4.2/test/000077500000000000000000000000001503426445100143255ustar00rootroot00000000000000sqlfluff-3.4.2/test/__init__.py000066400000000000000000000000311503426445100164300ustar00rootroot00000000000000"""Init PY for tests.""" sqlfluff-3.4.2/test/api/000077500000000000000000000000001503426445100150765ustar00rootroot00000000000000sqlfluff-3.4.2/test/api/__init__.py000066400000000000000000000000401503426445100172010ustar00rootroot00000000000000"""Tests for the public api.""" sqlfluff-3.4.2/test/api/classes_test.py000066400000000000000000000023331503426445100201450ustar00rootroot00000000000000"""Tests for use cases of the public api classes.""" from sqlfluff.core import Lexer, Linter, Parser test_query = "SELECt 1" def test__api__lexer(): """Basic checking of lexing functionality.""" tokens, violations = Lexer(dialect="ansi").lex(test_query) assert violations == [] assert isinstance(tokens, tuple) # The last element is the file end marker. assert [elem.raw for elem in tokens] == ["SELECt", " ", "1", ""] def test__api__parser(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) assert parsed.raw == test_query def test__api__linter_lint(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) violations = Linter(dialect="ansi").lint(parsed) assert [v.rule.code for v in violations] == ["CP01", "LT12"] def test__api__linter_fix(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) fixed, _ = Linter(dialect="ansi").fix(parsed) assert fixed.raw == "SELECT 1\n" sqlfluff-3.4.2/test/api/info_test.py000066400000000000000000000030411503426445100174400ustar00rootroot00000000000000"""Test using sqlfluff to extract elements of queries.""" import sqlfluff from sqlfluff.core.linter import RuleTuple def test__api__info_dialects(): """Basic linting of dialects.""" dialects = sqlfluff.list_dialects() assert isinstance(dialects, list) # Turn it into a dict so we can look for items in there. 
dialect_dict = {dialect.label: dialect for dialect in dialects} # Check the ansi dialect works assert "ansi" in dialect_dict ansi = dialect_dict["ansi"] assert ansi.label == "ansi" assert ansi.name == "ANSI" assert ansi.inherits_from == "nothing" assert "This is the base dialect" in ansi.docstring # Check one other works assert "postgres" in dialect_dict postgres = dialect_dict["postgres"] assert postgres.label == "postgres" assert postgres.name == "PostgreSQL" assert postgres.inherits_from == "ansi" assert "this is often the dialect to use" in postgres.docstring def test__api__info_rules(): """Basic linting of dialects.""" rules = sqlfluff.list_rules() assert isinstance(rules, list) assert ( RuleTuple( code="LT01", name="layout.spacing", description="Inappropriate Spacing.", groups=("all", "core", "layout"), aliases=( "L001", "L005", "L006", "L008", "L023", "L024", "L039", "L048", "L071", ), ) in rules ) sqlfluff-3.4.2/test/api/simple_test.py000066400000000000000000000501331503426445100200020ustar00rootroot00000000000000"""Tests for simple use cases of the public api.""" import json from contextlib import nullcontext import pytest import sqlfluff from sqlfluff.api import APIParsingError from sqlfluff.core.errors import SQLFluffUserError my_bad_query = "SeLEct *, 1, blah as fOO from myTable" lint_result = [ { "code": "AM04", "description": "Query produces an unknown number of result columns.", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 41, "end_file_pos": 40, "name": "ambiguous.column_count", "fixes": [], "warning": False, }, { "code": "CP01", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 7, "end_file_pos": 6, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", "fixes": [ { "type": "replace", "edit": "SELECT", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 7, "end_file_pos": 6, } ], "warning": False, }, { "code": "LT09", "description": "Select targets should be on a new line unless there is only " "one select target.", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 27, "end_file_pos": 26, "name": "layout.select_targets", "fixes": [ { "type": "delete", "edit": "", "start_line_no": 1, "start_line_pos": 7, "start_file_pos": 6, "end_line_no": 1, "end_line_pos": 9, "end_file_pos": 8, }, { "type": "create_before", "edit": "\n", "start_line_no": 1, "start_line_pos": 9, "start_file_pos": 8, "end_line_no": 1, "end_line_pos": 9, "end_file_pos": 8, }, { "type": "delete", "edit": "", "start_line_no": 1, "start_line_pos": 11, "start_file_pos": 10, "end_line_no": 1, "end_line_pos": 12, "end_file_pos": 11, }, { "type": "create_before", "edit": "\n", "start_line_no": 1, "start_line_pos": 12, "start_file_pos": 11, "end_line_no": 1, "end_line_pos": 12, "end_file_pos": 11, }, { "type": "delete", "edit": "", "start_line_no": 1, "start_line_pos": 14, "start_file_pos": 13, "end_line_no": 1, "end_line_pos": 15, "end_file_pos": 14, }, { "type": "create_before", "edit": "\n", "start_line_no": 1, "start_line_pos": 15, "start_file_pos": 14, "end_line_no": 1, "end_line_pos": 15, "end_file_pos": 14, }, { "type": "delete", "edit": "", "start_line_no": 1, "start_line_pos": 27, "start_file_pos": 26, "end_line_no": 1, "end_line_pos": 29, "end_file_pos": 28, }, { "type": "create_before", "edit": "\n", "start_line_no": 1, "start_line_pos": 29, "start_file_pos": 28, "end_line_no": 1, 
"end_line_pos": 29, "end_file_pos": 28, }, ], "warning": False, }, { "code": "LT01", "description": "Expected only single space before star '*'. Found ' '.", "start_line_no": 1, "start_line_pos": 7, "start_file_pos": 6, "end_line_no": 1, "end_line_pos": 9, "end_file_pos": 8, "name": "layout.spacing", "fixes": [ { "type": "replace", "edit": " ", "start_line_no": 1, "start_line_pos": 7, "start_file_pos": 6, "end_line_no": 1, "end_line_pos": 9, "end_file_pos": 8, } ], "warning": False, }, { "code": "AL03", "start_line_no": 1, "start_line_pos": 12, "start_file_pos": 11, "end_line_no": 1, "end_line_pos": 13, "end_file_pos": 12, "description": "Column expression without alias. Use explicit `AS` clause.", "name": "aliasing.expression", "fixes": [], "warning": False, }, { "code": "CP01", "start_line_no": 1, "start_line_pos": 20, "start_file_pos": 19, "end_line_no": 1, "end_line_pos": 22, "end_file_pos": 21, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", "fixes": [ { "type": "replace", "edit": "AS", "start_line_no": 1, "start_line_pos": 20, "start_file_pos": 19, "end_line_no": 1, "end_line_pos": 22, "end_file_pos": 21, } ], "warning": False, }, { "code": "LT01", "description": ( "Expected only single space before naked identifier. Found ' '." ), "start_line_no": 1, "start_line_pos": 22, "start_file_pos": 21, "end_line_no": 1, "end_line_pos": 24, "end_file_pos": 23, "name": "layout.spacing", "fixes": [ { "type": "replace", "edit": " ", "start_line_no": 1, "start_line_pos": 22, "start_file_pos": 21, "end_line_no": 1, "end_line_pos": 24, "end_file_pos": 23, } ], "warning": False, }, { "code": "CP02", "start_line_no": 1, "start_line_pos": 24, "start_file_pos": 23, "end_line_no": 1, "end_line_pos": 27, "end_file_pos": 26, "description": "Unquoted identifiers must be consistently lower case.", "name": "capitalisation.identifiers", "fixes": [ { "type": "replace", "edit": "foo", "start_line_no": 1, "start_line_pos": 24, "start_file_pos": 23, "end_line_no": 1, "end_line_pos": 27, "end_file_pos": 26, } ], "warning": False, }, { "code": "LT01", "description": "Expected only single space before 'from' keyword. 
Found ' '.", "start_line_no": 1, "start_line_pos": 27, "start_file_pos": 26, "end_line_no": 1, "end_line_pos": 29, "end_file_pos": 28, "name": "layout.spacing", "fixes": [ { "type": "replace", "edit": " ", "start_line_no": 1, "start_line_pos": 27, "start_file_pos": 26, "end_line_no": 1, "end_line_pos": 29, "end_file_pos": 28, } ], "warning": False, }, { "code": "CP01", "start_line_no": 1, "start_line_pos": 29, "start_file_pos": 28, "end_line_no": 1, "end_line_pos": 33, "end_file_pos": 32, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", "fixes": [ { "type": "replace", "edit": "FROM", "start_line_no": 1, "start_line_pos": 29, "start_file_pos": 28, "end_line_no": 1, "end_line_pos": 33, "end_file_pos": 32, } ], "warning": False, }, { "code": "CP02", "start_line_no": 1, "start_line_pos": 34, "start_file_pos": 33, "end_line_no": 1, "end_line_pos": 41, "end_file_pos": 40, "description": "Unquoted identifiers must be consistently lower case.", "name": "capitalisation.identifiers", "fixes": [ { "type": "replace", "edit": "mytable", "start_line_no": 1, "start_line_pos": 34, "start_file_pos": 33, "end_line_no": 1, "end_line_pos": 41, "end_file_pos": 40, } ], "warning": False, }, { "code": "LT12", "start_line_no": 1, "start_line_pos": 41, "start_file_pos": 40, "end_line_no": 1, "end_line_pos": 41, "end_file_pos": 40, "description": "Files must end with a single trailing newline.", "name": "layout.end_of_file", "fixes": [ { "type": "create_after", "edit": "\n", "start_line_no": 1, "start_line_pos": 41, "start_file_pos": 40, "end_line_no": 1, "end_line_pos": 41, "end_file_pos": 40, } ], "warning": False, }, ] def test__api__lint_string_without_violations(): """Check lint functionality when there is no violation.""" result = sqlfluff.lint("select column from table\n") assert result == [] def test__api__lint_string(): """Basic checking of lint functionality.""" result = sqlfluff.lint(my_bad_query) # Check return types. 
    assert isinstance(result, list)
    assert all(isinstance(elem, dict) for elem in result)
    # Check actual result
    assert result == lint_result


def test__api__lint_string_specific():
    """Basic checking of lint functionality with specific rules."""
    rules = ["CP02", "LT12"]
    result = sqlfluff.lint(my_bad_query, rules=rules)
    # Check which rules are found
    assert all(elem["code"] in rules for elem in result)


def test__api__lint_string_specific_single():
    """Basic checking of lint functionality with a single specific rule."""
    rules = ["CP02"]
    result = sqlfluff.lint(my_bad_query, rules=rules)
    # Check which rules are found
    assert all(elem["code"] in rules for elem in result)


def test__api__lint_string_specific_exclude():
    """Basic checking of lint functionality with excluded rules."""
    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01"]
    result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
    # Check only AM04 is found
    assert len(result) == 1
    assert "AM04" == result[0]["code"]


def test__api__lint_string_specific_exclude_single():
    """Basic checking of lint functionality with a single excluded rule."""
    exclude_rules = ["LT01"]
    result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
    # Check all the other rules are still found, but not the excluded LT01.
    assert len(result) == 9
    assert set(["LT12", "CP01", "AL03", "CP02", "LT09", "AM04"]) == set(
        [r["code"] for r in result]
    )


def test__api__lint_string_specific_exclude_all_failed_rules():
    """Basic checking of lint functionality with all failed rules excluded."""
    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01", "AM04"]
    result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
    # Check it passes
    assert result == []


def test__api__fix_string():
    """Basic checking of fix functionality."""
    result = sqlfluff.fix(my_bad_query)
    # Check return types.
    assert isinstance(result, str)
    # Check actual result
    assert (
        result
        == """SELECT
    *,
    1,
    blah AS foo
FROM mytable
"""
    )


def test__api__fix_string_specific():
    """Basic checking of fix functionality with a specific rule."""
    result = sqlfluff.fix(my_bad_query, rules=["CP01"])
    # Check actual result
    assert result == "SELECT *, 1, blah AS fOO FROM myTable"


def test__api__fix_string_specific_exclude():
    """Basic checking of fix functionality with a specific rule exclusion."""
    result = sqlfluff.fix(my_bad_query, exclude_rules=["LT09"])
    # Check actual result
    assert result == "SELECT *, 1, blah AS foo FROM mytable\n"


def test__api__fix_string_unparsable():
    """Test behavior with parse errors."""
    bad_query = """SELECT my_col
FROM my_schema.my_table
where processdate ! 3"""
    result = sqlfluff.fix(bad_query, rules=["CP01"])
    # Check fix result: should be unchanged because of the parse error.
    assert result == bad_query


def test__api__fix_string_unparsable_fix_even_unparsable():
    """Test behavior with parse errors."""
    bad_query = """SELECT my_col
FROM my_schema.my_table
where processdate ! 3"""
    result = sqlfluff.fix(bad_query, rules=["CP01"], fix_even_unparsable=True)
    # Check fix result: should be fixed because we overrode fix_even_unparsable.
    assert (
        result
        == """SELECT my_col
FROM my_schema.my_table
WHERE processdate ! 3"""
    )


def test__api__parse_string():
    """Basic checking of parse functionality."""
    parsed = sqlfluff.parse(my_bad_query)
    # Check a JSON object is returned.
    assert isinstance(parsed, dict)
    # Load in expected result.
    with open("test/fixtures/api/parse_test/parse_test.json", "r") as f:
        expected_parsed = json.load(f)
    # Compare JSON from parse to expected result.
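    # NOTE: `parsed` is a JSON-serialisable dict representation of the parse
    # tree (the same structure dumped in parse_test.json), so a straight
    # equality check against the loaded fixture is sufficient.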
assert parsed == expected_parsed def test__api__parse_fail(): """Basic failure mode of parse functionality.""" try: sqlfluff.parse("Select (1 + 2 +++) FROM mytable as blah blah") pytest.fail("sqlfluff.parse should have raised an exception.") except Exception as err: # Check it's the right kind of exception assert isinstance(err, sqlfluff.api.APIParsingError) # Check there are two violations in there. assert len(err.violations) == 2 # Check it prints nicely. assert ( str(err) == """Found 2 issues while parsing string. Line 1, Position 15: Found unparsable section: '+++' Line 1, Position 41: Found unparsable section: 'blah'""" ) def test__api__config_path(): """Test that we can load a specified config file in the Simple API.""" # Load test SQL file. with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f: sql = f.read() # Pass a config path to the Simple API. parsed = sqlfluff.parse( sql, config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff", ) # Load in expected result. with open("test/fixtures/api/config_path_test/config_path_test.json", "r") as f: expected_parsed = json.load(f) # Compare JSON from parse to expected result. assert parsed == expected_parsed @pytest.mark.parametrize( "dialect,config_path,expectation", [ (None, "test/fixtures/api/config_dialect/.sqlfluff", nullcontext()), (None, None, pytest.raises(APIParsingError)), ("duckdb", None, nullcontext()), ("ansi", None, pytest.raises(APIParsingError)), ], ) def test__api__parse_dialect_config_path(dialect, config_path, expectation): """Test that we can load a dialect from a config file in the Simple API parse.""" # Load test SQL file. with open("test/fixtures/api/config_dialect/config_dialect.sql", "r") as f: sql = f.read() # Load in expected result. with open("test/fixtures/api/config_dialect/config_dialect_parse.json", "r") as f: expected_parsed = json.load(f) was_parsed = False with expectation: # Pass a config path to the Simple API. parsed = sqlfluff.parse( sql, dialect=dialect, config_path=config_path, ) was_parsed = True # Compare JSON from parse to expected result. assert parsed == expected_parsed if isinstance(expectation, nullcontext): assert was_parsed else: assert not was_parsed @pytest.mark.parametrize( "dialect,config_path,fails", [ (None, "test/fixtures/api/config_dialect/.sqlfluff", False), (None, None, True), ("duckdb", None, False), ("ansi", None, True), ], ) def test__api__lint_dialect_config_path(dialect, config_path, fails): """Test that we can load a dialect from a config file in the Simple API lint.""" # Load test SQL file. with open("test/fixtures/api/config_dialect/config_dialect.sql", "r") as f: sql = f.read() # Load in expected result. issue_type = "prs" if fails else "lt01" with open( f"test/fixtures/api/config_dialect/config_dialect_lint_{issue_type}.json", "r" ) as f: expected_lint = json.load(f) # Pass a config path to the Simple API. linted = sqlfluff.lint( sql, dialect=dialect, config_path=config_path, ) # Compare JSON from lint to expected result. assert linted == expected_lint @pytest.mark.parametrize( "dialect,config_path,fails", [ (None, "test/fixtures/api/config_dialect/.sqlfluff", False), (None, None, True), ("duckdb", None, False), ("ansi", None, True), ], ) def test__api__fix_dialect_config_path(dialect, config_path, fails): """Test that we can load a dialect from a config file in the Simple API fix.""" # Load test SQL file. with open("test/fixtures/api/config_dialect/config_dialect.sql", "r") as f: sql = f.read() # Load in expected result. 
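    # When the dialect cannot be resolved, `fix` is expected to be a no-op,
    # so the branch below falls back to comparing against the unmodified
    # input SQL rather than the fixed fixture file.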
    if fails:
        expected_fix = sql
    else:
        with open("test/fixtures/api/config_dialect/config_dialect_fix.sql", "r") as f:
            expected_fix = f.read()
    # Pass a config path to the Simple API.
    fixed = sqlfluff.fix(
        sql,
        dialect=dialect,
        config_path=config_path,
    )
    # Compare to expected result.
    assert fixed == expected_fix


@pytest.mark.parametrize(
    "kwargs,expected",
    [
        (
            # No override from API, so uses .sqlfluff value
            {},
            set(),
        ),
        (
            # API overrides, so it uses that
            dict(exclude_rules=["RF02"]),
            {"RF04"},
        ),
    ],
)
def test__api__config_override(kwargs, expected, tmpdir):
    """Test that parameters to lint() override .sqlfluff correctly (or not)."""
    config_path = "test/fixtures/api/config_override/.sqlfluff"
    sql = "SELECT TRIM(name) AS name FROM some_table"
    lint_results = sqlfluff.lint(sql, config_path=config_path, **kwargs)
    assert expected == {"RF02", "RF04"}.intersection(
        {lr["code"] for lr in lint_results}
    )


def test__api__invalid_dialect():
    """Test that SQLFluffUserError is raised for a bad dialect."""
    # Load test SQL file.
    with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f:
        sql = f.read()
    # Pass a fake dialect to the API and test the correct error is raised.
    with pytest.raises(SQLFluffUserError) as err:
        sqlfluff.parse(
            sql,
            dialect="not_a_real_dialect",
            config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff",
        )
    assert str(err.value) == "Error: Unknown dialect 'not_a_real_dialect'"


def test__api__parse_exceptions():
    """Test parse behaviour with errors."""
    # Parsable content
    result = sqlfluff.parse("SELECT 1")
    assert result
    # Templater fail
    with pytest.raises(APIParsingError):
        sqlfluff.parse('SELECT {{ 1 > "a"}}')
    # Templater success but parsing fail
    with pytest.raises(APIParsingError):
        sqlfluff.parse("THIS IS NOT SQL")
sqlfluff-3.4.2/test/cli/000077500000000000000000000000001503426445100150745ustar00rootroot00000000000000
sqlfluff-3.4.2/test/cli/__init__.py000066400000000000000000000000361503426445100172040ustar00rootroot00000000000000
"""Tests for sqlfluff.cli."""
sqlfluff-3.4.2/test/cli/autocomplete_test.py000066400000000000000000000013411503426445100212050ustar00rootroot00000000000000
"""Test autocomplete commands."""

import pytest

from sqlfluff.cli.autocomplete import dialect_shell_complete


@pytest.mark.parametrize(
    "incomplete,expected",
    [
        ["an", ["ansi"]],
        ["d", ["databricks", "db2", "doris", "duckdb"]],
        ["g", ["greenplum"]],
        ["s", ["snowflake", "soql", "sparksql", "sqlite", "starrocks"]],
        ["post", ["postgres"]],
    ],
)
def test_dialect_click_type_shell_complete(incomplete, expected):
    """Check that autocomplete returns dialects as expected."""
    completion_items = dialect_shell_complete(
        ctx="dummy_not_used", param="dummy_not_used", incomplete=incomplete
    )
    actual = [c.value for c in completion_items]
    assert expected == actual
sqlfluff-3.4.2/test/cli/commands_test.py000066400000000000000000002300431503426445100203100ustar00rootroot00000000000000
"""The Test file for CLI (General)."""

import json
import logging
import os
import pathlib
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import textwrap
from unittest.mock import MagicMock, patch

import chardet

# Testing libraries
import pytest
import yaml
from click.testing import CliRunner

# We import the library directly here to get the version
import sqlfluff
from sqlfluff.cli.commands import (
    cli_format,
    dialects,
    fix,
    get_config,
    lint,
    parse,
    render,
    rules,
    version,
)
from sqlfluff.utils.testing.cli import invoke_assert_code

# tomllib is only in the stdlib from 3.11+
if sys.version_info >= (3, 11):
    import tomllib
else:  # pragma: no cover
    import toml as tomllib

re_ansi_escape = re.compile(r"\x1b[^m]*m")


@pytest.fixture(autouse=True)
def logging_cleanup():
    """This gracefully handles logging issues at session teardown.

    Removes handlers from all loggers. Autouse applies this to all
    tests in this file (i.e. all the cli command tests), which should
    be all of the test cases where `set_logging_level` is called.

    https://github.com/sqlfluff/sqlfluff/issues/3702
    https://github.com/pytest-dev/pytest/issues/5502#issuecomment-1190557648
    """
    yield
    # NOTE: This is a teardown function so the cleanup code
    # comes _after_ the yield.
    # Get only the sqlfluff loggers (which we set in set_logging_level)
    loggers = [
        logger
        for logger in logging.Logger.manager.loggerDict.values()
        if isinstance(logger, logging.Logger) and logger.name.startswith("sqlfluff")
    ]
    for logger in loggers:
        if not hasattr(logger, "handlers"):
            continue
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)


def contains_ansi_escape(s: str) -> bool:
    """Does the string contain ANSI escape codes (e.g. color)?"""
    return re_ansi_escape.search(s) is not None


expected_output = """== [test/fixtures/linter/indentation_error_simple.sql] FAIL
L: 2 | P: 1 | LT02 | Expected indent of 4 spaces. [layout.indent]
L: 5 | P: 10 | CP01 | Keywords must be consistently upper case.
| [capitalisation.keywords]
"""


def test__cli__command_directed():
    """Basic checking of lint functionality."""
    result = invoke_assert_code(
        ret_code=1,
        args=[
            lint,
            [
                "--disable-progress-bar",
                "test/fixtures/linter/indentation_error_simple.sql",
            ],
        ],
    )
    # We should get a readout of what the error was
    check_a = "L: 2 | P: 1 | LT02"
    # NB: Skip the number at the end because it's configurable
    check_b = "ndentation"
    assert check_a in result.stdout
    assert check_b in result.stdout
    # Finally check the WHOLE output to make sure that unexpected newlines are not
    # added. The replace command just accounts for cross platform testing.
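    # For context, a minimal hand-rolled sketch of the same check using
    # click's CliRunner directly (this is the pattern `invoke_assert_code`
    # wraps; the exact helper internals are not shown in this file):
    #
    #   runner = CliRunner()
    #   result = runner.invoke(lint, ["--disable-progress-bar", fpath])
    #   assert result.exit_code == 1
    #
    # where `fpath` stands for the fixture path used above.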
assert result.stdout.replace("\\", "/").startswith(expected_output) def test__cli__command_dialect(): """Check the script raises the right exception on an unknown dialect.""" # The dialect is unknown should be a non-zero exit code invoke_assert_code( ret_code=2, args=[ lint, [ "-n", "--dialect", "faslkjh", "test/fixtures/linter/indentation_error_simple.sql", ], ], ) @pytest.mark.parametrize( "command", [ render, parse, lint, cli_format, fix, ], ) def test__cli__command_no_dialect(command): """Check the script raises the right exception no dialect.""" # The dialect is unknown should be a non-zero exit code result = invoke_assert_code( ret_code=2, args=[ command, ["-"], ], cli_input="SELECT 1", ) assert "User Error" in result.stderr assert "No dialect was specified" in result.stderr # No traceback should be in the output assert "Traceback (most recent call last)" not in result.stderr @pytest.mark.parametrize( "command", [ parse, lint, cli_format, fix, ], ) def test__cli__command_no_dialect_stdin_filename_inline_dialect(command): """Check the script runs with no dialect but has an inline configuration.""" # The dialect is unknown should be a non-zero exit code result = invoke_assert_code( ret_code=0, args=[ command, ["--stdin-filename", "test.sql", "-"], ], cli_input="-- sqlfluff:dialect:ansi\nSELECT 1\n", ) assert "User Error" not in result.stderr assert "No dialect was specified" not in result.stderr # No traceback should be in the output assert "Traceback (most recent call last)" not in result.stderr def test__cli__command_parse_error_dialect_explicit_warning(): """Check parsing error raises the right warning.""" # For any parsing error there should be a non-zero exit code # and a human-readable warning should be displayed. # Dialect specified as commandline option. invoke_assert_code( ret_code=1, args=[ parse, [ "-n", "--dialect", "postgres", "test/fixtures/cli/fail_many.sql", ], ], assert_stdout_contains=( "WARNING: Parsing errors found and dialect is set to 'postgres'. " "Have you configured your dialect correctly?" ), ) def test__cli__command_parse_error_dialect_implicit_warning(): """Check parsing error raises the right warning.""" # For any parsing error there should be a non-zero exit code # and a human-readable warning should be displayed. # Dialect specified in .sqlfluff config. invoke_assert_code( ret_code=1, args=[ # Config sets dialect to tsql parse, [ "-n", "--config", "test/fixtures/cli/extra_configs/.sqlfluff", "test/fixtures/cli/fail_many.sql", ], ], assert_stdout_contains=( "WARNING: Parsing errors found and dialect is set to 'tsql'. " "Have you configured your dialect correctly?" ), ) def test__cli__command_dialect_legacy(): """Check the script raises the right exception on a legacy dialect.""" invoke_assert_code( ret_code=2, args=[ lint, [ "-n", "--dialect", "exasol_fs", "test/fixtures/linter/indentation_error_simple.sql", ], ], assert_stdout_contains="Please use the 'exasol' dialect instead.", ) def test__cli__command_extra_config_fail(): """Check the script raises the right exception non-existent extra config path.""" invoke_assert_code( ret_code=2, args=[ lint, [ "--config", "test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd", "test/fixtures/cli/extra_config_tsql.sql", ], ], assert_stdout_contains=( "Extra config path 'test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd' " "does not exist." 
), ) stdin_cli_input = ( "SELECT\n A.COL1,\n B.COL2\nFROM TABA AS A\nPOSITIONAL JOIN TABB AS B;\n" ) @pytest.mark.parametrize( ("command", "stdin_filepath", "ret_code", "stdout", "stderr"), [ ( parse, "test/fixtures/cli/stdin_filename/without_config/stdin_filename.sql", 0, ( "[L: 5, P: 1] | join_clause:\n" "[L: 5, P: 1] | keyword:" " 'POSITIONAL'" ), "", ), ( parse, "test/fixtures/an_ansi_config_here.sql", 1, "Parsing errors found and dialect is set to 'ansi'.", "", ), ( lint, "test/fixtures/cli/stdin_filename/stdin_filename.sql", 0, "All Finished!", "", ), ( lint, "test/fixtures/cli/stdin_filename/without_config/stdin_filename.sql", 0, "All Finished!", "", ), ( lint, "test/fixtures/an_ansi_config_here.sql", 1, "Parsing errors found and dialect is set to 'ansi'.", "", ), ( cli_format, "test/fixtures/cli/stdin_filename/stdin_filename.sql", 0, stdin_cli_input, "", ), ( cli_format, "test/fixtures/cli/stdin_filename/without_config/stdin_filename.sql", 0, stdin_cli_input, "", ), ( cli_format, "test/fixtures/an_ansi_config_here.sql", 1, "", "[1 templating/parsing errors found]", ), ( fix, "test/fixtures/cli/stdin_filename/stdin_filename.sql", 0, stdin_cli_input, "", ), ( fix, "test/fixtures/cli/stdin_filename/without_config/stdin_filename.sql", 0, stdin_cli_input, "", ), ( fix, "test/fixtures/an_ansi_config_here.sql", 1, "", "Unfixable violations detected.", ), ], ) def test__cli__command_stdin_filename_config( command, stdin_filepath, ret_code, stdout, stderr ): """Check the script picks up the config from the indicated path.""" invoke_assert_code( ret_code=ret_code, args=[ command, [ "--stdin-filename", stdin_filepath, "-", ], ], cli_input=stdin_cli_input, assert_stdout_contains=stdout, assert_stderr_contains=stderr, ) @pytest.mark.parametrize( "command", [ ( "-", "-n", ), ( "-", "-n", "-v", ), ( "-", "-n", "-vv", ), ( "-", "-vv", ), ], ) def test__cli__command_lint_stdin(command): """Check basic commands on a simple script using stdin. The subprocess command should exit without errors, as no issues should be found. """ with open("test/fixtures/cli/passing_a.sql") as test_file: sql = test_file.read() invoke_assert_code(args=[lint, ("--dialect=ansi",) + command], cli_input=sql) def test__cli__command_lint_empty_stdin(): """Check linting an empty file raises no exceptions. https://github.com/sqlfluff/sqlfluff/issues/4807 """ invoke_assert_code(args=[lint, ("-d", "ansi", "-")], cli_input="") def test__cli__command_render_stdin(): """Check render on a simple script using stdin.""" with open("test/fixtures/cli/passing_a.sql") as test_file: sql = test_file.read() invoke_assert_code( args=[render, ("--dialect=ansi", "-")], cli_input=sql, # Check we get back out the same file we input. assert_stdout_contains=sql, ) @pytest.mark.parametrize( "command", [ # Test basic linting ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "--exclude-rules", "AM05", ], ), # Basic render ( render, [ "test/fixtures/cli/passing_b.sql", ], ), # Render with variants ( render, [ "test/fixtures/cli/jinja_variants.sql", ], ), # Original tests from test__cli__command_lint (lint, ["-n", "test/fixtures/cli/passing_a.sql"]), (lint, ["-n", "-v", "test/fixtures/cli/passing_a.sql"]), (lint, ["-n", "-vvvv", "test/fixtures/cli/passing_a.sql"]), (lint, ["-vvvv", "test/fixtures/cli/passing_a.sql"]), # Test basic linting with very high verbosity ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvvvvvvvvvv", "--exclude-rules", "AM05", ], ), # Test basic linting with specific logger. # Also test short rule exclusion. 
( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvv", "--logger", "parser", "-e", "AM05", ], ), # Check basic parsing ( parse, [ "-n", "test/fixtures/cli/passing_b.sql", "--exclude-rules", "AM05", ], ), # Test basic parsing with very high verbosity ( parse, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvvvvvvvvvv", "-e", "AM05", ], ), # Check basic parsing, with the code only option (parse, ["-n", "test/fixtures/cli/passing_b.sql", "-c"]), # Check basic parsing, with the yaml output (parse, ["-n", "test/fixtures/cli/passing_b.sql", "-c", "--format", "yaml"]), (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "yaml"]), # Check parsing with no output (used mostly for testing) (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "none"]), # Parsing with variants ( parse, [ "test/fixtures/cli/jinja_variants.sql", ], ), # Check the benching commands (parse, ["-n", "test/fixtures/cli/passing_timing.sql", "--bench"]), (lint, ["-n", "test/fixtures/cli/passing_timing.sql", "--bench"]), (fix, ["-n", "test/fixtures/cli/passing_timing.sql", "--bench"]), # Check linting works in specifying rules ( lint, [ "-n", "--rules", "CP01", "test/fixtures/linter/operator_errors.sql", ], ), # Check ignoring linting (multiprocess) # https://github.com/sqlfluff/sqlfluff/issues/5066 ( lint, [ "-n", "--ignore", "linting", "-p", "2", "test/fixtures/linter/operator_errors.sql", "test/fixtures/linter/comma_errors.sql", ], ), # Check linting works in specifying multiple rules ( lint, [ "-n", "--rules", "CP01,LT02", "test/fixtures/linter/operator_errors.sql", ], ), # Check linting works with both included and excluded rules ( lint, [ "-n", "--rules", "CP01,LT01", "--exclude-rules", "LT01,AL07", "test/fixtures/linter/operator_errors.sql", ], ), # Check linting works with just excluded rules ( lint, [ "-n", "--exclude-rules", "LT01,LT03,AL07", "test/fixtures/linter/operator_errors.sql", ], ), # Check that ignoring works (also checks that unicode files parse). ( lint, [ "-n", "--exclude-rules", "LT02,LT12,AL07", "--ignore", "parsing,lexing", "test/fixtures/linter/parse_lex_error.sql", ], ), # Check nofail works (lint, ["--nofail", "test/fixtures/linter/parse_lex_error.sql"]), # Check config works (sets dialect to tsql) ( lint, [ "--config", "test/fixtures/cli/extra_configs/.sqlfluff", "test/fixtures/cli/extra_config_tsql.sql", ], ), ( lint, [ "--config", "test/fixtures/cli/extra_configs/pyproject.toml", "test/fixtures/cli/extra_config_tsql.sql", ], ), # Check timing outputs doesn't raise exceptions (lint, ["test/fixtures/cli/passing_a.sql", "--persist-timing", "test.csv"]), # Check lint --help command doesn't raise exception. # NOTE: This tests the LazySequence in action. (lint, ["--help"]), ], ) def test__cli__command_lint_parse(command): """Check basic commands on a more complicated script.""" invoke_assert_code(args=command) @pytest.mark.parametrize( "command, ret_code", [ # Check the script doesn't raise an unexpected exception with badly formed # files. 
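        # Each case below is a `(command, ret_code)` pair, where `command` is
        # itself a `(cli_command, argument_list)` tuple handed straight to
        # invoke_assert_code, and `ret_code` is the expected exit code.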
( ( fix, [ "--rules", "LT01", "test/fixtures/cli/fail_many.sql", "-vvvvvvv", ], ), 1, ), # Fix with a suffixs ( ( fix, [ "--rules", "LT01", "--fixed-suffix", "_fix", "test/fixtures/cli/fail_many.sql", ], ), 1, ), # Fix without specifying rules ( ( fix, [ "--fixed-suffix", "_fix", "test/fixtures/cli/fail_many.sql", ], ), 1, ), # Format ( ( cli_format, [ "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", ], ), 0, ), # Format with --persist-timing ( ( cli_format, [ "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", "--persist-timing", "test.csv", ], ), 0, ), # Format (specifying rules) ( ( cli_format, [ "--rules", "LT01", "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", ], ), 2, ), # Template syntax error in macro file ( ( lint, ["test/fixtures/cli/unknown_jinja_tag/test.sql"], ), 1, ), # Test overriding library path when it doesn't cause an issue ( ( lint, ["test/fixtures/cli/passing_a.sql", "--library-path", "none"], ), 0, ), # Test overriding library path when it DOES cause an issue # (because macros won't be found). ( ( # Render because that's the step where the issue will # occur. render, [ "test/fixtures/templater/jinja_r_library_in_macro/jinja.sql", "--library-path", "none", ], ), 1, ), # Test render fail ( ( render, ["test/fixtures/cli/fail_many.sql"], ), 1, ), # Test a longer lint fail with --bench # This tests the threshold rules clause ( ( lint, [ "test/fixtures/linter/autofix/bigquery/004_templating/before.sql", "--bench", ], ), 1, ), # Test that setting --quiet with --verbose raises an error. ( ( fix, [ "--quiet", "--verbose", "test/fixtures/cli/fail_many.sql", ], ), 2, ), # Test machine format parse command with an unparsable file. ( ( parse, ["test/fixtures/linter/parse_lex_error.sql", "-f", "yaml"], ), 1, ), # Test machine format parse command with a fatal templating error. 
( ( parse, ["test/fixtures/cli/jinja_fatal_fail.sql", "-f", "yaml"], ), 1, ), ], ) def test__cli__command_lint_parse_with_retcode(command, ret_code): """Check commands expecting a non-zero ret code.""" invoke_assert_code(ret_code=ret_code, args=command) def test__cli__command_lint_warning_explicit_file_ignored(): """Check ignoring file works when file is in an ignore directory.""" runner = CliRunner() result = runner.invoke( lint, ["test/fixtures/linter/sqlfluffignore/path_b/query_c.sql"] ) assert result.exit_code == 0 assert ( "Exact file path test/fixtures/linter/sqlfluffignore/path_b/query_c.sql " "was given but it was ignored" ) in result.stdout.strip() def test__cli__command_lint_skip_ignore_files(): """Check "ignore file" is skipped when --disregard-sqlfluffignores flag is set.""" runner = CliRunner() result = runner.invoke( lint, [ "test/fixtures/linter/sqlfluffignore/path_b/query_c.sql", "--disregard-sqlfluffignores", ], ) assert result.exit_code == 1 assert "LT12" in result.stdout.strip() @pytest.mark.parametrize( "command", [ (fix), (cli_format), ], ) def test__cli__command_fix_skip_ignore_files(command): """Check "ignore file" is skipped when --disregard-sqlfluffignores flag is set.""" runner = CliRunner() result = runner.invoke( command, [ "test/fixtures/linter/sqlfluffignore/path_b/query_c.sql", "--disregard-sqlfluffignores", "-x", "_fix", ], ) assert result.exit_code == 0 assert "LT12" in result.stdout.strip() def test__cli__command_lint_ignore_local_config(): """Test that --ignore-local_config ignores .sqlfluff file as expected.""" runner = CliRunner() # First we test that not including the --ignore-local-config includes # .sqlfluff file, and therefore the lint doesn't raise AL02 result = runner.invoke( lint, [ "test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql", ], ) assert result.exit_code == 0 assert "AL02" not in result.stdout.strip() # Then repeat the same lint but this time ignoring the .sqlfluff file. # We should see AL02 raised. result = runner.invoke( lint, [ "--ignore-local-config", "--dialect=ansi", "test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql", ], ) assert result.exit_code == 1 assert "AL02" in result.stdout.strip() def test__cli__command_lint_warning(): """Test that configuring warnings works. For this test the warnings are configured using inline config in the file. That's more for simplicity however the code paths should be the same if it's configured in a file. """ runner = CliRunner() result = runner.invoke( lint, [ "test/fixtures/cli/warning_a.sql", ], ) # Because we're only warning. The command should pass. assert result.exit_code == 0 # The output should still say PASS. assert "PASS" in result.stdout.strip() # But should also contain the warnings. # NOTE: Not including the whole description because it's too long. assert ( "L: 4 | P: 9 | LT01 | WARNING: Expected single whitespace" in result.stdout.strip() ) def test__cli__command_lint_warning_name_rule(): """Test that configuring warnings works. For this test the warnings are configured using inline config in the file. That's more for simplicity however the code paths should be the same if it's configured in a file. """ runner = CliRunner() result = runner.invoke( lint, [ "test/fixtures/cli/warning_name_a.sql", ], ) # Because we're only warning. The command should pass. assert result.exit_code == 0 # The output should still say PASS. assert "PASS" in result.stdout.strip() # But should also contain the warnings. 
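    # The fixture presumably demotes the rule via inline configuration, along
    # the lines of (hypothetical - the fixture file itself is not shown here):
    #
    #   -- sqlfluff:warnings:LT01
    #
    # which turns matching violations into warnings rather than hard failures.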
# NOTE: Not including the whole description because it's too long. assert ( "L: 4 | P: 9 | LT01 | WARNING: Expected single whitespace" in result.stdout.strip() ) def test__cli__command_versioning(): """Check version command.""" # Get the package version info pkg_version = sqlfluff.__version__ # Get the version info from the config file. # NOTE: Toml files are always encoded in UTF-8. with open("pyproject.toml", "r", encoding="utf-8") as config_file: config = tomllib.loads(config_file.read()) config_version = config["project"]["version"] assert pkg_version == config_version # Get the version from the cli runner = CliRunner() result = runner.invoke(version) assert result.exit_code == 0 # We need to strip to remove the newline characters assert result.stdout.strip() == pkg_version def test__cli__command_version(): """Just check version command for exceptions.""" # Get the package version info pkg_version = sqlfluff.__version__ runner = CliRunner() result = runner.invoke(version) assert result.exit_code == 0 assert pkg_version in result.stdout # Check a verbose version result = runner.invoke(version, ["-v"]) assert result.exit_code == 0 assert pkg_version in result.stdout def test__cli__command_rules(): """Check rules command for exceptions.""" invoke_assert_code(args=[rules]) def test__cli__command_dialects(): """Check dialects command for exceptions.""" invoke_assert_code(args=[dialects]) def generic_roundtrip_test( source_file, rulestring, final_exit_code=0, check=False, fix_input=None, fix_exit_code=0, input_file_encoding="utf-8", output_file_encoding=None, ): """A test for roundtrip testing, take a file buffer, lint, fix and lint. This is explicitly different from the linter version of this, in that it uses the command line rather than the direct api. """ filename = "testing.sql" # Lets get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) # Open the example file and write the content to it with open(filepath, mode="w", encoding=input_file_encoding) as dest_file: for line in source_file: dest_file.write(line) status = os.stat(filepath) assert stat.S_ISREG(status.st_mode) old_mode = stat.S_IMODE(status.st_mode) # Check that we first detect the issue invoke_assert_code( ret_code=1, args=[lint, ["--dialect=ansi", "--rules", rulestring, filepath]], ) # Fix the file (in force mode) if check: fix_args = ["--rules", rulestring, "--check", filepath] else: fix_args = ["--rules", rulestring, filepath] fix_args.append("--dialect=ansi") invoke_assert_code( ret_code=fix_exit_code, args=[fix, fix_args], cli_input=fix_input ) # Now lint the file and check for exceptions invoke_assert_code( ret_code=final_exit_code, args=[lint, ["--dialect=ansi", "--rules", rulestring, filepath]], ) # Check the output file has the correct encoding after fix if output_file_encoding: with open(filepath, mode="rb") as f: data = f.read() assert chardet.detect(data)["encoding"] == output_file_encoding # Also check the file mode was preserved. 
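    # stat.S_IMODE strips the file-type bits and keeps only the permission
    # bits (e.g. 0o644), so the comparison below checks that permissions
    # survived the fix round trip.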
status = os.stat(filepath) assert stat.S_ISREG(status.st_mode) new_mode = stat.S_IMODE(status.st_mode) assert new_mode == old_mode shutil.rmtree(tempdir_path) @pytest.mark.parametrize( "rule,fname", [ ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("LT01", "test/fixtures/linter/whitespace_errors.sql"), ("LT01", "test/fixtures/linter/indentation_errors.sql"), # Really stretching the ability of the fixer to re-indent a file ("LT02", "test/fixtures/linter/indentation_error_hard.sql"), ], ) def test__cli__command__fix(rule, fname): """Test the round trip of detecting, fixing and then not detecting the rule.""" with open(fname) as test_file: generic_roundtrip_test(test_file, rule) @pytest.mark.parametrize( "sql,fix_args,fixed,exit_code", [ ( # - One lint error: "where" is lower case # - Not fixable because of parse error, hence error exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 """, ["--fixed-suffix", "FIXED", "--rules", "CP01"], None, 1, ), ( # - One lint error: "where" is lower case # - Not fixable because of templater error, hence error exit """ SELECT my_col FROM my_schema.my_table where processdate {{ condition }} """, # Test the short versions of the options. ["-x", "FIXED", "-r", "CP01"], None, 1, ), ( # - One lint error: "where" is lower case # - Not fixable because of parse error (even though "noqa"), hence # error exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 -- noqa: PRS """, # Test the short versions of the options. ["-x", "FIXED", "-r", "CP01"], None, 1, ), ( # - No lint errors # - Parse error not suppressed, hence error exit """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 """, ["--fixed-suffix", "FIXED", "--rules", "CP01"], None, 1, ), ( # - No lint errors # - Parse error suppressed, hence success exit """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 --noqa: PRS """, ["--fixed-suffix", "FIXED", "--rules", "CP01"], None, 0, ), ( # - One lint error: "where" is lower case # - Parse error not suppressed # - "--FIX-EVEN-UNPARSABLE", hence fix anyway & success exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 """, [ "--fixed-suffix", "FIXED", "--rules", "CP01", "--FIX-EVEN-UNPARSABLE", ], """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 """, 0, ), ( # Two files: # File #1: # - One lint error: "where" is lower case # - Not fixable because of parse error # File #2: # - One lint error: "where" is lower case # - No parse error, thus fixable # Should fix the second file but not the first, and exit with an # error. [ """ SELECT my_col FROM my_schema.my_table where processdate ! 
3 """, """SELECT my_col FROM my_schema.my_table where processdate != 3""", ], ["--fixed-suffix", "FIXED", "--rules", "CP01"], [ None, """SELECT my_col FROM my_schema.my_table WHERE processdate != 3""", ], 1, ), ], ids=[ "1_lint_error_1_unsuppressed_parse_error", "1_lint_error_1_unsuppressed_templating_error", "1_lint_error_1_suppressed_parse_error", "0_lint_errors_1_unsuppressed_parse_error", "0_lint_errors_1_suppressed_parse_error", "1_lint_error_1_unsuppressed_parse_error_FIX_EVEN_UNPARSABLE", "2_files_with_lint_errors_1_unsuppressed_parse_error", ], ) def test__cli__fix_error_handling_behavior(sql, fix_args, fixed, exit_code, tmpdir): """Tests how "fix" behaves wrt parse errors, exit code, etc.""" if not isinstance(sql, list): sql = [sql] if not isinstance(fixed, list): fixed = [fixed] assert len(sql) == len(fixed) tmp_path = pathlib.Path(str(tmpdir)) for idx, this_sql in enumerate(sql): filepath = tmp_path / f"testing{idx + 1}.sql" filepath.write_text(textwrap.dedent(this_sql)) with tmpdir.as_cwd(): with pytest.raises(SystemExit) as e: fix( fix_args # Use the short dialect option + ["-d", "ansi"] ) assert exit_code == e.value.code for idx, this_fixed in enumerate(fixed): fixed_path = tmp_path / f"testing{idx + 1}FIXED.sql" if this_fixed is not None: assert textwrap.dedent(this_fixed) == fixed_path.read_text() else: # A None value indicates "sqlfluff fix" should have skipped any # fixes for this file. To confirm this, we verify that the output # file WAS NOT EVEN CREATED. assert not fixed_path.is_file() @pytest.mark.parametrize( "method,fix_even_unparsable", [ ("command-line", False), ("command-line", True), ("config-file", False), ("config-file", True), ], ) def test_cli_fix_even_unparsable( method: str, fix_even_unparsable: bool, monkeypatch, tmpdir ): """Test the fix_even_unparsable option works from cmd line and config.""" sql_filename = "fix_even_unparsable.sql" sql_path = str(tmpdir / sql_filename) with open(sql_path, "w") as f: print( """SELECT my_col FROM my_schema.my_table where processdate ! 3 """, file=f, ) options = [ "--dialect", "ansi", "--fixed-suffix=FIXED", sql_path, ] if method == "command-line": if fix_even_unparsable: options.append("--FIX-EVEN-UNPARSABLE") else: assert method == "config-file" with open(str(tmpdir / ".sqlfluff"), "w") as f: print( f"[sqlfluff]\nfix_even_unparsable = {fix_even_unparsable}", file=f, ) # TRICKY: Switch current directory to the one with the SQL file. Otherwise, # the setting doesn't work. That's because SQLFluff reads it in # sqlfluff.cli.commands.fix(), prior to reading any file-specific settings # (down in sqlfluff.core.linter.Linter._load_raw_file_and_config()). monkeypatch.chdir(str(tmpdir)) invoke_assert_code( ret_code=0 if fix_even_unparsable else 1, args=[ fix, options, ], ) fixed_path = str(tmpdir / "fix_even_unparsableFIXED.sql") if fix_even_unparsable: with open(fixed_path, "r") as f: fixed_sql = f.read() assert ( fixed_sql == """SELECT my_col FROM my_schema.my_table WHERE processdate ! 
3 """ ) else: assert not os.path.isfile(fixed_path) @pytest.mark.parametrize( "stdin,rules,stdout", [ ("select * from t", "LT02", "select * from t"), # no change ( " select * from t", "LT02", "select * from t", ), # fix preceding whitespace ], ) def test__cli__command_fix_stdin(stdin, rules, stdout): """Check stdin input for fix works.""" result = invoke_assert_code( args=[ fix, ("-", "--rules", rules, "--disable-progress-bar", "--dialect=ansi"), ], cli_input=stdin, ) assert result.stdout == stdout assert result.stderr == "" @pytest.mark.parametrize( "stdin,stdout", [ ("select * from t\n", "select * from t\n"), # no change ( " select * FRoM t ", "select * from t\n", ), ( # Check that warnings related to parsing errors on input don't # go to stdout. This query shouldn't change, but stdout should # remain clean. # https://github.com/sqlfluff/sqlfluff/issues/5327 "select\n" " count(*) over (\n" " order by a desc \n" " range between b row and '10 seconds' following -- noqa: PRS\n" " ) as c\n" "from d\n", "select\n" " count(*) over (\n" " order by a desc \n" " range between b row and '10 seconds' following -- noqa: PRS\n" " ) as c\n" "from d\n", ), ], ) def test__cli__command_format_stdin(stdin, stdout): """Check stdin input for fix works.""" result = invoke_assert_code( args=[ cli_format, ("-", "--disable-progress-bar", "--dialect=ansi"), ], cli_input=stdin, ) assert result.stdout == stdout def test__cli__command_fix_stdin_logging_to_stderr(monkeypatch): """Check that logging goes to stderr when stdin is passed to fix.""" perfect_sql = "select col from table" class MockLinter(sqlfluff.core.Linter): @classmethod def lint_fix_parsed(cls, *args, **kwargs): cls._warn_unfixable("") return super().lint_fix_parsed(*args, **kwargs) monkeypatch.setattr(sqlfluff.cli.commands, "Linter", MockLinter) result = invoke_assert_code( args=[fix, ("-", "--rules=LT02", "--dialect=ansi")], cli_input=perfect_sql, ) assert result.stdout == perfect_sql assert "" in result.stderr def test__cli__command_fix_stdin_safety(): """Check edge cases regarding safety when fixing stdin.""" perfect_sql = "select col from table" # just prints the very same thing result = invoke_assert_code( args=[fix, ("-", "--disable-progress-bar", "--dialect=ansi")], cli_input=perfect_sql, ) assert result.stdout.strip() == perfect_sql assert result.stderr == "" @pytest.mark.parametrize( "sql,exit_code,params,assert_stderr_contains", [ ( "create TABLE {{ params.dsfsdfds }}.t (a int)", 1, "-v", "Fix aborted due to unparsable template variables.", ), # template error ("create TABLE a.t (a int)", 0, "", ""), # fixable error ("create table a.t (a int)", 0, "", ""), # perfection ( "select col from a join b using (c)", 1, "-v", "Unfixable violations detected.", ), # unfixable error (using) ], ) def test__cli__command_fix_stdin_error_exit_code( sql, exit_code, params, assert_stderr_contains ): """Check that the CLI fails nicely if fixing a templated stdin.""" invoke_assert_code( ret_code=exit_code, args=[fix, ((params,) if params else ()) + ("--dialect=ansi", "-")], cli_input=sql, assert_stderr_contains=assert_stderr_contains, ) @pytest.mark.parametrize( "rule,fname,prompt,exit_code,fix_exit_code", [ ("LT01", "test/fixtures/linter/indentation_errors.sql", "y", 0, 0), ("LT01", "test/fixtures/linter/indentation_errors.sql", "n", 1, 1), ], ) def test__cli__command__fix_check(rule, fname, prompt, exit_code, fix_exit_code): """Round trip test, using the prompts.""" with open(fname) as test_file: generic_roundtrip_test( test_file, rule, check=True, 
final_exit_code=exit_code, fix_input=prompt, fix_exit_code=fix_exit_code, ) @pytest.mark.parametrize("serialize", ["yaml", "json"]) @pytest.mark.parametrize("write_file", [None, "outfile"]) def test__cli__command_parse_serialize_from_stdin(serialize, write_file, tmp_path): """Check that the parser serialized output option is working. This tests both output to stdout and output to file. Not going to test for the content of the output as that is subject to change. """ cmd_args = ("-", "--format", serialize, "--dialect=ansi") if write_file: target_file = os.path.join(tmp_path, write_file + "." + serialize) cmd_args += ("--write-output", target_file) result = invoke_assert_code( args=[parse, cmd_args], cli_input="select * from tbl", ) if write_file: with open(target_file, "r") as payload_file: result_payload = payload_file.read() else: result_payload = result.stdout if serialize == "json": result = json.loads(result_payload) elif serialize == "yaml": result = yaml.safe_load(result_payload) else: raise Exception result = result[0] # only one file assert result["filepath"] == "stdin" @pytest.mark.parametrize("serialize", ["yaml", "json", "none"]) @pytest.mark.parametrize( "sql,rules,expected,exit_code", [ ( "select * from tbl", "CP01", [ { "filepath": "stdin", "statistics": { "raw_segments": 12, "segments": 24, "source_chars": 17, "templated_chars": 17, }, # Empty list because no violations. "violations": [], } ], 0, ), ( "SElect * from tbl", "CP01", [ { "filepath": "stdin", "violations": [ { "code": "CP01", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 7, "end_file_pos": 6, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", "warning": False, "fixes": [ { "type": "replace", "edit": "SELECT", "start_line_no": 1, "start_line_pos": 1, "start_file_pos": 0, "end_line_no": 1, "end_line_pos": 7, "end_file_pos": 6, } ], }, { "code": "CP01", "start_line_no": 1, "start_line_pos": 10, "start_file_pos": 9, "end_line_no": 1, "end_line_pos": 14, "end_file_pos": 13, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", "warning": False, "fixes": [ { "type": "replace", "edit": "FROM", "start_line_no": 1, "start_line_pos": 10, "start_file_pos": 9, "end_line_no": 1, "end_line_pos": 14, "end_file_pos": 13, } ], }, ], "statistics": { "raw_segments": 12, "segments": 24, "source_chars": 17, "templated_chars": 17, }, # NOTE: There will be a timings section too, but we're not # going to test that. } ], 1, ), # Test serialisation with a source only fix. ( "SELECT {{1}}", "JJ01", [ { "filepath": "stdin", "violations": [ { "code": "JJ01", "start_line_no": 1, "start_line_pos": 8, "start_file_pos": 7, "end_line_no": 1, "end_line_pos": 13, "end_file_pos": 12, "description": ( "Jinja tags should have a single whitespace on " "either side: {{1}}" ), "name": "jinja.padding", "warning": False, "fixes": [ { "type": "replace", "edit": "{{ 1 }}", "start_line_no": 1, "start_line_pos": 8, "start_file_pos": 7, "end_line_no": 1, "end_line_pos": 13, "end_file_pos": 12, } ], }, ], "statistics": { "raw_segments": 6, "segments": 11, "source_chars": 12, "templated_chars": 8, }, # NOTE: There will be a timings section too, but we're not # going to test that. 
} ], 1, ), ], ) def test__cli__command_lint_serialize_from_stdin( serialize, sql, rules, expected, exit_code ): """Check an explicit serialized return value for a single error.""" result = invoke_assert_code( args=[ lint, ( "-", "--rules", rules, "--format", serialize, "--disable-progress-bar", "--dialect=ansi", ), ], cli_input=sql, ret_code=exit_code, ) if serialize == "json": result = json.loads(result.stdout) # Drop any timing section (because it's less determinate) for record in result: if "timings" in record: del record["timings"] assert result == expected elif serialize == "yaml": result = yaml.safe_load(result.stdout) # Drop any timing section (because it's less determinate) for record in result: if "timings" in record: del record["timings"] assert result == expected elif serialize == "none": assert result.stdout == "" else: raise Exception @pytest.mark.parametrize( "command", [ [lint, ("this_file_does_not_exist.sql")], [fix, ("this_file_does_not_exist.sql")], ], ) def test__cli__command_fail_nice_not_found(command): """Check commands fail as expected when then don't find files.""" invoke_assert_code( args=command, ret_code=2, assert_stderr_contains=( "User Error: Specified path does not exist. Check it/they " "exist(s): this_file_does_not_exist.sql" ), ) @patch("click.utils.should_strip_ansi") @patch("sys.stdout.isatty") @pytest.mark.parametrize( "flag, env_var, has_color", [ (None, None, True), ("--nocolor", None, False), ("--color", None, True), (None, "1", False), (None, "true", False), (None, "True", False), (None, "False", False), (None, "anything", False), (None, "", True), ("--color", "1", True), ], ) def test__cli__command_lint_nocolor( isatty, should_strip_ansi, capsys, tmpdir, flag, env_var, has_color ): """Test the --nocolor option prevents color output.""" # Patch these two functions to make it think every output stream is a TTY. # In spite of this, the output should not contain ANSI color codes because # we specify "--nocolor" below. no_color_flag = [flag] if flag else [] if env_var is not None: os.environ["NO_COLOR"] = env_var elif "NO_COLOR" in os.environ: os.environ.pop("NO_COLOR") isatty.return_value = True should_strip_ansi.return_value = False fpath = "test/fixtures/linter/indentation_errors.sql" output_file = str(tmpdir / "result.txt") cmd_args = [ "--verbose", *no_color_flag, "--dialect", "ansi", "--disable-progress-bar", fpath, "--write-output", output_file, ] with pytest.raises(SystemExit): lint(cmd_args) out = capsys.readouterr()[0] with open(output_file, "r") as f: file_contents = f.read() assert contains_ansi_escape(out + file_contents) == has_color @pytest.mark.parametrize( "serialize", ["human", "yaml", "json", "github-annotation", "github-annotation-native", "none"], ) @pytest.mark.parametrize("write_file", [None, "outfile"]) def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_path): """Test the output formats for multiple files. This tests runs both stdout checking and file checking. """ fpath1 = "test/fixtures/linter/indentation_errors.sql" fpath2 = "test/fixtures/linter/multiple_sql_errors.sql" cmd_args = ( fpath1, fpath2, "--format", serialize, "--disable-progress-bar", ) if write_file: ext = { "human": ".txt", "yaml": ".yaml", } target_file = os.path.join(tmp_path, write_file + ext.get(serialize, ".json")) cmd_args += ("--write-output", target_file) # note the file is in here twice. two files = two payloads. 
result = invoke_assert_code( args=[lint, cmd_args], ret_code=1, ) # NOTE: The "none" serializer doesn't write a file even if specified. if write_file and serialize != "none": with open(target_file, "r") as payload_file: result_payload = payload_file.read() else: result_payload = result.stdout # Print for debugging. payload_length = len(result_payload.split("\n")) print("=== BEGIN RESULT OUTPUT") print(result_payload) print("=== END RESULT OUTPUT") print("Result length:", payload_length) if serialize == "human": assert payload_length == 25 if write_file else 34 elif serialize == "none": assert payload_length == 1 # There will be a single newline. elif serialize == "json": result = json.loads(result_payload) assert len(result) == 2 elif serialize == "yaml": result = yaml.safe_load(result_payload) assert len(result) == 2 elif serialize == "github-annotation": result = json.loads(result_payload) filepaths = {r["file"] for r in result} assert len(filepaths) == 2 elif serialize == "github-annotation-native": result = result_payload.split("\n") # SQLFluff produces trailing newline if result[-1] == "": del result[-1] assert len(result) == 16 else: raise Exception def test__cli__command_lint_serialize_github_annotation(): """Test format of github-annotation output.""" fpath = "test/fixtures/linter/identifier_capitalisation.sql" result = invoke_assert_code( args=[ lint, ( fpath, "--format", "github-annotation", "--annotation-level", "warning", "--disable-progress-bar", ), ], ret_code=1, ) result = json.loads(result.stdout) assert result == [ { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 3, "end_line": 3, "message": "RF02: Unqualified reference 'foo' found in select with more " "than one referenced table/view.", "start_column": 5, "end_column": 8, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 4, "end_line": 4, "message": "LT02: Expected indent of 8 spaces.", "start_column": 1, "end_column": 5, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 4, "end_line": 4, "message": "AL02: Implicit/explicit aliasing of columns.", "start_column": 5, "end_column": 8, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 4, "end_line": 4, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 5, "end_column": 8, "title": "SQLFluff", }, { # Warnings should come through as notices. 
"annotation_level": "notice", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 5, "end_line": 5, "message": "CP01: Keywords must be consistently lower case.", "start_column": 1, "end_column": 5, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 5, "end_line": 5, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 12, "end_column": 16, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "start_line": 5, "end_line": 5, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 18, "end_column": 22, "title": "SQLFluff", }, ] @pytest.mark.parametrize( "filename,expected_output", [ ( "test/fixtures/linter/identifier_capitalisation.sql", ( "::group::{filename}\n" "::error title=SQLFluff,file={filename}," "line=3,col=5,endLine=3,endColumn=8::" "RF02: Unqualified reference 'foo' found in select with more than one " "referenced table/view. [references.qualification]\n" "::error title=SQLFluff,file={filename}," "line=4,col=1,endLine=4,endColumn=5::" "LT02: Expected indent of 8 spaces. [layout.indent]\n" "::error title=SQLFluff,file={filename}," "line=4,col=5,endLine=4,endColumn=8::" "AL02: Implicit/explicit aliasing of columns. [aliasing.column]\n" "::error title=SQLFluff,file={filename}," "line=4,col=5,endLine=4,endColumn=8::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]\n" # Warnings should always come through as notices. "::notice title=SQLFluff,file={filename}," "line=5,col=1,endLine=5,endColumn=5::" "CP01: Keywords must be consistently lower case. " "[capitalisation.keywords]\n" "::error title=SQLFluff,file={filename}," "line=5,col=12,endLine=5,endColumn=16::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]\n" "::error title=SQLFluff,file={filename}," "line=5,col=18,endLine=5,endColumn=22::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]\n" "::endgroup::\n" # SQLFluff produces trailing newline ), ), ( "test/fixtures/linter/jinja_spacing.sql", ( "::group::{filename}\n" "::error title=SQLFluff,file={filename}," "line=3,col=15,endLine=3,endColumn=22::JJ01: " "Jinja tags should have a single whitespace on either " # NOTE: Double escaping, because we're going to pass this through a # .format() method. 
"side: {{{{foo}}}} " "[jinja.padding]\n" "::endgroup::\n" ), ), ], ) def test__cli__command_lint_serialize_github_annotation_native( filename, expected_output ): """Test format of github-annotation output.""" # Normalise paths to control for OS variance fpath_normalised = os.path.normpath(filename) result = invoke_assert_code( args=[ lint, ( filename, "--format", "github-annotation-native", "--annotation-level", "error", "--disable-progress-bar", ), ], ret_code=1, ) assert result.stdout == expected_output.format(filename=fpath_normalised) @pytest.mark.parametrize("serialize", ["github-annotation", "github-annotation-native"]) def test__cli__command_lint_serialize_annotation_level_error_failure_equivalent( serialize, ): """Test format of github-annotation output.""" fpath = "test/fixtures/linter/identifier_capitalisation.sql" result_error = invoke_assert_code( args=[ lint, ( fpath, "--format", serialize, "--annotation-level", "error", "--disable-progress-bar", ), ], ret_code=1, ) result_failure = invoke_assert_code( args=[ lint, ( fpath, "--format", serialize, "--annotation-level", "failure", "--disable-progress-bar", ), ], ret_code=1, ) assert result_error.stdout == result_failure.stdout def test___main___help(): """Test that the CLI can be access via __main__.""" # nonzero exit is good enough subprocess.check_output( [sys.executable, "-m", "sqlfluff", "--help"], env=os.environ ) @pytest.mark.parametrize( "encoding_in,encoding_out", [ ("utf-8", "ascii"), # chardet will detect ascii as a subset of utf-8 ("utf-8-sig", "UTF-8-SIG"), ("utf-32", "UTF-32"), ], ) def test_encoding(encoding_in, encoding_out): """Check the encoding of the test file remains the same after fix is applied.""" with open("test/fixtures/linter/indentation_errors.sql", "r") as testfile: generic_roundtrip_test( testfile, "LT01", input_file_encoding=encoding_in, output_file_encoding=encoding_out, ) @pytest.mark.parametrize( "encoding,method,expect_success", [ ("utf-8", "command-line", False), ("utf-8-SIG", "command-line", True), ("utf-8", "config-file", False), ("utf-8-SIG", "config-file", True), ], ) def test_cli_encoding(encoding, method, expect_success, tmpdir): """Try loading a utf-8-SIG encoded file using the correct encoding via the cli.""" sql_path = "test/fixtures/cli/encoding_test.sql" if method == "command-line": options = [sql_path, "--encoding", encoding] else: assert method == "config-file" with open(str(tmpdir / ".sqlfluff"), "w") as f: print(f"[sqlfluff]\ndialect=ansi\nencoding = {encoding}", file=f) shutil.copy(sql_path, tmpdir) options = [str(tmpdir / "encoding_test.sql")] result = invoke_assert_code( ret_code=1, args=[ lint, options, ], ) raw_stdout = repr(result.stdout) # Incorrect encoding raises parsing and lexer errors. success1 = r"L: 1 | P: 1 | LXR |" not in raw_stdout success2 = r"L: 1 | P: 1 | PRS |" not in raw_stdout assert success1 == expect_success assert success2 == expect_success def test_cli_no_disable_noqa_flag(): """Test that unset --disable-noqa flag respects inline noqa comments.""" invoke_assert_code( ret_code=0, args=[ lint, ["test/fixtures/cli/disable_noqa_test.sql"], ], ) def test_cli_disable_noqa_flag(): """Test that --disable-noqa flag ignores inline noqa comments.""" invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--disable-noqa", ], ], # Linting error is raised even though it is inline ignored. 
assert_stdout_contains=r"L: 6 | P: 11 | CP01 |", ) def test_cli_disable_noqa_except_flag(): """Test that --disable-noqa-except flag ignores inline noqa comments.""" result = invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--disable-noqa-except", "CP01", ], ], # Linting error is raised even though it is inline ignored. assert_stdout_contains=r"L: 8 | P: 5 | CP03 |", ) assert r"L: 6 | P: 11 | CP01 |" not in result.stdout def test_cli_disable_noqa_except_non_rules_flag(): """Test that --disable-noqa-except flag ignores all inline noqa comments.""" invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--disable-noqa-except", "None", ], ], # Linting error is raised even though it is inline ignored. assert_stdout_contains=r"L: 6 | P: 11 | CP01 |", ) def test_cli_warn_unused_noqa_flag(): """Test that --warn-unused-ignores flag works.""" invoke_assert_code( # Return value should still be success. ret_code=0, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--warn-unused-ignores", ], ], # Warning shown. assert_stdout_contains=( r"L: 5 | P: 18 | NOQA | WARNING: Unused noqa: 'noqa: CP01'" ), ) def test_cli_get_default_config(): """`nocolor` and `verbose` values loaded from config if not specified via CLI.""" config = get_config( "test/fixtures/config/toml/pyproject.toml", True, nocolor=None, verbose=None, require_dialect=False, ) assert config.get("nocolor") is True assert config.get("verbose") == 2 @patch( "sqlfluff.core.linter.linter.progress_bar_configuration", disable_progress_bar=False, ) class TestProgressBars: """Progress bars test cases. The tqdm package, used for handling progress bars, is able to tell when it is used in a not tty terminal (when `disable` is set to None). In such cases, it just does not render anything. To suppress that for testing purposes, we need to set implicitly that we don't want to disable it. Probably it would be better - cleaner - just to patch `isatty` at some point, but I didn't find a way how to do that properly. 
""" def test_cli_lint_disabled_progress_bar( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is disabled, nothing should be printed into output.""" result = invoke_assert_code( args=[ lint, [ "--disable-progress-bar", "test/fixtures/linter/passing.sql", ], ], ) raw_stderr = repr(result.stderr) assert "\rpath test/fixtures/linter/passing.sql:" not in raw_stderr assert "\rparsing: 0it" not in raw_stderr assert "\r\rlint by rules:" not in raw_stderr def test_cli_lint_enabled_progress_bar( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( args=[ lint, [ "test/fixtures/linter/passing.sql", ], ], ) raw_stderr = repr(result.stderr) assert r"\rlint by rules:" in raw_stderr assert r"\rrule LT01:" in raw_stderr assert r"\rrule CV05:" in raw_stderr def test_cli_lint_enabled_progress_bar_multiple_paths( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/linter/passing.sql", "test/fixtures/linter/indentation_errors.sql", ], ], ) normalised_stderr = repr(result.stderr.replace("\\", "/")) assert r"\rfile test/fixtures/linter/passing.sql:" in normalised_stderr assert ( r"\rfile test/fixtures/linter/indentation_errors.sql:" in normalised_stderr ) assert r"\rlint by rules:" in normalised_stderr assert r"\rrule LT01:" in normalised_stderr assert r"\rrule CV05:" in normalised_stderr def test_cli_lint_enabled_progress_bar_multiple_files( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( args=[ lint, [ "test/fixtures/linter/multiple_files", ], ], ) raw_stderr = repr(result.stderr) sep = os.sep if sys.platform == "win32": sep *= 2 assert ( r"\rfile test/fixtures/linter/multiple_files/passing.1.sql:".replace( "/", sep ) in raw_stderr ) assert ( r"\rfile test/fixtures/linter/multiple_files/passing.2.sql:".replace( "/", sep ) in raw_stderr ) assert ( r"\rfile test/fixtures/linter/multiple_files/passing.3.sql:".replace( "/", sep ) in raw_stderr ) assert r"\rlint by rules:" in raw_stderr assert r"\rrule LT01:" in raw_stderr assert r"\rrule CV05:" in raw_stderr multiple_expected_output = """==== finding fixable violations ==== == [test/fixtures/linter/multiple_sql_errors.sql] FAIL L: 12 | P: 1 | LT02 | Expected indent of 4 spaces. [layout.indent] L: 44 | P: 12 | ST09 | Joins should list the table referenced earlier first. | [structure.join_condition_order] ==== fixing violations ==== 2 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ... Invalid input, please enter 'Y' or 'N' Aborting... [4 unfixable linting violations found] """ def test__cli__fix_multiple_errors_no_show_errors(): """Test the fix output.""" invoke_assert_code( ret_code=1, args=[ fix, [ "--check", # Run in check mode to get the confirmation. 
"--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", ], ], assert_stdout_contains=multiple_expected_output, ) def test__cli__fix_multiple_errors_quiet_force(): """Test the fix --quiet option with --force.""" invoke_assert_code( ret_code=0, args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", "--quiet", "-x", "_fix", ], ], assert_stdout_contains=( """== [test/fixtures/linter/multiple_sql_errors.sql] FIXED 2 fixable linting violations found""" ), ) def test__cli__fix_multiple_errors_quiet_check(): """Test the fix --quiet option without --force.""" invoke_assert_code( ret_code=0, args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", "--check", # Run in check mode to get the confirmation. "--quiet", "-x", "_fix", ], # Test with the confirmation step. "y", ], assert_stdout_contains=( """2 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ... == [test/fixtures/linter/multiple_sql_errors.sql] FIXED All Finished""" ), ) def test__cli__fix_multiple_errors_show_errors(): """Test the fix --show-lint-violations option.""" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--disable-progress-bar", "--show-lint-violations", "test/fixtures/linter/multiple_sql_errors.sql", "--check", # Run in check mode to get the confirmation. ], ], ) # We should get a readout of what the error was check_a = "4 unfixable linting violations found" assert check_a in result.stdout # Finally check the WHOLE output to make sure that unexpected newlines are not # added. The replace command just accounts for cross platform testing. assert "L: 12 | P: 1 | LT02 | Expected indent of 4 spaces." in result.stdout assert ( "L: 36 | P: 9 | RF02 | Unqualified reference 'package_id' found in " "select with more than" in result.stdout ) assert ( "L: 45 | P: 17 | RF02 | Unqualified reference 'owner_type' found in " "select with more than" in result.stdout ) assert ( "L: 45 | P: 50 | RF02 | Unqualified reference 'app_key' found in " "select with more than one" in result.stdout ) assert ( "L: 42 | P: 45 | RF02 | Unqualified reference 'owner_id' found in " "select with more than" in result.stdout ) def test__cli__fix_show_parse_errors(): """Test the fix --show-lint-violations option with parser error.""" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--show-lint-violations", "test/fixtures/linter/parse_lex_error.sql", ], ], ) check_a = "1 templating/parsing errors found" assert check_a not in result.stderr assert ( "L: 9 | P: 21 | PRS | Couldn't find closing bracket for opening bracket." in result.stdout ) assert "L: 9 | P: 22 | LXR | Unable to lex characters: " in result.stdout # Calling without show-lint-violations result = invoke_assert_code( ret_code=1, args=[ fix, [ "test/fixtures/linter/parse_lex_error.sql", ], ], ) assert check_a in result.stderr assert ( "L: 9 | P: 21 | PRS | Couldn't find closing bracket for opening bracket." not in result.stdout ) assert "L: 9 | P: 22 | LXR | Unable to lex characters: " not in result.stdout def test__cli__multiple_files__fix_multiple_errors_show_errors(): """Basic check of lint ensures with multiple files, filenames are listed.""" sql_path = "test/fixtures/linter/multiple_sql_errors.sql" indent_path = "test/fixtures/linter/indentation_errors.sql" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--disable-progress-bar", "--check", # Run in check mode to get the confirmation. 
"--show-lint-violations", sql_path, indent_path, ], ], ) unfixable_error_msg = "==== lint for unfixable violations ====" assert unfixable_error_msg in result.stdout indent_pass_msg = f"== [{os.path.normpath(indent_path)}] PASS" multi_fail_msg = f"== [{os.path.normpath(sql_path)}] FAIL" unfix_err_log = result.stdout[result.stdout.index(unfixable_error_msg) :] assert indent_pass_msg in unfix_err_log assert multi_fail_msg in unfix_err_log # Assert that they are sorted in alphabetical order assert unfix_err_log.index(indent_pass_msg) < unfix_err_log.index(multi_fail_msg) def test__cli__render_fail(): """Basic how render fails.""" invoke_assert_code( ret_code=1, args=[ render, [ "test/fixtures/cli/fail_many.sql", ], ], assert_stdout_contains=( "L: 3 | P: 8 | TMP | Undefined jinja template variable: 'something'" ), ) def test__cli__render_pass(): """Basic how render works.""" invoke_assert_code( ret_code=0, args=[ render, [ "test/fixtures/templater/jinja_a/jinja.sql", ], ], assert_stdout_contains="SELECT 56 FROM sch1.tbl2", ) sqlfluff-3.4.2/test/cli/formatters_test.py000066400000000000000000000076101503426445100206770ustar00rootroot00000000000000"""The Test file for CLI Formatters.""" import pathlib import re import textwrap import pytest from sqlfluff.cli.commands import fix from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.cli.outputstream import FileOutput from sqlfluff.core import FluffConfig from sqlfluff.core.errors import SQLLintError from sqlfluff.core.parser import RawSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import RuleGhost from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.core.types import Color def escape_ansi(line): """Remove ANSI color codes for testing.""" ansi_escape = re.compile("\u001b\\[[0-9]+(;[0-9]+)?m") return ansi_escape.sub("", line) def test__cli__formatters__filename_nocol(tmpdir): """Test formatting filenames.""" formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) res = formatter.format_filename("blahblah", success=True) assert escape_ansi(res) == "== [blahblah] PASS" def test__cli__formatters__violation(tmpdir): """Test formatting violations. NB Position is 1 + start_pos. """ s = RawSegment( "foobarbar", PositionMarker( slice(10, 19), slice(10, 19), TemplatedFile.from_string(" \n\n foobarbar"), ), ) r = RuleGhost("A", "some-name", "DESC") v = SQLLintError(description=r.description, segment=s, rule=r) formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) f = formatter.format_violation(v) # Position is 3, 3 because foobarbar is on the third # line (i.e. it has two newlines preceding it) and # it's at the third position in that line (i.e. there # are two characters between it and the preceding # newline). assert escape_ansi(f) == "L: 3 | P: 3 | A | DESC [some-name]" def test__cli__helpers__colorize(tmpdir): """Test ANSI colouring.""" formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) # Force color output for this test. 
formatter.plain_output = False assert formatter.colorize("foo", Color.red) == "\u001b[31mfoo\u001b[0m" def test__cli__helpers__cli_table(tmpdir): """Test making tables.""" vals = [("a", 3), ("b", "c"), ("d", 4.7654), ("e", 9)] formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) txt = formatter.cli_table(vals, col_width=7, divider_char="|", label_color=None) # NB: No trailing newline assert txt == "a: 3|b: c\nd: 4.77|e: 9" @pytest.mark.parametrize( "sql,fix_args,expected", [ ( ( "CREATE TABLE IF NOT EXISTS vuln.software_name_dictionary(" "id SERIAL PRIMARY KEY" "rule VARCHAR(30)" ");" ), ["--force", "--dialect", "postgres", "--disable-progress-bar", "--nocolor"], ( "CREATE TABLE IF NOT EXISTS vuln.software_name_dictionary(" "id SERIAL PRIMARY KEY" "rule VARCHAR(30)" ");" ), ) ], ) def test__cli__fix_no_corrupt_file_contents(sql, fix_args, expected, tmpdir): """Test how the fix cli command creates files. Ensure there is no incorrect output from stderr that makes it to the file. """ tmp_path = pathlib.Path(str(tmpdir)) filepath = tmp_path / "testing.sql" filepath.write_text(textwrap.dedent(sql)) with tmpdir.as_cwd(): with pytest.raises(SystemExit): fix(fix_args) with open(tmp_path / "testing.sql", "r") as fin: actual = fin.read() # Ensure no corruption in formatted file assert actual.strip() == expected.strip() sqlfluff-3.4.2/test/cli/helpers_test.py000066400000000000000000000043531503426445100201540ustar00rootroot00000000000000"""The Test file for CLI helpers.""" import pytest from sqlfluff.cli.helpers import LazySequence, pad_line, wrap_elem, wrap_field @pytest.mark.parametrize( "in_str,length,res", [ ("abc", 5, ["abc"]), # Space wrap test ("how now brown cow", 10, ["how now", "brown cow"]), # Harder wrap test ("A hippopotamus came for tea", 10, ["A hippopot", "amus came", "for tea"]), # Harder wrap test, with a newline. ("A hippopotamus\ncame for tea", 10, ["A hippopot", "amus came", "for tea"]), ], ) def test__cli__helpers__wrap_elem(in_str, length, res): """Test wrapping.""" str_list = wrap_elem(in_str, length) assert str_list == res def test__cli__helpers__wrap_field_a(): """Test simple wrapping.""" dct = wrap_field("abc", "How Now Brown Cow", width=40) assert dct["label_list"] == ["abc"] assert dct["val_list"] == ["How Now Brown Cow"] assert "sep_char" in dct assert dct["lines"] == 1 assert dct["label_width"] == 3 def test__cli__helpers__wrap_field_b(): """Test simple wrapping with overlap avoidance.""" dct = wrap_field("abc", "How Now Brown Cow", width=23) assert dct["label_list"] == ["abc"] assert dct["val_list"] == ["How Now Brown Cow"] assert dct["label_width"] == 3 def test__cli__helpers__wrap_field_c(): """Test simple wrapping.""" dct = wrap_field("how now brn cow", "How Now Brown Cow", width=25) assert dct["label_list"] == ["how now", "brn cow"] assert dct["label_width"] == 7 assert dct["val_list"] == ["How Now Brown", "Cow"] assert dct["lines"] == 2 def test__cli__helpers__pad_line(): """Test line padding.""" assert pad_line("abc", 5) == "abc " assert pad_line("abcdef", 10, align="right") == " abcdef" def test_cli__helpers__lazy_sequence(): """Test the LazySequence.""" getter_run = False def _get_sequence(): nonlocal getter_run getter_run = True return [1, 2, 3] seq = LazySequence(_get_sequence) # Check the sequence isn't called on instantiation. assert not getter_run # Fetch an item... assert seq[2] == 3 # .. and that now it has run. 
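    # i.e. the getter only fired once an element was actually requested.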
assert getter_run # Check other methods work assert len(seq) == 3 sqlfluff-3.4.2/test/conftest.py000066400000000000000000000226441503426445100165340ustar00rootroot00000000000000"""Common Test Fixtures.""" import hashlib import io import os from typing import NamedTuple import pytest import yaml from yaml import CDumper, CLoader from sqlfluff.cli.commands import quoted_presenter from sqlfluff.core import FluffConfig from sqlfluff.core.linter import Linter from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, CodeSegment, CommentSegment, Dedent, Indent, NewlineSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import BaseRule from sqlfluff.core.templaters import TemplatedFile # When writing YAML files, double quotes string values needing escapes. yaml.add_representer(str, quoted_presenter) class ParseExample(NamedTuple): """A tuple representing an example SQL file to parse.""" dialect: str sqlfile: str def get_parse_fixtures( fail_on_missing_yml=False, ) -> tuple[list[ParseExample], list[tuple[str, str, bool, str]]]: """Search for all parsing fixtures.""" parse_success_examples = [] parse_structure_examples = [] # Generate the filenames for each dialect from the parser test directory for d in os.listdir(os.path.join("test", "fixtures", "dialects")): # Ignore documentation if d.endswith(".md"): continue # assume that d is now the name of a dialect dirlist = os.listdir(os.path.join("test", "fixtures", "dialects", d)) for f in dirlist: has_yml = False if f.endswith(".sql"): root = f[:-4] # only look for sql files parse_success_examples.append(ParseExample(d, f)) # Look for the code_only version of the structure y = root + ".yml" if y in dirlist: parse_structure_examples.append((d, f, True, y)) has_yml = True # Look for the non-code included version of the structure y = root + "_nc.yml" if y in dirlist: parse_structure_examples.append((d, f, False, y)) has_yml = True if not has_yml and fail_on_missing_yml: raise ( Exception( f"Missing .yml file for {os.path.join(d, f)}. Run the " "test/generate_parse_fixture_yml.py script!" 
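                        # NOTE: Only reachable when fail_on_missing_yml is
                        # passed as True (per the guard above).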
) ) return parse_success_examples, parse_structure_examples def make_dialect_path(dialect, fname): """Work out how to find paths given a dialect and a file name.""" return os.path.join("test", "fixtures", "dialects", dialect, fname) def load_file(dialect, fname): """Load a file.""" with open(make_dialect_path(dialect, fname), encoding="utf8") as f: raw = f.read() return raw def process_struct(obj): """Process a nested dict or dict-like into a check tuple.""" if isinstance(obj, dict): return tuple((k, process_struct(obj[k])) for k in obj) elif isinstance(obj, list): # If empty list, return empty tuple if not len(obj): return tuple() # We'll assume that it's a list of dicts if isinstance(obj[0], dict): buff = [process_struct(elem) for elem in obj] if any(len(elem) > 1 for elem in buff): raise ValueError(f"Not sure how to deal with multi key dict: {buff!r}") return tuple(elem[0] for elem in buff) else: raise TypeError(f"Did not expect a list of {type(obj[0])}: {obj[0]!r}") elif isinstance(obj, (str, int, float)): return str(obj) elif obj is None: return None else: raise TypeError(f"Not sure how to deal with type {type(obj)}: {obj!r}") def parse_example_file(dialect: str, sqlfile: str): """Parse example SQL file, return parse tree.""" config = FluffConfig(overrides=dict(dialect=dialect)) # Load the SQL raw = load_file(dialect, sqlfile) # Lex and parse the file tokens, _ = Lexer(config=config).lex(raw) tree = Parser(config=config).parse(tokens, fname=dialect + "/" + sqlfile) return tree def compute_parse_tree_hash(tree): """Given a parse tree, compute a consistent hash value for it.""" if tree: r = tree.as_record(code_only=True, show_raw=True) if r: r_io = io.StringIO() yaml.dump(r, r_io, sort_keys=False, allow_unicode=True, Dumper=CDumper) result = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest() return result return None def load_yaml(fpath): """Load a yaml structure and process it into a tuple.""" # Load raw file with open(fpath, encoding="utf8") as f: raw = f.read() # Parse the yaml obj = yaml.load(raw, Loader=CLoader) # Return the parsed and structured object _hash = None if obj: _hash = obj.pop("_hash", None) processed = process_struct(obj) if processed: return _hash, process_struct(obj)[0] else: return None, None @pytest.fixture() def yaml_loader(): """Return a yaml loading function.""" # Return a function return load_yaml def _generate_test_segments_func(elems): """Roughly generate test segments. This function isn't totally robust, but good enough for testing. Use with caution. 
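
    As a rough illustration (typical usage, not an exhaustive spec): an input
    like ["SELECT", " ", "foo"] should come back as a code segment, a
    whitespace segment and another code segment, with position markers
    assigned from a running character index.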
""" buff = [] raw_file = "".join(elems) templated_file = TemplatedFile.from_string(raw_file) idx = 0 for elem in elems: if elem == "": buff.append( Indent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue elif elem == "": buff.append( Dedent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue seg_kwargs = {} if set(elem) <= {" ", "\t"}: SegClass = WhitespaceSegment elif set(elem) <= {"\n"}: SegClass = NewlineSegment elif elem == "(": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_bracket",)} elif elem == ")": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_bracket",)} elif elem == "[": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_square_bracket",)} elif elem == "]": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_square_bracket",)} elif elem.startswith("--"): SegClass = CommentSegment seg_kwargs = {"instance_types": ("inline_comment",)} elif elem.startswith('"'): SegClass = CodeSegment seg_kwargs = {"instance_types": ("double_quote",)} elif elem.startswith("'"): SegClass = CodeSegment seg_kwargs = {"instance_types": ("single_quote",)} else: SegClass = CodeSegment # Set a none position marker which we'll realign at the end. buff.append( SegClass( raw=elem, pos_marker=PositionMarker( slice(idx, idx + len(elem)), slice(idx, idx + len(elem)), templated_file, ), **seg_kwargs, ) ) idx += len(elem) return tuple(buff) @pytest.fixture(scope="module") def generate_test_segments(): """Roughly generate test segments. This is a factory function so that it works as a fixture, but when actually used, this will return the inner function which is what you actually need. """ return _generate_test_segments_func @pytest.fixture def raise_critical_errors_after_fix(monkeypatch): """Raises errors that break the Fix process. These errors are otherwise swallowed to allow the lint messages to reach the end user. """ @staticmethod def _log_critical_errors(error: Exception): raise error monkeypatch.setattr(BaseRule, "_log_critical_errors", _log_critical_errors) @pytest.fixture(autouse=True) def fail_on_parse_error_after_fix(monkeypatch): """Cause tests to fail if a lint fix introduces a parse error. In production, we have a couple of functions that, upon detecting a bug in a lint rule, just log a warning. To catch bugs in new or modified rules, we want to be more strict during dev and CI/CD testing. Here, we patch in different functions which raise runtime errors, causing tests to fail if this happens. """ @staticmethod def raise_error_apply_fixes_check_issue(message, *args): # pragma: no cover raise ValueError(message % args) @staticmethod def raise_error_conflicting_fixes_same_anchor(message: str): # pragma: no cover raise ValueError(message) monkeypatch.setattr( BaseSegment, "_log_apply_fixes_check_issue", raise_error_apply_fixes_check_issue ) monkeypatch.setattr( Linter, "_report_conflicting_fixes_same_anchor", raise_error_conflicting_fixes_same_anchor, ) @pytest.fixture(autouse=True) def test_verbosity_level(request): """Report the verbosity level for a given pytest run. 
For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0 """ return request.config.getoption("verbose") sqlfluff-3.4.2/test/core/000077500000000000000000000000001503426445100152555ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/__init__.py000066400000000000000000000000421503426445100173620ustar00rootroot00000000000000"""Tests for the core library.""" sqlfluff-3.4.2/test/core/config/000077500000000000000000000000001503426445100165225ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/config/fluffconfig_test.py000066400000000000000000000336561503426445100224400ustar00rootroot00000000000000"""Tests for the configuration routines.""" import logging import os import pytest import sqlfluff from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.templaters import ( JinjaTemplater, PlaceholderTemplater, PythonTemplater, RawTemplater, ) from sqlfluff.utils.testing.logging import fluff_log_catcher config_b = { "core": {"rules": "LT03", "dialect": "ansi"}, "layout": { "type": {"comma": {"line_position": "trailing", "spacing_before": "touch"}} }, } config_c = { "core": {"rules": "LT03", "dialect": "ansi"}, # NOTE: # - NOT_A_RULE doesn't match anything. # - L001 is an alias, but no longer a rule. # - layout is a group and but doesn't match any individual rule. "rules": { "NOT_A_RULE": {"foo": "bar"}, "L001": {"foo": "bar"}, "layout": {"foo": "bar"}, }, } def test__config__from_strings(): """Test loading config from multiple strings.""" strings = [ "[sqlfluff]\ndialect=mysql\ntesting_val=foobar", "[sqlfluff]\ndialect=postgres\ntesting_val2=bar", "[sqlfluff]\ndialect=mysql\ntesting_val=foo", ] cfg = FluffConfig.from_strings(*strings) assert cfg.get("dialect") == "mysql" assert cfg.get("testing_val2") == "bar" assert cfg.get("testing_val") == "foo" def test__config__nested_config_tests(): """Test linting with overridden config in nested paths. This looks like a linter test but it's actually a config test. """ lntr = Linter( # Exclude CP02 in overrides (similar to cli --exclude-rules) config=FluffConfig(overrides=dict(exclude_rules="CP02", dialect="ansi")) ) lnt = lntr.lint_path("test/fixtures/config/inheritance_b") violations = lnt.check_tuples_by_path() for k in violations: if k.endswith("nested\\example.sql"): # CP01 is enabled in the .sqlfluff file and not excluded. assert ("CP01", 1, 4) in violations[k] # LT02 is enabled in the .sqlfluff file and not excluded. assert ("LT02", 1, 1) in violations[k] # CP02 is enabled in the .sqlfluff file but excluded by the # override above. assert "CP02" not in [c[0] for c in violations[k]] elif k.endswith("inheritance_b\\example.sql"): # CP01 is enabled because while disabled in the tox.ini file, # the exclude-rules option is overridden by the override above # which effectively sets the exclude to CP02 and in effect # re-enables CP01. # This may seem counter-intuitive but is in line with current # documentation on how to use `rules` and `exclude-rules`. # https://docs.sqlfluff.com/en/latest/perma/rule_disabling.html assert ("CP01", 1, 4) in violations[k] # CP02 is disabled because of the override above. 
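            # (i.e. an exclude_rules override replaces, rather than extends,
            # the exclusions configured in files.)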
assert "CP02" not in [c[0] for c in violations[k]] # LT02 is disabled because it is not in the `rules` of tox.ini assert "LT02" not in [c[0] for c in violations[k]] @pytest.mark.parametrize( "templater_name,templater_class,raises_error", [ ("raw", RawTemplater, False), ("jinja", JinjaTemplater, False), ("python", PythonTemplater, False), ("placeholder", PlaceholderTemplater, False), ("afefhlsakufe", None, True), ("", None, True), ], ) def test__config__templater_selection(templater_name, templater_class, raises_error): """Test template selection by name.""" if raises_error: with pytest.raises(SQLFluffUserError): FluffConfig(overrides={"dialect": "ansi", "templater": templater_name}) else: cfg = FluffConfig(overrides={"dialect": "ansi", "templater": templater_name}) assert cfg.get_templater().__class__ is templater_class assert cfg._configs["core"]["templater_obj"].__class__ is templater_class def test__config__glob_exclude_config_tests(): """Test linting with a glob pattern in exclude_rules. This looks like a linter test but it's actually a config test. """ lntr = Linter(config=FluffConfig.from_path("test/fixtures/config/glob_exclude")) lnt = lntr.lint_path("test/fixtures/config/glob_exclude/test.sql") violations = lnt.check_tuples_by_path() for k in violations: assert ("AM04", 12, 1) in violations[k] assert "RF02" not in [c[0] for c in violations[k]] assert "LT13" not in [c[0] for c in violations[k]] assert "AM05" not in [c[0] for c in violations[k]] assert "CV06" not in [c[0] for c in violations[k]] def test__config__glob_include_config_tests(): """Test linting with a glob pattern in rules. This looks like a linter test but it's actually a config test. """ lntr = Linter(config=FluffConfig.from_path("test/fixtures/config/glob_include")) lnt = lntr.lint_path("test/fixtures/config/glob_include/test.sql") violations = lnt.check_tuples_by_path() for k in violations: assert ("LT13", 1, 1) in violations[k] assert ("AM05", 14, 1) in violations[k] assert ("CV06", 14, 9) in violations[k] assert ("RF02", 12, 8) in violations[k] assert "AM04" not in [c[0] for c in violations[k]] def test__config__rules_set_to_none(): """Test linting when rules are set to 'None'. Ensure that all rules are still run. 
""" lntr = Linter( config=FluffConfig.from_path("test/fixtures/config/rules_set_to_none") ) lnt = lntr.lint_path("test/fixtures/config/rules_set_to_none/test.sql") violations = lnt.check_tuples_by_path() for k in violations: assert ("LT13", 1, 1) in violations[k] assert ("AM04", 12, 1) in violations[k] assert ("CP01", 12, 10) in violations[k] def test__config__rules_group_with_exclude(): """Test linting when a rules group is selected and rules are excluded.""" lntr = Linter( config=FluffConfig.from_path("test/fixtures/config/rules_group_with_exclude") ) lnt = lntr.lint_path("test/fixtures/config/rules_group_with_exclude/test.sql") violations = lnt.check_tuples_by_path() for k in violations: assert ("CP01", 15, 1) in violations[k] assert "LT04" not in [c[0] for c in violations[k]] def test__config__get_section(): """Test FluffConfig.get_section method.""" cfg = FluffConfig(config_b) assert cfg.get_section("core").get("rules", None) == "LT03" assert cfg.get_section(["layout", "type", "comma"]) == { "line_position": "trailing", "spacing_before": "touch", } assert cfg.get_section("non_existent") is None def test__config__get(): """Test FluffConfig.get method.""" cfg = FluffConfig(config_b) assert cfg.get("rules") == "LT03" assert cfg.get("rulez") is None assert cfg.get("rulez", section="core", default=123) == 123 assert ( cfg.get("line_position", section=["layout", "type", "comma"], default=None) == "trailing" ) assert ( cfg.get("line_position", section=["layout", "type", "ASDFSDG007"], default=None) is None ) def test__config__from_kwargs(): """Test from_kwargs method of FluffConfig.""" # Instantiate config object. cfg = FluffConfig.from_kwargs( dialect="snowflake", rules=["LT01", "LT02"], exclude_rules=["CP01", "AL01"], ) # Verify we can later retrieve the config values. assert cfg.get("dialect") == "snowflake" assert cfg.get("rules") == "LT01,LT02" assert cfg.get("exclude_rules") == "CP01,AL01" def test__config__from_string(): """Test from_string method of FluffConfig.""" with open( os.path.join("test", "fixtures", "config", "inheritance_a", ".sqlfluff") ) as f: config_string = f.read() cfg = FluffConfig.from_string(config_string) # Verify we can later retrieve the config values. assert cfg.get("testing_val") == "foobar" assert cfg.get("dialect") == "mysql" def test__config_missing_dialect(): """Verify an exception is thrown if no dialect was specified.""" with pytest.raises(SQLFluffUserError) as e: FluffConfig.from_kwargs() assert "must configure a dialect" in str(e.value) def test__config__validate_configs_indirect(): """Test _validate_configs method of FluffConfig indirectly.""" # Instantiate config object. with pytest.raises(SQLFluffUserError): FluffConfig( configs={ "core": {"dialect": "ansi"}, # This is a known removed value. "rules": {"L003": {"lint_templated_tokens": True}}, } ) @pytest.mark.parametrize( "raw_sql", [ ( # "types" not "type" "-- sqlfluff:layout:types:comma:line_position:leading\nSELECT 1" ), ( # Unsupported layout config length "-- sqlfluff:layout:foo:bar\nSELECT 1" ), ( # Unsupported layout config length "-- sqlfluff:layout:type:comma:bar\nSELECT 1" ), ( # Unsupported layout config key ("foo") "-- sqlfluff:layout:type:comma:foo:bar\nSELECT 1" ), ( # Unsupported layout config key ("foo") [no space] "--sqlfluff:layout:type:comma:foo:bar\nSELECT 1" ), ], ) def test__config__validate_configs_inline_layout(raw_sql): """Test _validate_configs method of FluffConfig when used on a file. 
This test covers both the validation of inline config directives but also the validation of layout configs. """ # Instantiate config object. cfg = FluffConfig(configs={"core": {"dialect": "ansi"}}) # Try to process an invalid inline config. Make sure we get an error. with pytest.raises(SQLFluffUserError): cfg.process_raw_file_for_config(raw_sql, "test.sql") def test__config__warn_unknown_rule(): """Test warnings when rules are unknown.""" lntr = Linter(config=FluffConfig(config_c)) with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog: lntr.get_rulepack() # Check we get a warning on the unrecognised rule. assert ( "Rule configuration contain a section for unexpected rule 'NOT_A_RULE'." ) in caplog.text # Check we get a warning for the deprecated rule. assert ( "Rule configuration contain a section for unexpected rule 'L001'." ) in caplog.text # Check we get a hint for the matched rule. assert "match for rule LT01 with name 'layout.spacing'" in caplog.text # Check we get a warning for the group name. assert ( "Rule configuration contain a section for unexpected rule 'layout'." ) in caplog.text # Check we get a hint for the matched rule group. # NOTE: We don't check the set explicitly because we can't assume ordering. assert ("The reference was found as a match for multiple rules: {") in caplog.text assert ("LT01") in caplog.text assert ("LT02") in caplog.text def test__process_inline_config(): """Test the processing of inline in-file configuration directives.""" cfg = FluffConfig(config_b) assert cfg.get("rules") == "LT03" cfg.process_inline_config("-- sqlfluff:rules:LT02", "test.sql") assert cfg.get("rules") == "LT02" assert cfg.get("tab_space_size", section="indentation") == 4 cfg.process_inline_config("-- sqlfluff:indentation:tab_space_size:20", "test.sql") assert cfg.get("tab_space_size", section="indentation") == 20 assert cfg.get("dialect") == "ansi" assert cfg.get("dialect_obj").name == "ansi" cfg.process_inline_config("-- sqlfluff:dialect:postgres", "test.sql") assert cfg.get("dialect") == "postgres" assert cfg.get("dialect_obj").name == "postgres" assert cfg.get("rulez") is None cfg.process_inline_config("-- sqlfluff:rulez:LT06", "test.sql") assert cfg.get("rulez") == "LT06" # Check that Windows paths don't get mangled cfg.process_inline_config("-- sqlfluff:jinja:my_path:c:\\foo", "test.sql") assert cfg.get("my_path", section="jinja") == "c:\\foo" # Check that JSON objects are not mangled cfg.process_inline_config('-- sqlfluff:jinja:my_dict:{"k":"v"}', "test.sql") assert cfg.get("my_dict", section="jinja") == '{"k":"v"}' # Check that JSON arrays are not mangled cfg.process_inline_config('-- sqlfluff:jinja:my_dict:[{"k":"v"}]', "test.sql") assert cfg.get("my_dict", section="jinja") == '[{"k":"v"}]' @pytest.mark.parametrize( "raw_sql", [ ( "-- sqlfluff:max_line_length:25\n" "-- sqlfluff:rules:LT05,LT06\n" "-- sqlfluff:exclude_rules:LT01,LT02\n" "SELECT 1" ) ], ) def test__process_raw_file_for_config(raw_sql): """Test the processing of a file inline directives.""" cfg = FluffConfig(config_b) # verify initial attributes based on the preloaded configuration assert cfg.get("max_line_length") == 80 assert cfg.get("rules") == "LT03" assert cfg.get("exclude_rules") is None # internal list attributes should have corresponding exploded list values assert cfg.get("rule_allowlist") == ["LT03"] assert cfg.get("rule_denylist") == [] cfg.process_raw_file_for_config(raw_sql, "test.sql") # verify overrides based on the file inline directives assert cfg.get("max_line_length") == 25 
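    # The inline "-- sqlfluff:..." directives take precedence over the
    # values preloaded from config_b.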
assert cfg.get("rules") == "LT05,LT06" assert cfg.get("exclude_rules") == "LT01,LT02" # internal list attributes should have overridden exploded list values assert cfg.get("rule_allowlist") == ["LT05", "LT06"] assert cfg.get("rule_denylist") == ["LT01", "LT02"] def test__api__immutable_config(): """Tests that a config is not mutated when parsing.""" config = FluffConfig.from_path( "test/fixtures/api/config_path_test/extra_configs/.sqlfluff" ) assert config.get("dialect") == "ansi" sqlfluff.parse( "-- sqlfluff:dialect: postgres\nSELECT * FROM table1\n", config=config ) assert config.get("dialect") == "ansi" sqlfluff-3.4.2/test/core/config/loader_test.py000066400000000000000000000231531503426445100214050ustar00rootroot00000000000000"""Tests for the configuration routines.""" import os import sys from contextlib import contextmanager from unittest.mock import call, patch import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.config import ( load_config_at_path, load_config_file, load_config_string, load_config_up_to_path, ) from sqlfluff.core.config.loader import ( _get_user_config_dir_path, _load_user_appdir_config, ) from sqlfluff.core.errors import SQLFluffUserError config_a = { "core": {"testing_val": "foobar", "testing_int": 4, "dialect": "mysql"}, "bar": {"foo": "barbar"}, } @pytest.fixture def mock_xdg_home(monkeypatch): """Sets the XDG_CONFIG_HOME variable.""" monkeypatch.setenv("XDG_CONFIG_HOME", "~/.config/my/special/path") def test__config__load_file_dir(): """Test loading config from a directory path.""" cfg = load_config_at_path( os.path.join("test", "fixtures", "config", "inheritance_a") ) assert cfg == config_a def test__config__load_from_string(): """Test loading config from a string.""" # Load a string with open( os.path.join("test", "fixtures", "config", "inheritance_a", ".sqlfluff") ) as f: config_string = f.read() cfg = load_config_string(config_string) assert cfg == config_a def test__config__load_file_f(): """Test loading config from a file path.""" cfg = load_config_at_path( os.path.join("test", "fixtures", "config", "inheritance_a", "testing.sql") ) assert cfg == config_a def test__config__load_file_missing_extra(): """Test loading config from a file path if extra path is not found.""" with pytest.raises(SQLFluffUserError): load_config_up_to_path( os.path.join("test", "fixtures", "config", "inheritance_a", "testing.sql"), extra_config_path="non/existent/path", ) def test__config__load_nested(): """Test nested overwrite and order of precedence of config files.""" cfg = load_config_up_to_path( os.path.join( "test", "fixtures", "config", "inheritance_a", "nested", "blah.sql" ), extra_config_path=os.path.join( "test", "fixtures", "config", "inheritance_a", "extra", "this_can_have_any_name.cfg", ), ) assert cfg == { "core": { # Outer .sqlfluff defines dialect & testing_val and not overridden. "dialect": "mysql", "testing_val": "foobar", # tesing_int is defined in many. Inner pyproject.toml takes precedence. "testing_int": 1, # testing_bar is defined only in setup.cfg "testing_bar": 7.698, }, # bar is defined in a few, but the extra_config takes precedence. "bar": {"foo": "foobarextra"}, # fnarr is defined in a few. Inner tox.ini takes precedence. 
"fnarr": {"fnarr": {"foo": "foobar"}}, } @contextmanager def change_dir(path): """Set the current working directory to `path` for the duration of the context.""" original_dir = os.getcwd() try: os.chdir(path) yield finally: os.chdir(original_dir) @pytest.mark.skipif( sys.platform == "win32", reason="Seems test is not executed under home directory on Windows", ) def test__config__load_parent(): """Test that config is loaded from parent directory of current working directory.""" with change_dir( os.path.join("test", "fixtures", "config", "inheritance_a", "nested") ): cfg = load_config_up_to_path("blah.sql") assert cfg == { "core": { "dialect": "mysql", "testing_val": "foobar", "testing_int": 1, "testing_bar": 7.698, }, "bar": {"foo": "foobar"}, "fnarr": {"fnarr": {"foo": "foobar"}}, } def test__config__load_toml(): """Test loading config from a pyproject.toml file.""" cfg = load_config_file( os.path.join("test", "fixtures", "config", "toml"), "pyproject.toml", ) assert cfg == { "core": { "nocolor": True, "verbose": 2, "testing_int": 5, "testing_bar": 7.698, "testing_bool": False, "testing_arr": ["a", "b", "c"], "rules": ["LT03", "LT09"], "testing_inline_table": {"x": 1}, }, "bar": {"foo": "foobar"}, "fnarr": {"fnarr": {"foo": "foobar"}}, "rules": {"capitalisation.keywords": {"capitalisation_policy": "upper"}}, } def test__config__load_placeholder_cfg(): """Test loading a sqlfluff configuration file for placeholder templater.""" cfg = load_config_file( os.path.join("test", "fixtures", "config", "placeholder"), ".sqlfluff-placeholder", ) assert cfg == { "core": { "testing_val": "foobar", "testing_int": 4, }, "bar": {"foo": "barbar"}, "templater": { "placeholder": { "param_style": "flyway_var", "flyway:database": "test_db", } }, } @patch("os.path.exists") @patch("os.listdir") @pytest.mark.skipif(sys.platform == "win32", reason="Not applicable on Windows") @pytest.mark.parametrize( "sys_platform,xdg_exists,default_exists,resolved_config_path,paths_checked", [ # On linux, if the default path exists, it should be the only path we check # and the chosen config path. ("linux", True, True, "~/.config/sqlfluff", ["~/.config/sqlfluff"]), # On linux, if the default path doesn't exist, then (because for this # test case we set XDG_CONFIG_HOME) it will check the default path # but then on finding it to not exist it will then try the XDG path. # In this case, neither actually exist and so what matters is that both # are either checked or used - rather than one in particular being the # end result. ( "linux", False, False, "~/.config/my/special/path/sqlfluff", ["~/.config/sqlfluff"], ), # On MacOS, if the default config path and the XDG path don't exist, then # we should resolve config to the default MacOS config path. ( "darwin", False, False, "~/Library/Application Support/sqlfluff", ["~/.config/sqlfluff", "~/.config/my/special/path/sqlfluff"], ), # However, if XDG_CONFIG_HOME is set, and the path exists then that should # be resolved _ahead of_ the default MacOS config path (as demonstrated # by us not checking the presence of that path in the process). 
# https://github.com/sqlfluff/sqlfluff/issues/889 ( "darwin", True, False, "~/.config/my/special/path/sqlfluff", ["~/.config/sqlfluff", "~/.config/my/special/path/sqlfluff"], ), ], ) def test__config__get_user_config_dir_path( mock_listdir, mock_path_exists, mock_xdg_home, sys_platform, xdg_exists, default_exists, resolved_config_path, paths_checked, ): """Test loading config from user appdir.""" xdg_home = os.environ.get("XDG_CONFIG_HOME") assert xdg_home, "XDG HOME should be set by the mock. Something has gone wrong." xdg_config_path = xdg_home + "/sqlfluff" def path_exists(check_path): """Patch for os.path.exists which depends on test parameters. Returns: True, unless `default_exists` is `False` and the path passed to the function is the default config path, or unless `xdg_exists` is `False` and the path passed is the XDG config path. """ resolved_path = os.path.expanduser(check_path) if ( resolved_path == os.path.expanduser("~/.config/sqlfluff") and not default_exists ): return False if resolved_path == os.path.expanduser(xdg_config_path) and not xdg_exists: return False return True mock_path_exists.side_effect = path_exists # Get the config path as though we are on macOS. resolved_path = _get_user_config_dir_path(sys_platform) assert os.path.expanduser(resolved_path) == os.path.expanduser(resolved_config_path) mock_path_exists.assert_has_calls( [call(os.path.expanduser(path)) for path in paths_checked] ) @patch("os.path.exists") @patch("sqlfluff.core.config.loader.load_config_at_path") def test__config__load_user_appdir_config(mock_load_config, mock_path_exists): """Test _load_user_appdir_config. NOTE: We mock `load_config_at_path()` so we can be really focussed with this test and also not need to actually interact with local home directories. """ mock_load_config.side_effect = lambda x: {} mock_path_exists.side_effect = lambda x: True _load_user_appdir_config() # It will check that the default config path exists... mock_path_exists.assert_has_calls([call(os.path.expanduser("~/.config/sqlfluff"))]) # ...and assuming it does, it will try and load config files at that path. mock_load_config.assert_has_calls([call(os.path.expanduser("~/.config/sqlfluff"))]) def test__config__toml_list_config(): """Test Parsing TOML list of values.""" loaded_config = load_config_file( os.path.join("test", "fixtures", "config", "toml"), "pyproject.toml", ) loaded_config["core"]["dialect"] = "ansi" cfg = FluffConfig(loaded_config) # Verify we can later retrieve the config values. assert cfg.get("dialect") == "ansi" assert cfg.get("rules") == ["LT03", "LT09"] sqlfluff-3.4.2/test/core/config/validate_test.py000066400000000000000000000070341503426445100217300ustar00rootroot00000000000000"""Tests for the config validation routines.""" import pytest from sqlfluff.core.config.removed import ( REMOVED_CONFIGS, validate_config_dict_for_removed, ) from sqlfluff.core.config.validate import _validate_layout_config from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.dict import ( iter_records_from_nested_dict, records_to_nested_dict, ) def test__validate_configs_direct(): """Test validate methods directly.""" # Make sure there _are_ removed configs. 
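    # (Otherwise the loop below would silently check nothing.)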
assert REMOVED_CONFIGS # Make sure all raise an error if validated for k in REMOVED_CONFIGS: print(k) if k.translation_func and k.new_path: config = records_to_nested_dict([(k.old_path, "foo")]) validate_config_dict_for_removed(config, "") print(config) new_records = list(iter_records_from_nested_dict(config)) # There should only be one assert len(new_records) == 1 # And it should be the reassigned one assert new_records[0][0] == k.new_path # Really we should check that it's output here, but logging config # seems to make that hard. else: config = records_to_nested_dict([(k.old_path, "foo")]) with pytest.raises(SQLFluffUserError) as excinfo: validate_config_dict_for_removed(config, "") assert "set an outdated config" in str(excinfo.value) assert k.warning in str(excinfo.value) def test__validate_configs_precedence_same_file(): """Test _validate_configs method of FluffConfig where there's a conflict.""" # Check with a known conflicted value old_key = ("rules", "LT03", "operator_new_lines") new_key = ("layout", "type", "binary_operator", "line_position") # Check it's still conflicted. assert any( k.old_path == old_key and k.new_path == new_key for k in REMOVED_CONFIGS ), ( "This test depends on this key still being removed. Update the test to " "one that is if this one isn't." ) # Test config config = records_to_nested_dict([(new_key, "foo"), (old_key, "foo")]) # Before validation assert config == { "rules": {"LT03": {"operator_new_lines": "foo"}}, "layout": {"type": {"binary_operator": {"line_position": "foo"}}}, } validate_config_dict_for_removed(config, "") # Check we only get the new key after validation assert config == {"layout": {"type": {"binary_operator": {"line_position": "foo"}}}} @pytest.mark.parametrize( "config_dict,config_warning", [ ({"layout": "foo"}, "Found value 'foo' instead of a valid layout section"), ( {"layout": {"invalid": "foo"}}, "Only sections of the form `sqlfluff:layout:type:...` are valid", ), ( {"layout": {"type": {"foo": "bar"}}}, "Expected a section", ), ( {"layout": {"type": {"foo": {"bar": "baz"}}}}, "Found the following invalid keys: {'bar'}", ), ( {"layout": {"type": {"foo": {"spacing_before": {"a": "b"}}}}}, "Found the an unexpected section rather than value", ), ], ) def test__validate_layouts(config_dict, config_warning): """Test the layout validation checks.""" with pytest.raises(SQLFluffUserError) as excinfo: _validate_layout_config(config_dict, "") assert "set an invalid `layout` option" in str(excinfo.value) assert config_warning in str(excinfo.value) sqlfluff-3.4.2/test/core/errors_test.py000066400000000000000000000043541503426445100202100ustar00rootroot00000000000000"""Tests pickling and unpickling of errors.""" import copy import pickle import pytest from sqlfluff.core.errors import SQLBaseError, SQLLexError, SQLLintError, SQLParseError from sqlfluff.core.parser import PositionMarker, RawSegment from sqlfluff.core.rules import BaseRule from sqlfluff.core.templaters import TemplatedFile class Rule_T078(BaseRule): """A dummy rule.""" groups = ("all",) def _eval(self, context): pass def assert_pickle_robust(err: SQLBaseError): """Test that the class remains the same through copying and pickling.""" # First try copying (and make sure they still compare equal) err_copy = copy.copy(err) assert err_copy == err # Then try picking (and make sure they also still compare equal) pickled = pickle.dumps(err) pickle_copy = pickle.loads(pickled) assert pickle_copy == err @pytest.mark.parametrize( "ignore", [True, False], ) def test__lex_error_pickle(ignore): 
"""Test lexing error pickling.""" template = TemplatedFile.from_string("foobar") err = SQLLexError("Foo", pos=PositionMarker(slice(0, 6), slice(0, 6), template)) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err) @pytest.mark.parametrize( "ignore", [True, False], ) def test__parse_error_pickle(ignore): """Test parse error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLParseError("Foo", segment=segment) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err) @pytest.mark.parametrize( "ignore", [True, False], ) def test__lint_error_pickle(ignore): """Test lint error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLLintError("Foo", segment=segment, rule=Rule_T078) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err) sqlfluff-3.4.2/test/core/helpers/000077500000000000000000000000001503426445100167175ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/helpers/dict_test.py000066400000000000000000000050071503426445100212550ustar00rootroot00000000000000"""Tests for dict helpers.""" import doctest import sqlfluff.core.helpers.dict as dict_module from sqlfluff.core.helpers.dict import ( dict_diff, iter_records_from_nested_dict, nested_combine, ) def test_helpers_dict_doctests(): """Run dict helper doctests. Doctests are important for coverage in this module, and coverage isn't currently picked up when we run the doctests via --doctests. That means in this case we run them directly here. https://stackoverflow.com/questions/45261772/how-to-make-pythons-coverage-library-include-doctests """ doctest.testmod(dict_module) def test__helpers_dict__nested_combine(): """Test combination of two config dicts.""" a = {"a": {"b": {"c": 123, "d": 456}}} b = {"b": {"b": {"c": 123, "d": 456}}} c = {"a": {"b": {"c": 234, "e": 456}}} r = nested_combine(a, b, c) assert r == { "a": {"b": {"c": 234, "e": 456, "d": 456}}, "b": {"b": {"c": 123, "d": 456}}, } def test__helpers_dict__nested_combine_copy_effect(): """Verify that nested_combine effectively copies dicts. In particular it's important that even nested dicts are fully isolated, as if not true it can create some very difficult to trace bugs. """ # Set up the original dicts. a = {"a": {"b": {"c": 123, "d": 456}}} b = {"a": {"b": {"c": 234, "e": 567}}, "f": {"g": {"h": "i"}}} r = nested_combine(a, b) # After combination, edit both some of the inputs and one of the outputs. 
a["a"]["b"]["d"] = 999 b["f"]["g"]["h"] = "j" r["a"]["b"]["e"] = 888 # Check that editing the result didn't change the input: assert b["a"]["b"]["e"] == 567 # and not 888 # Check that editing the input didn't change the result: assert r["a"]["b"]["d"] == 456 # and not 999 assert r["f"]["g"]["h"] == "i" # and not "j" def test__helpers_dict__dict_diff(): """Test diffs between two config dicts.""" a = {"a": {"b": {"c": 123, "d": 456, "f": 6}}} b = {"b": {"b": {"c": 123, "d": 456}}} c = {"a": {"b": {"c": 234, "e": 456, "f": 6}}} assert dict_diff(a, b) == a assert dict_diff(a, c) == {"a": {"b": {"c": 123, "d": 456}}} assert dict_diff(c, a) == {"a": {"b": {"c": 234, "e": 456}}} def test__config__iter_records_from_nested_dict(): """Test conversion from nested dict to records.""" c = iter_records_from_nested_dict({"a": {"b": {"c": 123, "d": 456}, "f": 6}}) assert list(c) == [ (("a", "b", "c"), 123), (("a", "b", "d"), 456), (("a", "f"), 6), ] sqlfluff-3.4.2/test/core/helpers/file_test.py000066400000000000000000000073471503426445100212620ustar00rootroot00000000000000"""Test the helpers.""" import sys from pathlib import Path import pytest from sqlfluff.core.helpers.file import get_encoding, iter_intermediate_paths @pytest.mark.parametrize( "fname,config_encoding,result", [ ( "test/fixtures/linter/encoding-utf-8.sql", "autodetect", "ascii", # ascii is a subset of utf-8, this is valid ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "autodetect", "UTF-8-SIG", ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8", "utf-8", ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8", "utf-8", ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8-sig", "utf-8-sig", ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8-sig", "utf-8-sig", ), ], ) def test__parser__helper_get_encoding(fname, config_encoding, result): """Test get_encoding.""" assert ( get_encoding( fname=fname, config_encoding=config_encoding, ) == result ) @pytest.mark.parametrize( "path,working_path,result", [ ( # Standard use case. # SQLFluff run from an outer location, looking to an inner. "test/fixtures/config/inheritance_a/nested/blah.sql", "test/fixtures", # Order should work up from outer to inner [ "test/fixtures", "test/fixtures/config", "test/fixtures/config/inheritance_a", "test/fixtures/config/inheritance_a/nested", ], ), ( # Reverse use case. # SQLFluff running from an inner location, looking to outer. "test/fixtures", "test/fixtures/config/inheritance_a", # Should only return inner, then outer. [ # "test/fixtures/config/inheritance_a", # This SHOULD be present "test/fixtures", ], ), ( # Unrelated use case. # SQLFluff running from an one location, looking to parallel. "test/fixtures", "test/core", # Should each individually, with the working location first [ "test", # This SHOULD NOT be present. # "test/core", # This SHOULD be present. 
"test/fixtures", ], ), ], ) def test__config__iter_config_paths(path, working_path, result): """Test that config paths are fetched ordered by priority.""" cfg_paths = iter_intermediate_paths(Path(path), Path(working_path)) assert [str(p) for p in cfg_paths] == [str(Path(p).resolve()) for p in result] @pytest.mark.skipif(sys.platform != "win32", reason="Only applicable on Windows") def test__config__iter_config_paths_exc_win(): """Test that config path resolution exception handling works on windows.""" cfg_paths = iter_intermediate_paths(Path("J:\\\\"), Path("C:\\\\")) assert list(cfg_paths) == [Path("C:\\\\"), Path("J:\\\\")] @pytest.mark.skipif(sys.platform == "win32", reason="Not applicable on Windows") def test__config__iter_config_paths_exc_unix(): """Test that config path resolution exception handling works on linux.""" cfg_paths = iter_intermediate_paths(Path("/abc/def"), Path("/ghi/jlk")) # NOTE: `/def` doesn't exist, so we'll use it's parent instead because `.is_dir()` # will return false. This should still test the "zero path length" handling routine. assert list(cfg_paths) == [Path("/"), Path("/abc")] sqlfluff-3.4.2/test/core/helpers/slice_test.py000066400000000000000000000032701503426445100214310ustar00rootroot00000000000000"""Test the slice helpers.""" import pytest from sqlfluff.core.helpers.slice import slice_overlaps @pytest.mark.parametrize( "s1,s2,result", [ # Identity case (slice(0, 1), slice(0, 1), True), # Adjoining zero length slices aren't overlaps (slice(1, 1), slice(0, 1), False), (slice(0, 0), slice(0, 1), False), (slice(0, 1), slice(1, 1), False), (slice(0, 1), slice(0, 0), False), # Contained slices are overlaps (slice(0, 3), slice(1, 2), True), (slice(1, 2), slice(0, 3), True), # ...even if they're zero length (slice(0, 3), slice(1, 1), True), (slice(1, 1), slice(0, 3), True), # Easy cases of non-overlaps (slice(1, 2), slice(3, 4), False), (slice(3, 4), slice(1, 2), False), (slice(1, 2), slice(2, 3), False), (slice(2, 3), slice(1, 2), False), # Partial overlaps are overlaps (slice(1, 3), slice(2, 4), True), (slice(2, 4), slice(1, 3), True), ], ) def test__parser__slice_overlaps_result(s1, s2, result): """Test _findall.""" assert slice_overlaps(s1, s2) == result @pytest.mark.parametrize( "s1,s2", [ # Check None situations (slice(None, 1), slice(0, 1)), (slice(0, None), slice(0, 1)), (slice(0, 1), slice(None, 1)), (slice(0, 1), slice(0, None)), (slice(None, None), slice(None, None)), # Check positivity (slice(1, 0), slice(0, 1)), (slice(0, 1), slice(1, 0)), ], ) def test__parser__slice_overlaps_error(s1, s2): """Test assertions of slice_overlaps.""" with pytest.raises(AssertionError): slice_overlaps(s1, s2) sqlfluff-3.4.2/test/core/helpers/string_test.py000066400000000000000000000016241503426445100216410ustar00rootroot00000000000000"""Test the string helpers.""" import pytest from sqlfluff.core.helpers.string import findall, split_comma_separated_string @pytest.mark.parametrize( "mainstr,substr,positions", [ ("", "", []), ("a", "a", [0]), ("foobar", "o", [1, 2]), ("bar bar bar bar", "bar", [0, 4, 8, 12]), ], ) def test__helpers_string__findall(mainstr, substr, positions): """Test _findall.""" assert list(findall(substr, mainstr)) == positions @pytest.mark.parametrize( "raw_str, expected", [ ("AL01,LT08,AL07", ["AL01", "LT08", "AL07"]), ("\nAL01,\nLT08,\nAL07,", ["AL01", "LT08", "AL07"]), (["AL01", "LT08", "AL07"], ["AL01", "LT08", "AL07"]), ], ) def test__helpers_string__split_comma_separated_string(raw_str, expected): """Tests that string and lists are output 
correctly.""" assert split_comma_separated_string(raw_str) == expected sqlfluff-3.4.2/test/core/linter/000077500000000000000000000000001503426445100165525ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/linter/__init__.py000066400000000000000000000000461503426445100206630ustar00rootroot00000000000000"""Tests for sqlfluff.core.linter.""" sqlfluff-3.4.2/test/core/linter/discovery_test.py000066400000000000000000000110521503426445100221710ustar00rootroot00000000000000"""Tests the path routines from the Linter class.""" import os import pytest from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.linter.discovery import _load_specs_from_lines, paths_from_path def normalise_paths(paths): """Test normalising paths. NB Paths on difference platforms might look different, so this makes them comparable. """ return {pth.replace("/", ".").replace("\\", ".") for pth in paths} def test__linter__path_from_paths__dir(): """Test extracting paths from directories.""" paths = paths_from_path("test/fixtures/lexer") assert normalise_paths(paths) == { "test.fixtures.lexer.block_comment.sql", "test.fixtures.lexer.inline_comment.sql", "test.fixtures.lexer.basic.sql", } def test__linter__path_from_paths__default(): """Test .sql files are found by default.""" paths = normalise_paths(paths_from_path("test/fixtures/linter")) assert "test.fixtures.linter.passing.sql" in paths assert "test.fixtures.linter.passing_cap_extension.SQL" in paths assert "test.fixtures.linter.discovery_file.txt" not in paths def test__linter__path_from_paths__exts(): """Test configuration of file discovery.""" paths = normalise_paths( paths_from_path("test/fixtures/linter", target_file_exts=[".txt", ".txt.j2"]) ) assert "test.fixtures.linter.passing.sql" not in paths assert "test.fixtures.linter.passing_cap_extension.SQL" not in paths assert "test.fixtures.linter.discovery_file.txt" in paths assert "test.fixtures.linter.discovery_file.txt.j2" in paths def test__linter__path_from_paths__file(): """Test extracting paths from a file path.""" paths = paths_from_path("test/fixtures/linter/indentation_errors.sql") assert normalise_paths(paths) == {"test.fixtures.linter.indentation_errors.sql"} def test__linter__path_from_paths__not_exist(): """Test that the right errors are raise when a file doesn't exist.""" with pytest.raises(SQLFluffUserError): paths_from_path("asflekjfhsakuefhse") def test__linter__path_from_paths__not_exist_ignore(): """Test extracting paths from a file path.""" paths = paths_from_path("asflekjfhsakuefhse", ignore_non_existent_files=True) assert len(paths) == 0 def test__linter__path_from_paths__explicit_ignore(): """Test ignoring files that were passed explicitly.""" paths = paths_from_path( "test/fixtures/linter/sqlfluffignore/path_a/query_a.sql", ignore_non_existent_files=True, ignore_files=True, working_path="test/fixtures/linter/sqlfluffignore/", ) assert len(paths) == 0 def test__linter__path_from_paths__sqlfluffignore_current_directory(): """Test that .sqlfluffignore in the current directory is read when dir given.""" oldcwd = os.getcwd() try: os.chdir("test/fixtures/linter/sqlfluffignore") paths = paths_from_path( "path_a/", ignore_non_existent_files=True, ignore_files=True, working_path="test/fixtures/linter/sqlfluffignore/", ) assert len(paths) == 0 finally: os.chdir(oldcwd) def test__linter__path_from_paths__dot(): """Test extracting paths from a dot.""" # Use set theory to check that we get AT LEAST these files assert normalise_paths(paths_from_path(".")) >= { 
"test.fixtures.lexer.block_comment.sql", "test.fixtures.lexer.inline_comment.sql", "test.fixtures.lexer.basic.sql", } @pytest.mark.parametrize( "path", [ "test/fixtures/linter/sqlfluffignore", "test/fixtures/linter/sqlfluffignore/", "test/fixtures/linter/sqlfluffignore/.", ], ) def test__linter__path_from_paths__ignore(path): """Test extracting paths from a dot.""" # We should only get query_b, because of the sqlfluffignore files. assert normalise_paths(paths_from_path(path)) == { "test.fixtures.linter.sqlfluffignore.path_b.query_b.sql" } def test__linter__path_from_paths__specific_bad_ext(): """Test we get no match if a path with the wrong extension is passed.""" assert paths_from_path("README.md") == [] @pytest.mark.parametrize( "lines", [ 12345, # Something not iterable ["!"], # An iterable, with an invalid pattern in it. ], ) def test__linter__load_specs_from_lines(lines): """Test the unhappy path of _load_specs_from_lines. This is typically if we pass something un-iterable, or an invalid pattern """ with pytest.raises(SQLFluffUserError): _load_specs_from_lines(lines, "") sqlfluff-3.4.2/test/core/linter/fix_test.py000066400000000000000000000157721503426445100207650ustar00rootroot00000000000000"""Test routines for fixing errors.""" import logging import pytest from sqlfluff.core.linter.fix import compute_anchor_edit_info from sqlfluff.core.linter.patch import FixPatch, generate_source_patches from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, RawSegment, TemplateSegment, ) from sqlfluff.core.parser.segments.raw import SourceFix from sqlfluff.core.rules.fix import LintFix from sqlfluff.core.templaters import RawFileSlice, TemplatedFile from sqlfluff.core.templaters.base import TemplatedFileSlice @pytest.fixture(scope="module") def raw_segments(generate_test_segments): """Construct a list of raw segments as a fixture.""" return generate_test_segments(["foobar", ".barfoo"]) def test__rules_base_segments_compute_anchor_edit_info(raw_segments): """Test BaseSegment.compute_anchor_edit_info().""" # Construct a fix buffer, intentionally with: # - one duplicate. # - two different incompatible fixes on the same segment. fixes = [ LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="b")]), ] anchor_info_dict = compute_anchor_edit_info(fixes) # Check the target segment is the only key we have. assert list(anchor_info_dict.keys()) == [raw_segments[0].uuid] anchor_info = anchor_info_dict[raw_segments[0].uuid] # Check that the duplicate as been deduplicated. # i.e. this isn't 3. assert anchor_info.replace == 2 # Check the fixes themselves. # NOTE: There's no duplicated first fix. 
assert anchor_info.fixes == [ LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="b")]), ] # Check the first replace assert anchor_info._first_replace == LintFix.replace( raw_segments[0], [raw_segments[0].edit(raw="a")] ) templated_file_1 = TemplatedFile.from_string("abc") templated_file_2 = TemplatedFile( "{# blah #}{{ foo }}bc", "", "abc", [ TemplatedFileSlice("comment", slice(0, 10), slice(0, 0)), TemplatedFileSlice("templated", slice(10, 19), slice(0, 1)), TemplatedFileSlice("literal", slice(19, 21), slice(1, 3)), ], [ RawFileSlice("{# blah #}", "comment", 0), RawFileSlice("{{ foo }}", "templated", 10), RawFileSlice("bc", "literal", 19), ], ) @pytest.mark.parametrize( "tree,templated_file,expected_result", [ # Trivial example ( RawSegment( "abc", PositionMarker(slice(0, 3), slice(0, 3), templated_file_1), "code", ), templated_file_1, [], ), # Simple literal edit example ( RawSegment( "abz", PositionMarker(slice(0, 3), slice(0, 3), templated_file_1), "code", ), templated_file_1, [FixPatch(slice(0, 3), "abz", "literal", slice(0, 3), "abc", "abc")], ), # Nested literal edit example ( BaseSegment( [ RawSegment( "a", PositionMarker(slice(0, 1), slice(0, 1), templated_file_1), "code", ), RawSegment( "b", PositionMarker(slice(1, 2), slice(1, 2), templated_file_1), "code", ), RawSegment( "z", PositionMarker(slice(2, 3), slice(2, 3), templated_file_1), "code", ), ] ), templated_file_1, [FixPatch(slice(0, 3), "abz", "literal", slice(0, 3), "abc", "abc")], ), # More complicated templating example ( BaseSegment( [ TemplateSegment( PositionMarker(slice(0, 10), slice(0, 0), templated_file_2), "{# blah #}", "comment", ), RawSegment( "a", PositionMarker(slice(10, 20), slice(0, 1), templated_file_2), "code", ), RawSegment( "b", PositionMarker(slice(19, 20), slice(1, 2), templated_file_2), "code", ), RawSegment( "z", PositionMarker(slice(20, 21), slice(2, 3), templated_file_2), "code", ), ] ), templated_file_2, [FixPatch(slice(2, 3), "z", "literal", slice(20, 21), "c", "c")], ), # Templating example with fixes ( BaseSegment( [ TemplateSegment( PositionMarker(slice(0, 10), slice(0, 0), templated_file_2), "{# blah #}", "comment", source_fixes=[ SourceFix("{# fixed #}", slice(0, 10), slice(0, 0)) ], ), RawSegment( "a", PositionMarker(slice(10, 19), slice(0, 1), templated_file_2), "code", source_fixes=[ SourceFix("{{ bar }}", slice(10, 19), slice(0, 1)) ], ), RawSegment( "b", PositionMarker(slice(19, 20), slice(1, 2), templated_file_2), "code", ), RawSegment( "z", PositionMarker(slice(20, 21), slice(2, 3), templated_file_2), "code", ), ] ), templated_file_2, [ FixPatch( slice(0, 0), "{# fixed #}", "source", slice(0, 10), "", "{# blah #}" ), FixPatch( slice(0, 1), "{{ bar }}", "source", slice(10, 19), "a", "{{ foo }}" ), FixPatch(slice(2, 3), "z", "literal", slice(20, 21), "c", "c"), ], ), ], ) def test__fix__generate_source_patches(tree, templated_file, expected_result, caplog): """Test generate_source_patches. This is part of fix_string(). 
""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = generate_source_patches(tree, templated_file) assert result == expected_result sqlfluff-3.4.2/test/core/linter/linted_file_test.py000066400000000000000000000221011503426445100224350ustar00rootroot00000000000000"""Tests covering the LintedFile class and it's methods.""" import logging import pytest from sqlfluff.core.linter import LintedFile from sqlfluff.core.linter.patch import FixPatch from sqlfluff.core.templaters import RawFileSlice @pytest.mark.parametrize( "source_slices,source_patches,raw_source_string,expected_result", # NOTE: For all of these examples we're not setting the patch_category # of the fix patches. They're not used at this step so irrelevant for # testing. [ # Trivial example ([slice(0, 1)], [], "a", "a"), # Simple replacement ( [slice(0, 1), slice(1, 2), slice(2, 3)], [FixPatch(slice(1, 2), "d", "", slice(1, 2), "b", "b")], "abc", "adc", ), # Simple insertion ( [slice(0, 1), slice(1, 1), slice(1, 2)], [FixPatch(slice(1, 1), "b", "", slice(1, 1), "", "")], "ac", "abc", ), # Simple deletion ( [slice(0, 1), slice(1, 2), slice(2, 3)], [FixPatch(slice(1, 2), "", "", slice(1, 2), "b", "b")], "abc", "ac", ), # Illustrative templated example (although practically at # this step, the routine shouldn't care if it's templated). ( [slice(0, 2), slice(2, 7), slice(7, 9)], [FixPatch(slice(2, 3), "{{ b }}", "", slice(2, 7), "b", "{{b}}")], "a {{b}} c", "a {{ b }} c", ), ], ) def test__linted_file__build_up_fixed_source_string( source_slices, source_patches, raw_source_string, expected_result, caplog ): """Test _build_up_fixed_source_string. This is part of fix_string(). """ with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = LintedFile._build_up_fixed_source_string( source_slices, source_patches, raw_source_string ) assert result == expected_result @pytest.mark.parametrize( "source_patches,source_only_slices,raw_source_string,expected_result", # NOTE: For all of these examples we're not setting the patch_category # of the fix patches. They're not used at this step so irrelevant for # testing. [ # Trivial example. # No edits in a single character file. Slice should be one # character long. ([], [], "a", [slice(0, 1)]), # Simple replacement. # We've yielded a patch to change a single character. This means # we should get only slices for that character, and for the # unchanged file around it. ( [FixPatch(slice(1, 2), "d", "", slice(1, 2), "b", "b")], [], "abc", [slice(0, 1), slice(1, 2), slice(2, 3)], ), # Templated no fixes. # A templated file, but with no fixes, so no subdivision of the # file is required and we should just get a single slice. ( [], [], "a {{ b }} c", [slice(0, 11)], ), # Templated example with a source-only slice. # A templated file, but with no fixes, so no subdivision of the # file is required and we should just get a single slice. While # there is handling for "source only" slices like template # comments, in this case no additional slicing is required # because no edits have been made. ( [], [RawFileSlice("{# b #}", "comment", 2)], "a {# b #} c", [slice(0, 11)], ), # Templated fix example with a source-only slice. # We're making an edit adjacent to a source only slice. Edits # _before_ source only slices currently don't trigger additional # slicing. This is fine. ( [FixPatch(slice(0, 1), "a ", "", slice(0, 1), "a", "a")], [RawFileSlice("{# b #}", "comment", 1)], "a{# b #}c", [slice(0, 1), slice(1, 9)], ), # Templated fix example with a source-only slice. 
        # We've made an edit directly _after_ a source only slice
        # which should trigger the logic to ensure that the source
        # only slice isn't included in the source mapping of the
        # edit.
        # TODO: given that the logic is based on the _type_
        # of the slice (e.g. comment), would we handle a
        # template tag which returns an empty string correctly?
        (
            [FixPatch(slice(1, 2), " c", "", slice(8, 9), "c", "c")],
            [RawFileSlice("{# b #}", "comment", 1)],
            "a{# b #}cc",
            [slice(0, 1), slice(1, 8), slice(8, 9), slice(9, 10)],
        ),
        # Templated example with a source-only slice.
        # Here we're making the fix to the templated slice. This
        # checks that we don't duplicate or fumble the slice
        # generation when we're explicitly trying to edit the source.
        # TODO: Should we be using the fix type (e.g. "source")
        # to somehow determine whether the fix is "safe"?
        (
            [FixPatch(slice(2, 2), "{# fixed #}", "", slice(2, 9), "", "")],
            [RawFileSlice("{# b #}", "comment", 2)],
            "a {# b #} c",
            [slice(0, 2), slice(2, 9), slice(9, 11)],
        ),
        # Illustrate potential templating bug (case from JJ01).
        # In this case we have fixes for all our templated sections
        # and they are all close to each other and so may be either
        # skipped or duplicated if the logic is not precise.
        (
            [
                FixPatch(
                    templated_slice=slice(14, 14),
                    fixed_raw="{%+ if true -%}",
                    patch_category="source",
                    source_slice=slice(14, 27),
                    templated_str="",
                    source_str="{%+if true-%}",
                ),
                FixPatch(
                    templated_slice=slice(14, 14),
                    fixed_raw="{{ ref('foo') }}",
                    patch_category="source",
                    source_slice=slice(28, 42),
                    templated_str="",
                    source_str="{{ref('foo')}}",
                ),
                FixPatch(
                    templated_slice=slice(17, 17),
                    fixed_raw="{%- endif %}",
                    patch_category="source",
                    source_slice=slice(43, 53),
                    templated_str="",
                    source_str="{%-endif%}",
                ),
            ],
            [
                RawFileSlice(
                    raw="{%+if true-%}",
                    slice_type="block_start",
                    source_idx=14,
                    block_idx=0,
                ),
                RawFileSlice(
                    raw="{%-endif%}",
                    slice_type="block_end",
                    source_idx=43,
                    block_idx=1,
                ),
            ],
            "SELECT 1 from {%+if true-%} {{ref('foo')}} {%-endif%}",
            [
                slice(0, 14),
                slice(14, 27),
                slice(27, 28),
                slice(28, 42),
                slice(42, 43),
                slice(43, 53),
            ],
        ),
    ],
)
def test__linted_file__slice_source_file_using_patches(
    source_patches, source_only_slices, raw_source_string, expected_result, caplog
):
    """Test _slice_source_file_using_patches.

    This is part of fix_string().
""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = LintedFile._slice_source_file_using_patches( source_patches, source_only_slices, raw_source_string ) assert result == expected_result @pytest.mark.parametrize( "case", [ dict( name="utf8_create", fname="test.sql", encoding="utf-8", existing=None, update="def", expected="def", ), dict( name="utf8_update", fname="test.sql", encoding="utf-8", existing="abc", update="def", expected="def", ), dict( name="utf8_special_char", fname="test.sql", encoding="utf-8", existing="abc", update="→", # Special utf-8 character expected="→", ), dict( name="incorrect_encoding", fname="test.sql", encoding="Windows-1252", existing="abc", update="→", # Not valid in Windows-1252 expected="abc", # File should be unchanged ), ], ids=lambda case: case["name"], ) def test_safe_create_replace_file(case, tmp_path): """Test creating or updating .sql files, various content and encoding.""" p = tmp_path / case["fname"] if case["existing"]: p.write_text(case["existing"]) try: LintedFile._safe_create_replace_file( str(p), str(p), case["update"], case["encoding"] ) except Exception: pass actual = p.read_text(encoding=case["encoding"]) assert case["expected"] == actual sqlfluff-3.4.2/test/core/linter/linter_test.py000066400000000000000000000545661503426445100215000ustar00rootroot00000000000000"""Tests for the Linter class and LintingResult class.""" import logging import os from unittest.mock import patch import pytest from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.cli.outputstream import make_output_stream from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import ( SQLBaseError, SQLFluffSkipFile, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterError, ) from sqlfluff.core.linter import runner from sqlfluff.core.linter.linting_result import combine_dicts, sum_dicts from sqlfluff.core.linter.runner import get_runner from sqlfluff.utils.testing.logging import fluff_log_catcher class DummyLintError(SQLBaseError): """Fake lint error used by tests, similar to SQLLintError.""" def __init__(self, line_no: int, code: str = "LT01"): self._code = code super().__init__(line_no=line_no) def normalise_paths(paths): """Test normalising paths. NB Paths on difference platforms might look different, so this makes them comparable. """ return {pth.replace("/", ".").replace("\\", ".") for pth in paths} @pytest.mark.parametrize("filesize,raises_skip", [(0, False), (5, True), (2000, False)]) def test__linter__skip_large_bytes(filesize, raises_skip): """Test extracting paths from a file path.""" config = FluffConfig( overrides={"large_file_skip_byte_limit": filesize, "dialect": "ansi"} ) # First check the function directly if raises_skip: with pytest.raises(SQLFluffSkipFile) as excinfo: Linter.load_raw_file_and_config( "test/fixtures/linter/indentation_errors.sql", config ) assert "Skipping" in str(excinfo.value) assert f"over the limit of {filesize}" in str(excinfo.value) # If NOT raises, then we'll catch the raise an error and the test will fail. # Then check that it either is or isn't linted appropriately via lint_paths. lntr = Linter(config) result = lntr.lint_paths( ("test/fixtures/linter/indentation_errors.sql",), ) if raises_skip: assert not result.get_violations() else: assert result.get_violations() # Same again via parse_path, which is the other entry point. 
    result = list(
        lntr.parse_path(
            "test/fixtures/linter/indentation_errors.sql",
        )
    )
    if raises_skip:
        assert not result
    else:
        assert result


@pytest.mark.parametrize(
    "path",
    [
        "test/fixtures/linter/indentation_errors.sql",
        "test/fixtures/linter/whitespace_errors.sql",
    ],
)
def test__linter__lint_string_vs_file(path):
    """Test the linter finds the same things on strings and files."""
    with open(path) as f:
        sql_str = f.read()
    lntr = Linter(dialect="ansi")
    assert (
        lntr.lint_string(sql_str).check_tuples() == lntr.lint_path(path).check_tuples()
    )


@pytest.mark.parametrize(
    "byte_lim, raises",
    [
        (0, False),
        (None, False),
        (200, False),
        ("200", False),
        ("Not a Valid value", True),
        ("None", True),
        ([1], True),
    ],
)
def test__linter__large_file_skip_byte_limit__setting(byte_lim, raises):
    """Test custom values for large_file_skip_byte_limit.

    Linter should raise an error only in cases where the value really is invalid.
    """
    config = FluffConfig(
        overrides={"large_file_skip_byte_limit": byte_lim, "dialect": "ansi"}
    )
    try:
        Linter.load_raw_file_and_config(
            "test/fixtures/linter/indentation_errors.sql", config
        )
        assert not raises
    except (ValueError, TypeError):
        assert raises


@pytest.mark.parametrize(
    "rules,num_violations", [(None, 7), ("CP01", 2), (("LT01", "LT12"), 1)]
)
def test__linter__get_violations_filter_rules(rules, num_violations):
    """Test filtering violations by which rules were violated."""
    lntr = Linter(dialect="ansi")
    lint_result = lntr.lint_string("select a, b FROM tbl c order BY d")
    assert len(lint_result.get_violations(rules=rules)) == num_violations


def test__linter__linting_result__sum_dicts():
    """Test the summing of dictionaries in the linter."""
    i = {}
    a = dict(a=3, b=123, f=876.321)
    b = dict(a=19, b=321.0, g=23478)
    r = dict(a=22, b=444.0, f=876.321, g=23478)
    assert sum_dicts(a, b) == r
    # Check the identity too
    assert sum_dicts(r, i) == r


def test__linter__linting_result__combine_dicts():
    """Test the combination of dictionaries in the linter."""
    a = dict(a=3, b=123, f=876.321)
    b = dict(h=19, i=321.0, j=23478)
    r = dict(z=22)
    assert combine_dicts(a, b, r) == dict(
        a=3, b=123, f=876.321, h=19, i=321.0, j=23478, z=22
    )


def test__linter__linting_result_check_tuples():
    """Test that a LintingResult can return its violations as check tuples."""
    lntr = Linter()
    result = lntr.lint_paths(
        (
            "test/fixtures/linter/comma_errors.sql",
            "test/fixtures/linter/whitespace_errors.sql",
        )
    )
    check_tuples = result.check_tuples()
    assert isinstance(check_tuples, list)
    assert check_tuples == [
        ("LT09", 2, 1),
        ("LT04", 4, 5),
        ("LT02", 5, 1),
        ("LT04", 5, 1),
        ("LT02", 6, 1),
        ("AL02", 6, 5),
        ("LT01", 6, 6),
        ("CP01", 8, 1),
        ("LT09", 1, 1),
        ("LT01", 2, 9),
        ("LT01", 3, 12),
        ("LT02", 4, 1),
        ("CP01", 6, 10),
    ]


def test__linter__linting_result_check_tuples_by_path():
    """Test that a LintingResult can partition violations by the source files."""
    lntr = Linter()
    result = lntr.lint_paths(
        (
            "test/fixtures/linter/comma_errors.sql",
            "test/fixtures/linter/whitespace_errors.sql",
        )
    )
    check_tuples = result.check_tuples_by_path()
    assert isinstance(check_tuples, dict)
    # Normalise the paths in the keys.
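    # Illustrative note (not part of the original test): on Windows the keys
    # of this dict come back with backslash separators, so the comprehension
    # below rewrites them to posix form before comparison, e.g. roughly:
    #
    #     "test\\fixtures\\linter\\comma_errors.sql".replace("\\", "/")
    #     # -> "test/fixtures/linter/comma_errors.sql"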
    check_tuples = {k.replace("\\", "/"): v for k, v in check_tuples.items()}
    assert check_tuples == {
        "test/fixtures/linter/comma_errors.sql": [
            ("LT09", 2, 1),
            ("LT04", 4, 5),
            ("LT02", 5, 1),
            ("LT04", 5, 1),
            ("LT02", 6, 1),
            ("AL02", 6, 5),
            ("LT01", 6, 6),
            ("CP01", 8, 1),
        ],
        "test/fixtures/linter/whitespace_errors.sql": [
            ("LT09", 1, 1),
            ("LT01", 2, 9),
            ("LT01", 3, 12),
            ("LT02", 4, 1),
            ("CP01", 6, 10),
        ],
    }


@pytest.mark.parametrize(
    "path,stats",
    [
        (
            "multifile_a",
            {
                "avg per file": 2.5,
                "clean": 0,
                "clean files": 0,
                "exit code": 111,
                "files": 2,
                "status": "FAIL",
                "unclean": 2,
                "unclean files": 2,
                "unclean rate": 1.0,
                "violations": 5,
            },
        ),
        (
            "multifile_b",
            {
                "avg per file": 2.0,
                "clean": 0,
                "clean files": 0,
                "exit code": 111,
                "files": 2,
                "status": "FAIL",
                "unclean": 2,
                "unclean files": 2,
                "unclean rate": 1.0,
                "violations": 4,
            },
        ),
    ],
)
def test__linter__linting_result_stats(path, stats):
    """Test that a LintingResult can get the right stats with multiple files.

    https://github.com/sqlfluff/sqlfluff/issues/5673
    """
    lntr = Linter()
    result = lntr.lint_paths((f"test/fixtures/linter/exit_codes/{path}",))
    # NOTE: We're using fake return codes for testing purposes.
    assert result.stats(111, 222) == stats


@pytest.mark.parametrize("processes", [1, 2])
def test__linter__linting_result_get_violations(processes):
    """Test that we can get violations from a LintingResult."""
    lntr = Linter()
    result = lntr.lint_paths(
        (
            "test/fixtures/linter/comma_errors.sql",
            "test/fixtures/linter/whitespace_errors.sql",
        ),
        processes=processes,
    )
    assert all([isinstance(v, SQLLintError) for v in result.get_violations()])


@pytest.mark.parametrize("force_error", [False, True])
def test__linter__linting_parallel_thread(force_error, monkeypatch):
    """Run linter in parallel mode using threads.

    Similar to test__linter__linting_result_get_violations but uses a thread
    pool of 1 worker to test parallel mode without subprocesses. This lets the
    tests capture code coverage information for the backend parts of parallel
    execution without having to jump through hoops.
    """
    if not force_error:
        monkeypatch.setattr(Linter, "allow_process_parallelism", False)
    else:

        def _create_pool(*args, **kwargs):
            class ErrorPool:
                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_val, exc_tb):
                    pass

                def imap_unordered(self, *args, **kwargs):
                    yield runner.DelayedException(ValueError())

            return ErrorPool()

        monkeypatch.setattr(runner.MultiProcessRunner, "_create_pool", _create_pool)

    config = FluffConfig(overrides={"dialect": "ansi"})
    output_stream = make_output_stream(config, None, os.devnull)
    lntr = Linter(
        formatter=OutputStreamFormatter(output_stream, False, verbosity=0),
        dialect="ansi",
    )
    result = lntr.lint_paths(
        # NOTE: Lint more than one file to make sure we enabled the multithreaded
        # code path.
        (
            "test/fixtures/linter/comma_errors.sql",
            "test/fixtures/linter/whitespace_errors.sql",
        ),
        processes=2,
    )
    assert all([isinstance(v, SQLLintError) for v in result.get_violations()])


@patch("sqlfluff.core.linter.Linter.lint_rendered")
def test_lint_path_parallel_wrapper_exception(patched_lint):
    """Tests the error catching behavior of _lint_path_parallel_wrapper().

    Test on MultiThread runner because otherwise we have pickling issues.
""" patched_lint.side_effect = ValueError("Something unexpected happened") for result in runner.MultiThreadRunner( Linter(), FluffConfig(overrides={"dialect": "ansi"}), processes=1 ).run( ["test/fixtures/linter/passing.sql"], fix=False, ): assert isinstance(result, runner.DelayedException) with pytest.raises(ValueError): result.reraise() @pytest.mark.parametrize( "mock_cpu,in_processes,exp_processes", [ # Make the mocked cpu count a really high value which is # unlikely to collide with the real value. We can then # test all the different combos. (512, 1, 1), (512, 0, 512), (512, -12, 500), (512, 5, 5), # Check that we can't go lower than 1 in a 1 cpu case (1, -1, 1), ], ) @patch("multiprocessing.cpu_count") def test__linter__get_runner_processes( patched_cpu_count, mock_cpu, in_processes, exp_processes ): """Test that get_runner handles processes correctly.""" # Make the mocked cpu count a really high value which is # unlikely to collide with the real value. patched_cpu_count.return_value = mock_cpu _, return_processes = get_runner( linter=Linter(), config=FluffConfig(overrides={"dialect": "ansi"}), processes=in_processes, ) assert return_processes == exp_processes @patch("sqlfluff.core.linter.runner.linter_logger") @patch("sqlfluff.core.linter.Linter.lint_rendered") def test__linter__linting_unexpected_error_handled_gracefully( patched_lint, patched_logger ): """Test that an unexpected internal error returns the issue-surfacing file.""" patched_lint.side_effect = Exception("Something unexpected happened") lntr = Linter() lntr.lint_paths(("test/fixtures/linter/passing.sql",)) assert ( "Unable to lint test/fixtures/linter/passing.sql due to an internal error." # NB: Replace is to handle windows-style paths. in patched_logger.warning.call_args[0][0].replace("\\", "/") and "Exception: Something unexpected happened" in patched_logger.warning.call_args[0][0] ) def test__linter__empty_file(): """Test linter behaves nicely with an empty string. Much of this test is about making sure that ParsedString is instantiated appropriately. """ lntr = Linter(dialect="ansi") # Make sure no exceptions raised and no violations found in empty file. parsed = lntr.parse_string("") # There should still be a parsed variant assert parsed.parsed_variants assert len(parsed.parsed_variants) == 1 root_variant = parsed.parsed_variants[0] # That root variant should still have a templated file and a parsed tree # (although that parsed tree will likely just be an end of file marker). assert root_variant.templated_file assert root_variant.tree # No violations assert not parsed.violations def test__linter__parse_fail(): """Test linter behaves as expected with an unparsable string. Much of this test is about making sure that ParsedString is instantiated appropriately. """ lntr = Linter(dialect="ansi") # Try and parse something which obviously isn't SQL parsed = lntr.parse_string("THIS IS NOT SQL") # There should still be a parsed variant assert parsed.parsed_variants assert len(parsed.parsed_variants) == 1 root_variant = parsed.parsed_variants[0] # That root variant should still have a templated file and a parsed tree... assert root_variant.templated_file assert root_variant.tree # ...but that tree should contain an unparsable segment. assert "unparsable" in root_variant.tree.type_set() # There *should* be violations because there should be a parsing fail. 
assert parsed.violations assert any(isinstance(v, SQLParseError) for v in parsed.violations) def test__linter__templating_fail(): """Test linter behaves as expected with invalid jinja template. Much of this test is about making sure that ParsedString is instantiated appropriately. """ lntr = Linter(dialect="ansi") # Try and parse something which breaks Jinja templating. parsed = lntr.parse_string("{% if foo %}") # For a templating fail, there won't be a parsed variant. assert not parsed.parsed_variants # There *should* be violations because there should be a templating fail. assert parsed.violations assert any(isinstance(v, SQLTemplaterError) for v in parsed.violations) @pytest.mark.parametrize( "path,rules,ignore_templated_areas,check_tuples", [ ( "test/fixtures/templater/jinja_h_macros/jinja.sql", "L006", True, [("LT01", 3, 39), ("LT01", 3, 40)], ), ( "test/fixtures/templater/jinja_h_macros/jinja.sql", "L006", False, [ # there are still two of each because LT01 checks # for both *before* and *after* the operator. # The deduplication filter makes sure there aren't 4. ("LT01", 3, 16), ("LT01", 3, 16), ("LT01", 3, 39), ("LT01", 3, 40), ], ), ( "test/fixtures/linter/jinja_variants/simple_CP01.sql", "CP01", False, [ # We should get violations from both sides of the if # statement without doubling up on the one outside. ("CP01", 2, 10), ("CP01", 2, 34), ("CP01", 2, 52), ], ), ], ) def test__linter__mask_templated_violations( path, rules, ignore_templated_areas, check_tuples ): """Test linter masks files properly around templated content. NOTE: this also tests deduplication of fixes which have the same source position. i.e. `LintedFile.deduplicate_in_source_space()`. """ lntr = Linter( config=FluffConfig( overrides={ "rules": rules, "ignore_templated_areas": ignore_templated_areas, "dialect": "ansi", } ) ) linted = lntr.lint_path(path=path) assert linted.check_tuples() == check_tuples @pytest.mark.parametrize( "fname,config_encoding,lexerror", [ ( "test/fixtures/linter/encoding-utf-8.sql", "autodetect", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "autodetect", False, ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8", True, ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8-sig", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8-sig", False, ), ], ) def test__linter__encoding(fname, config_encoding, lexerror): """Test linter deals with files with different encoding.""" lntr = Linter( config=FluffConfig( overrides={ "rules": "LT01", "encoding": config_encoding, "dialect": "ansi", } ) ) result = lntr.lint_paths((fname,)) assert lexerror == (SQLLexError in [type(v) for v in result.get_violations()]) def test_delayed_exception(): """Test that DelayedException stores and reraises a stored exception.""" ve = ValueError() de = runner.DelayedException(ve) with pytest.raises(ValueError): de.reraise() def test__attempt_to_change_templater_warning(): """Test warning when changing templater in .sqlfluff file in subdirectory.""" initial_config = FluffConfig( configs={"core": {"templater": "jinja", "dialect": "ansi"}} ) lntr = Linter(config=initial_config) updated_config = FluffConfig( configs={"core": {"templater": "python", "dialect": "ansi"}} ) with fluff_log_catcher(logging.WARNING, "sqlfluff.linter") as caplog: lntr.render_string( in_str="select * from table", fname="test.sql", config=updated_config, encoding="utf-8", ) assert "Attempt to set templater to " in caplog.text def 
test_advanced_api_methods(): """Test advanced API methods on segments.""" # These aren't used by the simple API, which returns # a simple JSON representation of the parse tree, but # are available for advanced API usage and within rules. sql = """ WITH cte AS ( SELECT * FROM tab_a ) SELECT cte.col_a, tab_b.col_b FROM cte INNER JOIN tab_b; """ linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) # CTEDefinitionSegment.get_identifier cte_segment = next(parsed.tree.recursive_crawl("common_table_expression")) assert cte_segment.get_identifier().raw == "cte" # BaseFileSegment.get_table_references & StatementSegment.get_table_references assert parsed.tree.get_table_references() == {"tab_a", "tab_b"} def test_normalise_newlines(): """Test normalising newlines to unix-style line endings.""" in_str = "SELECT\r\n foo\n FROM \r \n\r bar;" out_str = "SELECT\n foo\n FROM \n \n\n bar;" assert out_str == Linter._normalise_newlines(in_str) @pytest.mark.parametrize( "fix_even_unparsable", [False, True], ) def test_unparsable_fix_output(fix_even_unparsable): """Tests functionality and logging output with unparsable sections. NOTE: While we cover different paths, the result for this test is the same for both values of `fix_even_unparsable`. We probably need a better test case at some point so that we can actually see the difference. """ config = FluffConfig( overrides={"fix_even_unparsable": fix_even_unparsable, "dialect": "ansi"} ) linter = Linter(config=config) # Attempt to fix it, capturing the logging output. with fluff_log_catcher(logging.WARNING, "sqlfluff.linter") as caplog: result = linter.lint_paths( ("test/fixtures/linter/parse_error_2.sql",), fix=True, apply_fixes=True, fixed_file_suffix=f"_{fix_even_unparsable}_fix", fix_even_unparsable=fix_even_unparsable, ) # Assert that it parsed (i.e. we found a select_statement), but with an # unparsable section in there too. assert result.tree assert "select_statement" in result.tree.descendant_type_set assert "unparsable" in result.tree.descendant_type_set # We should still find linting issues too assert result.check_tuples(raise_on_non_linting_violations=False) == [ ("CP01", 2, 7), # `a as b` - capitalisation of AS ("AL03", 3, 5), # 42 is an expression without an alias # The unparsable section is (wrongly) detected as an indentation issue. ("LT02", 4, 1), ("CP01", 5, 1), # `from` is uncapitalised ] # We should make sure that the warning that asks users to report a bug is # NOT present. i.e. the warning which could happen in `lint_fix_parsed()`.` assert "Please report this as a bug" not in caplog.text # Also not the `fix not applied`. The one in `_warn_unfixable()` assert "it would re-cause the same error" not in caplog.text # In fact, there shouldn't be any warnings at all. assert not caplog.text.strip() # In both cases, the final capitalisation and the `a as b` sections should have # been fixed (because they aren't in the unparsable section). assert "from cte" not in result.tree.raw assert "FROM cte" in result.tree.raw assert "a as b" not in result.tree.raw assert "a AS b" in result.tree.raw # Check whether the file was persisted. If `fix_even_unparsable` was set, then # there should be a file, and it should have the fixes from above in it. If not # then there should be no fixed file, as the persist will have been aborted due # to the parsing issues. 
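    # For context (an assumption, not part of the original test): the same
    # behaviour can be enabled in a project `.sqlfluff` file rather than via
    # `overrides`, e.g.:
    #
    #     [sqlfluff]
    #     fix_even_unparsable = True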
predicted_fix_path = ( f"test/fixtures/linter/parse_error_2_{fix_even_unparsable}_fix.sql" ) if fix_even_unparsable: with open(predicted_fix_path, "r") as f: fixed_sql = f.read() assert result.tree.raw == fixed_sql else: with pytest.raises(FileNotFoundError): open(predicted_fix_path, "r") sqlfluff-3.4.2/test/core/parser/000077500000000000000000000000001503426445100165515ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/parser/__init__.py000066400000000000000000000000461503426445100206620ustar00rootroot00000000000000"""Tests for sqlfluff.core.parser.""" sqlfluff-3.4.2/test/core/parser/conftest.py000066400000000000000000000013611503426445100207510ustar00rootroot00000000000000"""Test fixtures for parser tests.""" import pytest from sqlfluff.core.dialects import dialect_selector from sqlfluff.core.parser.segments import TemplateSegment @pytest.fixture(scope="function") def fresh_ansi_dialect(): """Expand the ansi dialect for use.""" return dialect_selector("ansi") @pytest.fixture(scope="function") def test_segments(generate_test_segments): """A preset list of segments for testing. Includes a templated segment for completeness. """ main_list = generate_test_segments(["bar", " \t ", "foo", "baar", " \t "]) ts = TemplateSegment( pos_marker=main_list[-1].get_end_point_marker(), source_str="{# comment #}", block_type="comment", ) return main_list + (ts,) sqlfluff-3.4.2/test/core/parser/grammar/000077500000000000000000000000001503426445100201775ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/parser/grammar/__init__.py000066400000000000000000000000711503426445100223060ustar00rootroot00000000000000"""Tests for the sqlfluff.core.parser.grammar module.""" sqlfluff-3.4.2/test/core/parser/grammar/conftest.py000066400000000000000000000041451503426445100224020ustar00rootroot00000000000000"""Common test fixtures for grammar testing.""" from typing import Any import pytest from sqlfluff.core.parser import KeywordSegment, StringParser from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import BaseGrammar from sqlfluff.core.parser.types import ParseMode @pytest.fixture(scope="function") def structural_parse_mode_test(generate_test_segments, fresh_ansi_dialect): """Test the structural function of a grammar in various parse modes. This helper fixture is designed to modularise grammar tests. """ def _structural_parse_mode_test( test_segment_seeds: list[str], grammar_class: type[BaseGrammar], grammar_argument_seeds: list[str], grammar_terminator_seeds: list[str], grammar_kwargs: dict[str, Any], parse_mode: ParseMode, input_slice: slice, output_tuple: tuple[Any, ...], ): segments = generate_test_segments(test_segment_seeds) # Dialect is required here only to have access to bracket segments. ctx = ParseContext(dialect=fresh_ansi_dialect) # NOTE: We pass terminators using kwargs rather than directly because some # classes don't support it (e.g. Bracketed). if grammar_terminator_seeds: grammar_kwargs["terminators"] = [ StringParser(e, KeywordSegment) for e in grammar_terminator_seeds ] _seq = grammar_class( *(StringParser(e, KeywordSegment) for e in grammar_argument_seeds), parse_mode=parse_mode, **grammar_kwargs, ) _start = input_slice.start or 0 _stop = input_slice.stop or len(segments) _match = _seq.match(segments[:_stop], _start, ctx) # If we're expecting an output tuple, assert the match is truthy. 
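        # Illustrative note (not part of the original helper): the
        # `output_tuple` values used by callers encode segments as nested
        # (type, content) pairs produced by `to_tuple()` below, e.g. roughly:
        #
        #     ("keyword", "a")
        #     ("unparsable", (("raw", "b"),))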
if output_tuple: assert _match _result = tuple( e.to_tuple(show_raw=True, code_only=False, include_meta=True) for e in _match.apply(segments) ) assert _result == output_tuple # Return the function return _structural_parse_mode_test sqlfluff-3.4.2/test/core/parser/grammar/grammar_anyof_test.py000066400000000000000000000201221503426445100244270ustar00rootroot00000000000000"""Tests for the OneOf, AnyOf & AnySetOf grammars. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import pytest from sqlfluff.core.parser import ( KeywordSegment, ParseMode, RawSegment, RegexParser, StringParser, ) from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import OneOf, Sequence from sqlfluff.core.parser.grammar.anyof import AnyNumberOf, AnySetOf from sqlfluff.core.parser.match_result import MatchResult class Example1Segment(RawSegment): """A minimal example segment for testing.""" type = "example1" class Example2Segment(RawSegment): """Another minimal example segment for testing.""" type = "example2" def test__parser__grammar__oneof__copy(): """Test grammar copying.""" bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g1 = OneOf(fs, bs) # Check copy g2 = g1.copy() assert g1 == g2 assert g1 is not g2 # Check copy insert (start) g3 = g1.copy(insert=[bs], at=0) assert g3 == OneOf(bs, fs, bs) # Check copy insert (mid) g4 = g1.copy(insert=[bs], at=1) assert g4 == OneOf(fs, bs, bs) # Check copy insert (end) g5 = g1.copy(insert=[bs], at=-1) assert g5 == OneOf(fs, bs, bs) @pytest.mark.parametrize("allow_gaps", [True, False]) def test__parser__grammar_oneof(test_segments, allow_gaps): """Test the OneOf grammar. NOTE: Should behave the same regardless of allow_gaps. """ bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(fs, bs, allow_gaps=allow_gaps) ctx = ParseContext(dialect=None) # Check directly assert g.match(test_segments, 0, parse_context=ctx) == MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ) # Check with a bit of whitespace assert not g.match(test_segments, 1, parse_context=ctx) def test__parser__grammar_oneof_templated(test_segments): """Test the OneOf grammar. NB: Should behave the same regardless of code_only. """ bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(fs, bs) ctx = ParseContext(dialect=None) # This shouldn't match, but it *ALSO* shouldn't raise an exception. 
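    # (For context: index 5 in the `test_segments` fixture is the trailing
    # TemplateSegment, so this exercises OneOf against templated placeholder
    # content rather than ordinary raw code.)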
# https://github.com/sqlfluff/sqlfluff/issues/780 assert not g.match(test_segments, 5, parse_context=ctx) def test__parser__grammar_oneof_exclude(test_segments): """Test the OneOf grammar exclude option.""" bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(bs, exclude=Sequence(bs, fs)) ctx = ParseContext(dialect=None) # Just against the first alone assert g.match(test_segments[:1], 0, parse_context=ctx) # Now with the bit to exclude included assert not g.match(test_segments, 0, parse_context=ctx) def test__parser__grammar_oneof_take_longest_match(test_segments): """Test that the OneOf grammar takes the longest match.""" fooRegex = RegexParser(r"fo{2}", KeywordSegment) baar = StringParser("baar", KeywordSegment) foo = StringParser("foo", KeywordSegment) fooBaar = Sequence( foo, baar, ) ctx = ParseContext(dialect=None) assert fooRegex.match(test_segments, 2, parse_context=ctx).matched_slice == slice( 2, 3 ) # Even if fooRegex comes first, fooBaar # is a longer match and should be taken assert OneOf(fooRegex, fooBaar).match( test_segments, 2, parse_context=ctx ).matched_slice == slice(2, 4) def test__parser__grammar_oneof_take_first(test_segments): """Test that the OneOf grammar takes first match in case they are of same length.""" foo1 = StringParser("foo", Example1Segment) foo2 = StringParser("foo", Example2Segment) ctx = ParseContext(dialect=None) # Both segments would match "foo" # so we test that order matters g1 = OneOf(foo1, foo2) result1 = g1.match(test_segments, 2, ctx) # 2 is the index of "foo" # in g1, the Example1Segment is first. assert result1.matched_class is Example1Segment g2 = OneOf(foo2, foo1) result2 = g2.match(test_segments, 2, ctx) # 2 is the index of "foo" # in g2, the Example2Segment is first. assert result2.matched_class is Example2Segment @pytest.mark.parametrize( "mode,options,terminators,input_slice,kwargs,output_tuple", [ # ##### # Strict matches # ##### # 1. Match once (ParseMode.STRICT, ["a"], [], slice(None, None), {}, (("keyword", "a"),)), # 2. Match none (ParseMode.STRICT, ["b"], [], slice(None, None), {}, ()), # 3. Match twice ( ParseMode.STRICT, ["b", "a"], [], slice(None, None), {}, ( ("keyword", "a"), ("whitespace", " "), ("keyword", "b"), ), ), # 4. Limited match ( ParseMode.STRICT, ["b", "a"], [], slice(None, None), {"max_times": 1}, (("keyword", "a"),), ), # ##### # Greedy matches # ##### # 1. Terminated match ( ParseMode.GREEDY, ["b", "a"], ["b"], slice(None, None), {}, (("keyword", "a"),), ), # 2. Terminated, but not matching the first element. ( ParseMode.GREEDY, ["b"], ["b"], slice(None, None), {}, (("unparsable", (("raw", "a"),)),), ), # 3. Terminated, but only a partial match. ( ParseMode.GREEDY, ["a"], ["c"], slice(None, None), {}, ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ), ), # Test exhaustion before hitting min_times. # This is tricky to otherwise get coverage for because it's a # fairly unusual occurrence, but nonetheless a path in the logic # which needs coverage. It would normally only occur if a relatively # high value is set for min_times. ( ParseMode.STRICT, ["d"], [], slice(5, None), {"min_times": 3}, (), ), ], ) def test__parser__grammar_anyof_modes( mode, options, terminators, input_slice, kwargs, output_tuple, structural_parse_mode_test, ): """Test the AnyNumberOf grammar with various parse modes. In particular here we're testing the treatment of unparsable sections. 
""" structural_parse_mode_test( ["a", " ", "b", " ", "c", "d", " ", "d"], AnyNumberOf, options, terminators, kwargs, mode, input_slice, output_tuple, ) def test__parser__grammar_anysetof(generate_test_segments): """Test the AnySetOf grammar.""" token_list = ["bar", " \t ", "foo", " \t ", "bar"] segments = generate_test_segments(token_list) bar = StringParser("bar", KeywordSegment) foo = StringParser("foo", KeywordSegment) g = AnySetOf(foo, bar) ctx = ParseContext(dialect=None) # Check it doesn't match if the start is whitespace. assert not g.match(segments, 1, ctx) # Check structure if we start with a match. result = g.match(segments, 0, ctx) assert result == MatchResult( matched_slice=slice(0, 3), child_matches=( MatchResult( slice(0, 1), KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( slice(2, 3), KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), # NOTE: The second "bar" isn't included because this # is any *set* of and we've already have "bar" once. ), ) sqlfluff-3.4.2/test/core/parser/grammar/grammar_other_test.py000066400000000000000000000200771503426445100244450ustar00rootroot00000000000000"""Tests for any other grammars. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import logging import pytest from sqlfluff.core.parser import KeywordSegment, StringParser, SymbolSegment from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Anything, Delimited, Nothing from sqlfluff.core.parser.grammar.noncode import NonCodeMatcher from sqlfluff.core.parser.types import ParseMode @pytest.mark.parametrize( "token_list,min_delimiters,allow_gaps,allow_trailing,match_len", [ # Basic testing (note diff to v1, no trailing whitespace.) (["bar", " \t ", ".", " ", "bar"], 0, True, False, 5), (["bar", " \t ", ".", " ", "bar", " "], 0, True, False, 5), # Testing allow_trailing (["bar", " \t ", ".", " "], 0, True, False, 1), # NOTE: Diff to v1 (["bar", " \t ", ".", " "], 0, True, True, 3), # NOTE: Diff to v1 # Testing the implications of allow_gaps (["bar", " \t ", ".", " ", "bar"], 0, True, False, 5), (["bar", " \t ", ".", " ", "bar"], 0, False, False, 1), (["bar", " \t ", ".", " ", "bar"], 1, True, False, 5), (["bar", " \t ", ".", " ", "bar"], 1, False, False, 0), (["bar", ".", "bar"], 0, True, False, 3), (["bar", ".", "bar"], 0, False, False, 3), (["bar", ".", "bar"], 1, True, False, 3), (["bar", ".", "bar"], 1, False, False, 3), # Check we still succeed with something trailing right on the end. (["bar", ".", "bar", "foo"], 1, False, False, 3), # Check min_delimiters. There's a delimiter here, but not enough to match. 
(["bar", ".", "bar", "foo"], 2, True, False, 0), ], ) def test__parser__grammar_delimited( min_delimiters, allow_gaps, allow_trailing, token_list, match_len, caplog, generate_test_segments, fresh_ansi_dialect, ): """Test the Delimited grammar when not code_only.""" test_segments = generate_test_segments(token_list) g = Delimited( StringParser("bar", KeywordSegment), delimiter=StringParser(".", SymbolSegment), allow_gaps=allow_gaps, allow_trailing=allow_trailing, min_delimiters=min_delimiters, ) ctx = ParseContext(dialect=fresh_ansi_dialect) with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"): # Matching with whitespace shouldn't match if we need at least one delimiter m = g.match(test_segments, 0, ctx) assert len(m) == match_len @pytest.mark.parametrize( "input_tokens, terminators, output_tuple", [ # No terminators (or non matching terminators), full match. ( ["a", " ", "b"], [], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ), ), ( ["a", " ", "b"], ["c"], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ), ), # Terminate after some matched content. ( ["a", " ", "b"], ["b"], (("raw", "a"),), ), # Terminate immediately. ( ["a", " ", "b"], ["a"], (), ), # NOTE: the the "c" terminator won't match because "c" is # a keyword and therefore is required to have whitespace # before it. # See `greedy_match()` for details. ( ["a", " ", "b", "c", " ", "d"], ["c"], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ("raw", "c"), ("whitespace", " "), ("raw", "d"), ), ), # These next two tests check the handling of brackets in the # Anything match. Unlike other greedy matches, this grammar # assumes we're not going to re-parse these brackets and so # _does_ infer their structure and creates bracketed elements # for them. ( ["(", "foo", " ", ")", " ", "foo"], ["foo"], ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("raw", "foo"), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), # No trailing whitespace. ), ), ( ["(", " ", "foo", "(", "foo", ")", ")", " ", "foo"], ["foo"], ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("raw", "foo"), ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("raw", "foo"), ("dedent", ""), ("end_bracket", ")"), ), ), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ], ) def test__parser__grammar_anything_structure( input_tokens, terminators, output_tuple, structural_parse_mode_test ): """Structure tests for the Anything grammar. NOTE: For most greedy semantics we don't instantiate inner brackets, but in the Anything grammar, the assumption is that we're not coming back to these segments later so we take the time to instantiate any bracketed sections. This is to maintain some backward compatibility with previous parsing behaviour. """ structural_parse_mode_test( input_tokens, Anything, [], terminators, {}, ParseMode.STRICT, slice(None, None), output_tuple, ) @pytest.mark.parametrize( "terminators,match_length", [ # No terminators, full match. ([], 6), # If terminate with foo - match length 1. (["foo"], 1), # If terminate with foof - unterminated. Match everything (["foof"], 6), # Greedy matching until the first item should return none (["bar"], 0), # NOTE: the greedy until "baar" won't match because baar is # a keyword and therefore is required to have whitespace # before it. In the test sequence "baar" does not. # See `greedy_match()` for details. 
(["baar"], 6), ], ) def test__parser__grammar_anything_match( terminators, match_length, test_segments, fresh_ansi_dialect ): """Test the Anything grammar. NOTE: Anything combined with terminators implements the semantics which used to be implemented by `GreedyUntil`. """ ctx = ParseContext(dialect=fresh_ansi_dialect) terms = [StringParser(kw, KeywordSegment) for kw in terminators] result = Anything(terminators=terms).match(test_segments, 0, parse_context=ctx) assert result.matched_slice == slice(0, match_length) assert result.matched_class is None # We shouldn't have set a class def test__parser__grammar_nothing_match(test_segments, fresh_ansi_dialect): """Test the Nothing grammar.""" ctx = ParseContext(dialect=fresh_ansi_dialect) assert not Nothing().match(test_segments, 0, ctx) def test__parser__grammar_noncode_match(test_segments, fresh_ansi_dialect): """Test the NonCodeMatcher.""" ctx = ParseContext(dialect=fresh_ansi_dialect) # NonCode Matcher doesn't work with simple assert NonCodeMatcher().simple(ctx) is None # We should match one and only one segment match = NonCodeMatcher().match(test_segments, 1, parse_context=ctx) assert match assert match.matched_slice == slice(1, 2) sqlfluff-3.4.2/test/core/parser/grammar/grammar_ref_test.py000066400000000000000000000052501503426445100240740ustar00rootroot00000000000000"""Test the Ref grammar. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import pytest from sqlfluff.core.dialects import Dialect from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Ref from sqlfluff.core.parser.lexer import RegexLexer from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.parsers import StringParser from sqlfluff.core.parser.segments import CodeSegment, WhitespaceSegment @pytest.fixture(scope="function") def test_dialect(): """A stripped back test dialect for testing.""" test_dialect = Dialect("test", root_segment_name="FileSegment") test_dialect.set_lexer_matchers( [ RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"} ), ] ) test_dialect.add(FooSegment=StringParser("foo", CodeSegment, type="foo")) # Return the expanded copy. 
    return test_dialect.expand()


def test__parser__grammar__ref_eq():
    """Test equality of Ref Grammars."""
    r1 = Ref("foo")
    r2 = Ref("foo")
    assert r1 is not r2
    assert r1 == r2
    check_list = [1, 2, r2, 3]
    # Check we can find it in lists
    assert r1 in check_list
    # Check we can get its position
    assert check_list.index(r1) == 2
    # Check we can remove it from a list
    check_list.remove(r1)
    assert r1 not in check_list


def test__parser__grammar__ref_repr():
    """Test the __repr__ method of Ref."""
    assert repr(Ref("foo")) == "<Ref: 'foo'>"
    assert repr(Ref("bar", optional=True)) == "<Ref: 'bar' [opt]>"


def test__parser__grammar_ref_match(generate_test_segments, test_dialect):
    """Test the Ref grammar match method."""
    foo_ref = Ref("FooSegment")
    test_segments = generate_test_segments(["bar", "foo", "bar"])
    ctx = ParseContext(dialect=test_dialect)

    match = foo_ref.match(test_segments, 1, ctx)

    assert match == MatchResult(
        matched_slice=slice(1, 2),
        matched_class=CodeSegment,
        segment_kwargs={"instance_types": ("foo",)},
    )


def test__parser__grammar_ref_exclude(generate_test_segments, fresh_ansi_dialect):
    """Test the Ref grammar exclude option with the match method."""
    identifier = Ref("NakedIdentifierSegment", exclude=Ref.keyword("ABS"))
    test_segments = generate_test_segments(["ABS", "ABSOLUTE"])
    ctx = ParseContext(dialect=fresh_ansi_dialect)
    # Assert ABS does not match, due to the exclude
    assert not identifier.match(test_segments, 0, ctx)
    # Assert ABSOLUTE does match
    assert identifier.match(test_segments, 1, ctx)
sqlfluff-3.4.2/test/core/parser/grammar/grammar_sequence_test.py000066400000000000000000000415601503426445100251340ustar00rootroot00000000000000"""Tests for the Sequence grammar.

NOTE: All of these tests depend somewhat on the KeywordSegment working as planned.
"""

import logging

import pytest

from sqlfluff.core.errors import SQLParseError
from sqlfluff.core.parser import Dedent, Indent, KeywordSegment, StringParser
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.grammar import Bracketed, Conditional, Sequence
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.types import ParseMode


def test__parser__grammar_sequence_repr():
    """Test the Sequence grammar __repr__ method."""
    bar = StringParser("bar", KeywordSegment)
    assert repr(bar) == "<StringParser: 'bar'>"
    foo = StringParser("foo", KeywordSegment)
    sequence = Sequence(bar, foo)
    assert (
        repr(sequence) == "<Sequence: [<StringParser: 'bar'>, <StringParser: 'foo'>]>"
    )


def test__parser__grammar_sequence_nested_match(test_segments, caplog):
    """Test the Sequence grammar when nested."""
    bar = StringParser("bar", KeywordSegment)
    foo = StringParser("foo", KeywordSegment)
    baar = StringParser("baar", KeywordSegment)
    g = Sequence(Sequence(bar, foo), baar)

    ctx = ParseContext(dialect=None)
    # Confirm the structure of the test segments:
    assert [s.raw for s in test_segments] == ["bar", " \t ", "foo", "baar", " \t ", ""]

    with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"):
        # Matching just the start of the list shouldn't work.
        result1 = g.match(test_segments[:3], 0, ctx)
    assert not result1  # Check it returns falsy

    with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"):
        # Matching the whole list should.
        result2 = g.match(test_segments, 0, ctx)
    assert result2  # Check it returns truthy
    assert result2 == MatchResult(
        matched_slice=slice(0, 4),  # NOTE: One of these is space.
child_matches=( MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(2, 3), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(3, 4), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), ), ) @pytest.mark.parametrize( "mode,sequence,terminators,input_slice,output_tuple", [ # ##### # Test matches where we should get something, and that's # the whole sequence. # NOTE: Include a little whitespace in the slice (i.e. the first _two_ # segments) to check that it isn't included in the match. (ParseMode.STRICT, ["a"], [], slice(None, 2), (("keyword", "a"),)), (ParseMode.GREEDY, ["a"], [], slice(None, 2), (("keyword", "a"),)), (ParseMode.GREEDY_ONCE_STARTED, ["a"], [], slice(None, 2), (("keyword", "a"),)), # ##### # Test matching on sequences where we run out of segments before matching # the whole sequence. # STRICT returns no match. (ParseMode.STRICT, ["a", "b"], [], slice(None, 2), ()), # GREEDY & GREEDY_ONCE_STARTED returns the content as unparsable, and # still don't include the trailing whitespace. The return value does # however have the matched "a" as a keyword and not a raw. ( ParseMode.GREEDY, ["a", "b"], [], slice(None, 2), (("unparsable", (("keyword", "a"),)),), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a", "b"], [], slice(None, 2), (("unparsable", (("keyword", "a"),)),), ), # ##### # Test matching on sequences where we fail to match the first element. # STRICT & GREEDY_ONCE_STARTED return no match. (ParseMode.STRICT, ["b"], [], slice(None, 2), ()), (ParseMode.GREEDY_ONCE_STARTED, ["b"], [], slice(None, 2), ()), # GREEDY claims the remaining elements (unmutated) as unparsable, but # does not claim any trailing whitespace. ( ParseMode.GREEDY, ["b"], [], slice(None, 2), (("unparsable", (("raw", "a"),)),), ), # ##### # Test matches where we should match the sequence fully, but there's more # to match. # First without terminators... # STRICT ignores the rest. (ParseMode.STRICT, ["a"], [], slice(None, 5), (("keyword", "a"),)), # The GREEDY modes claim the rest as unparsable. # NOTE: the whitespace in between is _not_ unparsable. ( ParseMode.GREEDY, ["a"], [], slice(None, 5), ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"), ("whitespace", " "), ("raw", "c"))), ), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a"], [], slice(None, 5), ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"), ("whitespace", " "), ("raw", "c"))), ), ), # Second *with* terminators. # NOTE: The whitespace before the terminator is not included. (ParseMode.STRICT, ["a"], ["c"], slice(None, 5), (("keyword", "a"),)), ( ParseMode.GREEDY, ["a"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), # ##### # Test matches where we match the first element of a sequence but not the # second (with terminators) (ParseMode.STRICT, ["a", "x"], ["c"], slice(None, 5), ()), # NOTE: For GREEDY modes, the matched portion is not included as an "unparsable" # only the portion which failed to match. The terminator is not included and # the matched portion is still mutated correctly. 
( ParseMode.GREEDY, ["a", "x"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a", "x"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), # ##### # Test competition between sequence elements and terminators. # In GREEDY_ONCE_STARTED, the first element is matched before any terminators. ( ParseMode.GREEDY_ONCE_STARTED, ["a"], ["a"], slice(None, 2), (("keyword", "a"),), ), # In GREEDY, the terminator is matched first and so takes precedence. ( ParseMode.GREEDY, ["a"], ["a"], slice(None, 2), (), ), # NOTE: In these last two cases, the "b" isn't included because it acted as # a terminator before being considered in the sequence. ( ParseMode.GREEDY_ONCE_STARTED, ["a", "b"], ["b"], slice(None, 3), (("unparsable", (("keyword", "a"),)),), ), ( ParseMode.GREEDY, ["a", "b"], ["b"], slice(None, 3), (("unparsable", (("keyword", "a"),)),), ), ], ) def test__parser__grammar_sequence_modes( mode, sequence, terminators, input_slice, output_tuple, structural_parse_mode_test, ): """Test the Sequence grammar with various parse modes. In particular here we're testing the treatment of unparsable sections. """ structural_parse_mode_test( ["a", " ", "b", " ", "c", "d", " ", "d"], Sequence, sequence, terminators, {}, mode, input_slice, output_tuple, ) @pytest.mark.parametrize( "input_seed,mode,sequence,kwargs,output_tuple", [ # A strict asymmetric bracket shouldn't match (["(", "a"], ParseMode.STRICT, ["a"], {}, ()), # A sequence that isn't bracketed shouldn't match. # Regardless of mode. (["a"], ParseMode.STRICT, ["a"], {}, ()), (["a"], ParseMode.GREEDY, ["a"], {}, ()), # Test potential empty brackets (no whitespace) ( ["(", ")"], ParseMode.STRICT, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", ")"], ParseMode.GREEDY, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Test potential empty brackets (with whitespace) ( ["(", " ", ")"], ParseMode.STRICT, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", " ", ")"], ParseMode.GREEDY, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", " ", ")"], ParseMode.STRICT, [], # Strict matching, without allowing gaps, shouldn't match. {"allow_gaps": False}, (), ), # Happy path content match. 
( ["(", "a", ")"], ParseMode.STRICT, ["a"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("keyword", "a"), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Content match fails ( ["(", "a", ")"], ParseMode.STRICT, ["b"], {}, (), ), ( ["(", "a", ")"], ParseMode.GREEDY, ["b"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("unparsable", (("raw", "a"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Partial matches (not whole grammar matched) ( ["(", "a", ")"], ParseMode.STRICT, ["a", "b"], {}, (), ), ( ["(", "a", ")"], ParseMode.GREEDY, ["a", "b"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("unparsable", (("keyword", "a"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Partial matches (not whole sequence matched) ( ["(", "a", " ", "b", ")"], ParseMode.STRICT, ["a"], {}, (), ), ( ["(", "a", " ", "b", ")"], ParseMode.GREEDY, ["a"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Test an unwrapped path (with square brackets) ( ["[", "a", " ", "b", "]"], ParseMode.GREEDY, ["a"], {"bracket_type": "square"}, ( ("start_square_bracket", "["), ("indent", ""), ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ("dedent", ""), ("end_square_bracket", "]"), ), ), ], ) def test__parser__grammar_bracketed_modes( input_seed, mode, sequence, kwargs, output_tuple, structural_parse_mode_test, ): """Test the Bracketed grammar with various parse modes.""" structural_parse_mode_test( input_seed, Bracketed, sequence, [], kwargs, mode, slice(None, None), output_tuple, ) @pytest.mark.parametrize( "input_seed,mode,sequence,kwargs", [ # Unclosed greedy brackets always raise errors. (["(", "a"], ParseMode.GREEDY, ["a"], {}), ( ["(", " ", ")"], ParseMode.GREEDY, [], # Greedy matching, without allowing gaps, should raise a parsing error. # NOTE: This functionality doesn't get used much. {"allow_gaps": False}, ), ], ) def test__parser__grammar_bracketed_error_modes( input_seed, mode, sequence, kwargs, structural_parse_mode_test, ): """Test the Bracketed grammar with various parse modes.""" with pytest.raises(SQLParseError): structural_parse_mode_test( input_seed, Bracketed, sequence, [], kwargs, mode, slice(None, None), (), ) def test__parser__grammar_sequence_indent_conditional_match(test_segments, caplog): """Test the Sequence grammar with indents.""" bar = StringParser("bar", KeywordSegment) foo = StringParser("foo", KeywordSegment) # We will assume the default config has indented_joins = False. # We're testing without explicitly setting the `config_type` because # that's the assumed way of using the grammar in practice. g = Sequence( Dedent, Conditional(Indent, indented_joins=False), bar, Conditional(Indent, indented_joins=True), foo, Dedent, ) ctx = ParseContext(dialect=None) with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"): m = g.match(test_segments, 0, parse_context=ctx) assert m == MatchResult( matched_slice=slice(0, 3), # NOTE: One of these is space. child_matches=( # The two child keywords MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(2, 3), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), ), insert_segments=( (0, Dedent), # The starting, unconditional dedent. (0, Indent), # The conditional (activated) Indent. 
            # NOTE: There *isn't* the other Indent.
            (3, Dedent),  # The closing unconditional dedent.
            # NOTE: This last one is still included even though it's
            # after the last matched segment.
        ),
    )
sqlfluff-3.4.2/test/core/parser/helpers_test.py000066400000000000000000000017571503426445100216360ustar00rootroot00000000000000"""Test the helpers."""

import pytest

from sqlfluff.core.parser.helpers import trim_non_code_segments


@pytest.mark.parametrize(
    "token_list,pre_len,mid_len,post_len",
    [
        (["bar", ".", "bar"], 0, 3, 0),
        (("bar", ".", "bar"), 0, 3, 0),
        ([], 0, 0, 0),
        ([" ", "\n", "\t", "bar", ".", "bar", " ", "\n", "\t"], 3, 3, 3),
    ],
)
def test__parser__helper_trim_non_code_segments(
    token_list,
    pre_len,
    mid_len,
    post_len,
    generate_test_segments,
):
    """Test trim_non_code_segments."""
    segments = generate_test_segments(token_list)
    pre, mid, post = trim_non_code_segments(segments)
    # Assert lengths
    assert (len(pre), len(mid), len(post)) == (pre_len, mid_len, post_len)
    # Assert content
    assert [elem.raw for elem in pre] == list(token_list[:pre_len])
    assert [elem.raw for elem in mid] == list(token_list[pre_len : pre_len + mid_len])
    assert [elem.raw for elem in post] == list(token_list[len(segments) - post_len :])
sqlfluff-3.4.2/test/core/parser/lexer_test.py000066400000000000000000000402201503426445100212770ustar00rootroot00000000000000"""The Test file for The New Parser (Lexing steps)."""

import logging
from typing import Any, NamedTuple, Union

import pytest

from sqlfluff.core import FluffConfig, SQLLexError
from sqlfluff.core.parser import CodeSegment, Lexer, NewlineSegment
from sqlfluff.core.parser.lexer import LexMatch, RegexLexer, StringLexer
from sqlfluff.core.parser.segments.meta import TemplateSegment
from sqlfluff.core.templaters import JinjaTemplater, RawFileSlice, TemplatedFile
from sqlfluff.core.templaters.base import TemplatedFileSlice


def assert_matches(instring, matcher, matchstring):
    """Assert that a matcher does or doesn't work on a string.

    The optional `matchstring` argument, which may be None, allows
    testing either positive matching of a particular string, or
    negative matching (i.e. that it explicitly doesn't match).
    """
    res = matcher.match(instring)
    # Check we've got the right type
    assert isinstance(res, LexMatch)
    if matchstring is None:
        assert res.forward_string == instring
        assert res.elements == []
    else:
        assert res.forward_string == instring[len(matchstring) :]
        assert len(res.elements) == 1
        assert res.elements[0].raw == matchstring


@pytest.mark.parametrize(
    "raw,res",
    [
        # NOTE: The final empty string is the end of file marker
        ("a b", ["a", " ", "b", ""]),
        ("b.c", ["b", ".", "c", ""]),
        (
            "abc \n \t def ;blah",
            ["abc", " ", "\n", " \t ", "def", " ", ";", "blah", ""],
        ),
        # Test Quotes
        ('abc\'\n "\t\' "de`f"', ["abc", "'\n \"\t'", " ", '"de`f"', ""]),
        # Test Comments
        ("abc -- comment \nblah", ["abc", " ", "-- comment ", "\n", "blah", ""]),
        ("abc # comment \nblah", ["abc", " ", "# comment ", "\n", "blah", ""]),
        # Note the more complicated parsing of block comments.
        # This tests subdivision and trimming (incl the empty case)
        (
            "abc /* comment \nblah*/",
            ["abc", " ", "/* comment", " ", "\n", "blah*/", ""],
        ),
        ("abc /*\n\t\n*/", ["abc", " ", "/*", "\n", "\t", "\n", "*/", ""]),
        # Test strings
        ("*-+bd/", ["*", "-", "+", "bd", "/", ""]),
        # Test Negatives and Minus
        ("2+4 -5", ["2", "+", "4", " ", "-", "5", ""]),
        ("when 'Spec\\'s 23' like", ["when", " ", "'Spec\\'s 23'", " ", "like", ""]),
        ('when "Spec\\"s 23" like', ["when", " ", '"Spec\\"s 23"', " ", "like", ""]),
    ],
)
def test__parser__lexer_obj(raw, res, caplog):
    """Test the lexer splits as expected in a selection of cases."""
    lex = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
    with caplog.at_level(logging.DEBUG):
        lexing_segments, _ = lex.lex(raw)
        assert [seg.raw for seg in lexing_segments] == res


@pytest.mark.parametrize(
    "raw,res",
    [
        (".fsaljk", "."),
        ("fsaljk", None),
    ],
)
def test__parser__lexer_string(raw, res):
    """Test the StringLexer."""
    matcher = StringLexer("dot", ".", CodeSegment)
    assert_matches(raw, matcher, res)


@pytest.mark.parametrize(
    "raw,reg,res",
    [
        ("fsaljk", "f", "f"),
        ("fsaljk", r"f", "f"),
        ("fsaljk", r"[fas]*", "fsa"),
        # Matching whitespace segments
        (" \t fsaljk", r"[^\S\r\n]*", " \t "),
        # Matching whitespace segments (with a newline)
        (" \t \n fsaljk", r"[^\S\r\n]*", " \t "),
        # Matching quotes containing stuff
        ("'something boring' \t \n fsaljk", r"'[^']*'", "'something boring'"),
        (
            "' something exciting \t\n ' \t \n fsaljk",
            r"'[^']*'",
            "' something exciting \t\n '",
        ),
    ],
)
def test__parser__lexer_regex(raw, reg, res, caplog):
    """Test the RegexLexer."""
    matcher = RegexLexer("test", reg, CodeSegment)
    with caplog.at_level(logging.DEBUG):
        assert_matches(raw, matcher, res)


def test__parser__lexer_lex_match(caplog):
    """Test the RepeatedMultiMatcher."""
    matchers = [
        StringLexer("dot", ".", CodeSegment),
        RegexLexer("test", r"#[^#]*#", CodeSegment),
    ]
    with caplog.at_level(logging.DEBUG):
        res = Lexer.lex_match("..#..#..#", matchers)
        assert res.forward_string == "#"  # Should match right up to the final element
        assert len(res.elements) == 5
        assert res.elements[2].raw == "#..#"


def test__parser__lexer_fail():
    """Test how the lexer fails and reports errors."""
    lex = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
    _, vs = lex.lex("Select \u0394")
    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.line_pos == 8


def test__parser__lexer_fail_via_parse():
    """Test how the parser fails and reports errors while lexing."""
    lexer = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
    _, vs = lexer.lex("Select \u0394")
    assert vs
    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.line_pos == 8


def test__parser__lexer_trim_post_subdivide(caplog):
    """Test a RegexLexer with a trim_post_subdivide function."""
    matcher = [
        RegexLexer(
            "function_script_terminator",
            r";\s+(?!\*)\/(?!\*)|\s+(?!\*)\/(?!\*)",
            CodeSegment,
            segment_kwargs={"type": "function_script_terminator"},
            subdivider=StringLexer(
                "semicolon", ";", CodeSegment, segment_kwargs={"type": "semicolon"}
            ),
            trim_post_subdivide=RegexLexer(
                "newline",
                r"(\n|\r\n)+",
                NewlineSegment,
            ),
        )
    ]
    with caplog.at_level(logging.DEBUG):
        res = Lexer.lex_match(";\n/\n", matcher)
        assert res.elements[0].raw == ";"
        assert res.elements[1].raw == "\n"
        assert res.elements[2].raw == "/"
        assert len(res.elements) == 3


class _LexerSlicingCase(NamedTuple):
    name: str
    in_str: str
    context: dict[str, Any]
    # (
    #   raw,
    #   source_str (if TemplateSegment),
    #   block_type (if TemplateSegment),
    #   segment_type
# ) expected_segments: list[tuple[str, Union[str, None], Union[str, None], str]] def _statement(*args, **kwargs): return "" def _load_result(*args, **kwargs): return ["foo", "bar"] @pytest.mark.parametrize( "case", [ _LexerSlicingCase( name="call macro and function overrides", in_str="{% call statement('unique_keys', fetch_result=true) %}\n" " select 1 as test\n" "{% endcall %}\n" "{% set unique_keys = load_result('unique_keys') %}\n" "select 2\n", context={"statement": _statement, "load_result": _load_result}, expected_segments=[ ( "", "{% call statement('unique_keys', fetch_result=true) %}", "block_start", "placeholder", ), ("", None, None, "indent"), ("", "\n select 1 as test\n", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endcall %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ( "", "{% set unique_keys = load_result('unique_keys') %}", "templated", "placeholder", ), ("\n", None, None, "newline"), ("select", None, None, "word"), (" ", None, None, "whitespace"), ("2", None, None, "literal"), ("\n", None, None, "newline"), ("", None, None, "end_of_file"), ], ), _LexerSlicingCase( name="call an existing macro", in_str="{% macro render_name(title) %}\n" " '{{ title }}. foo' as {{ caller() }}\n" "{% endmacro %}\n" "SELECT\n" " {% call render_name('Sir') %}\n" " bar\n" " {% endcall %}\n" "FROM baz\n", context={}, expected_segments=[ ("", "{% macro render_name(title) %}", "block_start", "placeholder"), ("", None, None, "indent"), ("", "\n '", "literal", "placeholder"), ("", "{{ title }}", "templated", "placeholder"), ("", ". foo' as ", "literal", "placeholder"), ("", "{{ caller() }}", "templated", "placeholder"), ("", "\n", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endmacro %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ("SELECT", None, None, "word"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("'Sir. foo'", None, None, "raw"), (" ", None, None, "whitespace"), ("as", None, None, "word"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("bar", None, None, "word"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), ("", "\n bar\n ", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endcall %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ("FROM", None, None, "word"), (" ", None, None, "whitespace"), ("baz", None, None, "word"), ("\n", None, None, "newline"), ("", None, None, "end_of_file"), ], ), ], ids=lambda case: case.name, ) def test__parser__lexer_slicing_calls(case: _LexerSlicingCase): """Test slicing of call blocks. 
https://github.com/sqlfluff/sqlfluff/issues/4013 """ config = FluffConfig(overrides={"dialect": "ansi"}) templater = JinjaTemplater(override_context=case.context) templated_file, templater_violations = templater.process( in_str=case.in_str, fname="test.sql", config=config, formatter=None ) assert ( not templater_violations ), f"Found templater violations: {templater_violations}" lexer = Lexer(config=config) lexing_segments, lexing_violations = lexer.lex(templated_file) assert not lexing_violations, f"Found templater violations: {lexing_violations}" assert case.expected_segments == [ ( seg.raw, seg.source_str if isinstance(seg, TemplateSegment) else None, seg.block_type if isinstance(seg, TemplateSegment) else None, seg.type, ) for seg in lexing_segments ] class _LexerSlicingTemplateFileCase(NamedTuple): name: str # easy way to build inputs here is to call templater.process in # test__parser__lexer_slicing_calls and adjust the output how you like: file: TemplatedFile # ( # raw, # source_str (if TemplateSegment), # block_type (if TemplateSegment), # segment_type # ) expected_segments: list[tuple[str, Union[str, None], Union[str, None], str]] @pytest.mark.parametrize( "case", [ _LexerSlicingTemplateFileCase( name="very simple test case", file=TemplatedFile( source_str="SELECT {# comment #}1;", templated_str="SELECT 1;", fname="test.sql", sliced_file=[ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), TemplatedFileSlice( "comment", slice(7, 20, None), slice(7, 7, None) ), TemplatedFileSlice( "literal", slice(20, 22, None), slice(7, 9, None) ), ], raw_sliced=[ RawFileSlice("SELECT ", "literal", 0, 0, None), RawFileSlice("{# comment #}", "comment", 7, 0, None), RawFileSlice("1;", "literal", 20, 0, None), ], ), expected_segments=[ ("SELECT", None, None, "word"), (" ", None, None, "whitespace"), ("", "{# comment #}", "comment", "placeholder"), ("1", None, None, "literal"), (";", None, None, "raw"), ("", None, None, "end_of_file"), ], ), _LexerSlicingTemplateFileCase( name="special zero length slice type is kept", file=TemplatedFile( source_str="SELECT 1;", templated_str="SELECT 1;", fname="test.sql", sliced_file=[ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), # this is a special marker that the templater wants to show up # as a meta segment: TemplatedFileSlice( "special_type", slice(7, 7, None), slice(7, 7, None) ), TemplatedFileSlice("literal", slice(7, 9, None), slice(7, 9, None)), ], raw_sliced=[ RawFileSlice("SELECT 1;", "literal", 0, 0, None), ], ), expected_segments=[ ("SELECT", None, None, "word"), (" ", None, None, "whitespace"), ("", "", "special_type", "placeholder"), ("1", None, None, "literal"), (";", None, None, "raw"), ("", None, None, "end_of_file"), ], ), _LexerSlicingTemplateFileCase( name="template with escaped slice", file=TemplatedFile( source_str="SELECT '{{}}' FROM TAB;", templated_str="SELECT '{}' FROM TAB;", fname="test.sql", sliced_file=[ TemplatedFileSlice("literal", slice(0, 8, None), slice(0, 8, None)), TemplatedFileSlice( "escaped", slice(8, 12, None), slice(8, 10, None) ), TemplatedFileSlice( "literal", slice(12, 23, None), slice(10, 21, None) ), ], raw_sliced=[ RawFileSlice("SELECT '", "literal", 0, 0, None), RawFileSlice("{{", "escaped", 8, 0, None), RawFileSlice("}}", "escaped", 10, 0, None), RawFileSlice("' FROM TAB;", "literal", 12, 0, None), ], ), expected_segments=[ ("SELECT", None, None, "word"), (" ", None, None, "whitespace"), ("'{}'", None, None, "raw"), (" ", None, None, "whitespace"), ("FROM", None, None, "word"), 
(" ", None, None, "whitespace"), ("TAB", None, None, "word"), (";", None, None, "raw"), ("", None, None, "end_of_file"), ], ), ], ids=lambda case: case.name, ) def test__parser__lexer_slicing_from_template_file(case: _LexerSlicingTemplateFileCase): """Test slicing using a provided TemplateFile. Useful for testing special inputs without having to find a templater to trick and yield the input you want to test. """ config = FluffConfig(overrides={"dialect": "ansi"}) lexer = Lexer(config=config) lexing_segments, lexing_violations = lexer.lex(case.file) assert not lexing_violations, f"Found templater violations: {lexing_violations}" assert case.expected_segments == [ ( seg.raw, seg.source_str if isinstance(seg, TemplateSegment) else None, seg.block_type if isinstance(seg, TemplateSegment) else None, seg.type, ) for seg in lexing_segments ] sqlfluff-3.4.2/test/core/parser/markers_test.py000066400000000000000000000051701503426445100216310ustar00rootroot00000000000000"""Tests for PositionMarker.""" import pytest from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.templaters import TemplatedFile @pytest.mark.parametrize( "raw,start_pos,end_pos", [ ("fsaljk", (0, 0), (0, 6)), ("", (2, 2), (2, 2)), # NB: 1 indexed, not 0 indexed. ("\n", (2, 2), (3, 1)), ("boo\n", (2, 2), (3, 1)), ("boo\nfoo", (2, 2), (3, 4)), ("\nfoo", (2, 2), (3, 4)), ], ) def test_markers__infer_next_position(raw, start_pos, end_pos): """Test that we can correctly infer positions from strings.""" assert end_pos == PositionMarker.infer_next_position(raw, *start_pos) def test_markers__setting_position_raw(): """Test that we can correctly infer positions from strings & locations.""" templ = TemplatedFile.from_string("foobar") # Check inference in the template assert templ.get_line_pos_of_char_pos(2, source=True) == (1, 3) assert templ.get_line_pos_of_char_pos(2, source=False) == (1, 3) # Now check it passes through pos = PositionMarker(slice(2, 5), slice(2, 5), templ) # Can we infer positions correctly? assert pos.working_loc == (1, 3) # Check other marker properties work too (i.e. source properties) assert pos.line_no == 1 assert pos.line_pos == 3 # i.e. 2 + 1 (for 1-indexed) def test_markers__setting_position_working(): """Test that we can correctly set positions manually.""" templ = TemplatedFile.from_string("foobar") pos = PositionMarker(slice(2, 5), slice(2, 5), templ, 4, 4) # Can we don't infer when we're explicitly told. assert pos.working_loc == (4, 4) def test_markers__comparison(): """Test that we can correctly compare markers.""" templ = TemplatedFile.from_string("abc") # Make position markers for each of a, b & c # NOTE: We're not explicitly setting the working location, we # rely here on the marker inferring that correctly itself. 
a_pos = PositionMarker(slice(0, 1), slice(0, 1), templ) b_pos = PositionMarker(slice(1, 2), slice(1, 2), templ) c_pos = PositionMarker(slice(2, 3), slice(2, 3), templ) all_pos = (a_pos, b_pos, c_pos) # Check equality assert all(p == p for p in all_pos) # Check inequality assert a_pos != b_pos and a_pos != c_pos and b_pos != c_pos # Check less than assert a_pos < b_pos and b_pos < c_pos assert not c_pos < a_pos # Check greater than assert c_pos > a_pos and c_pos > b_pos assert not a_pos > c_pos # Check less than or equal assert all(a_pos <= p for p in all_pos) # Check greater than or equal assert all(c_pos >= p for p in all_pos) sqlfluff-3.4.2/test/core/parser/match_algorithms_test.py000066400000000000000000000202641503426445100235130ustar00rootroot00000000000000"""Tests for the BaseGrammar and it's methods. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import pytest from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.errors import SQLParseError from sqlfluff.core.parser import ( CodeSegment, KeywordSegment, StringParser, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.lexer import RegexLexer from sqlfluff.core.parser.match_algorithms import ( greedy_match, next_ex_bracket_match, next_match, resolve_bracket, trim_to_terminator, ) # NB: All of these tests depend somewhat on the KeywordSegment working as planned @pytest.fixture(scope="function") def test_dialect(): """A stripped back test dialect for testing brackets.""" test_dialect = Dialect("test", root_segment_name="FileSegment") test_dialect.bracket_sets("bracket_pairs").update( [("round", "StartBracketSegment", "EndBracketSegment", True)] ) test_dialect.set_lexer_matchers( [ RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"} ), ] ) test_dialect.add( StartBracketSegment=StringParser("(", SymbolSegment, type="start_bracket"), EndBracketSegment=StringParser(")", SymbolSegment, type="end_bracket"), ) # Return the expanded copy. return test_dialect.expand() def make_result_tuple(result_slice, matcher_keywords, test_segments): """Make a comparison tuple for test matching.""" # No result slice means no match. if not result_slice: return () return tuple( ( KeywordSegment(elem.raw, pos_marker=elem.pos_marker) if elem.raw in matcher_keywords else elem ) for elem in test_segments[result_slice] ) @pytest.mark.parametrize( "matcher_keywords,result_slice,winning_matcher", [ # Basic version, we should find bar first (["bar", "foo"], slice(0, 1), "bar"), # Look ahead for foo (["foo"], slice(2, 3), "foo"), # Duplicate matchers (["foo", "foo"], slice(2, 3), "foo"), (["sadkjfhas", "asefaslf"], slice(0, 0), None), ], ) def test__parser__algorithms__next_match( matcher_keywords, result_slice, winning_matcher, test_segments, ): """Test the `next_match()` method.""" # Make the string parsers for testing. matchers = [StringParser(keyword, KeywordSegment) for keyword in matcher_keywords] # Fetch the matching keyword from above (because it will have the same position) if winning_matcher: winning_matcher = matchers[matcher_keywords.index(winning_matcher)] ctx = ParseContext(dialect=None) match, matcher = next_match( test_segments, 0, matchers, ctx, ) # Check the right matcher was successful. if winning_matcher: assert matcher is winning_matcher else: # If no designated winning matcher, assert that it wasn't successful. 
assert matcher is None assert not match assert match.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,result_slice,error", [ (["(", "a", ")", " ", "foo"], slice(0, 3), None), (["(", "a", "(", "b", ")", "(", "c", ")", "d", ")", "e"], slice(0, 10), None), # This should error because we try to close a square bracket # inside a round one. (["(", "a", "]", "b", ")", "e"], None, SQLParseError), # This should error because we never find the end. (["(", "a", " ", "b", " ", "e"], None, SQLParseError), ], ) def test__parser__algorithms__resolve_bracket( raw_segments, result_slice, error, generate_test_segments ): """Test the `resolve_bracket()` method.""" test_segments = generate_test_segments(raw_segments) start_bracket = StringParser("(", SymbolSegment, type="start_bracket") end_bracket = StringParser(")", SymbolSegment, type="end_bracket") start_sq_bracket = StringParser("[", SymbolSegment, type="start_square_bracket") end_sq_bracket = StringParser("]", SymbolSegment, type="end_square_bracket") ctx = ParseContext(dialect=None) # For this test case we assert that the first segment is the initial match. first_match = start_bracket.match(test_segments, 0, ctx) assert first_match args = (test_segments,) kwargs = dict( opening_match=first_match, opening_matcher=start_bracket, start_brackets=[start_bracket, start_sq_bracket], end_brackets=[end_bracket, end_sq_bracket], bracket_persists=[True, False], parse_context=ctx, ) # If an error is defined, check that it is raised. if error: with pytest.raises(error): resolve_bracket(*args, **kwargs) else: result = resolve_bracket(*args, **kwargs) assert result assert result.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_word,result_slice", [ ([], "foo", slice(0, 0)), (["(", "foo", ")", " ", "foo"], "foo", slice(4, 5)), (["a", " ", "foo", " ", "foo"], "foo", slice(2, 3)), (["foo", " ", "foo", " ", "foo"], "foo", slice(0, 1)), # Error case, unexpected closing bracket. # NOTE: This should never normally happen, but we should # be prepared in case it does so that we return appropriately. (["a", " ", ")", " ", "foo"], "foo", slice(0, 0)), ], ) def test__parser__algorithms__next_ex_bracket_match( raw_segments, target_word, result_slice, generate_test_segments, test_dialect ): """Test the `next_ex_bracket_match()` method.""" test_segments = generate_test_segments(raw_segments) target = StringParser(target_word, KeywordSegment) ctx = ParseContext(dialect=test_dialect) result, _, _ = next_ex_bracket_match( test_segments, 0, matchers=[target], parse_context=ctx, ) assert result.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_words,inc_term,result_slice", [ (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], False, slice(0, 2)), (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], True, slice(0, 4)), # NOTE: Because "b" is_alpha, it needs whitespace before it to match. 
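        # (i.e. in ["a", "b", " ", "b"] the first "b" abuts "a" without
        # whitespace, so only the final "b" can act as the terminator.)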
(["a", "b", " ", "b"], ["b"], True, slice(0, 4)), (["a", "b", " ", "b"], ["b"], False, slice(0, 2)), (["a", "b", "c", " ", "b"], ["b"], False, slice(0, 3)), ], ) def test__parser__algorithms__greedy_match( raw_segments, target_words, inc_term, result_slice, generate_test_segments, test_dialect, ): """Test the `greedy_match()` method.""" test_segments = generate_test_segments(raw_segments) matchers = [StringParser(word, KeywordSegment) for word in target_words] ctx = ParseContext(dialect=test_dialect) match = greedy_match( segments=test_segments, idx=0, parse_context=ctx, matchers=matchers, include_terminator=inc_term, ) assert match assert match.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_words,expected_result", [ # Terminators mid sequence. (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], 2), # Initial terminators. (["a", "b", " ", "c", "d", " ", "e"], ["a", "e"], 0), # No terminators. (["a", "b", " ", "c", "d", " ", "e"], ["x", "y"], 7), # No sequence. ([], ["x", "y"], 0), ], ) def test__parser__algorithms__trim_to_terminator( raw_segments, target_words, expected_result, generate_test_segments, test_dialect, ): """Test the `trim_to_terminator()` method.""" test_segments = generate_test_segments(raw_segments) matchers = [StringParser(word, KeywordSegment) for word in target_words] ctx = ParseContext(dialect=test_dialect) assert ( trim_to_terminator( segments=test_segments, idx=0, parse_context=ctx, terminators=matchers, ) == expected_result ) sqlfluff-3.4.2/test/core/parser/match_result_test.py000066400000000000000000000053041503426445100226560ustar00rootroot00000000000000"""Tests for the MatchResult class. NOTE: This is all experimental for now. """ import pytest from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment, Dedent, Indent class ExampleSegment(BaseSegment): """A minimal example segment for testing.""" type = "example" def _recursive_assert_pos(segment): assert segment.pos_marker for seg in segment.segments: _recursive_assert_pos(seg) @pytest.mark.parametrize( "segment_seed,match_result,match_len,serialised_result", [ ( ["a", "b", "c", "d", "e"], MatchResult( matched_slice=slice(1, 4), insert_segments=((3, Indent),), child_matches=( MatchResult( matched_slice=slice(2, 3), matched_class=ExampleSegment, insert_segments=((2, Indent),), ), ), ), 3, ( ("raw", "b"), ("example", (("indent", ""), ("raw", "c"))), ("indent", ""), ("raw", "d"), ), ), ( ["a", "b", "c", "d", "e"], MatchResult( matched_slice=slice(1, 4), insert_segments=((2, Dedent),), ), 3, ( ("raw", "b"), ("dedent", ""), ("raw", "c"), ("raw", "d"), ), ), ( ["a"], MatchResult( # An example with only inserts. matched_slice=slice(0, 0), insert_segments=((0, Dedent),), ), 0, (("dedent", ""),), ), ], ) def test__parser__matchresult2_apply( segment_seed, match_result, match_len, serialised_result, generate_test_segments ): """Test MatchResult.apply(). This includes testing instantiating the MatchResult and whether setting some attributes and not others works as expected. """ input_segments = generate_test_segments(segment_seed) # Test the length attribute. # NOTE: It's not the number of segments we'll return, but the span # of the match in the original sequence. 
    assert len(match_result) == match_len
    out_segments = match_result.apply(input_segments)
    serialised = tuple(
        seg.to_tuple(show_raw=True, include_meta=True) for seg in out_segments
    )
    assert serialised == serialised_result
    # Test that _every_ segment (including metas) has a position marker already.
    for seg in out_segments:
        _recursive_assert_pos(seg)
sqlfluff-3.4.2/test/core/parser/parse_test.py000066400000000000000000000046471503426445100213050ustar00rootroot00000000000000"""The Test file for The New Parser (Grammar Classes)."""

from sqlfluff.core import FluffConfig
from sqlfluff.core.errors import SQLParseError
from sqlfluff.core.linter.linter import Linter
from sqlfluff.core.parser import Anything, BaseSegment, KeywordSegment, StringParser
from sqlfluff.core.parser.context import ParseContext

BarKeyword = StringParser("bar", KeywordSegment)


class BasicSegment(BaseSegment):
    """A basic segment for testing parse and expand."""

    type = "basic"
    match_grammar = Anything()


def test__parser__parse_match(test_segments):
    """Test match method on a real segment."""
    ctx = ParseContext(dialect=None)
    # This should match and have consumed everything, which should
    # now be part of a BasicSegment.
    match = BasicSegment.match(test_segments, 0, parse_context=ctx)
    assert match
    matched = match.apply(test_segments)
    assert len(matched) == 1
    assert isinstance(matched[0], BasicSegment)
    assert matched[0].segments[0].type == "raw"


def test__parser__parse_error():
    """Test that SQLParseError is raised for unparsable section."""
    in_str = "SELECT ;"
    lnt = Linter(dialect="ansi")
    parsed = lnt.parse_string(in_str)

    assert len(parsed.violations) == 1
    violation = parsed.violations[0]
    assert isinstance(violation, SQLParseError)
    assert violation.desc() == "Line 1, Position 1: Found unparsable section: 'SELECT'"

    # Check that the expected labels work for logging.
    # TODO: This is more specific than in previous iterations, but we could
    # definitely make this easier to read.
    assert (
        'Expected: "]> " "after . " "Found nothing."
    ) in parsed.tree.stringify()


def test_parse_jinja_macro_exclude():
    """Test parsing when excluding macros with unknown tags.

    This test case has a file which defines the unknown tag `materialization`
    which would cause a templating error if not excluded. By ignoring that
    folder we can ensure there are no errors.
    """
    config_path = "test/fixtures/templater/jinja_exclude_macro_path/.sqlfluff"
    config = FluffConfig.from_path(config_path)
    linter = Linter(config=config)
    sql_file_path = "test/fixtures/templater/jinja_exclude_macro_path/jinja.sql"
    parsed = linter.parse_path(sql_file_path)
    for parse in parsed:
        assert parse.violations == []
sqlfluff-3.4.2/test/core/parser/parser_test.py000066400000000000000000000155071503426445100214660ustar00rootroot00000000000000"""The Test file for Parsers (Matchable Classes)."""

import pytest

from sqlfluff.core.parser import (
    KeywordSegment,
    MultiStringParser,
    RawSegment,
    RegexParser,
    StringParser,
    TypedParser,
)
from sqlfluff.core.parser.context import ParseContext


def test__parser__repr():
    """Test the __repr__ method of the parsers."""
    # For the string parser note the uppercase template.
    assert repr(StringParser("foo", KeywordSegment)) == "<StringParser: 'FOO'>"
    # NOTE: For MultiStringParser we only test with one element here
    # because for more than one, the order is unpredictable.
    assert (
        repr(MultiStringParser(["a"], KeywordSegment)) == "<MultiStringParser: {'A'}>"
    )
    # For the typed & regex parser it's case sensitive (although lowercase
    # by convention).
    assert repr(TypedParser("foo", KeywordSegment)) == "<TypedParser: 'foo'>"
    assert repr(RegexParser(r"fo|o", KeywordSegment)) == "<RegexParser: 'fo|o'>"


class ExampleSegment(RawSegment):
    """A minimal example segment for testing."""

    type = "example"


def test__parser__typedparser__match(generate_test_segments):
    """Test the match method of TypedParser."""
    parser = TypedParser("single_quote", ExampleSegment)
    ctx = ParseContext(dialect=None)
    # NOTE: The second element of the sequence has single quotes
    # and the test fixture will set the type accordingly.
    segments = generate_test_segments(["foo", "'bar'"])

    result1 = parser.match(segments, 0, ctx)
    assert not result1

    result2 = parser.match(segments, 1, ctx)
    assert result2
    assert result2.matched_slice == slice(1, 2)
    assert result2.matched_class is ExampleSegment


def test__parser__typedparser__simple():
    """Test the simple method of TypedParser."""
    parser = TypedParser("single_quote", ExampleSegment)
    ctx = ParseContext(dialect=None)
    assert parser.simple(ctx) == (frozenset(), frozenset(["single_quote"]))


def test__parser__stringparser__match(generate_test_segments):
    """Test the match method of StringParser."""
    parser = StringParser("foo", ExampleSegment, type="test")
    ctx = ParseContext(dialect=None)
    segments = generate_test_segments(["foo", "bar", "foo"])

    result1 = parser.match(segments, 0, ctx)
    assert result1
    assert result1.matched_slice == slice(0, 1)
    assert result1.matched_class is ExampleSegment
    assert result1.segment_kwargs == {"instance_types": ("test",)}

    result2 = parser.match(segments, 1, ctx)
    assert not result2

    result3 = parser.match(segments, 2, ctx)
    assert result3
    assert result3.matched_slice == slice(2, 3)
    assert result3.matched_class is ExampleSegment
    assert result3.segment_kwargs == {"instance_types": ("test",)}


def test__parser__stringparser__simple():
    """Test the simple method of StringParser."""
    parser = StringParser("foo", ExampleSegment)
    ctx = ParseContext(dialect=None)
    assert parser.simple(ctx) == (frozenset(["FOO"]), frozenset())


def test__parser__regexparser__match(generate_test_segments):
    """Test the match method of RegexParser."""
    parser = RegexParser(r"b.r", ExampleSegment)
    ctx = ParseContext(dialect=None)
    segments = generate_test_segments(["foo", "bar", "boo"])

    assert not parser.match(segments, 0, ctx)
    assert not parser.match(segments, 2, ctx)

    result = parser.match(segments, 1, ctx)
    assert result
    assert result.matched_slice == slice(1, 2)
    assert result.matched_class is ExampleSegment


def test__parser__regexparser__simple():
    """Test the simple method of RegexParser."""
    parser = RegexParser(r"b.r", ExampleSegment)
    ctx = ParseContext(dialect=None)
    assert parser.simple(ctx) is None


def test__parser__multistringparser__match(generate_test_segments):
    """Test the match method of MultiStringParser."""
    parser = MultiStringParser(["foo", "bar"], ExampleSegment)
    ctx = ParseContext(dialect=None)
    segments = generate_test_segments(["foo", "fo", "bar", "boo"])

    assert not parser.match(segments, 1, ctx)
    assert not parser.match(segments, 3, ctx)

    result1 = parser.match(segments, 0, ctx)
    assert result1
    assert result1.matched_slice == slice(0, 1)
    assert result1.matched_class is ExampleSegment

    result2 = parser.match(segments, 2, ctx)
    assert result2
    assert result2.matched_slice == slice(2, 3)
    assert result2.matched_class is ExampleSegment


def test__parser__multistringparser__simple():
    """Test the MultiStringParser matchable."""
    parser = MultiStringParser(["foo", "bar"], KeywordSegment)
    ctx = ParseContext(dialect=None)
    assert parser.simple(ctx) == (frozenset(["FOO", "BAR"]), frozenset())
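

# The following is an added, illustrative sketch (not part of the original
# suite): it ties together the ``simple()`` hints and ``match()`` behaviour
# shown above. It assumes only names already exercised in this file
# (``StringParser``, ``ExampleSegment``, ``ParseContext`` and the
# ``generate_test_segments`` fixture); the test name itself is newly coined.
def test__parser__stringparser__simple_vs_match_sketch(generate_test_segments):
    """Sketch: a raw which passes the simple() hint is worth a full match()."""
    parser = StringParser("foo", ExampleSegment)
    ctx = ParseContext(dialect=None)
    segments = generate_test_segments(["foo", "bar"])
    simple_raws, simple_types = parser.simple(ctx)
    # The simple hint is an uppercased raw comparison, with no type hints.
    assert simple_raws == frozenset(["FOO"])
    assert simple_types == frozenset()
    # "foo" passes the hint (once uppercased) and also matches fully...
    assert "FOO" in simple_raws
    assert parser.match(segments, 0, ctx)
    # ...while "bar" fails the hint and, consistently, doesn't match.
    assert "BAR" not in simple_raws
    assert not parser.match(segments, 1, ctx)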
@pytest.mark.parametrize( "new_type", [None, "bar"], ) def test__parser__typedparser_rematch(new_type, generate_test_segments): """Test that TypedParser allows rematching. Because the TypedParser looks for types and then changes the type as a result, there is a risk of preventing rematching. This is a problem because we use it when checking that fix edits haven't broken the parse tree. In this example the TypedParser is looking for a "single_quote" type segment, but is due to mutate to an Example segment, which inherits directly from `RawSegment`. Unless the TypedParser steps in, this would apparently present a rematching issue. """ pre_match_types = { "single_quote", "raw", "base", } post_match_types = { # Make sure we got the "example" class "example", # But we *also* get the "single_quote" class. # On the second pass this is the main crux of the test. "single_quote", "raw", "base", } kwargs = {} expected_type = "example" if new_type: post_match_types.add(new_type) kwargs = {"type": new_type} expected_type = new_type segments = generate_test_segments(["'foo'"]) # Check types pre-match assert segments[0].class_types == pre_match_types parser = TypedParser("single_quote", ExampleSegment, **kwargs) # Just check that our assumptions about inheritance are right. assert not ExampleSegment.class_is_type("single_quote") ctx = ParseContext(dialect=None) match1 = parser.match(segments, 0, ctx) assert match1 segments1 = match1.apply(segments) # Check types post-match 1 assert segments1[0].class_types == post_match_types assert segments1[0].get_type() == expected_type assert segments1[0].to_tuple(show_raw=True) == (expected_type, "'foo'") # Do a rematch to check it works. match = parser.match(segments1, 0, ctx) assert match # Check types post-match 2 segments2 = match.apply(segments1) assert segments2[0].class_types == post_match_types assert segments2[0].get_type() == expected_type assert segments2[0].to_tuple(show_raw=True) == (expected_type, "'foo'") sqlfluff-3.4.2/test/core/parser/segments/000077500000000000000000000000001503426445100203765ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/parser/segments/__init__.py000066400000000000000000000000451503426445100225060ustar00rootroot00000000000000"""Tests for the segments module.""" sqlfluff-3.4.2/test/core/parser/segments/conftest.py000066400000000000000000000016651503426445100226050ustar00rootroot00000000000000"""Common fixtures for segment tests.""" import pytest from sqlfluff.core.parser import BaseSegment @pytest.fixture(scope="module") def raw_segments(generate_test_segments): """Construct a list of raw segments as a fixture.""" return generate_test_segments(["foobar", ".barfoo"]) @pytest.fixture(scope="module") def raw_seg(raw_segments): """Construct a raw segment as a fixture.""" return raw_segments[0] @pytest.fixture(scope="session") def DummySegment(): """Construct a raw segment as a fixture.""" class DummySegment(BaseSegment): """A dummy segment for testing with no grammar.""" type = "dummy" return DummySegment @pytest.fixture(scope="session") def DummyAuxSegment(): """Construct a raw segment as a fixture.""" class DummyAuxSegment(BaseSegment): """A different dummy segment for testing with no grammar.""" type = "dummy_aux" return DummyAuxSegment sqlfluff-3.4.2/test/core/parser/segments/segments_base_test.py000066400000000000000000000310201503426445100246220ustar00rootroot00000000000000"""Test the BaseSegment class.""" import pickle import pytest from sqlfluff.core.parser import BaseSegment, PositionMarker, RawSegment from 
sqlfluff.core.parser.segments.base import PathStep from sqlfluff.core.rules.base import LintFix from sqlfluff.core.templaters import TemplatedFile def test__parser__base_segments_type(DummySegment): """Test the .is_type() method.""" assert BaseSegment.class_is_type("base") assert not BaseSegment.class_is_type("foo") assert not BaseSegment.class_is_type("foo", "bar") assert DummySegment.class_is_type("dummy") assert DummySegment.class_is_type("base") assert DummySegment.class_is_type("base", "foo", "bar") def test__parser__base_segments_class_types(DummySegment): """Test the metaclass ._class_types attribute.""" assert DummySegment._class_types == {"dummy", "base"} def test__parser__base_segments_descendant_type_set( raw_segments, DummySegment, DummyAuxSegment ): """Test the .descendant_type_set() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.descendant_type_set == {"raw", "base", "dummy_aux"} def test__parser__base_segments_direct_descendant_type_set( raw_segments, DummySegment, DummyAuxSegment ): """Test the .direct_descendant_type_set() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.direct_descendant_type_set == {"base", "dummy_aux"} def test__parser__base_segments_to_tuple_a(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.to_tuple() == ( "dummy", (("dummy_aux", (("raw", ()), ("raw", ()))),), ) def test__parser__base_segments_to_tuple_b(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method.""" test_seg = DummySegment( [DummyAuxSegment(raw_segments + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.to_tuple() == ( "dummy", (("dummy_aux", (("raw", ()), ("raw", ()), ("dummy_aux", (("raw", ()),)))),), ) def test__parser__base_segments_to_tuple_c(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method with show_raw=True.""" test_seg = DummySegment( [DummyAuxSegment(raw_segments + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.to_tuple(show_raw=True) == ( "dummy", ( ( "dummy_aux", ( ("raw", "foobar"), ("raw", ".barfoo"), ("dummy_aux", (("raw", "foobar"),)), ), ), ), ) def test__parser__base_segments_as_record_a( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method. NOTE: In this test, note that there are lists, as some segment types are duplicated within their parent segment. """ test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.as_record() == { "dummy": {"dummy_aux": [{"raw": None}, {"raw": None}]} } def test__parser__base_segments_as_record_b( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method. NOTE: In this test, note that there are no lists, every segment type is unique within it's parent segment, and so there is no need. """ test_seg = DummySegment( [DummyAuxSegment(raw_segments[:1] + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.as_record() == { "dummy": {"dummy_aux": {"raw": None, "dummy_aux": {"raw": None}}} } def test__parser__base_segments_as_record_c( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method with show_raw=True. NOTE: In this test, note that there are no lists, every segment type is unique within it's parent segment, and so there is no need. 
""" test_seg = DummySegment( [DummyAuxSegment(raw_segments[:1] + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.as_record(show_raw=True) == { "dummy": {"dummy_aux": {"raw": "foobar", "dummy_aux": {"raw": "foobar"}}} } def test__parser__base_segments_count_segments( raw_segments, DummySegment, DummyAuxSegment ): """Test the .count_segments() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.count_segments() == 4 assert test_seg.count_segments(raw_only=True) == 2 @pytest.mark.parametrize( "list_in, result", [ (["foo"], False), (["foo", " "], True), ([" ", "foo", " "], True), ([" ", "foo"], True), ([" "], True), (["foo", " ", "foo"], False), ], ) def test__parser_base_segments_validate_non_code_ends( generate_test_segments, DummySegment, list_in, result ): """Test BaseSegment.validate_non_code_ends().""" if result: # Assert that it _does_ raise an exception. with pytest.raises(AssertionError): # Validation happens on instantiation. seg = DummySegment(segments=generate_test_segments(list_in)) else: # Check that it _doesn't_ raise an exception... seg = DummySegment(segments=generate_test_segments(list_in)) # ...even when explicitly validating. seg.validate_non_code_ends() def test__parser__base_segments_path_to(raw_segments, DummySegment, DummyAuxSegment): """Test the .path_to() method.""" test_seg_a = DummyAuxSegment(raw_segments) test_seg_b = DummySegment([test_seg_a]) # With a direct parent/child relationship we only get # one element of path. # NOTE: All the dummy segments return True for .is_code() # so that means the do appear in code_idxs. assert test_seg_b.path_to(test_seg_a) == [PathStep(test_seg_b, 0, 1, (0,))] # With a three segment chain - we get two path elements. assert test_seg_b.path_to(raw_segments[0]) == [ PathStep(test_seg_b, 0, 1, (0,)), PathStep(test_seg_a, 0, 2, (0, 1)), ] assert test_seg_b.path_to(raw_segments[1]) == [ PathStep(test_seg_b, 0, 1, (0,)), PathStep(test_seg_a, 1, 2, (0, 1)), ] def test__parser__base_segments_stubs(): """Test stub methods that have no implementation in base class.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) base_segment = BaseSegment(segments=[rs1]) with pytest.raises(NotImplementedError): base_segment.edit("foo") def test__parser__base_segments_raw(raw_seg): """Test raw segments behave as expected.""" # Check Segment Return assert raw_seg.segments == () assert raw_seg.raw == "foobar" # Check Formatting and Stringification assert str(raw_seg) == repr(raw_seg) == "" assert ( raw_seg.stringify(ident=1, tabsize=2) == "[L: 1, P: 1] | raw: " " 'foobar'\n" ) # Check tuple assert raw_seg.to_tuple() == ("raw", ()) # Check tuple assert raw_seg.to_tuple(show_raw=True) == ("raw", "foobar") def test__parser__base_segments_base(raw_segments, fresh_ansi_dialect, DummySegment): """Test base segments behave as expected.""" base_seg = DummySegment(raw_segments) # Check we assume the position correctly assert ( base_seg.pos_marker.start_point_marker() == raw_segments[0].pos_marker.start_point_marker() ) assert ( base_seg.pos_marker.end_point_marker() == raw_segments[-1].pos_marker.end_point_marker() ) # Check that we correctly reconstruct the raw assert base_seg.raw == "foobar.barfoo" # Check tuple assert base_seg.to_tuple() == ( "dummy", (raw_segments[0].to_tuple(), raw_segments[1].to_tuple()), ) # Check Formatting and Stringification assert str(base_seg) == repr(base_seg) == "" assert base_seg.stringify(ident=1, tabsize=2) == ( "[L: 
1, P: 1] | dummy:\n" "[L: 1, P: 1] | raw: " " 'foobar'\n" "[L: 1, P: 7] | raw: " " '.barfoo'\n" ) def test__parser__base_segments_raw_compare(): """Test comparison of raw segments.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) rs2 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) assert rs1 == rs2 def test__parser__base_segments_base_compare(DummySegment, DummyAuxSegment): """Test comparison of base segments.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) rs2 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) ds1 = DummySegment([rs1]) ds2 = DummySegment([rs2]) dsa2 = DummyAuxSegment([rs2]) # Check for equality assert ds1 == ds2 # Check a different match on the same details are not the same assert ds1 != dsa2 def test__parser__base_segments_pickle_safe(raw_segments): """Test pickling and unpickling of BaseSegment.""" test_seg = BaseSegment([BaseSegment(raw_segments)]) test_seg.set_as_parent() pickled = pickle.dumps(test_seg) result_seg = pickle.loads(pickled) assert test_seg == result_seg # Check specifically the treatment of the parent position. assert result_seg.segments[0].get_parent()[0] is result_seg def test__parser__base_segments_copy_isolation(DummySegment, raw_segments): """Test copy isolation in BaseSegment. First on one of the raws and then on the dummy segment. """ # On a raw a_seg = raw_segments[0] a_copy = a_seg.copy() assert a_seg is not a_copy assert a_seg == a_copy assert a_seg.pos_marker is a_copy.pos_marker a_copy.pos_marker = None assert a_copy.pos_marker is None assert a_seg.pos_marker is not None # On a base b_seg = DummySegment(segments=raw_segments) b_copy = b_seg.copy() assert b_seg is not b_copy assert b_seg == b_copy assert b_seg.pos_marker is b_copy.pos_marker b_copy.pos_marker = None assert b_copy.pos_marker is None assert b_seg.pos_marker is not None # On addition to a lint Fix fix = LintFix("replace", a_seg, [b_seg]) for s in fix.edit: assert not s.pos_marker assert b_seg.pos_marker def test__parser__base_segments_parent_ref(DummySegment, raw_segments): """Test getting and setting parents on BaseSegment.""" # Check initially no parent (because not set) assert not raw_segments[0].get_parent() # Add it to a segment (which also sets the parent value) seg = DummySegment(segments=raw_segments) # The DummySegment shouldn't have a parent. assert seg.get_parent() is None assert seg.segments[0].get_parent()[0] is seg assert seg.segments[1].get_parent()[0] is seg # Remove segment from parent, but don't unset. # Should still check an return None. seg_0 = seg.segments[0] seg.segments = seg.segments[1:] assert seg_0 not in seg.segments assert not seg_0.get_parent() # Check the other still works. 
assert seg.segments[0].get_parent()[0] def test__parser__raw_segment_raw_normalized(): """Test comparison of raw segments.""" template = TemplatedFile.from_string('"a"""."e"') rs1 = RawSegment( '"a"""', PositionMarker(slice(0, 5), slice(0, 5), template), quoted_value=(r'"((?:[^"]|"")*)"', 1), escape_replacements=[('""', '"')], casefold=str.upper, ) rs2 = RawSegment( ".", PositionMarker(slice(6, 7), slice(6, 7), template), ) rs3 = RawSegment( '"e"', PositionMarker(slice(8, 10), slice(8, 10), template), quoted_value=(r'"((?:[^"]|"")*)"', 1), escape_replacements=[('""', '"')], casefold=str.upper, ) bs1 = BaseSegment( ( rs1, rs2, rs3, ), PositionMarker(slice(0, 10), slice(0, 10), template), ) assert rs1.raw == '"a"""' assert rs1.raw_normalized(False) == 'a"' assert rs1.raw_normalized() == 'A"' assert rs2.raw == "." assert rs2.raw_normalized(False) == "." assert rs2.raw_normalized() == "." assert rs3.raw == '"e"' assert rs3.raw_normalized(False) == "e" assert rs3.raw_normalized() == "E" assert bs1.raw == '"a"""."e"' assert bs1.raw_normalized() == 'A".E' sqlfluff-3.4.2/test/core/parser/segments/segments_common_test.py000066400000000000000000000022011503426445100251770ustar00rootroot00000000000000"""Test the KeywordSegment class.""" from sqlfluff.core.parser import KeywordSegment, StringParser from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult def test__parser__core_keyword(raw_segments): """Test the Mystical KeywordSegment.""" # First make a keyword FooKeyword = StringParser("foobar", KeywordSegment, type="bar") # Check it looks as expected assert FooKeyword.template.upper() == "FOOBAR" ctx = ParseContext(dialect=None) # Match it against a list and check it doesn't match assert not FooKeyword.match(raw_segments, 1, parse_context=ctx) # Match it against the final element (returns tuple) m = FooKeyword.match(raw_segments, 0, parse_context=ctx) assert m assert m == MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("bar",)}, ) segments = m.apply(raw_segments) assert len(segments) == 1 segment = segments[0] assert segment.class_types == { "base", "word", "keyword", "raw", "bar", } sqlfluff-3.4.2/test/core/parser/segments/segments_file_test.py000066400000000000000000000006611503426445100246360ustar00rootroot00000000000000"""Test the BaseFileSegment class.""" from sqlfluff.core.parser import BaseFileSegment def test__parser__base_segments_file(raw_segments): """Test BaseFileSegment to behave as expected.""" base_seg = BaseFileSegment(raw_segments, fname="/some/dir/file.sql") assert base_seg.type == "file" assert base_seg.file_path == "/some/dir/file.sql" assert base_seg.can_start_end_non_code assert base_seg.allow_empty sqlfluff-3.4.2/test/core/parser/segments/segments_raw_test.py000066400000000000000000000017061503426445100245110ustar00rootroot00000000000000"""Test the RawSegment class.""" from sqlfluff.core.parser.segments.base import PathStep def test__parser__raw_get_raw_segments(raw_segments): """Test niche case of calling get_raw_segments on a raw segment.""" for s in raw_segments: assert s.get_raw_segments() == [s] def test__parser__raw_segments_with_ancestors( raw_segments, DummySegment, DummyAuxSegment ): """Test raw_segments_with_ancestors. This is used in the reflow module to assess parse depth. 
""" test_seg = DummySegment([DummyAuxSegment(raw_segments[:1]), raw_segments[1]]) # Result should be the same raw segment, but with appropriate parents assert test_seg.raw_segments_with_ancestors == [ ( raw_segments[0], [ PathStep(test_seg, 0, 2, (0, 1)), PathStep(test_seg.segments[0], 0, 1, (0,)), ], ), (raw_segments[1], [PathStep(test_seg, 1, 2, (0, 1))]), ] sqlfluff-3.4.2/test/core/plugin_test.py000066400000000000000000000103461503426445100201700ustar00rootroot00000000000000"""Plugin related tests.""" import importlib.metadata import logging import sys import pytest from sqlfluff import __version__ as pkg_version from sqlfluff.core.config import FluffConfig from sqlfluff.core.plugin.host import ( _get_sqlfluff_version, _load_plugin, get_plugin_manager, purge_plugin_manager, ) from sqlfluff.utils.testing.logging import fluff_log_catcher def test__plugin_manager_registers_example_plugin(): """Test that the example plugin is registered. This test also tests that warnings are raised on the import of plugins which have their imports in the wrong place (e.g. the example plugin). That means we need to make sure the plugin is definitely reimported at the start of this test, so we can see any warnings raised on imports. To do this we clear the plugin manager cache and also forcibly unload the example plugin modules if they are already loaded. This ensures that we can capture any warnings raised by importing the module. """ purge_plugin_manager() # We still to a try/except here, even though it's only run within # the context of a test because the module may or may not already # be imported depending on the order that the tests run in. try: del sys.modules["sqlfluff_plugin_example"] except KeyError: pass try: del sys.modules["sqlfluff_plugin_example.rules"] except KeyError: pass with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog: plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic. # Use sets in case the dbt plugin (or other plugins) are # already installed too. installed_plugins = set( plugin_module.__name__ for plugin_module in plugin_manager.get_plugins() ) print(f"Installed plugins: {installed_plugins}") assert installed_plugins.issuperset( { "sqlfluff_plugin_example", "sqlfluff.core.plugin.lib", } ) # At this stage we should also check that the example plugin # also raises a warning for it's import location. assert ( "Rule 'Rule_Example_L001' has been imported before all plugins " "have been fully loaded" ) in caplog.text @pytest.mark.parametrize( "rule_ref", # Check both V1 plugin ["Rule_Example_L001"], ) def test__plugin_example_rules_returned(rule_ref): """Test that the example rules from the plugin are returned.""" plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic rule_names = [ rule.__name__ for rules in plugin_manager.hook.get_rules() for rule in rules ] print(f"Rule names: {rule_names}") assert rule_ref in rule_names @pytest.mark.parametrize( "rule_ref,config_option", # Check both V1 and V2 rule plugins. 
[("Example_L001", "forbidden_columns")], ) def test__plugin_default_config_read(rule_ref, config_option): """Test that the example plugin default config is merged into FluffConfig.""" fluff_config = FluffConfig(overrides={"dialect": "ansi"}) # The plugin import order is non-deterministic print(f"Detected config sections: {fluff_config._configs['rules'].keys()}") # Check V1 assert config_option in fluff_config._configs["rules"][rule_ref] class MockEntryPoint(importlib.metadata.EntryPoint): """Fake Entry Point which just raises an exception on load.""" def load(self): """Raise an exception on load.""" raise ValueError("TEST ERROR") def test__plugin_handle_bad_load(): """Test that we can safely survive a plugin which fails to load.""" # Mock fake plugin ep = MockEntryPoint("test_name", "test_value", "sqlfluff") plugin_manager = get_plugin_manager() with fluff_log_catcher(logging.WARNING, "sqlfluff.plugin") as caplog: _load_plugin(plugin_manager, ep, "plugin_name", "v1.2.3") # Assert that there was a warning assert "ERROR: Failed to load SQLFluff plugin" in caplog.text assert "plugin_name" in caplog.text assert "TEST ERROR" in caplog.text def test__plugin_get_version(): """Test the plugin method of getting the version gets the right version.""" assert _get_sqlfluff_version() == pkg_version sqlfluff-3.4.2/test/core/rules/000077500000000000000000000000001503426445100164075ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/rules/__init__.py000066400000000000000000000003141503426445100205160ustar00rootroot00000000000000"""Tests for the rules module. Where possible, tests should be contained within the yaml test cases. For python based tests where this is not possible, the rule code should be in the test file name. """ sqlfluff-3.4.2/test/core/rules/crawlers_test.py000066400000000000000000000026761503426445100216550ustar00rootroot00000000000000"""Tests for crawlers.""" import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.core.linter.linter import Linter from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import ( ParentOfSegmentCrawler, RootOnlyCrawler, SegmentSeekerCrawler, ) from sqlfluff.core.templaters.base import TemplatedFile @pytest.mark.parametrize( "CrawlerType,crawler_kwargs,raw_sql_in,target_raws_out", [ (RootOnlyCrawler, {}, "SELECT 1 + 2", ["SELECT 1 + 2"]), ( SegmentSeekerCrawler, {"types": {"numeric_literal"}}, "SELECT 1 + 2", ["1", "2"], ), ( ParentOfSegmentCrawler, {"types": {"numeric_literal"}}, "SELECT 1 + 2", ["1 + 2"], ), ], ) def test_rules_crawlers(CrawlerType, crawler_kwargs, raw_sql_in, target_raws_out): """Test Crawlers.""" cfg = FluffConfig(overrides={"dialect": "ansi"}) linter = Linter(config=cfg) root = linter.parse_string(raw_sql_in).tree root_context = RuleContext( dialect=cfg.get("dialect_obj"), fix=True, templated_file=TemplatedFile(raw_sql_in, ""), path=None, segment=root, config=cfg, ) crawler = CrawlerType(**crawler_kwargs) result_raws = [context.segment.raw for context in crawler.crawl(root_context)] assert result_raws == target_raws_out sqlfluff-3.4.2/test/core/rules/docstring_test.py000066400000000000000000000043671503426445100220260ustar00rootroot00000000000000"""Test rules docstring.""" import re import pytest from sqlfluff import lint from sqlfluff.core.plugin.host import get_plugin_manager # NOTE: python 3.13 treats docstring whitespace differently to previous # versions. Not critical for rendering, but does affect how we test for # content here. 
# https://docs.python.org/3.13/whatsnew/3.13.html#other-language-changes

KEYWORD_ANTI = re.compile(r"\*\*Anti-pattern\*\*")
KEYWORD_BEST = re.compile(r"\*\*Best practice\*\*")
KEYWORD_CODE_BLOCK = re.compile(r"\.\. code-block:: (sql|jinja)\n")


@pytest.mark.parametrize(
    "content,min_count",
    [
        (KEYWORD_ANTI, 1),
        (KEYWORD_BEST, 1),
        (KEYWORD_CODE_BLOCK, 2),
    ],
)
def test_content_count(content, min_count):
    """Test that docstrings have the specified content."""
    for plugin_rules in get_plugin_manager().hook.get_rules():
        for rule in plugin_rules:
            if rule._check_docstring is True:
                assert len(content.findall(rule.__doc__)) >= min_count, (
                    f"{rule.__name__} content {content} does not occur at least "
                    f"{min_count} times"
                )


def test_keyword_anti_before_best():
    """Test that the anti-pattern comes before the best practice in docstrings."""
    for plugin_rules in get_plugin_manager().hook.get_rules():
        for rule in plugin_rules:
            if rule._check_docstring is True:
                best_match = KEYWORD_BEST.search(rule.__doc__)
                anti_match = KEYWORD_ANTI.search(rule.__doc__)
                assert best_match
                assert anti_match
                best_pos = best_match.start()
                anti_pos = anti_match.start()
                assert anti_pos < best_pos, (
                    f"{rule.__name__} keyword {KEYWORD_BEST} appears before "
                    f"{KEYWORD_ANTI}"
                )


def test_backtick_replace():
    """Test replacing docstring double backticks for lint results."""
    sql = """
    SELECT
        DISTINCT(a),
        b
    FROM foo
    """
    result = lint(sql, rules=["ST08"])
    # ST08 docstring looks like:
    # ``DISTINCT`` used with parentheses.
    # Check the double backticks (``) get replaced by a single quote (').
    assert result[0]["description"] == "'DISTINCT' used with parentheses."
sqlfluff-3.4.2/test/core/rules/functional/000077500000000000000000000000001503426445100205515ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/rules/functional/raw_file_slices_test.py000066400000000000000000000030441503426445100253150ustar00rootroot00000000000000"""Tests for the raw_file_slices module."""

import pytest

from sqlfluff.core.templaters.base import RawFileSlice
from sqlfluff.utils.functional import raw_file_slices

rs_templated_abc = RawFileSlice("{{abc}}", "templated", 0)
rs_templated_def = RawFileSlice("{{def}}", "templated", 0)
rs_literal_abc = RawFileSlice("abc", "literal", 0)


@pytest.mark.parametrize(
    ["input", "expected"],
    [
        [
            raw_file_slices.RawFileSlices(rs_templated_abc, templated_file=None),
            True,
        ],
        [
            raw_file_slices.RawFileSlices(rs_templated_def, templated_file=None),
            False,
        ],
        [
            raw_file_slices.RawFileSlices(
                rs_templated_abc, rs_templated_def, templated_file=None
            ),
            False,
        ],
    ],
)
def test_slices_all(input, expected):
    """Test the "all()" function."""
    assert input.all(lambda s: "abc" in s.raw) == expected


@pytest.mark.parametrize(
    ["input", "expected"],
    [
        [
            raw_file_slices.RawFileSlices(rs_templated_abc, templated_file=None),
            True,
        ],
        [
            raw_file_slices.RawFileSlices(rs_templated_def, templated_file=None),
            False,
        ],
        [
            raw_file_slices.RawFileSlices(
                rs_templated_abc, rs_templated_def, templated_file=None
            ),
            True,
        ],
    ],
)
def test_slices_any(input, expected):
    """Test the "any()" function."""
    assert input.any(lambda s: "abc" in s.raw) == expected
sqlfluff-3.4.2/test/core/rules/functional/segments_test.py000066400000000000000000000105161503426445100240120ustar00rootroot00000000000000"""Tests for the segments module."""

import pytest

import sqlfluff.utils.functional.segment_predicates as sp
from sqlfluff.core.linter.linter import Linter
from sqlfluff.core.parser.segments.raw import RawSegment
from sqlfluff.utils.functional import segments

seg1 = RawSegment("s1")
seg2 = RawSegment("s2")
seg3 =
RawSegment("s3") seg4 = RawSegment("s4") @pytest.mark.parametrize( ["lhs", "rhs", "expected"], [ [ segments.Segments(seg1, seg2), segments.Segments(seg3, seg4), segments.Segments(seg1, seg2, seg3, seg4), ], [ segments.Segments(seg3, seg4), segments.Segments(seg1, seg2), segments.Segments(seg3, seg4, seg1, seg2), ], [ segments.Segments(seg1, seg2), [seg3, seg4], segments.Segments(seg1, seg2, seg3, seg4), ], [ [seg1, seg2], segments.Segments(seg3, seg4), segments.Segments(seg1, seg2, seg3, seg4), ], ], ) def test_segments_add(lhs, rhs, expected): """Verify addition of Segments objects with themselves and lists.""" result = lhs + rhs assert isinstance(result, segments.Segments) assert result == expected @pytest.mark.parametrize( ["input", "expected"], [ [ segments.Segments(seg1, seg2), True, ], [ segments.Segments(seg1, seg3), False, ], ], ) def test_segments_all(input, expected): """Test the "all()" function.""" assert input.all(lambda s: s.raw[-1] <= "2") == expected @pytest.mark.parametrize( ["input", "expected"], [ [ segments.Segments(seg1, seg2), True, ], [ segments.Segments(seg1, seg3), True, ], [ segments.Segments(seg3), False, ], ], ) def test_segments_any(input, expected): """Test the "any()" function.""" assert input.any(lambda s: s.raw[-1] <= "2") == expected def test_segments_reversed(): """Test the "reversed()" function.""" assert segments.Segments(seg1, seg2).reversed() == segments.Segments(seg2, seg1) def test_segments_raw_slices_no_templated_file(): """Test that raw_slices() fails if TemplatedFile not provided.""" with pytest.raises(ValueError): segments.Segments(seg1).raw_slices def test_segments_first_no_predicate(): """Test the "first()" function with no predicate.""" assert segments.Segments(seg1, seg2).first() == segments.Segments(seg1) def test_segments_first_with_predicate(): """Test the "first()" function with a predicate.""" assert segments.Segments(seg1, seg2).first(sp.is_meta()) == segments.Segments() def test_segments_last(): """Test the "last()" function.""" assert segments.Segments(seg1, seg2).last() == segments.Segments(seg2) def test_segments_apply(): """Test the "apply()" function.""" assert segments.Segments(seg1, seg2).apply(lambda s: s.raw[-1]) == ["1", "2"] @pytest.mark.parametrize( ["function", "expected"], [ [sp.get_type(), ["raw", "raw"]], [sp.is_comment(), [False, False]], [sp.is_raw(), [True, True]], ], ) def test_segments_apply_functions(function, expected): """Test the "apply()" function with the "get_name()" function.""" assert segments.Segments(seg1, seg2).apply(function) == expected def test_segment_predicates_and(): """Test the "and_()" function.""" assert segments.Segments(seg1, seg2).select( select_if=sp.and_(sp.is_raw(), lambda s: s.raw[-1] == "1") ) == segments.Segments(seg1) assert ( segments.Segments(seg1, seg2).select( select_if=sp.and_(sp.is_raw(), lambda s: s.raw[-1] == "3") ) == segments.Segments() ) def test_segments_recursive_crawl(): """Test the "recursive_crawl()" function.""" sql = """ WITH cte AS ( SELECT * FROM tab_a ) SELECT cte.col_a, tab_b.col_b FROM cte INNER JOIN tab_b; """ linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) functional_tree = segments.Segments(parsed.root_variant().tree) assert len(functional_tree.recursive_crawl("common_table_expression")) == 1 assert len(functional_tree.recursive_crawl("table_reference")) == 3 sqlfluff-3.4.2/test/core/rules/noqa_test.py000066400000000000000000000456651503426445100207760ustar00rootroot00000000000000"""Tests for applying noqa directives and the IgnoreMask.""" 
import logging import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLBaseError, SQLParseError from sqlfluff.core.rules.noqa import IgnoreMask, NoQaDirective # noqa tests require a rule_set, therefore we construct dummy rule set for glob matching. dummy_rule_map = Linter().get_rulepack().reference_map class DummyLintError(SQLBaseError): """Fake lint error used by tests, similar to SQLLintError.""" def __init__(self, line_no: int, code: str = "LT01"): self._code = code super().__init__(line_no=line_no) def test__linter__raises_malformed_noqa(): """A badly formatted noqa gets raised as a parsing error.""" lntr = Linter(dialect="ansi") result = lntr.lint_string_wrapped("select 1 --noqa missing semicolon") with pytest.raises(SQLParseError): result.check_tuples() @pytest.mark.parametrize( "input,expected", [ ("", None), ("noqa", NoQaDirective(0, 0, None, None, "noqa")), ("noqa?", SQLParseError), ("noqa:", NoQaDirective(0, 0, None, None, "noqa:")), ( "noqa:LT01,LT02", NoQaDirective(0, 0, ("LT01", "LT02"), None, "noqa:LT01,LT02"), ), ( "noqa: enable=LT01", NoQaDirective(0, 0, ("LT01",), "enable", "noqa: enable=LT01"), ), ( "noqa: disable=CP01", NoQaDirective(0, 0, ("CP01",), "disable", "noqa: disable=CP01"), ), ( "noqa: disable=all", NoQaDirective(0, 0, None, "disable", "noqa: disable=all"), ), ("noqa: disable", SQLParseError), ( "Inline comment before inline ignore -- noqa:LT01,LT02", NoQaDirective(0, 0, ("LT01", "LT02"), None, "noqa:LT01,LT02"), ), # Test selection with rule globs ( "noqa:L04*", NoQaDirective( 0, 0, ( "AM04", # L044 is an alias of AM04 "CP04", # L040 is an alias of CP04 "CV04", # L047 is an alias of CV04 "CV05", # L049 is an alias of CV05 "JJ01", # L046 is an alias of JJ01 "LT01", # L048 is an alias of LT01 "LT10", # L041 is an alias of LT10 "ST02", # L043 is an alias of ST02 "ST03", # L045 is an alias of ST03 "ST05", # L042 is an alias of ST05 ), None, "noqa:L04*", ), ), # Test selection with aliases. ( "noqa:L002", NoQaDirective(0, 0, ("LT02",), None, "noqa:L002"), ), # Test selection with alias globs. ( "noqa:L00*", NoQaDirective( 0, 0, ("LT01", "LT02", "LT03", "LT12"), None, "noqa:L00*", ), ), # Test selection with names. ( "noqa:capitalisation.keywords", NoQaDirective(0, 0, ("CP01",), None, "noqa:capitalisation.keywords"), ), # Test selection with groups. ( "noqa:capitalisation", NoQaDirective( 0, 0, ("CP01", "CP02", "CP03", "CP04", "CP05"), None, "noqa:capitalisation", ), ), ], ) def test_parse_noqa(input, expected): """Test correct of "noqa" comments.""" result = IgnoreMask._parse_noqa(input, 0, 0, reference_map=dummy_rule_map) if not isinstance(expected, type): assert result == expected else: # With exceptions, just check the type, not the contents. 
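        # (e.g. "noqa?" and "noqa: disable" in the cases above are expected
        # to produce a SQLParseError.)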
assert isinstance(result, expected) def test_parse_noqa_no_dups(): """Test overlapping glob expansions don't return duplicate rules in noqa.""" result = IgnoreMask._parse_noqa( comment="noqa:L0*5,L01*", line_no=0, line_pos=0, reference_map=dummy_rule_map ) assert len(result.rules) == len(set(result.rules)) @pytest.mark.parametrize( "noqa,violations,expected,used_noqas", [ [ [], [DummyLintError(1)], [ 0, ], [], ], [ [dict(comment="noqa: LT01", line_no=1)], [DummyLintError(1)], [], [0], ], [ [dict(comment="noqa: LT01", line_no=2)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: LT02", line_no=1)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: enable=LT01", line_no=1)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: disable=LT01", line_no=1)], [DummyLintError(1)], [], [0], ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(1)], [0], [], # The disable wasn't used, neither was the enable. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(2)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(3)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(4)], [0], [1], # The enable was matched, but the disable wasn't used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(1)], [0], # TODO: This is an odd edge case, where we drop out in our # evaluation too early so see whether the "enable" is ever # matched. In this case _both_ are effectively unused, because # we never evaluate the last one. For a first pass I think this # might be an acceptable edge case. [], ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(2)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(3)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(4)], [0], [1], # The enable was matched, but the disable wasn't used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [ DummyLintError(2, code="LT01"), DummyLintError(2, code="LT02"), DummyLintError(4, code="LT01"), DummyLintError(4, code="LT02"), ], [1, 2, 3], [0, 1], # The enable matched. The disable also matched rules. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [ DummyLintError(2, code="LT01"), DummyLintError(2, code="LT02"), DummyLintError(4, code="LT01"), DummyLintError(4, code="LT02"), ], [2], [0, 1], # The enable matched the disable. The disable also matched ], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=1, ) ], [DummyLintError(1)], [0], [], ], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=1, ), dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=2, ), ], [ DummyLintError(1), DummyLintError(2), ], [0, 1], [], # Neither used because wrong code. ], [ [ dict( comment="Inline comment before inline ignore -- noqa: L01*", line_no=1, ), ], [ DummyLintError(1), ], [0], [], # Neither used because wrong code. 
], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT*", line_no=1, ), ], [ DummyLintError(1), ], [], [0], # Matched indirectly ], ], ids=[ "1_violation_no_ignore", "1_violation_ignore_specific_line", "1_violation_ignore_different_specific_line", "1_violation_ignore_different_specific_rule", "1_violation_ignore_enable_this_range", "1_violation_ignore_disable_this_range", "1_violation_line_1_ignore_disable_specific_2_3", "1_violation_line_2_ignore_disable_specific_2_3", "1_violation_line_3_ignore_disable_specific_2_3", "1_violation_line_4_ignore_disable_specific_2_3", "1_violation_line_1_ignore_disable_all_2_3", "1_violation_line_2_ignore_disable_all_2_3", "1_violation_line_3_ignore_disable_all_2_3", "1_violation_line_4_ignore_disable_all_2_3", "4_violations_two_types_disable_specific_enable_all", "4_violations_two_types_disable_all_enable_specific", "1_violations_comment_inline_ignore", "2_violations_comment_inline_ignore", "1_violations_comment_inline_glob_ignore_unmatch", "1_violations_comment_inline_glob_ignore_match", ], ) def test_linted_file_ignore_masked_violations( noqa: dict, violations: list[SQLBaseError], expected, used_noqas ): """Test that _ignore_masked_violations() correctly filters violations.""" ignore_mask = [ IgnoreMask._parse_noqa(reference_map=dummy_rule_map, line_pos=0, **c) for c in noqa ] result = IgnoreMask(ignore_mask).ignore_masked_violations(violations) expected_violations = [v for i, v in enumerate(violations) if i in expected] assert expected_violations == result # Check whether "used" evaluation works expected_used = [ignore_mask[i] for i, _ in enumerate(noqa) if i in used_noqas] actually_used = [i for i in ignore_mask if i.used] assert actually_used == expected_used def test_linter_noqa(): """Test "noqa" feature at the higher "Linter" level.""" lntr = Linter( config=FluffConfig( overrides={ "dialect": "bigquery", # Use bigquery to allow hash comments. "rules": "AL02, LT04", } ) ) sql = """ SELECT col_a a, col_b b, --noqa: disable=AL02 col_c c, col_d d, --noqa: enable=AL02 col_e e, col_f f, col_g g, --noqa col_h h, col_i i, --noqa:AL02 col_j j, col_k k, --noqa:AL03 col_l l, col_m m, col_n n, --noqa: disable=all col_o o, col_p p, --noqa: enable=all col_q q, --Inline comment --noqa: AL02 col_r r, /* Block comment */ --noqa: AL02 col_s s # hash comment --noqa: AL02 -- We trigger both AL02 (implicit aliasing) -- and LT04 (leading commas) here to -- test glob ignoring of multiple rules. , col_t t --noqa: L01* , col_u u -- Some comment --noqa: L01* , col_v v -- We can ignore both AL02 and LT04 -- noqa: L01[29] FROM foo """ result = lntr.lint_string(sql) violations = result.get_violations() assert {3, 6, 7, 8, 10, 12, 13, 14, 15, 18} == {v.line_no for v in violations} def test_linter_noqa_with_templating(): """Similar to test_linter_noqa, but uses templating (Jinja).""" lntr = Linter( config=FluffConfig( overrides={ "dialect": "bigquery", # Use bigquery to allow hash comments. 
"templater": "jinja", "rules": "LT05", } ) ) sql = "\n" '"{%- set a_var = ["1", "2"] -%}\n' "SELECT\n" " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_" "templated_sql_files, --noqa: LT05\n" " this_is_not_so_big a, --Inline comment --noqa: AL02\n" " this_is_not_so_big b, /* Block comment */ --noqa: AL02\n" " this_is_not_so_big c, # hash comment --noqa: AL02\n" " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_" "templated_sql_files, --noqa: L01*\n" "FROM\n" " a_table\n" " " result = lntr.lint_string(sql) assert not result.get_violations() def test_linter_noqa_template_errors(): """Similar to test_linter_noqa, but uses templating (Jinja).""" lntr = Linter( config=FluffConfig( overrides={ "templater": "jinja", "dialect": "ansi", } ) ) sql = """select * --noqa: TMP from raw where balance_date >= {{ execution_date - macros.timedelta() }} --noqa: TMP """ result = lntr.lint_string(sql) assert not result.get_violations() @pytest.mark.parametrize("disable_noqa", [True, False]) @pytest.mark.parametrize( "sql", ["SELEC * FROM foo -- noqa: PRS\n", "{% if 1 > '2' %} -- noqa: TMP\n"] ) def test_linter_noqa_prs(sql, disable_noqa, caplog): """Test "noqa" feature to ignore PRS or TMP at the higher "Linter" level. Because templating and parsing failures prevent a fully formed parse tree to be formed the rely on slightly different routines to ensure ignores are still applied. """ lntr = Linter( config=FluffConfig( overrides={ "disable_noqa": disable_noqa, "dialect": "ansi", } ) ) with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = lntr.lint_string(sql) violations = result.get_violations() # In both the templating fail and parsing fail cases, the failures should be # ignored because of the inline ignore, _unless_ `disable_noqa`` is set. if disable_noqa: assert violations else: assert not violations def test_linter_noqa_tmp(): """Test "noqa" feature to ignore TMP at the higher "Linter" level.""" lntr = Linter( config=FluffConfig( overrides={ "exclude_rules": "LT13", "dialect": "ansi", } ) ) sql = """ SELECT {{ col_a }} AS a -- noqa: TMP,PRS FROM foo; """ result = lntr.lint_string(sql) print(result.tree.stringify()) violations = result.get_violations() assert not violations def test_linter_noqa_disable(): """Test "noqa" comments can be disabled via the config.""" lntr_noqa_enabled = Linter( config=FluffConfig( overrides={ "rules": "AL02", "dialect": "ansi", } ) ) lntr_noqa_disabled = Linter( config=FluffConfig( overrides={ "disable_noqa": True, "rules": "AL02", "dialect": "ansi", } ) ) # This query raises AL02, but it is being suppressed by the inline noqa comment. # We can ignore this comment by setting disable_noqa = True in the config # or by using the --disable-noqa flag in the CLI. sql = """ SELECT col_a a --noqa: AL02 FROM foo """ # Verify that noqa works as expected with disable_noqa = False (default). result_noqa_enabled = lntr_noqa_enabled.lint_string(sql) violations_noqa_enabled = result_noqa_enabled.get_violations() assert len(violations_noqa_enabled) == 0 # Verify that noqa comment is ignored with disable_noqa = True. 
    result_noqa_disabled = lntr_noqa_disabled.lint_string(sql)
    violations_noqa_disabled = result_noqa_disabled.get_violations()
    assert len(violations_noqa_disabled) == 1
    assert violations_noqa_disabled[0].rule.code == "AL02"


def test_linter_disable_noqa_except():
    """Test "noqa" comments can be partially disabled via the config."""
    lntr_disable_noqa_except_al02 = Linter(
        config=FluffConfig(
            overrides={
                "disable_noqa_except": "AL02",
                "rules": "AL02, CP01",
                "dialect": "ansi",
            }
        )
    )
    lntr_disable_noqa_except_core = Linter(
        config=FluffConfig(
            overrides={
                "disable_noqa_except": "core",
                "rules": "AL02, CP01",
                "dialect": "ansi",
            }
        )
    )
    # This query raises AL02 and CP01, but they are being suppressed by the
    # inline noqa comments. We can partially ignore these comments by setting
    # disable_noqa_except = rule_list in the config or by using the
    # --disable-noqa-except option in the CLI.
    sql = """
    SELECT
        col_a a, --noqa: AL02
        col_b b --noqa: aliasing
    from foo; --noqa: CP01
    """

    # Verify that noqa comment is ignored with
    # disable_noqa_except = AL02 (base rule name).
    result_disable_noqa_except_al02 = lntr_disable_noqa_except_al02.lint_string(sql)
    violations_disable_noqa_except_al02 = (
        result_disable_noqa_except_al02.get_violations()
    )
    assert len(violations_disable_noqa_except_al02) == 1
    assert violations_disable_noqa_except_al02[0].rule.code == "CP01"

    # Verify that noqa works as expected with disable_noqa_except = core (rule alias).
    result_disable_noqa_except_core = lntr_disable_noqa_except_core.lint_string(sql)
    violations_disable_noqa_except_core = (
        result_disable_noqa_except_core.get_violations()
    )
    assert len(violations_disable_noqa_except_core) == 0
sqlfluff-3.4.2/test/core/rules/reference_test.py000066400000000000000000000040721503426445100217610ustar00rootroot00000000000000"""Test components for working with object and table references."""

import pytest

from sqlfluff.core.rules import reference


@pytest.mark.parametrize(
    "possible_references, targets, result",
    [
        # Empty list of references is always True.
        [[], [("abc",)], True],
        # Simple cases: one reference, one target.
        [[("agent1",)], [("agent1",)], True],
        [[("agent1",)], [("customer",)], False],
        # Multiple references. If any match, good.
        [[("bar",), ("user_id",)], [("bar",)], True],
        [[("foo",), ("user_id",)], [("bar",)], False],
        # Multiple targets. If any reference matches, good.
        [[("table1",)], [("table1",), ("table2",), ("table3",)], True],
        [[("tbl2",)], [("db", "sc", "tbl1")], False],
        [[("tbl2",)], [("db", "sc", "tbl2")], True],
        # Multi-part references and targets. If one tuple is shorter than
        # the other, checks for a suffix match.
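        # e.g. ("sc", "tbl1") matches ("db", "sc", "tbl1") below, while
        # ("rc", "tbl1") does not, because only trailing parts are compared.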
[ [ ( "rc", "tbl1", ) ], [("db", "sc", "tbl1")], False, ], [ [ ( "sc", "tbl1", ) ], [("db", "sc", "tbl1")], True, ], [ [ ( "cb", "sc", "tbl1", ) ], [("db", "sc", "tbl1")], False, ], [ [ ( "db", "sc", "tbl1", ) ], [("db", "sc", "tbl1")], True, ], [[("public", "agent1")], [("agent1",)], True], [[("public", "agent1")], [("public",)], False], ], ) def test_object_ref_matches_table(possible_references, targets, result): """Test object_ref_matches_table().""" assert reference.object_ref_matches_table(possible_references, targets) == result sqlfluff-3.4.2/test/core/rules/rules_test.py000066400000000000000000000344601503426445100211610ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import logging import pytest from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.linter import RuleTuple from sqlfluff.core.parser import WhitespaceSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import BaseRule, LintFix, LintResult, get_ruleset from sqlfluff.core.rules.crawlers import RootOnlyCrawler, SegmentSeekerCrawler from sqlfluff.core.rules.doc_decorators import ( document_configuration, document_fix_compatible, document_groups, ) from sqlfluff.core.rules.loader import get_rules_from_path from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.utils.testing.logging import fluff_log_catcher from sqlfluff.utils.testing.rules import get_rule_from_set from test.fixtures.rules.custom.L000 import Rule_L000 from test.fixtures.rules.custom.S000 import Rule_S000 class Rule_T042(BaseRule): """A dummy rule.""" groups = ("all",) def _eval(self, context): pass class Rule_T001(BaseRule): """A deliberately malicious rule. **Anti-pattern** Blah blah """ groups = ("all",) crawl_behaviour = SegmentSeekerCrawler({"whitespace"}) is_fix_compatible = True def _eval(self, context): """Stars make newlines.""" if context.segment.is_type("whitespace"): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment, [WhitespaceSegment(context.segment.raw + " ")] ) ], ) class Rule_T002(BaseRule): """A rule which says all raw code segments are bad. This is used for testing unparsable code. """ groups = ("all",) # Root only crawler so that the in-rule filters don't kick in. crawl_behaviour = RootOnlyCrawler() def _eval(self, context): """Stars make newlines.""" violations = [] for seg in context.segment.raw_segments: if seg.is_code: violations.append(LintResult(anchor=seg, description="TESTING")) return violations class Rule_T003(BaseRule): """Another deliberately malicious rule. **Anti-pattern** Blah blah """ groups = ("all",) crawl_behaviour = SegmentSeekerCrawler({"numeric_literal"}) is_fix_compatible = True def _eval(self, context): """Triple any numeric literals.""" return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment, [ context.segment, WhitespaceSegment(context.segment.raw + " "), context.segment, WhitespaceSegment(context.segment.raw + " "), context.segment, ], ) ], ) def test__rules__user_rules(): """Test that can safely add user rules.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T042], dialect="ansi") # Make sure the new one is in there. assert RuleTuple("T042", "", "A dummy rule.", ("all",), ()) in linter.rule_tuples() # Instantiate a second linter and check it's NOT in there. # This tests that copying and isolation works. 
linter = Linter(dialect="ansi") assert not any(rule[0] == "T042" for rule in linter.rule_tuples()) @pytest.mark.parametrize( "rules, exclude_rules, resulting_codes", [ # NB: We don't check the "select nothing" case, because not setting # the rules setting just means "select everything". # ("", "", set()), # 1: Select by code. # NOTE: T012 uses T011 as it's name but that should be ignored # because of the conflict. ("T010", "", {"T010"}), ("T010,T011", "", {"T010", "T011"}), ("T010,T011", "T011", {"T010"}), # 2: Select by name # NOTE: T012 uses "fake_other" as it's group but that should be ignored # because of the conflict. ("fake_basic", "", {"T010"}), ("fake_other", "", {"T011"}), ("fake_basic,fake_other", "", {"T010", "T011"}), # 3: Select by group # NOTE: T010 uses "foo" as it's alias but that should be ignored # because of the conflict. ("test", "", {"T010", "T011"}), ("foo", "", {"T011", "T012"}), ("test,foo", "", {"T010", "T011", "T012"}), ("test", "foo", {"T010"}), # 3: Select by alias ("fb1", "", {"T010"}), ("fb2", "", {"T011"}), ], ) def test__rules__rule_selection(rules, exclude_rules, resulting_codes): """Test that rule selection works by various means.""" class Rule_T010(BaseRule): """Fake Basic Rule.""" groups = ("all", "test") name = "fake_basic" aliases = ("fb1", "foo") # NB: Foo is a group on another rule. crawl_behaviour = RootOnlyCrawler() def _eval(self, **kwargs): pass class Rule_T011(Rule_T010): """Fake Basic Rule. NOTE: We inherit crawl behaviour and _eval from above. """ groups = ("all", "test", "foo") name = "fake_other" aliases = ("fb2",) class Rule_T012(Rule_T010): """Fake Basic Rule. NOTE: We inherit crawl behaviour and _eval from above. """ # NB: "fake_other" is the name of another rule. groups = ("all", "foo", "fake_other") # No aliases, Name collides with the alias of another rule. name = "fake_again" aliases = () cfg = FluffConfig( overrides={"rules": rules, "exclude_rules": exclude_rules, "dialect": "ansi"} ) linter = Linter(config=cfg, user_rules=[Rule_T010, Rule_T011, Rule_T012]) # Get the set of selected codes: selected_codes = set(tpl[0] for tpl in linter.rule_tuples()) # Check selected rules assert selected_codes == resulting_codes def test__rules__filter_unparsable(): """Test that rules that handle their own crawling respect unparsable.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T002], dialect="ansi", rules=["T002"]) # Lint a simple parsable file and check we do get issues # It's parsable, so we should get issues. res = linter.lint_string("SELECT 1") assert any(v.rule_code() == "T002" for v in res.violations) # Lint an unparsable file. Check we don't get any violations. # It's not parsable so we shouldn't get issues. res = linter.lint_string("asd asdf sdfg") assert not any(v.rule_code() == "T002" for v in res.violations) def test__rules__result_unparsable(): """Test that the linter won't allow rules which make the file unparsable.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T003], dialect="ansi", rules=["T003"]) # Lint a simple parsable file and check we do get issues # It's parsable, so we should get issues. raw_sql = "SELECT 1 FROM a" with fluff_log_catcher(logging.WARNING, "sqlfluff") as caplog: res = linter.lint_string(raw_sql, fix=True) # Check we got the warning. assert "would result in an unparsable file" in caplog.text # Check we get the violation. 
assert any(v.rule_code() == "T003" for v in res.violations) # The resulting file should be _the same_ because it would have resulted # in an unparsable file if applied. assert res.tree.raw == raw_sql @pytest.mark.parametrize( "sql_query, check_tuples", [ ( "SELECT * FROM foo", # Even though there's a runaway fix, we should still # find each issue once and not duplicates of them. [ ("T001", 1, 7), ("T001", 1, 9), ("T001", 1, 14), ], ), # If the errors are disabled, they shouldn't come through. ("-- noqa: disable=all\nSELECT * FROM foo", []), ], ) def test__rules__runaway_fail_catch(sql_query, check_tuples): """Test that we catch runaway rules.""" runaway_limit = 5 # Set up the config to only use the rule we are testing. cfg = FluffConfig( overrides={"rules": "T001", "runaway_limit": runaway_limit, "dialect": "ansi"} ) # Lint it using the current config (while in fix mode) linter = Linter(config=cfg, user_rules=[Rule_T001]) # In theory this step should result in an infinite # loop, but the loop limit should catch it. result = linter.lint_string(sql_query, fix=True) # When the linter hits the runaway limit, it returns the original SQL tree. assert result.tree.raw == sql_query # Check the issues found. assert result.check_tuples() == check_tuples def test_rules_cannot_be_instantiated_without_declared_configs(): """Ensure that new rules must be instantiated with config values.""" class Rule_NewRule_ZZ99(BaseRule): """Testing Rule.""" config_keywords = ["case_sensitive"] new_rule = Rule_NewRule_ZZ99(code="L000", description="", case_sensitive=False) assert new_rule.case_sensitive is False # Error is thrown since "case_sensitive" is defined in class, # but not upon instantiation with pytest.raises(ValueError): new_rule = Rule_NewRule_ZZ99(code="L000", description="") def test_rules_legacy_doc_decorators(caplog): """Ensure that the deprecated decorators can still be imported but do nothing.""" with fluff_log_catcher(logging.WARNING, "sqlfluff") as caplog: @document_fix_compatible @document_groups @document_configuration class Rule_NewRule_ZZ99(BaseRule): """Untouched Text.""" pass # Check they didn't do anything to the docstring. assert Rule_NewRule_ZZ99.__doc__ == """Untouched Text.""" # Check there are warnings. 
print("Records:") for record in caplog.records: print(record) assert "uses the @document_fix_compatible decorator" in caplog.text assert "uses the @document_groups decorator" in caplog.text assert "uses the @document_configuration decorator" in caplog.text def test_rules_configs_are_dynamically_documented(): """Ensure that rule configurations are added to the class docstring.""" class RuleWithConfig_ZZ99(BaseRule): """A new rule with configuration.""" config_keywords = ["unquoted_identifiers_policy"] print(f"RuleWithConfig_ZZ99.__doc__: {RuleWithConfig_ZZ99.__doc__!r}") assert "unquoted_identifiers_policy" in RuleWithConfig_ZZ99.__doc__ class RuleWithoutConfig_ZZ99(BaseRule): """A new rule without configuration.""" pass print(f"RuleWithoutConfig_ZZ99.__doc__: {RuleWithoutConfig_ZZ99.__doc__!r}") assert "Configuration" not in RuleWithoutConfig_ZZ99.__doc__ def test_rules_name_validation(): """Ensure that rule names are validated.""" with pytest.raises(SQLFluffUserError) as exc_info: class RuleWithoutBadName_ZZ99(BaseRule): """A new rule without configuration.""" name = "MY-KEBAB-CASE-NAME" assert "Tried to define rule with unexpected name" in exc_info.value.args[0] assert "MY-KEBAB-CASE-NAME" in exc_info.value.args[0] def test_rule_exception_is_caught_to_validation(): """Assert that a rule that throws an exception returns it as a validation.""" std_rule_set = get_ruleset() @std_rule_set.register class Rule_T000(BaseRule): """Rule that throws an exception.""" groups = ("all",) crawl_behaviour = RootOnlyCrawler() def _eval(self, segment, parent_stack, **kwargs): raise Exception("Catch me or I'll deny any linting results from you") linter = Linter( config=FluffConfig(overrides=dict(rules="T000", dialect="ansi")), user_rules=[Rule_T000], ) assert linter.lint_string("select 1").check_tuples() == [("T000", 1, 1)] def test_rule_must_belong_to_all_group(): """Assert correct 'groups' config for rule.""" std_rule_set = get_ruleset() with pytest.raises(AssertionError): @std_rule_set.register class Rule_T000(BaseRule): """Badly configured rule, no groups attribute.""" def _eval(self, **kwargs): pass with pytest.raises(AssertionError): @std_rule_set.register class Rule_T001(BaseRule): """Badly configured rule, no 'all' group.""" groups = () def _eval(self, **kwargs): pass def test_std_rule_import_fail_bad_naming(): """Check that rule import from file works.""" assert get_rules_from_path( rules_path="test/fixtures/rules/custom/*.py", base_module="test.fixtures.rules.custom", ) == [Rule_L000, Rule_S000] with pytest.raises(AttributeError) as e: get_rules_from_path( rules_path="test/fixtures/rules/custom/bad_rule_name/*.py", base_module="test.fixtures.rules.custom.bad_rule_name", ) e.match("Rule classes must be named in the format of") def test_rule_set_return_informative_error_when_rule_not_registered(): """Assert that a rule that throws an exception returns it as a validation.""" cfg = FluffConfig(overrides={"dialect": "ansi"}) with pytest.raises(ValueError) as e: get_rule_from_set("L000", config=cfg) e.match("'L000' not in") seg = WhitespaceSegment( pos_marker=PositionMarker( slice(0, 1), slice(0, 1), TemplatedFile(" ", fname="") ) ) @pytest.mark.parametrize( "lint_result, expected", [ (LintResult(), "LintResult()"), (LintResult(seg), "LintResult()"), ( LintResult(seg, description="foo"), "LintResult(foo: )", ), ( LintResult( seg, description="foo", fixes=[ LintFix("create_before", seg, edit=[seg]), LintFix("create_after", seg, edit=[seg]), ], ), "LintResult(foo: +2F)", ), ], ) def 
test_rules__lint_result_repr(lint_result, expected): """Test that repr(LintResult) works as expected.""" assert repr(lint_result) == expected sqlfluff-3.4.2/test/core/templaters/000077500000000000000000000000001503426445100174355ustar00rootroot00000000000000sqlfluff-3.4.2/test/core/templaters/__init__.py000066400000000000000000000000271503426445100215450ustar00rootroot00000000000000"""Templater Tests.""" sqlfluff-3.4.2/test/core/templaters/base_test.py000066400000000000000000000301621503426445100217620ustar00rootroot00000000000000"""Tests for templaters.""" import pytest from sqlfluff.core.templaters import ( RawTemplater, TemplatedFile, ) from sqlfluff.core.templaters.base import ( RawFileSlice, TemplatedFileSlice, iter_indices_of_newlines, ) @pytest.mark.parametrize( "raw_str,positions", [ ("", []), ("foo", []), ("foo\nbar", [3]), ("\nfoo\n\nbar\nfoo\n\nbar\n", [0, 4, 5, 9, 13, 14, 18]), ], ) def test__indices_of_newlines(raw_str, positions): """Test iter_indices_of_newlines.""" assert list(iter_indices_of_newlines(raw_str)) == positions def test__templater_raw(): """Test the raw templater.""" t = RawTemplater() instr = "SELECT * FROM {{blah}}" outstr, _ = t.process(in_str=instr, fname="test") assert instr == str(outstr) SIMPLE_FILE_KWARGS = { "fname": "test.sql", "source_str": "01234\n6789{{foo}}fo\nbarss", "templated_str": "01234\n6789x\nfo\nbarss", "sliced_file": [ TemplatedFileSlice(*args) for args in [ ("literal", slice(0, 10, None), slice(0, 10, None)), ("templated", slice(10, 17, None), slice(10, 12, None)), ("literal", slice(17, 25, None), slice(12, 20, None)), ] ], "raw_sliced": [ RawFileSlice(*args) for args in [ ("x" * 10, "literal", 0), ("x" * 7, "templated", 10), ("x" * 8, "literal", 17), ] ], } COMPLEX_FILE_KWARGS = { "fname": "test.sql", "sliced_file": [ TemplatedFileSlice(*args) for args in [ ("literal", slice(0, 13, None), slice(0, 13, None)), ("comment", slice(13, 29, None), slice(13, 13, None)), ("literal", slice(29, 44, None), slice(13, 28, None)), ("block_start", slice(44, 68, None), slice(28, 28, None)), ("literal", slice(68, 81, None), slice(28, 41, None)), ("templated", slice(81, 86, None), slice(41, 42, None)), ("literal", slice(86, 110, None), slice(42, 66, None)), ("templated", slice(68, 86, None), slice(66, 76, None)), ("literal", slice(68, 81, None), slice(76, 89, None)), ("templated", slice(81, 86, None), slice(89, 90, None)), ("literal", slice(86, 110, None), slice(90, 114, None)), # ("templated", slice(68, 86, None), slice(114, 125, None)), ("literal", slice(68, 81, None), slice(125, 138, None)), # ("templated", slice(81, 86, None), slice(138, 139, None)), ("literal", slice(86, 110, None), slice(139, 163, None)), ("templated", slice(110, 123, None), slice(163, 166, None)), ("literal", slice(123, 132, None), slice(166, 175, None)), ("block_end", slice(132, 144, None), slice(175, 175, None)), ("literal", slice(144, 155, None), slice(175, 186, None)), ("block_start", slice(155, 179, None), slice(186, 186, None)), ("literal", slice(179, 189, None), slice(186, 196, None)), ("templated", slice(189, 194, None), slice(196, 197, None)), ("literal", slice(194, 203, None), slice(197, 206, None)), ("literal", slice(179, 189, None), slice(206, 216, None)), ("templated", slice(189, 194, None), slice(216, 217, None)), ("literal", slice(194, 203, None), slice(217, 226, None)), ("literal", slice(179, 189, None), slice(226, 236, None)), ("templated", slice(189, 194, None), slice(236, 237, None)), ("literal", slice(194, 203, None), slice(237, 246, None)), ("block_end", 
slice(203, 215, None), slice(246, 246, None)), ("literal", slice(215, 230, None), slice(246, 261, None)), ] ], "raw_sliced": [ RawFileSlice(*args) for args in [ # All contain dummy strings for now. ("x" * 13, "literal", 0), ("x" * 16, "comment", 13), ("x" * 15, "literal", 29), ("x" * 24, "block_start", 44), ("x" * 13, "literal", 68), ("x" * 5, "templated", 81), ("x" * 24, "literal", 86), ("x" * 13, "templated", 110), ("x" * 9, "literal", 123), ("x" * 12, "block_end", 132), ("x" * 11, "literal", 144), ("x" * 24, "block_start", 155), ("x" * 10, "literal", 179), ("x" * 5, "templated", 189), ("x" * 9, "literal", 194), ("x" * 12, "block_end", 203), ("x" * 15, "literal", 215), ] ], } COMPLEX_FILE_KWARGS["source_str"] = "".join( s.raw for s in COMPLEX_FILE_KWARGS["raw_sliced"] ) @pytest.mark.parametrize( "tf_kwargs,in_charpos,out_line_no,out_line_pos", [ # Simple examples ( SIMPLE_FILE_KWARGS, 0, 1, 1, ), ( SIMPLE_FILE_KWARGS, 20, 3, 1, ), ( SIMPLE_FILE_KWARGS, 24, 3, 5, ), ], ) def test__templated_file_get_line_pos_of_char_pos( tf_kwargs, in_charpos, out_line_no, out_line_pos, ): """Test TemplatedFile.get_line_pos_of_char_pos.""" file = TemplatedFile(**tf_kwargs) res_line_no, res_line_pos = file.get_line_pos_of_char_pos(in_charpos) assert res_line_no == out_line_no assert res_line_pos == out_line_pos @pytest.mark.parametrize( "templated_position,inclusive,tf_kwargs,sliced_idx_start,sliced_idx_stop", [ (100, True, COMPLEX_FILE_KWARGS, 10, 11), (13, True, COMPLEX_FILE_KWARGS, 0, 3), (28, True, COMPLEX_FILE_KWARGS, 2, 5), # Check end slicing. (12, True, SIMPLE_FILE_KWARGS, 1, 3), (20, True, SIMPLE_FILE_KWARGS, 2, 3), # Check inclusivity (13, False, COMPLEX_FILE_KWARGS, 0, 1), ], ) def test__templated_file_find_slice_indices_of_templated_pos( templated_position, inclusive, tf_kwargs, sliced_idx_start, sliced_idx_stop, ): """Test TemplatedFile._find_slice_indices_of_templated_pos.""" file = TemplatedFile(**tf_kwargs) res_start, res_stop = file._find_slice_indices_of_templated_pos( templated_position, inclusive=inclusive ) assert res_start == sliced_idx_start assert res_stop == sliced_idx_stop @pytest.mark.parametrize( "in_slice,out_slice,is_literal,tf_kwargs", [ # Simple example ( slice(5, 10), slice(5, 10), True, { "sliced_file": [ TemplatedFileSlice( "literal", slice(0, 20, None), slice(0, 20, None) ) ], "raw_sliced": [RawFileSlice("x" * 20, "literal", 0)], "source_str": "x" * 20, "fname": "foo.sql", }, ), # Trimming the end of a literal (with things that follow). ( slice(10, 13), slice(10, 13), True, COMPLEX_FILE_KWARGS, ), # Unrealistic, but should still work ( slice(5, 10), slice(55, 60), True, { "sliced_file": [ TemplatedFileSlice( "literal", slice(50, 70, None), slice(0, 20, None) ) ], "raw_sliced": [ RawFileSlice("x" * 50, "literal", 0), RawFileSlice("x" * 20, "literal", 50), ], "source_str": "x" * 70, "fname": "foo.sql", }, ), # Spanning a template ( slice(5, 15), slice(5, 20), False, SIMPLE_FILE_KWARGS, ), # Handling templated ( slice(5, 15), slice(0, 25), False, # NB: Same as SIMPLE_SLICED_FILE, but with different slice types. 
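            # (i.e. SIMPLE_FILE_KWARGS above, with every slice re-marked as
            # "templated" rather than "literal".)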
{ **SIMPLE_FILE_KWARGS, "sliced_file": [ TemplatedFileSlice( "templated", slc.source_slice, slc.templated_slice ) for slc in SIMPLE_FILE_KWARGS["sliced_file"] ], "raw_sliced": [ RawFileSlice(slc.raw, "templated", slc.source_idx) for slc in SIMPLE_FILE_KWARGS["raw_sliced"] ], }, ), # Handling single length slices ( slice(10, 10), slice(10, 10), True, SIMPLE_FILE_KWARGS, ), ( slice(12, 12), slice(17, 17), True, SIMPLE_FILE_KWARGS, ), # Dealing with single length elements ( slice(20, 20), slice(25, 25), True, { "sliced_file": SIMPLE_FILE_KWARGS["sliced_file"] + [ TemplatedFileSlice( "comment", slice(25, 35, None), slice(20, 20, None) ) ], "raw_sliced": SIMPLE_FILE_KWARGS["raw_sliced"] + [RawFileSlice("x" * 10, "comment", 25)], "source_str": SIMPLE_FILE_KWARGS["source_str"] + "x" * 10, "fname": "foo.sql", }, ), # Just more test coverage ( slice(43, 43), slice(87, 87), True, COMPLEX_FILE_KWARGS, ), ( slice(13, 13), slice(13, 13), True, COMPLEX_FILE_KWARGS, ), ( slice(186, 186), slice(155, 155), True, COMPLEX_FILE_KWARGS, ), # Backward slicing. ( slice(100, 130), # NB This actually would reference the wrong way around if we # just take the points. Here we should handle it gracefully. slice(68, 110), False, COMPLEX_FILE_KWARGS, ), ], ) def test__templated_file_templated_slice_to_source_slice( in_slice, out_slice, is_literal, tf_kwargs ): """Test TemplatedFile.templated_slice_to_source_slice.""" file = TemplatedFile(**tf_kwargs) source_slice = file.templated_slice_to_source_slice(in_slice) literal_test = file.is_source_slice_literal(source_slice) assert (is_literal, source_slice) == (literal_test, out_slice) @pytest.mark.parametrize( "file,expected_result", [ # Comment example ( TemplatedFile( source_str=("a" * 10) + "{# b #}" + ("a" * 10), fname="test", sliced_file=[ TemplatedFileSlice("literal", slice(0, 10), slice(0, 10)), TemplatedFileSlice("templated", slice(10, 17), slice(10, 10)), TemplatedFileSlice("literal", slice(17, 27), slice(10, 20)), ], raw_sliced=[ RawFileSlice("a" * 10, "literal", 0), RawFileSlice("{# b #}", "comment", 10), RawFileSlice("a" * 10, "literal", 17), ], ), [RawFileSlice("{# b #}", "comment", 10)], ), # Template tags aren't source only. 
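        # i.e. "{{ b }}" produces output in the templated file, so unlike the
        # comment case above it contributes no source-only slices.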
( TemplatedFile( source_str=r"aaa{{ b }}aaa", fname="test", sliced_file=[ TemplatedFileSlice("literal", slice(0, 3), slice(0, 3)), TemplatedFileSlice("templated", slice(3, 10), slice(3, 6)), TemplatedFileSlice("literal", slice(10, 13), slice(6, 9)), ], raw_sliced=[ RawFileSlice("aaa", "literal", 0), RawFileSlice("{{ b }}", "templated", 3), RawFileSlice("aaa", "literal", 10), ], ), [], ), ], ) def test__templated_file_source_only_slices(file, expected_result): """Test TemplatedFile.source_only_slices.""" assert file.source_only_slices() == expected_result sqlfluff-3.4.2/test/core/templaters/builtins_test.py000066400000000000000000000145631503426445100227100ustar00rootroot00000000000000"""Test the templating builtins.""" import pytest from sqlfluff.core.errors import SQLTemplaterError from sqlfluff.core.templaters.builtins.common import FunctionWrapper from sqlfluff.core.templaters.builtins.dbt import DBT_BUILTINS def test_function_emulator(): """Make sure the function wrapper works as expected.""" def func(x): return "foo" + x wrapped = FunctionWrapper("test_name", func) assert str(wrapped("bar")) == "foobar" with pytest.raises(SQLTemplaterError): str(wrapped) def test_relation_emulator_magic_methods(): """Test all the magic methods defined on RelationEmulator.""" # tests for 'this' t = DBT_BUILTINS["this"] assert str(t) == "this_model" assert t.something is t assert str(t.database) == "this_database" assert str(t.schema) == "this_schema" assert str(t.name) == "this_model" assert str(t.identifier) == "this_model" assert str(t.type) == "this_model" assert str(t.something_new) == "this_model" assert t.is_table is True assert t.is_view is True assert t.is_materialized_view is True assert t.is_cte is True assert t.is_dynamic_table is True assert t.is_iceberg_format is True assert t.is_something_new is True assert t.something() is t assert t.something().something() is t assert t.something().something is t assert str(t.include()) == "this_model" assert str(t.include(database=False)) == "this_model" assert str(t.some_new_method()) == "this_model" assert str(t.something().something) == "this_model" # tests for 'ref' r = DBT_BUILTINS["ref"]("ref_model") assert str(r) == "ref_model" assert r.something is r assert str(r.database) == "this_database" assert str(r.schema) == "this_schema" assert str(r.name) == "ref_model" assert str(r.identifier) == "ref_model" assert str(r.type) == "ref_model" assert str(r.something_new) == "ref_model" assert r.is_table is True assert r.is_view is True assert r.is_materialized_view is True assert r.is_cte is True assert r.is_dynamic_table is True assert r.is_iceberg_format is True assert r.is_something_new is True assert r.something() is r assert r.something().something() is r assert r.something().something is r assert str(r.include()) == "ref_model" assert str(r.include(database=False)) == "ref_model" assert str(r.some_new_method()) == "ref_model" assert str(r.something().something) == "ref_model" # tests for versioned 'ref' r = DBT_BUILTINS["ref"]("ref_model", version=2) assert str(r) == "ref_model" assert r.something is r assert str(r.database) == "this_database" assert str(r.schema) == "this_schema" assert str(r.name) == "ref_model" assert str(r.identifier) == "ref_model" assert str(r.type) == "ref_model" assert str(r.something_new) == "ref_model" assert r.is_table is True assert r.is_view is True assert r.is_materialized_view is True assert r.is_cte is True assert r.is_dynamic_table is True assert r.is_iceberg_format is True assert r.is_something_new is True assert 
r.something() is r assert r.something().something() is r assert r.something().something is r assert str(r.include()) == "ref_model" assert str(r.include(database=False)) == "ref_model" assert str(r.some_new_method()) == "ref_model" assert str(r.something().something) == "ref_model" # tests for 'ref' from project/package r = DBT_BUILTINS["ref"]("package", "ref_model") assert str(r) == "ref_model" assert r.something is r assert str(r.database) == "this_database" assert str(r.schema) == "this_schema" assert str(r.name) == "ref_model" assert str(r.identifier) == "ref_model" assert str(r.type) == "ref_model" assert str(r.something_new) == "ref_model" assert r.is_table is True assert r.is_view is True assert r.is_materialized_view is True assert r.is_cte is True assert r.is_dynamic_table is True assert r.is_iceberg_format is True assert r.is_something_new is True assert r.something() is r assert r.something().something() is r assert r.something().something is r assert str(r.include()) == "ref_model" assert str(r.include(database=False)) == "ref_model" assert str(r.some_new_method()) == "ref_model" assert str(r.something().something) == "ref_model" # tests for versioned 'ref' from project/package r = DBT_BUILTINS["ref"]("package", "ref_model", version=2) assert str(r) == "ref_model" assert r.something is r assert str(r.database) == "this_database" assert str(r.schema) == "this_schema" assert str(r.name) == "ref_model" assert str(r.identifier) == "ref_model" assert str(r.type) == "ref_model" assert str(r.something_new) == "ref_model" assert r.is_table is True assert r.is_view is True assert r.is_materialized_view is True assert r.is_cte is True assert r.is_dynamic_table is True assert r.is_iceberg_format is True assert r.is_something_new is True assert r.something() is r assert r.something().something() is r assert r.something().something is r assert str(r.include()) == "ref_model" assert str(r.include(database=False)) == "ref_model" assert str(r.some_new_method()) == "ref_model" assert str(r.something().something) == "ref_model" # tests for 'source' s = DBT_BUILTINS["source"]("sourcename", "tablename") assert str(s) == "sourcename_tablename" assert s.something is s assert str(s.database) == "this_database" assert str(s.schema) == "this_schema" assert str(s.name) == "sourcename_tablename" assert str(s.identifier) == "sourcename_tablename" assert str(s.type) == "sourcename_tablename" assert str(s.something_new) == "sourcename_tablename" assert s.is_table is True assert s.is_view is True assert s.is_materialized_view is True assert s.is_cte is True assert s.is_dynamic_table is True assert s.is_iceberg_format is True assert s.is_something_new is True assert s.something() is s assert s.something().something() is s assert s.something().something is s assert str(s.include()) == "sourcename_tablename" assert str(s.include(database=False)) == "sourcename_tablename" assert str(s.some_new_method()) == "sourcename_tablename" assert str(s.something().something) == "sourcename_tablename" sqlfluff-3.4.2/test/core/templaters/jinja_test.py000066400000000000000000002142751503426445100221540ustar00rootroot00000000000000"""Tests for the jinja templater. These tests also test much of the core lexer, especially the treatment of templated sections which only really make sense to test in the context of a templater which supports loops and placeholders. 
""" import logging from collections import defaultdict from pathlib import Path from typing import NamedTuple, Union import pytest from jinja2 import Environment, nodes from jinja2.exceptions import UndefinedError from jinja2.ext import Extension from jinja2.nodes import Node from jinja2.parser import Parser from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLFluffSkipFile, SQLFluffUserError, SQLTemplaterError from sqlfluff.core.parser import BaseSegment from sqlfluff.core.templaters import JinjaTemplater from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFile from sqlfluff.core.templaters.jinja import DummyUndefined from sqlfluff.core.templaters.slicers.tracer import JinjaAnalyzer, JinjaTagConfiguration JINJA_STRING = ( "SELECT * FROM {% for c in blah %}{{c}}{% if not loop.last %}, " "{% endif %}{% endfor %} WHERE {{condition}}\n\n" ) JINJA_MACRO_CALL_SQL = ( "{% macro render_name(title) %}\n" " '{{ title }}. foo' as {{ caller() }}\n" "{% endmacro %}\n" "SELECT\n" " {% call render_name('Sir') %}\n" " bar\n" " {% endcall %}\n" "FROM baz\n" ) def get_parsed(path: str) -> BaseSegment: """Testing helper to parse paths.""" linter = Linter() # Get the first file matching the path string first_path = next(linter.parse_path(path)) # Delegate parse assertions to the `.tree` property return first_path.tree @pytest.mark.parametrize( "instr, expected_outstr", [ ( JINJA_STRING, "SELECT * FROM f, o, o WHERE a < 10\n\n", ), # Test for issue #968. This was previously raising an UnboundLocalError. ( """ {% set event_columns = ['campaign', 'click_item'] %} SELECT event_id {% for event_column in event_columns %} , {{ event_column }} {% endfor %} FROM events """, ( "\n\n\nSELECT\n event_id\n \n , campaign\n \n , " "click_item\n \nFROM events\n " ), ), ], ids=["simple", "unboundlocal_bugfix"], ) def test__templater_jinja(instr, expected_outstr): """Test jinja templating and the treatment of whitespace.""" t = JinjaTemplater(override_context=dict(blah="foo", condition="a < 10")) outstr, _ = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == expected_outstr class RawTemplatedTestCase(NamedTuple): """Instances of this object are test cases for test__templater_jinja_slices.""" name: str instr: str templated_str: str # These fields are used to check TemplatedFile.sliced_file. expected_templated_sliced__source_list: list[str] expected_templated_sliced__templated_list: list[str] # This field is used to check TemplatedFile.raw_sliced. 
expected_raw_sliced__source_list: list[str] @pytest.mark.parametrize( "case", [ RawTemplatedTestCase( name="basic_block", instr="\n\n{% set x = 42 %}\nSELECT 1, 2\n", templated_str="\n\n\nSELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{% set x = 42 %}", "\nSELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "\n\n", "", "\nSELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{% set x = 42 %}", "\nSELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_left_block", instr="\n\n{%- set x = 42 %}\nSELECT 1, 2\n", templated_str="\nSELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{%- set x = 42 %}", "\nSELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "", "", "\nSELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{%- set x = 42 %}", "\nSELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_both_block", instr="\n\n{%- set x = 42 -%}\nSELECT 1, 2\n", templated_str="SELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{%- set x = 42 -%}", "\n", "SELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "", "", "", "SELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{%- set x = 42 -%}", "\n", "SELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_and_templated_whitespace", instr="SELECT {{- ' ' -}} 1{{ ' , 2' -}}\n", templated_str="SELECT 1 , 2", expected_templated_sliced__source_list=[ "SELECT", " ", "{{- ' ' -}}", " ", "1", "{{ ' , 2' -}}", "\n", ], expected_templated_sliced__templated_list=[ "SELECT", "", # Placeholder for consumed whitespace " ", # Placeholder for templated whitespace "", # Placeholder for consumed whitespace "1", " , 2", "", # Placeholder for consumed newline ], expected_raw_sliced__source_list=[ "SELECT", " ", "{{- ' ' -}}", " ", "1", "{{ ' , 2' -}}", "\n", ], ), RawTemplatedTestCase( name="strip_both_block_hard", instr="SELECT {%- set x = 42 %} 1 {%- if true -%} , 2{% endif -%}\n", templated_str="SELECT 1, 2", expected_templated_sliced__source_list=[ "SELECT", # NB: Even though the jinja tag consumes whitespace, we still # get it here as a placeholder. " ", "{%- set x = 42 %}", " 1", # This whitespace is a separate from the 1 because it's consumed. " ", "{%- if true -%}", " ", ", 2", "{% endif -%}", "\n", ], expected_templated_sliced__templated_list=[ "SELECT", "", # Consumed whitespace placeholder "", # Jinja block placeholder " 1", "", # Consumed whitespace "", # Jinja block placeholder "", # More consumed whitespace ", 2", "", # Jinja block "", # Consumed final newline. ], expected_raw_sliced__source_list=[ "SELECT", " ", "{%- set x = 42 %}", " 1", " ", "{%- if true -%}", " ", ", 2", "{% endif -%}", "\n", ], ), RawTemplatedTestCase( name="basic_data", instr="""select c1, {{ 'c' }}2 as user_id """, templated_str="""select c1, c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,\n ", "{{ 'c' }}", "2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,\n ", "c", "2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,\n ", "{{ 'c' }}", "2 as user_id\n", ], ), # Note this is basically identical to the "basic_data" case above. # "Right strip" is not actually a thing in Jinja. 
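        # (A trailing "-" in "-}}" consumes the whitespace *after* the tag,
        # which is why the "\n    " literal below templates to "".)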
RawTemplatedTestCase( name="strip_right_data", instr="""SELECT {{ 'col1,' -}} col2 """, templated_str="""SELECT col1,col2 """, expected_templated_sliced__source_list=[ "SELECT\n ", "{{ 'col1,' -}}", "\n ", "col2\n", ], expected_templated_sliced__templated_list=[ "SELECT\n ", "col1,", "", "col2\n", ], expected_raw_sliced__source_list=[ "SELECT\n ", "{{ 'col1,' -}}", "\n ", "col2\n", ], ), RawTemplatedTestCase( name="strip_both_data", instr="""select c1, {{- 'c' -}} 2 as user_id """, templated_str="""select c1,c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,", "\n ", "{{- 'c' -}}", "\n", "2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,", "", "c", "", "2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,", "\n ", "{{- 'c' -}}", "\n", "2 as user_id\n", ], ), RawTemplatedTestCase( name="strip_both_comment", instr="""select c1, {#- Column 2 -#} c2 as user_id """, templated_str="""select c1,c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,", "\n ", "{#- Column 2 -#}", " ", "c2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,", "", "", "", "c2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,", "\n ", "{#- Column 2 -#}", " ", "c2 as user_id\n", ], ), RawTemplatedTestCase( name="union_all_loop1", instr="""{% set products = [ 'table1', 'table2', ] %} {% for product in products %} SELECT brand FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} """, templated_str=( "\n\n\nSELECT\n brand\nFROM\n table1\nUNION ALL\n\nSELECT\n " "brand\nFROM\n table2\n\n\n" ), expected_templated_sliced__source_list=[ "{% set products = [\n 'table1',\n 'table2',\n ] %}", "\n\n", "{% for product in products %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", " ", "UNION ALL", " ", "{%- endif %}", "\n", "{% endfor %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", "{%- endif %}", "\n", "{% endfor %}", "\n", ], expected_templated_sliced__templated_list=[ "", "\n\n", "", "\nSELECT\n brand\nFROM\n ", "table1", "\n", "", "", "UNION ALL", "", "", "\n", "", "\nSELECT\n brand\nFROM\n ", "table2", "\n", "", "", "\n", "", "\n", ], expected_raw_sliced__source_list=[ "{% set products = [\n 'table1',\n 'table2',\n ] %}", "\n\n", "{% for product in products %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", " ", "UNION ALL", " ", "{%- endif %}", "\n", "{% endfor %}", "\n", ], ), RawTemplatedTestCase( "set_multiple_variables_and_define_macro", """{% macro echo(text) %} {{text}} {% endmacro %} {% set a, b = 1, 2 %} SELECT {{ echo(a) }}, {{ echo(b) }}""", "\n\n\n\nSELECT\n \n1\n,\n \n2\n", [ "{% macro echo(text) %}", "\n", "{{text}}", "\n", "{% endmacro %}", "\n\n", "{% set a, b = 1, 2 %}", "\n\nSELECT\n ", "{{ echo(a) }}", ",\n ", "{{ echo(b) }}", ], [ "", "", "", "", "", "\n\n", "", "\n\nSELECT\n ", "\n1\n", ",\n ", "\n2\n", ], [ "{% macro echo(text) %}", "\n", "{{text}}", "\n", "{% endmacro %}", "\n\n", "{% set a, b = 1, 2 %}", "\n\nSELECT\n ", "{{ echo(a) }}", ",\n ", "{{ echo(b) }}", ], ), ], ids=lambda case: case.name, ) def test__templater_jinja_slices(case: RawTemplatedTestCase): """Test that Jinja templater slices raw and templated file correctly.""" t = JinjaTemplater() templated_file, _ = t.process( in_str=case.instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) assert templated_file is not None assert templated_file.source_str == case.instr 
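    # Purely a debugging aid (not load-bearing for the assertions below):
    # log each slice alongside the source and rendered text it covers, so
    # that a failing case prints something readable with pytest's live
    # logging enabled.
    for ts in templated_file.sliced_file:
        logging.debug(
            "%s: %r -> %r",
            ts.slice_type,
            case.instr[ts.source_slice],
            templated_file.templated_str[ts.templated_slice],
        )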
    assert templated_file.templated_str == case.templated_str

    # Build and check the list of source strings referenced by "sliced_file".
    actual_ts_source_list = [
        case.instr[ts.source_slice] for ts in templated_file.sliced_file
    ]
    assert actual_ts_source_list == case.expected_templated_sliced__source_list

    # Build and check the list of templated strings referenced by "sliced_file".
    actual_ts_templated_list = [
        templated_file.templated_str[ts.templated_slice]
        for ts in templated_file.sliced_file
    ]
    assert actual_ts_templated_list == case.expected_templated_sliced__templated_list

    # Build and check the list of source strings referenced by "raw_sliced".
    # NOTE: These are source *strings*, hence list[str].
    previous_rs = None
    actual_rs_source_list: list[str] = []
    for rs in templated_file.raw_sliced + [None]:  # type: ignore
        if previous_rs:
            if rs:
                actual_source = case.instr[previous_rs.source_idx : rs.source_idx]
            else:
                actual_source = case.instr[previous_rs.source_idx :]
            actual_rs_source_list.append(actual_source)
        previous_rs = rs
    assert actual_rs_source_list == case.expected_raw_sliced__source_list


def test_templater_set_block_handling():
    """Test handling of literals in {% set %} blocks.

    Specifically, verify they are not modified in the alternate template.
    """

    def run_query(sql):
        # Prior to the bug fix, this assertion failed. This was bad because,
        # inside JinjaTracer, dbt templates similar to the one in this test
        # would call the database with funky SQL (including weird strings it
        # uses internally like: 00000000000000000000000000000002).
        assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n"
        return sql

    t = JinjaTemplater(override_context=dict(run_query=run_query))
    instr = """{% set my_query1 %}
select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
{% endset %}
{% set my_query2 %}
{{ my_query1 }}
{% endset %}

{{ run_query(my_query2) }}
"""
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n"
    assert len(vs) == 0


def test__templater_jinja_error_variable():
    """Test missing variable error handling in the jinja templater."""
    t = JinjaTemplater(override_context=dict(blah="foo"))
    instr = JINJA_STRING
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "SELECT * FROM f, o, o WHERE \n\n"
    # Check we have violations.
    assert len(vs) > 0
    # Check one of them is a templating error on line 1
    assert any(v.rule_code() == "TMP" and v.line_no == 1 for v in vs)


def test__templater_jinja_dynamic_variable_no_violations():
    """Test no templater violation for variable defined within template."""
    t = JinjaTemplater(override_context=dict(blah="foo"))
    instr = """{% if True %}
    {% set some_var %}1{% endset %}
    SELECT {{some_var}}
{% endif %}
"""
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "\n    \n    SELECT 1\n\n"
    # Check we have no violations.
assert len(vs) == 0 def test__templater_jinja_error_syntax(): """Test syntax problems in the jinja templater.""" t = JinjaTemplater() instr = "SELECT {{foo} FROM jinja_error\n" with pytest.raises(SQLTemplaterError) as excinfo: t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) templater_exception = excinfo.value assert templater_exception.rule_code() == "TMP" assert templater_exception.line_no == 1 assert "Failed to parse Jinja syntax" in str(templater_exception) def test__templater_jinja_error_catastrophic(): """Test error handling in the jinja templater.""" t = JinjaTemplater(override_context=dict(blah=7)) instr = JINJA_STRING with pytest.raises(SQLTemplaterError) as excinfo: t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) templater_exception = excinfo.value assert templater_exception.rule_code() == "TMP" assert templater_exception.line_no == 1 assert "Unrecoverable failure in Jinja templating" in str(templater_exception) def test__templater_jinja_error_macro_path_does_not_exist(): """Tests that an error is raised if macro path doesn't exist.""" with pytest.raises(ValueError) as e: JinjaTemplater().construct_render_func( config=FluffConfig.from_path( "test/fixtures/templater/jinja_macro_path_does_not_exist" ) ) assert str(e.value).startswith("Path does not exist") def test__templater_jinja_error_macro_invalid(): """Tests that an error is raised if a macro is invalid.""" invalid_macro_config_string = ( "[sqlfluff]\n" "templater = jinja\n" "dialect = ansi\n" "[sqlfluff:templater:jinja:macros]\n" "a_macro_def = {% macro pkg.my_macro() %}pass{% endmacro %}\n" ) config = FluffConfig.from_string(invalid_macro_config_string) with pytest.raises(SQLFluffUserError) as e: JinjaTemplater().construct_render_func(config=config) error_string = str(e.value) assert error_string.startswith("Error loading user provided macro") assert "{% macro pkg.my_macro() %}pass{% endmacro %}" in error_string def test__templater_jinja_lint_empty(): """Check that parsing a file which renders to an empty string. No exception should be raised, and we should get a single templated element. 
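
    The rendered output here is empty, so the parsed tree should consist of
    just a placeholder segment and an end_of_file marker.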
""" lntr = Linter(dialect="ansi") parsed = lntr.parse_string(in_str='{{ "" }}') parsed_variant = parsed.parsed_variants[0] assert parsed_variant.templated_file.source_str == '{{ "" }}' assert parsed_variant.templated_file.templated_str == "" # Get the types of the segments print(f"Segments: {parsed_variant.tree.raw_segments}") seg_types = [seg.get_type() for seg in parsed_variant.tree.raw_segments] assert seg_types == ["placeholder", "end_of_file"] def assert_structure(yaml_loader, path, code_only=True, include_meta=False): """Check that a parsed sql file matches the yaml file with the same name.""" parsed = get_parsed(path + ".sql") # Whitespace is important here to test how that's treated tpl = parsed.to_tuple(code_only=code_only, show_raw=True, include_meta=include_meta) # Check nothing unparsable if "unparsable" in parsed.type_set(): print(parsed.stringify()) raise ValueError("Input file is unparsable.") _, expected = yaml_loader(path + ".yml") assert tpl == expected @pytest.mark.parametrize( "subpath,code_only,include_meta", [ # Config Scalar ("jinja_a/jinja", True, False), # Macros ("jinja_b/jinja", False, False), # dbt builtins ("jinja_c_dbt/dbt_builtins_cross_ref", True, False), ("jinja_c_dbt/dbt_builtins_config", True, False), ("jinja_c_dbt/dbt_builtins_is_incremental", True, False), ("jinja_c_dbt/dbt_builtins_ref", True, False), ("jinja_c_dbt/dbt_builtins_source", True, False), ("jinja_c_dbt/dbt_builtins_this", True, False), ("jinja_c_dbt/dbt_builtins_this_callable", True, False), ("jinja_c_dbt/dbt_builtins_var_default", True, False), ("jinja_c_dbt/dbt_builtins_test", True, False), ("jinja_c_dbt/dbt_builtins_zip", True, False), ("jinja_c_dbt/dbt_builtins_zip_strict", True, False), # do directive ("jinja_e/jinja", True, False), # case sensitivity and python literals ("jinja_f/jinja", True, False), # Macro loading from a folder ("jinja_g_macros/jinja", True, False), # Excluding macros ("jinja_exclude_macro_path/jinja", True, False), # Excluding macros with running from subdirectory ("jinja_exclude_macro_path/model_directory/jinja_sub_directory", True, False), # jinja raw tag ("jinja_h_macros/jinja", True, False), ("jinja_i_raw/raw_tag", True, False), ("jinja_i_raw/raw_tag_2", True, False), # Library Loading from a folder ("jinja_j_libraries/jinja", True, False), # Priority of macros ("jinja_k_config_override_path_macros/jinja", True, False), # Placeholders and metas ("jinja_l_metas/001", False, True), ("jinja_l_metas/002", False, True), ("jinja_l_metas/003", False, True), ("jinja_l_metas/004", False, True), ("jinja_l_metas/005", False, True), ("jinja_l_metas/006", False, True), ("jinja_l_metas/007", False, True), ("jinja_l_metas/008", False, True), ("jinja_l_metas/009", False, True), ("jinja_l_metas/010", False, True), ("jinja_l_metas/011", False, True), # Library Loading from a folder when library is module ("jinja_m_libraries_module/jinja", True, False), ("jinja_n_nested_macros/jinja", True, False), # Test more dbt configurations ("jinja_o_config_override_dbt_builtins/override_dbt_builtins", True, False), ("jinja_p_disable_dbt_builtins/disable_dbt_builtins", True, False), # Load all the macros ("jinja_q_multiple_path_macros/jinja", True, False), ("jinja_s_filters_in_library/jinja", True, False), # Jinja loader search path, without also loading macros into global namespace ("jinja_t_loader_search_path/jinja", True, False), ], ) def test__templater_full(subpath, code_only, include_meta, yaml_loader, caplog): """Check structure can be parsed from jinja templated files.""" # Log the 
templater and lexer throughout this test caplog.set_level(logging.DEBUG, logger="sqlfluff.templater") caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") assert_structure( yaml_loader, "test/fixtures/templater/" + subpath, code_only=code_only, include_meta=include_meta, ) def test__templater_jinja_block_matching(caplog): """Test the block UUID matching works with a complicated case.""" caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") path = "test/fixtures/templater/jinja_l_metas/002.sql" # Parse the file. parsed = get_parsed(path) # We only care about the template elements template_segments = [ seg for seg in parsed.raw_segments if seg.is_type("template_loop") or ( seg.is_type("placeholder") and seg.block_type in ("block_start", "block_end", "block_mid") ) ] # Group them together by block UUID assert all( seg.block_uuid for seg in template_segments ), "All templated segments should have a block uuid!" grouped = defaultdict(list) for seg in template_segments: grouped[seg.block_uuid].append(seg.pos_marker.working_loc) print(grouped) # Now the matching block IDs should be found at the following positions. # NOTE: These are working locations in the rendered file. groups = { "for actions clause 1": [(6, 5), (9, 5), (12, 5), (15, 5)], "for actions clause 2": [(17, 5), (21, 5), (29, 5), (37, 5)], # NOTE: all the if loop clauses are grouped together. "if loop.first": [ (18, 9), (20, 9), (20, 9), (22, 9), (22, 9), (28, 9), (30, 9), (30, 9), (36, 9), ], } # Check all are accounted for: for clause in groups.keys(): for block_uuid, locations in grouped.items(): if groups[clause] == locations: print(f"Found {clause}, locations with UUID: {block_uuid}") break else: raise ValueError(f"Couldn't find appropriate grouping of blocks: {clause}") class DerivedJinjaAnalyzer(JinjaAnalyzer): """An analyzer that includes some custom Jinja tags. This is used for tests that show the analyzer can be extended for custom plugin templaters that support custom tags. """ @classmethod def _get_tag_configuration(cls, tag: str) -> JinjaTagConfiguration: tag_map = { "up": JinjaTagConfiguration( block_type="block_start", block_tracking=True, ), "down": JinjaTagConfiguration( block_type="block_mid", block_tracking=True, ), "end": JinjaTagConfiguration( block_type="block_end", block_tracking=True, ), } return tag_map.get(tag, super()._get_tag_configuration(tag)) @pytest.mark.parametrize( "test,result,analyzer_class", [ ("", [], JinjaAnalyzer), ("foo", [("foo", "literal", 0)], JinjaAnalyzer), ( "foo {{bar}} z ", [ ("foo ", "literal", 0), ("{{bar}}", "templated", 4), (" z ", "literal", 11), ], JinjaAnalyzer, ), ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3]%}, " "fld_{{i}}{% endfor %} FROM my_schema.{{my_table}} " ), [ ("SELECT ", "literal", 0), ("{# A comment #}", "comment", 7), (" ", "literal", 22), ("{{field}}", "templated", 23), (" ", "literal", 32), ("{% for i in [1, 3]%}", "block_start", 33, 1, "for"), (", fld_", "literal", 53, 1), ("{{i}}", "templated", 59, 1), ("{% endfor %}", "block_end", 64, 1, "endfor"), (" FROM my_schema.", "literal", 76, 2), ("{{my_table}}", "templated", 92, 2), (" ", "literal", 104, 2), ], JinjaAnalyzer, ), ( "{% set thing %}FOO{% endset %} BAR", [ ("{% set thing %}", "block_start", 0, 1, "set"), ("FOO", "literal", 15, 1), ("{% endset %}", "block_end", 18, 1, "endset"), (" BAR", "literal", 30, 2), ], JinjaAnalyzer, ), ( # Tests Jinja "block assignment" syntax. Also tests the use of # template substitution within the block: {{ "dev" }}. 
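            # (In the expected tuples below, the third element is the slice's
            # source_idx, i.e. its character offset in the raw template,
            # optionally followed by a block index and the tag name.)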
"""{% set my_query %} select 1 from foobarfoobarfoobarfoobar_{{ "dev" }} {% endset %} {{ my_query }} """, [ ("{% set my_query %}", "block_start", 0, 1, "set"), ("\nselect 1 from foobarfoobarfoobarfoobar_", "literal", 18, 1), ('{{ "dev" }}', "templated", 58, 1), ("\n", "literal", 69, 1), ("{% endset %}", "block_end", 70, 1, "endset"), ("\n", "literal", 82, 2), ("{{ my_query }}", "templated", 83, 2), ("\n", "literal", 97, 2), ], JinjaAnalyzer, ), # Tests for jinja blocks that consume whitespace. ( """SELECT 1 FROM {%+if true-%} {{ref('foo')}} {%-endif%}""", [ ("SELECT 1 FROM ", "literal", 0), ("{%+if true-%}", "block_start", 14, 1, "if"), (" ", "literal", 27, 1), ("{{ref('foo')}}", "templated", 28, 1), (" ", "literal", 42, 1), ("{%-endif%}", "block_end", 43, 1, "endif"), ], JinjaAnalyzer, ), ( """{% for item in some_list -%} SELECT * FROM some_table {{ "UNION ALL\n" if not loop.last }} {%- endfor %}""", [ ("{% for item in some_list -%}", "block_start", 0, 1, "for"), # This gets consumed in the templated file, but it's still here. ("\n ", "literal", 28, 1), ("SELECT *\n FROM some_table\n", "literal", 33, 1), ('{{ "UNION ALL\n" if not loop.last }}', "templated", 62, 1), ("\n", "literal", 97, 1), ("{%- endfor %}", "block_end", 98, 1, "endfor"), ], JinjaAnalyzer, ), ( JINJA_MACRO_CALL_SQL, [ ("{% macro render_name(title) %}", "block_start", 0, 1, "macro"), ("\n '", "literal", 30, 1), ("{{ title }}", "templated", 34, 1), (". foo' as ", "literal", 45, 1), ("{{ caller() }}", "templated", 55, 1), ("\n", "literal", 69, 1), ("{% endmacro %}", "block_end", 70, 1, "endmacro"), ("\nSELECT\n ", "literal", 84, 2), ("{% call render_name('Sir') %}", "block_start", 96, 3, "call"), ("\n bar\n ", "literal", 125, 3), ("{% endcall %}", "block_end", 142, 3, "endcall"), ("\nFROM baz\n", "literal", 155, 4), ], JinjaAnalyzer, ), ( # Test of tag heuristics in the default _get_tag_configuration """{% randomtagstart %} SELECT 1; {% elphony %} SELECT 2; {% endsomethingweird %}""", [ ("{% randomtagstart %}", "block_start", 0, 1, "randomtagstart"), ("\n SELECT 1;\n", "literal", 20, 1), ("{% elphony %}", "block_mid", 35, 1, "elphony"), ("\n SELECT 2;\n", "literal", 48, 1), ("{% endsomethingweird %}", "block_end", 63, 1, "endsomethingweird"), ], JinjaAnalyzer, ), ( # Basic test with a derived JinjaAnalyzer that supports some custom tags """{% up 'create table xyz' %} CREATE TABLE xyz (id int); {% down %} DROP TABLE xyz; {% end %}""", [ ("{% up 'create table xyz' %}", "block_start", 0, 1, "up"), ("\n CREATE TABLE xyz (id int);\n", "literal", 27, 1), ("{% down %}", "block_mid", 59, 1, "down"), ("\n DROP TABLE xyz;\n", "literal", 69, 1), ("{% end %}", "block_end", 90, 1, "end"), ], DerivedJinjaAnalyzer, ), ], ) def test__templater_jinja_slice_template(test, result, analyzer_class): """Test _slice_template.""" templater = JinjaTemplater() env, _, render_func = templater.construct_render_func() analyzer = analyzer_class(test, env) analyzer.analyze(render_func=render_func) resp = analyzer.raw_sliced # check contiguous (unless there's a comment in it) if "{#" not in test: assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_slice in resp: assert raw_slice.source_idx == idx idx += len(raw_slice.raw) # Check total result assert resp == [RawFileSlice(*args) for args in result] class DBMigrationExtension(Extension): """Example of a hypothetical custom Jinja extension. This extension might ostensibly be used to represent up/down database migrations. 
""" tags = {"up"} def parse(self, parser: Parser) -> Union[Node, list[Node]]: """Parse the up/down blocks.""" # {% up 'migration name' %} next(parser.stream) # skip the "up" token parser.parse_expression() # skip the name of this migration up_body = parser.parse_statements(("name:down",)) # {% down %} next(parser.stream) # skip the "down" token down_body = parser.parse_statements(("name:end",)) # {% end %} next(parser.stream) # This is just a test, so output the blocks verbatim one after the other: return [nodes.Scope(up_body), nodes.Scope(down_body)] class DerivedJinjaTemplater(JinjaTemplater): """A templater that includes some custom Jinja tags. This is used for tests that show the templater can be extended for custom plugin templaters that support custom tags. """ name = "derivedtemplater" def _get_jinja_env(self, config=None): env = super()._get_jinja_env(config) env.add_extension(DBMigrationExtension) return env def _get_jinja_analyzer(self, raw_str: str, env: Environment) -> JinjaAnalyzer: return DerivedJinjaAnalyzer(raw_str, env) def _statement(*args, **kwargs): # NOTE: The standard dbt statement() call returns nothing. return "" def _load_result(*args, **kwargs): return "_load_result" @pytest.mark.parametrize( "raw_file,override_context,result,templater_class", [ ("", None, [], JinjaTemplater), ( "foo", None, [("literal", slice(0, 3, None), slice(0, 3, None))], JinjaTemplater, ), # Example with no loops ( "SELECT {{blah}}, boo {# comment #} from something", dict(blah="foobar"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 15, None), slice(7, 13, None)), ("literal", slice(15, 21, None), slice(13, 19, None)), ("comment", slice(21, 34, None), slice(19, 19, None)), ("literal", slice(34, 49, None), slice(19, 34, None)), ], JinjaTemplater, ), # Example with loops ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3, 7]%}, " "fld_{{i}}_x{% endfor %} FROM my_schema.{{my_table}} " ), dict(field="foobar", my_table="barfoo"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("comment", slice(7, 22, None), slice(7, 7, None)), ("literal", slice(22, 23, None), slice(7, 8, None)), ("templated", slice(23, 32, None), slice(8, 14, None)), ("literal", slice(32, 33, None), slice(14, 15, None)), ("block_start", slice(33, 56, None), slice(15, 15, None)), ("literal", slice(56, 62, None), slice(15, 21, None)), ("templated", slice(62, 67, None), slice(21, 22, None)), ("literal", slice(67, 69, None), slice(22, 24, None)), ("block_end", slice(69, 81, None), slice(24, 24, None)), ("literal", slice(56, 62, None), slice(24, 30, None)), ("templated", slice(62, 67, None), slice(30, 31, None)), ("literal", slice(67, 69, None), slice(31, 33, None)), ("block_end", slice(69, 81, None), slice(33, 33, None)), ("literal", slice(56, 62, None), slice(33, 39, None)), ("templated", slice(62, 67, None), slice(39, 40, None)), ("literal", slice(67, 69, None), slice(40, 42, None)), ("block_end", slice(69, 81, None), slice(42, 42, None)), ("literal", slice(81, 97, None), slice(42, 58, None)), ("templated", slice(97, 109, None), slice(58, 64, None)), ("literal", slice(109, 110, None), slice(64, 65, None)), ], JinjaTemplater, ), # Example with loops (and utilising the end slice code) ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3, 7]%}, " "fld_{{i}}{% endfor %} FROM my_schema.{{my_table}} " ), dict(field="foobar", my_table="barfoo"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("comment", slice(7, 22, None), slice(7, 7, None)), ("literal", slice(22, 23, None), slice(7, 8, 
None)), ("templated", slice(23, 32, None), slice(8, 14, None)), ("literal", slice(32, 33, None), slice(14, 15, None)), ("block_start", slice(33, 56, None), slice(15, 15, None)), ("literal", slice(56, 62, None), slice(15, 21, None)), ("templated", slice(62, 67, None), slice(21, 22, None)), ("block_end", slice(67, 79, None), slice(22, 22, None)), ("literal", slice(56, 62, None), slice(22, 28, None)), ("templated", slice(62, 67, None), slice(28, 29, None)), ("block_end", slice(67, 79, None), slice(29, 29, None)), ("literal", slice(56, 62, None), slice(29, 35, None)), ("templated", slice(62, 67, None), slice(35, 36, None)), ("block_end", slice(67, 79, None), slice(36, 36, None)), ("literal", slice(79, 95, None), slice(36, 52, None)), ("templated", slice(95, 107, None), slice(52, 58, None)), ("literal", slice(107, 108, None), slice(58, 59, None)), ], JinjaTemplater, ), # Test a trailing split, and some variables which don't refer anything. ( "{{ config(materialized='view') }}\n\nSELECT 1 FROM {{ source('finance', " "'reconciled_cash_facts') }}\n\n", dict( config=lambda *args, **kwargs: "", source=lambda *args, **kwargs: "finance_reconciled_cash_facts", ), [ ("templated", slice(0, 33, None), slice(0, 0, None)), ("literal", slice(33, 49, None), slice(0, 16, None)), ("templated", slice(49, 97, None), slice(16, 45, None)), ("literal", slice(97, 99, None), slice(45, 47, None)), ], JinjaTemplater, ), # Test splitting with a loop. ( "SELECT\n " "{% for i in [1, 2, 3] %}\n , " "c_{{i}}+42 AS the_meaning_of_li{{ 'f' * i }}\n " "{% endfor %}\n" "FROM my_table", None, [ ("literal", slice(0, 11, None), slice(0, 11, None)), ("block_start", slice(11, 35, None), slice(11, 11, None)), ("literal", slice(35, 48, None), slice(11, 24, None)), ("templated", slice(48, 53, None), slice(24, 25, None)), ("literal", slice(53, 77, None), slice(25, 49, None)), ("templated", slice(77, 90, None), slice(49, 50, None)), ("literal", slice(90, 95, None), slice(50, 55, None)), ("block_end", slice(95, 107, None), slice(55, 55, None)), ("literal", slice(35, 48, None), slice(55, 68, None)), ("templated", slice(48, 53, None), slice(68, 69, None)), ("literal", slice(53, 77, None), slice(69, 93, None)), ("templated", slice(77, 90, None), slice(93, 95, None)), ("literal", slice(90, 95, None), slice(95, 100, None)), ("block_end", slice(95, 107, None), slice(100, 100, None)), ("literal", slice(35, 48, None), slice(100, 113, None)), ("templated", slice(48, 53, None), slice(113, 114, None)), ("literal", slice(53, 77, None), slice(114, 138, None)), ("templated", slice(77, 90, None), slice(138, 141, None)), ("literal", slice(90, 95, None), slice(141, 146, None)), ("block_end", slice(95, 107, None), slice(146, 146, None)), ("literal", slice(107, 121, None), slice(146, 160, None)), ], JinjaTemplater, ), # Test an example where a block is removed entirely. ( "{% set thing %}FOO{% endset %} SELECT 1", None, [ ("block_start", slice(0, 15, None), slice(0, 0, None)), ("literal", slice(15, 18, None), slice(0, 0, None)), ("block_end", slice(18, 30, None), slice(0, 0, None)), ("literal", slice(30, 39, None), slice(0, 9, None)), ], JinjaTemplater, ), ( # Tests Jinja "include" directive. """{% include 'subdir/include_comment.sql' %} SELECT 1 """, None, [ ("templated", slice(0, 42, None), slice(0, 18, None)), ("literal", slice(42, 53, None), slice(18, 29, None)), ], JinjaTemplater, ), ( # Tests Jinja "import" directive. 
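            # An import renders to nothing, so the whole tag maps to a
            # zero-length templated slice and only the trailing literal
            # survives in the output.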
"""{% import 'echo.sql' as echo %} SELECT 1 """, None, [ ("templated", slice(0, 31, None), slice(0, 0, None)), ("literal", slice(31, 42, None), slice(0, 11, None)), ], JinjaTemplater, ), ( # Tests Jinja "from import" directive.. """{% from 'echo.sql' import echo %} {% from 'echoecho.sql' import echoecho %} SELECT {{ echo("foo") }}, {{ echoecho("bar") }} """, None, [ ("templated", slice(0, 33, None), slice(0, 0, None)), ("literal", slice(33, 34, None), slice(0, 1, None)), ("templated", slice(34, 75, None), slice(1, 1, None)), ("literal", slice(75, 88, None), slice(1, 14, None)), ("templated", slice(88, 105, None), slice(14, 19, None)), ("literal", slice(105, 111, None), slice(19, 25, None)), ("templated", slice(111, 132, None), slice(25, 34, None)), ("literal", slice(132, 133, None), slice(34, 35, None)), ], JinjaTemplater, ), ( # Tests Jinja "do" directive. Should be treated as a # templated instead of block - issue 4603. """{% do true %} {% if true %} select 1 {% endif %}""", None, [ ("templated", slice(0, 13, None), slice(0, 0, None)), ("literal", slice(13, 15, None), slice(0, 2, None)), ("block_start", slice(15, 28, None), slice(2, 2, None)), ("literal", slice(28, 42, None), slice(2, 16, None)), ("block_end", slice(42, 53, None), slice(16, 16, None)), ], JinjaTemplater, ), ( # Tests issue 2541, a bug where the {%- endfor %} was causing # IndexError: list index out of range. """{% for x in ['A', 'B'] %} {% if x != 'A' %} SELECT 'E' {% endif %} {%- endfor %} """, None, [ ("block_start", slice(0, 25, None), slice(0, 0, None)), ("literal", slice(25, 30, None), slice(0, 5, None)), ("block_start", slice(30, 47, None), slice(5, 5, None)), ("block_end", slice(67, 78, None), slice(5, 5, None)), ("literal", slice(78, 79, None), slice(5, 5, None)), ("block_end", slice(79, 92, None), slice(5, 5, None)), ("literal", slice(25, 30, None), slice(5, 10, None)), ("block_start", slice(30, 47, None), slice(10, 10, None)), ("literal", slice(47, 67, None), slice(10, 30, None)), ("block_end", slice(67, 78, None), slice(30, 30, None)), ("literal", slice(78, 79, None), slice(30, 30, None)), ("block_end", slice(79, 92, None), slice(30, 30, None)), ("literal", slice(92, 93, None), slice(30, 31, None)), ], JinjaTemplater, ), ( # Similar to the test above for issue 2541, but it's even trickier: # whitespace control everywhere and NO NEWLINES or other characters # between Jinja segments. In order to get a thorough-enough trace, # JinjaTracer has to build the alternate template with whitespace # control removed, as this increases the amount of trace output. "{%- for x in ['A', 'B'] -%}" "{%- if x == 'B' -%}" "SELECT 'B';" "{%- endif -%}" "{%- if x == 'A' -%}" "SELECT 'A';" "{%- endif -%}" "{%- endfor -%}", None, [ ("block_start", slice(0, 27, None), slice(0, 0, None)), ("block_start", slice(27, 46, None), slice(0, 0, None)), ("block_end", slice(57, 70, None), slice(0, 0, None)), ("block_start", slice(70, 89, None), slice(0, 0, None)), ("literal", slice(89, 100, None), slice(0, 11, None)), ("block_end", slice(100, 113, None), slice(11, 11, None)), ("block_end", slice(113, 127, None), slice(11, 11, None)), ("block_start", slice(27, 46, None), slice(11, 11, None)), ("literal", slice(46, 57, None), slice(11, 22, None)), ("block_end", slice(57, 70, None), slice(22, 22, None)), ("block_start", slice(70, 89, None), slice(22, 22, None)), ("block_end", slice(100, 113, None), slice(22, 22, None)), ("block_end", slice(113, 127, None), slice(22, 22, None)), ], JinjaTemplater, ), ( # Test for issue 2786. 
Also lots of whitespace control. In this # case, removing whitespace control alone wasn't enough. In order # to get a good trace, JinjaTracer had to be updated so the # alternate template included output for the discarded whitespace. """select id, {%- for features in ["value4", "value5"] %} {%- if features in ["value7"] %} {{features}} {%- if not loop.last -%},{% endif %} {%- else -%} {{features}} {%- if not loop.last -%},{% endif %} {%- endif -%} {%- endfor %} from my_table """, None, [ ("literal", slice(0, 14, None), slice(0, 14, None)), ("literal", slice(14, 19, None), slice(14, 14, None)), ("block_start", slice(19, 62, None), slice(14, 14, None)), ("literal", slice(62, 71, None), slice(14, 14, None)), ("block_start", slice(71, 103, None), slice(14, 14, None)), ("block_mid", slice(186, 198, None), slice(14, 14, None)), ("literal", slice(198, 211, None), slice(14, 14, None)), ("templated", slice(211, 223, None), slice(14, 20, None)), ("literal", slice(223, 236, None), slice(20, 20, None)), ("block_start", slice(236, 260, None), slice(20, 20, None)), ("literal", slice(260, 261, None), slice(20, 21, None)), ("block_end", slice(261, 272, None), slice(21, 21, None)), ("literal", slice(272, 281, None), slice(21, 21, None)), ("block_end", slice(281, 294, None), slice(21, 21, None)), ("literal", slice(294, 299, None), slice(21, 21, None)), ("block_end", slice(299, 312, None), slice(21, 21, None)), ("literal", slice(62, 71, None), slice(21, 21, None)), ("block_start", slice(71, 103, None), slice(21, 21, None)), ("block_mid", slice(186, 198, None), slice(21, 21, None)), ("literal", slice(198, 211, None), slice(21, 21, None)), ("templated", slice(211, 223, None), slice(21, 27, None)), ("literal", slice(223, 236, None), slice(27, 27, None)), ("block_start", slice(236, 260, None), slice(27, 27, None)), ("block_end", slice(261, 272, None), slice(27, 27, None)), ("literal", slice(272, 281, None), slice(27, 27, None)), ("block_end", slice(281, 294, None), slice(27, 27, None)), ("literal", slice(294, 299, None), slice(27, 27, None)), ("block_end", slice(299, 312, None), slice(27, 27, None)), ("literal", slice(312, 327, None), slice(27, 42, None)), ], JinjaTemplater, ), ( # Test for issue 2835. There's no space between "col" and "=". # Also tests for issue 3750 that self contained set statements # are parsed as "templated" and not "block_start". """{% set col= "col1" %} SELECT {{ col }} """, None, [ ("templated", slice(0, 21, None), slice(0, 0, None)), ("literal", slice(21, 29, None), slice(0, 8, None)), ("templated", slice(29, 38, None), slice(8, 12, None)), ("literal", slice(38, 39, None), slice(12, 13, None)), ], JinjaTemplater, ), ( # Another test for issue 2835. 
The {% for %} loop inside the # {% set %} caused JinjaTracer to think the {% set %} ended # at the {% endfor %} """{% set some_part_of_the_query %} {% for col in ["col1"] %} {{col}} {% endfor %} {% endset %} SELECT {{some_part_of_the_query}} FROM SOME_TABLE """, None, [ ("block_start", slice(0, 32, None), slice(0, 0, None)), ("literal", slice(32, 37, None), slice(0, 0, None)), ("block_start", slice(37, 62, None), slice(0, 0, None)), ("literal", slice(62, 67, None), slice(0, 0, None)), ("templated", slice(67, 74, None), slice(0, 0, None)), ("literal", slice(74, 79, None), slice(0, 0, None)), ("block_end", slice(79, 91, None), slice(0, 0, None)), ("literal", slice(91, 92, None), slice(0, 0, None)), ("block_end", slice(92, 104, None), slice(0, 0, None)), ("literal", slice(104, 113, None), slice(0, 9, None)), ("templated", slice(113, 139, None), slice(9, 29, None)), ("literal", slice(139, 156, None), slice(29, 46, None)), ], JinjaTemplater, ), ( # Third test for issue 2835. This was the original SQL provided in # the issue report. # Also tests for issue 3750 that self contained set statements # are parsed as "templated" and not "block_start". """{% set whitelisted= [ {'name': 'COL_1'}, {'name': 'COL_2'}, {'name': 'COL_3'} ] %} {% set some_part_of_the_query %} {% for col in whitelisted %} {{col.name}}{{ ", " if not loop.last }} {% endfor %} {% endset %} SELECT {{some_part_of_the_query}} FROM SOME_TABLE """, None, [ ("templated", slice(0, 94, None), slice(0, 0, None)), ("literal", slice(94, 96, None), slice(0, 2, None)), ("block_start", slice(96, 128, None), slice(2, 2, None)), ("literal", slice(128, 133, None), slice(2, 2, None)), ("block_start", slice(133, 161, None), slice(2, 2, None)), ("literal", slice(161, 166, None), slice(2, 2, None)), ("templated", slice(166, 178, None), slice(2, 2, None)), ("templated", slice(178, 205, None), slice(2, 2, None)), ("literal", slice(205, 210, None), slice(2, 2, None)), ("block_end", slice(210, 222, None), slice(2, 2, None)), ("literal", slice(222, 223, None), slice(2, 2, None)), ("block_end", slice(223, 235, None), slice(2, 2, None)), ("literal", slice(235, 244, None), slice(2, 11, None)), ("templated", slice(244, 270, None), slice(11, 66, None)), ("literal", slice(270, 287, None), slice(66, 83, None)), ], JinjaTemplater, ), ( # Test for issue 2822: Handle slicing when there's no newline after # the Jinja block end. "{% if true %}\nSELECT 1 + 1\n{%- endif %}", None, [ ("block_start", slice(0, 13, None), slice(0, 0, None)), ("literal", slice(13, 26, None), slice(0, 13, None)), ("literal", slice(26, 27, None), slice(13, 13, None)), ("block_end", slice(27, 39, None), slice(13, 13, None)), ], JinjaTemplater, ), ( # Test for issue 3434: Handle {% block %}. "SELECT {% block table_name %}block_contents{% endblock %} " "FROM {{ self.table_name() }}\n", None, [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("literal", slice(29, 43, None), slice(7, 21, None)), ("block_start", slice(7, 29, None), slice(21, 21, None)), ("literal", slice(29, 43, None), slice(21, 21, None)), ("block_end", slice(43, 57, None), slice(21, 21, None)), ("literal", slice(57, 63, None), slice(21, 27, None)), ("templated", slice(63, 86, None), slice(27, 27, None)), ("literal", slice(29, 43, None), slice(27, 41, None)), ("literal", slice(86, 87, None), slice(41, 42, None)), ], JinjaTemplater, ), ( # Another test for issue 3434: Similar to the first, but uses # the block inside a loop. 
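            # The block body is rendered once where it is defined and again
            # for each self.table_name() call inside the loop, which is why
            # the same source slice reappears in the expected slices below.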
"""{% block table_name %}block_contents{% endblock %} SELECT {% for j in [4, 5, 6] %} FROM {{ j }}{{ self.table_name() }} {% endfor %} """, None, [ ("literal", slice(22, 36, None), slice(0, 14, None)), ("block_start", slice(0, 22, None), slice(14, 14, None)), ("literal", slice(22, 36, None), slice(14, 14, None)), ("block_end", slice(36, 50, None), slice(14, 14, None)), ("literal", slice(50, 58, None), slice(14, 22, None)), ("block_start", slice(58, 82, None), slice(22, 22, None)), ("literal", slice(82, 88, None), slice(22, 28, None)), ("templated", slice(88, 95, None), slice(28, 29, None)), ("templated", slice(95, 118, None), slice(29, 29, None)), ("literal", slice(22, 36, None), slice(29, 43, None)), ("literal", slice(118, 119, None), slice(43, 44, None)), ("block_end", slice(119, 131, None), slice(44, 44, None)), ("literal", slice(82, 88, None), slice(44, 50, None)), ("templated", slice(88, 95, None), slice(50, 51, None)), ("templated", slice(95, 118, None), slice(51, 51, None)), ("literal", slice(22, 36, None), slice(51, 65, None)), ("literal", slice(118, 119, None), slice(65, 66, None)), ("block_end", slice(119, 131, None), slice(66, 66, None)), ("literal", slice(82, 88, None), slice(66, 72, None)), ("templated", slice(88, 95, None), slice(72, 73, None)), ("templated", slice(95, 118, None), slice(73, 73, None)), ("literal", slice(22, 36, None), slice(73, 87, None)), ("literal", slice(118, 119, None), slice(87, 88, None)), ("block_end", slice(119, 131, None), slice(88, 88, None)), ("literal", slice(131, 132, None), slice(88, 89, None)), ], JinjaTemplater, ), ( "{{ statement('variables', fetch_result=true) }}\n", dict( statement=_statement, load_result=_load_result, ), [ ("templated", slice(0, 47, None), slice(0, 0, None)), ("literal", slice(47, 48, None), slice(0, 1, None)), ], JinjaTemplater, ), ( "{% call statement('variables', fetch_result=true) %}\n" "select 1 as test\n" "{% endcall %}\n" "select 2 as foo\n", dict( statement=_statement, load_result=_load_result, ), [ ("block_start", slice(0, 52, None), slice(0, 0, None)), ("literal", slice(52, 70, None), slice(0, 0, None)), ("block_end", slice(70, 83, None), slice(0, 0, None)), ("literal", slice(83, 100, None), slice(0, 17, None)), ], JinjaTemplater, ), ( JINJA_MACRO_CALL_SQL, None, [ # First all of this is the call block. ("block_start", slice(0, 30, None), slice(0, 0, None)), ("literal", slice(30, 34, None), slice(0, 0, None)), ("templated", slice(34, 45, None), slice(0, 0, None)), ("literal", slice(45, 55, None), slice(0, 0, None)), ("templated", slice(55, 69, None), slice(0, 0, None)), ("literal", slice(69, 70, None), slice(0, 0, None)), ("block_end", slice(70, 84, None), slice(0, 0, None)), # Then the actual query. ("literal", slice(84, 96, None), slice(0, 12, None)), # The block_start (call) contains the actual content. 
("block_start", slice(96, 125, None), slice(12, 47, None)), # The middle and end of the call, have zero length in the template ("literal", slice(125, 142, None), slice(47, 47, None)), ("block_end", slice(142, 155, None), slice(47, 47, None)), ("literal", slice(155, 165, None), slice(47, 57, None)), ], JinjaTemplater, ), ( # Simple test of a derived templater with custom tags """{% up 'create table xyz' %} CREATE TABLE xyz (id int); {% down %} DROP TABLE xyz; {% end %}""", None, [ ("block_start", slice(0, 27, None), slice(0, 0, None)), ("literal", slice(27, 59, None), slice(0, 32, None)), ("block_mid", slice(59, 69, None), slice(32, 32, None)), ("literal", slice(69, 90, None), slice(32, 53, None)), ("block_end", slice(90, 99, None), slice(53, 53, None)), ], DerivedJinjaTemplater, ), ( # test for issue 6121: The first rendered element # inside the loop is far from the start position of the loop. """ {% for i in range(2) %}{% set a = 0 %}{% set b = 0 %}{% set c = 0 %} SELECT 1; {% endfor %} """, None, [ ("literal", slice(0, 1, None), slice(0, 1, None)), ("block_start", slice(1, 24, None), slice(1, 1, None)), ("templated", slice(24, 39, None), slice(1, 1, None)), ("templated", slice(39, 54, None), slice(1, 1, None)), ("templated", slice(54, 69, None), slice(1, 1, None)), ("literal", slice(69, 80, None), slice(1, 12, None)), ("block_end", slice(80, 92, None), slice(12, 12, None)), ("templated", slice(24, 39, None), slice(12, 12, None)), ("templated", slice(39, 54, None), slice(12, 12, None)), ("templated", slice(54, 69, None), slice(12, 12, None)), ("literal", slice(69, 80, None), slice(12, 23, None)), ("block_end", slice(80, 92, None), slice(23, 23, None)), ("literal", slice(92, 93, None), slice(23, 24, None)), ], JinjaTemplater, ), ], ) def test__templater_jinja_slice_file( raw_file, override_context, result, templater_class, caplog ): """Test slice_file.""" templater = templater_class(override_context=override_context) _, _, render_func = templater.construct_render_func( config=FluffConfig.from_path( "test/fixtures/templater/jinja_slice_template_macros" ) ) with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): raw_sliced, sliced_file, templated_str = templater.slice_file( raw_file, render_func=render_func ) # Create a TemplatedFile from the results. This runs some useful sanity # checks. _ = TemplatedFile(raw_file, "<>", templated_str, sliced_file, raw_sliced) # Check contiguous on the TEMPLATED VERSION print(sliced_file) prev_slice = None for elem in sliced_file: print(elem) if prev_slice: assert elem[2].start == prev_slice.stop prev_slice = elem[2] # Check that all literal segments have a raw slice for elem in sliced_file: if elem[0] == "literal": assert elem[1] is not None # check result actual = [ ( templated_file_slice.slice_type, templated_file_slice.source_slice, templated_file_slice.templated_slice, ) for templated_file_slice in sliced_file ] assert actual == result def test__templater_jinja_large_file_check(): """Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without specific config. # i.e. check the defaults work and the default is high. 
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    # Second check setting the value low disables the check
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(
            overrides={"dialect": "ansi", "large_file_skip_char_limit": 0}
        ),
    )
    # Finally check we raise a skip exception when config is set low.
    with pytest.raises(SQLFluffSkipFile) as excinfo:
        JinjaTemplater().process(
            in_str="SELECT 1",
            fname="<string>",
            config=FluffConfig(
                overrides={"dialect": "ansi", "large_file_skip_char_limit": 2},
            ),
        )
    assert "Length of file" in str(excinfo.value)


@pytest.mark.parametrize(
    "in_str, ignore, expected_violation",
    [
        (
            """WITH a AS ({{ b(c=d, e=f) }})
SELECT * FROM final""",
            "",
            SQLTemplaterError("Undefined jinja template variable: 'b'"),
        ),
        (
            """WITH a AS ({{ b(c=d, e=f) }})
SELECT * FROM final""",
            "templating",
            None,
        ),
        (
            # https://github.com/sqlfluff/sqlfluff/issues/6360
            """{% for tbl in tbl_list %}SELECT a FROM {{ tbl }};{% endfor %}""",
            "",
            SQLTemplaterError("Undefined jinja template variable: 'tbl_list'"),
        ),
        (
            """SELECT a FROM {{ tbl['name'] }};""",
            "",
            SQLTemplaterError("Undefined jinja template variable: 'tbl'"),
        ),
    ],
)
def test_jinja_undefined_callable(in_str, ignore, expected_violation):
    """Test undefined callable returns TemplatedFile and sensible error."""
    templater = JinjaTemplater()
    templated_file, violations = templater.process(
        in_str=in_str,
        fname="test.sql",
        config=FluffConfig(overrides={"dialect": "ansi", "ignore": ignore}),
    )
    # This was previously failing to process, due to UndefinedRecorder not
    # supporting __call__(), also Jinja thinking it was not *safe* to call.
    assert templated_file is not None
    if expected_violation:
        assert len(violations) == 1
        assert isinstance(violations[0], type(expected_violation))
        assert str(violations[0]) == str(expected_violation)
    else:
        assert len(violations) == 0


def test_dummy_undefined_fail_with_undefined_error():
    """Tests that a recursion error bug no longer occurs."""
    ud = DummyUndefined("name")
    with pytest.raises(UndefinedError):
        # This was previously causing a recursion error.
        ud._fail_with_undefined_error()


def test_undefined_magic_methods():
    """Test all the magic methods defined on DummyUndefined."""
    ud = DummyUndefined("name")
    # _self_impl
    assert ud + ud is ud
    assert ud - ud is ud
    assert ud / ud is ud
    assert ud // ud is ud
    assert ud % ud is ud
    assert ud**ud is ud
    assert +ud is ud
    assert -ud is ud
    assert ud << ud is ud
    assert ud[ud] is ud
    assert ~ud is ud
    assert ud(ud) is ud
    # _bool_impl
    assert ud and ud
    assert ud or ud
    assert ud ^ ud
    assert bool(ud)
    assert ud < ud
    assert ud <= ud
    assert ud == ud
    assert ud != ud
    assert ud >= ud
    assert ud > ud
    assert ud + ud is ud


@pytest.mark.parametrize(
    "sql_path, expected_renderings",
    [
        pytest.param(
            "simple_if_true.sql",
            [
                "\nSELECT 1\n\n",
                "\nSELECT 2\n\n",
            ],
            id="simple_if_true",
        ),
        pytest.param(
            "simple_if_false.sql",
            [
                "\nSELECT 2\n\n",
                "\nSELECT 1\n\n",
            ],
            id="simple_if_false",
        ),
        pytest.param(
            "if_elif_else.sql",
            [
                "\nSELECT 1\n\n",
                "\nSELECT 2\n\n",
                "\nSELECT 3\n\n",
            ],
            id="if_elif_else",
        ),
        pytest.param(
            "if_else_if_nested.sql",
            [
                "\nSELECT 1\n\n",
                "\n\nSELECT 2\n\n\n",
                "\n\nSELECT 3\n\n\n",
            ],
            id="if_else_if_nested",
        ),
        # This test case exercises the scoring function. Generates up to 10
        # variants, but only the top 5 are returned.
        pytest.param(
            "if_elif_else_chain_scoring.sql",
            [
                "\nSELECT 1\n\n",
                "\nSELECT 100000000\n\n",
                "\nSELECT 10000000\n\n",
                "\nSELECT 1000000\n\n",
                "\nSELECT 100000\n\n",
                "\nSELECT 10000\n\n",
            ],
            id="if_elif_else_chain_scoring",
        ),
        # This test case results in a TypeError executing the variant. This
        # should be ignored, and only the primary should be returned.
        pytest.param(
            "if_true_elif_type_error_else.sql",
            [
                "\nSELECT 1\n\n",
                "\nSELECT 2\n\n",
            ],
            id="if_true_elif_type_error_else",
        ),
        # https://github.com/sqlfluff/sqlfluff/issues/5803
        pytest.param(
            "inline_select.sql",
            [
                "select 2\n",
                "select 1\n",
            ],
            id="inline_select",
        ),
    ],
)
def test__templater_lint_unreached_code(sql_path: str, expected_renderings):
    """Test that the Jinja templater renders variants for unreached code."""
    test_dir = Path("test/fixtures/templater/jinja_lint_unreached_code")
    t = JinjaTemplater()
    renderings = []
    raw_slicings = []
    final_source_slices = []
    for templated_file, _ in t.process_with_variants(
        in_str=(test_dir / sql_path).read_text(),
        fname=str(sql_path),
        config=FluffConfig.from_path(str(test_dir)),
    ):
        renderings.append(templated_file.templated_str)
        raw_slicings.append(templated_file.raw_sliced)
        # Capture the final slice for all of them.
        final_source_slices.append(templated_file.sliced_file[-1].source_slice)

    assert renderings == expected_renderings
    # Compare all of the additional raw slicings to make sure they're the
    # same as the root.
    root_slicing = raw_slicings[0]
    for additional_slicing in raw_slicings[1:]:
        assert additional_slicing == root_slicing
    # Check that the final source slices also line up in the templated files.
    # NOTE: Clearly the `templated_slice` values _won't_ be the same.
    # We're doing the _final_ slice, because it's very likely to be the same
    # _type_ and if it's in the right place, we can assume that all of the
    # others probably are.
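    # (A stricter check would compare every source slice across variants;
    # the final slice is used as a cheap proxy for the rest.)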
root_final_slice = final_source_slices[0] for additional_final_slice in final_source_slices[1:]: assert additional_final_slice == root_final_slice sqlfluff-3.4.2/test/core/templaters/placeholder_test.py000066400000000000000000000276451503426445100233460ustar00rootroot00000000000000"""Tests for templaters.""" import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.templaters import PlaceholderTemplater def test__templater_raw(): """Test the templaters when nothing has to be replaced.""" t = PlaceholderTemplater(override_context=dict(param_style="colon")) instr = "SELECT * FROM {{blah}} WHERE %(gnepr)s OR e~':'" outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == instr @pytest.mark.parametrize( "instr, param_style, expected_outstr, values", [ ( "SELECT * FROM f, o, o WHERE a < 10\n\n", "colon", "SELECT * FROM f, o, o WHERE a < 10\n\n", dict( unused=7777, ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE userid = :user_id AND date > :start_date """, "colon", """ SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01' """, dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE userid = :user_id AND date > :start_date""", "colon", """ SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01'""", dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN :city_ids AND date > '2020-10-01' """, "colon", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id, :"custom_column" FROM users_data WHERE userid = :user_id AND date > :'start_date' """, "colon_optional_quotes", """ SELECT user_mail, city_id, "PascalCaseColumn" FROM users_data WHERE userid = 42 AND date > '2021-10-01' """, dict( user_id="42", custom_column="PascalCaseColumn", start_date="2021-10-01", ), ), ( """ SELECT user_mail, city_id FROM users_data:table_suffix """, "colon_nospaces", """ SELECT user_mail, city_id FROM users_data42 """, dict( table_suffix="42", ), ), ( # Postgres uses double-colons for type casts , see # https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-TYPE-CASTS # This test ensures we don't confuse them with colon placeholders. """ SELECT user_mail, city_id, joined::date FROM users_data:table_suffix """, "colon_nospaces", """ SELECT user_mail, city_id, joined::date FROM users_data42 """, dict( table_suffix="42", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN ? AND date > ? 
""", "question_mark", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "2": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN :1 AND date > :45 """, "numeric_colon", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "45": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN %(city_id)s AND date > %(date)s AND someflag = %(someflag)s LIMIT %(limit)s """, "pyformat", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' AND someflag = False LIMIT 15 """, dict( city_id="(1, 2, 3, 45)", date="'2020-10-01'", limit=15, someflag=False ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN $city_id AND date > $date OR date = ${date} """, "dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' OR date = '2020-10-01' """, dict( city_id="(1, 2, 3, 45)", date="'2020-10-01'", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN $12 AND date > $90 """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "12": "(1, 2, 3, 45)", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN ${12} AND date > ${90} """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "12": "(1, 2, 3, 45)", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE user_mail = '${12}' AND date > ${90} """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE user_mail = 'test@example.com' AND date > '2020-10-01' """, { "12": "test@example.com", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE user_mail = '$12$' AND date > $90$ """, "dollar_surround", """ SELECT user_mail, city_id FROM users_data WHERE user_mail = 'test@example.com' AND date > '2020-10-01' """, { "12": "test@example.com", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN %s AND date > %s """, "percent", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "2": "'2020-10-01'", }, ), ( """ USE DATABASE &{env}_MARKETING; USE SCHEMA &&EMEA; SELECT user_mail, city_id FROM users_data WHERE userid = &user_id AND date > &{start_date} """, "ampersand", """ USE DATABASE PRD_MARKETING; USE SCHEMA &&EMEA; SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01' """, dict( env="PRD", user_id="42", start_date="'2021-10-01'", ), ), ( "USE ${flyway:database}.test_schema;", "flyway_var", "USE test_db.test_schema;", { "flyway:database": "test_db", }, ), ( "SELECT metadata$filename, $1 FROM @stg_data_export_${env_name};", "flyway_var", "SELECT metadata$filename, $1 FROM @stg_data_export_staging;", { "env_name": "staging", }, ), ( "SELECT metadata$filename, $1 FROM @stg_data_export_${env_name};", "flyway_var", "SELECT metadata$filename, $1 FROM @stg_data_export_env_name;", {}, ), ], ids=[ "no_changes", "colon_simple_substitution", "colon_accept_block_at_end", "colon_tuple_substitution", "colon_quoted", "colon_nospaces", "colon_nospaces_double_colon_ignored", "question_mark", "numeric_colon", 
"pyformat", "dollar", "numeric_dollar", "numeric_dollar_with_braces", "numeric_dollar_with_braces_and_string", "dollar_surround", "percent", "ampersand", "flyway_var", "flyway_var", "params_not_specified", ], ) def test__templater_param_style(instr, expected_outstr, param_style, values): """Test different param_style templating.""" t = PlaceholderTemplater(override_context={**values, "param_style": param_style}) outstr, _ = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == expected_outstr def test__templater_custom_regex(): """Test custom regex templating.""" t = PlaceholderTemplater( override_context=dict(param_regex="__(?P[\\w_]+)__", my_name="john") ) outstr, _ = t.process( in_str="SELECT bla FROM blob WHERE id = __my_name__", fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) assert str(outstr) == "SELECT bla FROM blob WHERE id = john" def test__templater_setup(): """Test the exception raised when config is incomplete or ambiguous.""" t = PlaceholderTemplater(override_context=dict(name="'john'")) with pytest.raises( ValueError, match=( "No param_regex nor param_style was provided to the placeholder templater" ), ): t.process(in_str="SELECT 2+2", fname="test") t = PlaceholderTemplater( override_context=dict(param_style="bla", param_regex="bli") ) with pytest.raises( ValueError, match=r"Either param_style or param_regex must be provided, not both", ): t.process(in_str="SELECT 2+2", fname="test") def test__templater_styles(): """Test the exception raised when parameter style is unknown.""" t = PlaceholderTemplater(override_context=dict(param_style="pperccent")) with pytest.raises(ValueError, match=r"Unknown param_style"): t.process(in_str="SELECT 2+2", fname="test") sqlfluff-3.4.2/test/core/templaters/python_test.py000066400000000000000000000460471503426445100224020ustar00rootroot00000000000000"""Tests for templaters.""" import logging import pytest from sqlfluff.core import FluffConfig, SQLTemplaterError from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.core.templaters import PythonTemplater from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFileSlice from sqlfluff.core.templaters.python import IntermediateFileSlice PYTHON_STRING = "SELECT * FROM {blah}" def test__templater_python(): """Test the python templater.""" t = PythonTemplater(override_context=dict(blah="foo")) instr = PYTHON_STRING outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == "SELECT * FROM foo" def test__templater_python_error(): """Test error handling in the python templater.""" t = PythonTemplater(override_context=dict(noblah="foo")) instr = PYTHON_STRING with pytest.raises(SQLTemplaterError): t.process(in_str=instr, fname="test") @pytest.mark.parametrize( "int_slice,templated_str,head_test,tail_test,int_test", [ # Test Invariante ( IntermediateFileSlice( "compound", slice(0, 5), slice(0, 5), [RawFileSlice("{{i}}", "templated", 0)], ), "foo", [], [], IntermediateFileSlice( "compound", slice(0, 5), slice(0, 5), [RawFileSlice("{{i}}", "templated", 0)], ), ), # Test Complete Trimming ( IntermediateFileSlice( "compound", slice(0, 3), slice(0, 3), [RawFileSlice("foo", "literal", 0)], ), "foo", [TemplatedFileSlice("literal", slice(0, 3), slice(0, 3))], [], IntermediateFileSlice( "compound", slice(3, 3), slice(3, 3), [], ), ), # Test Basic Trimming. 
( IntermediateFileSlice( "compound", slice(0, 11), slice(0, 7), [ RawFileSlice("foo", "literal", 0), RawFileSlice("{{i}}", "templated", 3), RawFileSlice("bar", "literal", 8), ], ), "foo1bar", [TemplatedFileSlice("literal", slice(0, 3), slice(0, 3))], [TemplatedFileSlice("literal", slice(8, 11), slice(4, 7))], IntermediateFileSlice( "compound", slice(3, 8), slice(3, 4), [RawFileSlice("{{i}}", "templated", 3)], ), ), # Test stopping at blocks. ( IntermediateFileSlice( "compound", slice(0, 34), slice(0, 24), [ RawFileSlice("foo", "literal", 0), RawFileSlice("{{for}}", "block_start", 3), RawFileSlice("foo", "literal", 10), RawFileSlice("{{i}}", "literal", 13), RawFileSlice("bar", "literal", 18), RawFileSlice("{{endfor}}", "block_end", 21), RawFileSlice("bar", "literal", 31), ], ), "foofoofoobarfoofoobarbar", [ TemplatedFileSlice("literal", slice(0, 3), slice(0, 3)), TemplatedFileSlice("block_start", slice(3, 10), slice(3, 3)), ], [ TemplatedFileSlice("block_end", slice(21, 31), slice(21, 21)), TemplatedFileSlice("literal", slice(31, 34), slice(21, 24)), ], IntermediateFileSlice( "compound", slice(10, 21), slice(3, 21), [ RawFileSlice("foo", "literal", 10), RawFileSlice("{{i}}", "literal", 13), RawFileSlice("bar", "literal", 18), ], ), ), ], ) def test__templater_python_intermediate__trim( int_slice, templated_str, head_test, tail_test, int_test ): """Test trimming IntermediateFileSlice.""" h, i, t = int_slice.trim_ends(templated_str=templated_str) assert h == head_test assert t == tail_test assert i == int_test @pytest.mark.parametrize( "mainstr,substrings,positions", [ ("", [], []), ("a", ["a"], [[0]]), ("foobar", ["o", "b"], [[1, 2], [3]]), ("bar foo bar foo", ["bar", "foo"], [[0, 8], [4, 12]]), ], ) def test__templater_python_substring_occurrences(mainstr, substrings, positions): """Test _substring_occurrences.""" occurrences = PythonTemplater._substring_occurrences(mainstr, substrings) assert isinstance(occurrences, dict) pos_test = [occurrences[substring] for substring in substrings] assert pos_test == positions @pytest.mark.parametrize( "test,result", [ ({}, []), ({"A": [1]}, [("A", 1)]), ( {"A": [3, 2, 1], "B": [4, 2]}, [("A", 1), ("A", 2), ("B", 2), ("A", 3), ("B", 4)], ), ], ) def test__templater_python_sorted_occurrence_tuples(test, result): """Test _sorted_occurrence_tuples.""" assert PythonTemplater._sorted_occurrence_tuples(test) == result @pytest.mark.parametrize( "test,result", [ ("", []), ("foo", [RawFileSlice("foo", "literal", 0)]), ( "foo {bar} z {{ y", [ RawFileSlice("foo ", "literal", 0), RawFileSlice("{bar}", "templated", 4), RawFileSlice(" z ", "literal", 9), RawFileSlice("{{", "escaped", 12), RawFileSlice(" y", "literal", 14), ], ), ], ) def test__templater_python_slice_template(test, result): """Test _slice_template.""" resp = list(PythonTemplater._slice_template(test)) # check contiguous assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_file_slice in resp: assert raw_file_slice.source_idx == idx idx += len(raw_file_slice.raw) # Check total result assert resp == result @pytest.mark.parametrize( "raw_sliced,literals,raw_occurrences,templated_occurrences,templated_length,result", [ ([], [], {}, {}, 0, []), ( [RawFileSlice("foo", "literal", 0)], ["foo"], {"foo": [0]}, {"foo": [0]}, 3, [ IntermediateFileSlice( "invariant", slice(0, 3, None), slice(0, 3, None), [RawFileSlice("foo", "literal", 0)], ) ], ), ], ) def test__templater_python_split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, result, ): 
"""Test _split_invariants.""" resp = list( PythonTemplater._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, ) ) # check result assert resp == result @pytest.mark.parametrize( "split_file,raw_occurrences,templated_occurrences,templated_str,result", [ ([], {}, {}, "", []), ( [ IntermediateFileSlice( "invariant", slice(0, 3, None), slice(0, 3, None), [RawFileSlice("foo", "literal", 0)], ) ], {"foo": [0]}, {"foo": [0]}, "foo", [TemplatedFileSlice("literal", slice(0, 3, None), slice(0, 3, None))], ), ( [ IntermediateFileSlice( "invariant", slice(0, 7, None), slice(0, 7, None), [RawFileSlice("SELECT ", "literal", 0)], ), IntermediateFileSlice( "compound", slice(7, 24, None), slice(7, 22, None), [ RawFileSlice("{blah}", "templated", 7), RawFileSlice(", ", "literal", 13), RawFileSlice("{foo:.2f}", "templated", 15), ], ), IntermediateFileSlice( "invariant", slice(24, 33, None), slice(22, 31, None), [RawFileSlice(" as foo, ", "literal", 22)], ), IntermediateFileSlice( "simple", slice(33, 38, None), slice(31, 35, None), [RawFileSlice("{bar}", "templated", 33)], ), IntermediateFileSlice( "invariant", slice(38, 41, None), slice(35, 38, None), [RawFileSlice(", '", "literal", 35)], ), IntermediateFileSlice( "compound", slice(41, 45, None), slice(38, 40, None), [ RawFileSlice("{{", "escaped", 41), RawFileSlice("}}", "escaped", 43), ], ), IntermediateFileSlice( "invariant", slice(45, 76, None), slice(40, 71, None), [RawFileSlice("' as convertible from something", "literal", 40)], ), ], { "SELECT ": [0], ", ": [13, 31, 38], " as foo, ": [24], ", '": [38], "' as convertible from something": [45], }, { "SELECT ": [0], ", ": [14, 29, 35], " as foo, ": [22], ", '": [35], "' as convertible from something": [40], }, "SELECT nothing, 435.24 as foo, spam, '{}' as convertible from something", [ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), TemplatedFileSlice("templated", slice(7, 13, None), slice(7, 14, None)), TemplatedFileSlice("literal", slice(13, 15, None), slice(14, 16, None)), TemplatedFileSlice( "templated", slice(15, 24, None), slice(16, 22, None) ), TemplatedFileSlice("literal", slice(24, 33, None), slice(22, 31, None)), TemplatedFileSlice( "templated", slice(33, 38, None), slice(31, 35, None) ), TemplatedFileSlice("literal", slice(38, 41, None), slice(35, 38, None)), TemplatedFileSlice("escaped", slice(41, 45, None), slice(38, 40, None)), TemplatedFileSlice("literal", slice(45, 76, None), slice(40, 71, None)), ], ), # Check for recursion error in non-exact raw cases. ( [ IntermediateFileSlice( "compound", slice(0, 13, None), slice(0, 9, None), [ RawFileSlice("{foo}", "templated", 0), RawFileSlice(" , ", "literal", 5), RawFileSlice("{bar}", "templated", 8), ], ), ], {",": [6]}, {",": [4]}, "foo , bar", [ TemplatedFileSlice("templated", slice(0, 5, None), slice(0, 3, None)), # Alternate implementations which group these next three together # would also be fine. 
TemplatedFileSlice("literal", slice(5, 6, None), slice(3, 4, None)), TemplatedFileSlice("literal", slice(6, 7, None), slice(4, 5, None)), TemplatedFileSlice("literal", slice(7, 8, None), slice(5, 6, None)), TemplatedFileSlice("templated", slice(8, 13, None), slice(6, 9, None)), ], ), ], ) def test__templater_python_split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, result, caplog ): """Test _split_uniques_coalesce_rest.""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): resp = list( PythonTemplater._split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, ) ) # Check contiguous prev_slice = None for elem in result: if prev_slice: assert elem[1].start == prev_slice[0].stop assert elem[2].start == prev_slice[1].stop prev_slice = (elem[1], elem[2]) # check result assert resp == result @pytest.mark.parametrize( "raw_file,templated_file,unwrap_wrapped,result", [ ("", "", True, []), ( "foo", "foo", True, [("literal", slice(0, 3, None), slice(0, 3, None))], ), ( "SELECT {blah}, {foo:.2f} as foo, {bar}, '{{}}' as convertible from " "something", "SELECT nothing, 435.24 as foo, spam, '{}' as convertible from something", True, [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 13, None), slice(7, 14, None)), ("literal", slice(13, 15, None), slice(14, 16, None)), ("templated", slice(15, 24, None), slice(16, 22, None)), ("literal", slice(24, 33, None), slice(22, 31, None)), ("templated", slice(33, 38, None), slice(31, 35, None)), ("literal", slice(38, 41, None), slice(35, 38, None)), ("escaped", slice(41, 45, None), slice(38, 40, None)), ("literal", slice(45, 76, None), slice(40, 71, None)), ], ), # Test a wrapped example. Given the default config is to unwrap any wrapped # queries, it should ignore the ends in the sliced file. ( "SELECT {blah} FROM something", "WITH wrap AS (SELECT nothing FROM something) SELECT * FROM wrap", True, # The sliced version should have trimmed the ends [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 13, None), slice(7, 14, None)), ("literal", slice(13, 28, None), slice(14, 29, None)), ], ), ( "SELECT {blah} FROM something", "WITH wrap AS (SELECT nothing FROM something) SELECT * FROM wrap", False, # Test NOT unwrapping it. # The sliced version should NOT have trimmed the ends [ ("templated", slice(0, 0, None), slice(0, 14, None)), ("literal", slice(0, 7, None), slice(14, 21, None)), ("templated", slice(7, 13, None), slice(21, 28, None)), ("literal", slice(13, 28, None), slice(28, 43, None)), ("templated", slice(28, 28, None), slice(43, 63, None)), ], ), ], ) def test__templater_python_slice_file(raw_file, templated_file, unwrap_wrapped, result): """Test slice_file.""" _, resp, _ = PythonTemplater().slice_file( raw_file, # For the render_func we just use a function which just returns the # templated file from the test case. (lambda x: templated_file), config=FluffConfig( configs={"templater": {"unwrap_wrapped_queries": unwrap_wrapped}}, overrides={"dialect": "ansi"}, ), ) # Check contiguous prev_slice = None for templated_slice in resp: if prev_slice: assert templated_slice.source_slice.start == prev_slice[0].stop assert templated_slice.templated_slice.start == prev_slice[1].stop prev_slice = (templated_slice.source_slice, templated_slice.templated_slice) # check result assert resp == result def test__templater_python_large_file_check(): """Test large file skipping. 
The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without config. PythonTemplater().process(in_str="SELECT 1", fname="") # Then check we raise a skip exception when config is set low. with pytest.raises(SQLFluffSkipFile) as excinfo: PythonTemplater().process( in_str="SELECT 1", fname="", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 2}, ), ) assert "Length of file" in str(excinfo.value) @pytest.mark.parametrize( "raw_str,result", [ ("", ""), ( "SELECT * FROM {foo.bar}", "SELECT * FROM foobar", ), ( "SELECT {foo} FROM {foo.bar}", "SELECT bar FROM foobar", ), ( "SELECT {num:.2f} FROM blah", "SELECT 123.00 FROM blah", ), ( "SELECT {self.number:.1f} FROM blah", "SELECT 42.0 FROM blah", ), ( "SELECT * FROM {obj.schema}.{obj.table}", "SELECT * FROM my_schema.my_table", ), ], ) def test__templater_python_dot_notation_variables(raw_str, result): """Test template variables that contain a dot character (`.`).""" context = { "foo": "bar", "num": 123, "sqlfluff": { "foo.bar": "foobar", "self.number": 42, "obj.schema": "my_schema", "obj.table": "my_table", }, } t = PythonTemplater(override_context=context) instr = raw_str outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == result @pytest.mark.parametrize( "context,error_string", [ # No additional context (i.e. no sqlfluff key) ( {}, "magic key 'sqlfluff' missing from context. This key is required " "for template variables containing '.'.", ), # No key missing within sqlfluff dict. ( {"sqlfluff": {"a": "b"}}, "'foo.bar' key missing from 'sqlfluff' dict in context. Template " "variables containing '.' are required to use the 'sqlfluff' magic " "fixed context key.", ), ], ) def test__templater_python_dot_notation_fail(context, error_string): """Test failures with template variables that contain a dot character (`.`).""" t = PythonTemplater(override_context=context) with pytest.raises(SQLTemplaterError) as excinfo: outstr, _ = t.process(in_str="SELECT * FROM {foo.bar}", fname="test") assert error_string in excinfo.value.desc() sqlfluff-3.4.2/test/dialects/000077500000000000000000000000001503426445100161155ustar00rootroot00000000000000sqlfluff-3.4.2/test/dialects/__init__.py000066400000000000000000000000431503426445100202230ustar00rootroot00000000000000"""Tests for sqlfluff.dialects.""" sqlfluff-3.4.2/test/dialects/ansi_test.py000066400000000000000000000211111503426445100204540ustar00rootroot00000000000000"""Tests specific to the ansi dialect.""" import logging import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.parser import Lexer @pytest.mark.parametrize( "raw,res", [ # NB: The final empty string is the end of file marker. ("a b", ["a", " ", "b", ""]), ("b.c", ["b", ".", "c", ""]), ( "abc \n \t def ;blah", ["abc", " ", "\n", " \t ", "def", " ", ";", "blah", ""], ), ], ) def test__dialect__ansi__file_lex(raw, res, caplog): """Test we don't drop bits on simple examples.""" config = FluffConfig(overrides=dict(dialect="ansi")) lexer = Lexer(config=config) with caplog.at_level(logging.DEBUG): tokens, _ = lexer.lex(raw) # From just the initial parse, check we're all there raw_list = [token.raw for token in tokens] assert "".join(token.raw for token in tokens) == raw assert raw_list == res # Develop test to check specific elements against specific grammars. 
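

# Illustrative sketch (an assumed example, not itself part of the suite):
# the parametrized tests below automate roughly this roundtrip check.
def _example_ansi_roundtrip_check(raw="SELECT col_a FROM tbl"):
    """Lex and parse a snippet, then check it survives a roundtrip."""
    parsed = Linter(dialect="ansi").parse_string(raw)
    # The reassembled raw segments should reproduce the input exactly.
    assert parsed.tree.raw == raw
    # Nothing should have fallen into an unparsable section.
    assert "unparsable" not in parsed.tree.type_set()
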
@pytest.mark.parametrize( "segmentref,raw", [ ("SelectKeywordSegment", "select"), ("NakedIdentifierSegment", "online_sales"), ("BareFunctionSegment", "current_timestamp"), ("FunctionSegment", "current_timestamp()"), ("NumericLiteralSegment", "1000.0"), ("ExpressionSegment", "online_sales / 1000.0"), ("IntervalExpressionSegment", "INTERVAL 1 YEAR"), ("ExpressionSegment", "CASE WHEN id = 1 THEN 'nothing' ELSE 'test' END"), # Nested Case Expressions # https://github.com/sqlfluff/sqlfluff/issues/172 ( "ExpressionSegment", ( "CASE WHEN id = 1 THEN CASE WHEN true THEN 'something' " "ELSE 'nothing' END ELSE 'test' END" ), ), # Casting expressions # https://github.com/sqlfluff/sqlfluff/issues/161 ("ExpressionSegment", "CAST(ROUND(online_sales / 1000.0) AS varchar)"), # Like expressions # https://github.com/sqlfluff/sqlfluff/issues/170 ("ExpressionSegment", "name NOT LIKE '%y'"), # Functions with a space # https://github.com/sqlfluff/sqlfluff/issues/171 ("SelectClauseElementSegment", "MIN (test.id) AS min_test_id"), # Interval literals # https://github.com/sqlfluff/sqlfluff/issues/148 ( "ExpressionSegment", "DATE_ADD(CURRENT_DATE('America/New_York'), INTERVAL 1 year)", ), # Array accessors ("ExpressionSegment", "my_array[1]"), ("ExpressionSegment", "my_array[OFFSET(1)]"), ("ExpressionSegment", "my_array[5:8]"), ("ExpressionSegment", "4 + my_array[OFFSET(1)]"), ("ExpressionSegment", "bits[OFFSET(0)] + 7"), ( "SelectClauseElementSegment", ( "(count_18_24 * bits[OFFSET(0)])" " / audience_size AS relative_abundance" ), ), ("ExpressionSegment", "count_18_24 * bits[OFFSET(0)] + count_25_34"), ( "SelectClauseElementSegment", ( "(count_18_24 * bits[OFFSET(0)] + count_25_34)" " / audience_size AS relative_abundance" ), ), # Dense math expressions # https://github.com/sqlfluff/sqlfluff/issues/178 # https://github.com/sqlfluff/sqlfluff/issues/179 ("SelectStatementSegment", "SELECT t.val/t.id FROM test WHERE id*1.0/id > 0.8"), ("SelectClauseElementSegment", "t.val/t.id"), # Issue with casting raise as part of PR #177 ("SelectClauseElementSegment", "CAST(num AS INT64)"), # Casting as datatype with arguments ("SelectClauseElementSegment", "CAST(num AS numeric(8,4))"), # Wildcard field selection ("SelectClauseElementSegment", "a.*"), ("SelectClauseElementSegment", "a.b.*"), ("SelectClauseElementSegment", "a.b.c.*"), # Default Element Syntax ("SelectClauseElementSegment", "a..c.*"), # Negative Elements ("SelectClauseElementSegment", "-some_variable"), ("SelectClauseElementSegment", "- some_variable"), # Complex Functions ( "ExpressionSegment", "concat(left(uaid, 2), '|', right(concat('0000000', " "SPLIT_PART(uaid, '|', 4)), 10), '|', '00000000')", ), # Notnull and Isnull ("ExpressionSegment", "c is null"), ("ExpressionSegment", "c is not null"), ("SelectClauseElementSegment", "c is null as c_isnull"), ("SelectClauseElementSegment", "c is not null as c_notnull"), # Shorthand casting ("ExpressionSegment", "NULL::INT"), ("SelectClauseElementSegment", "NULL::INT AS user_id"), ("TruncateStatementSegment", "TRUNCATE TABLE test"), ("TruncateStatementSegment", "TRUNCATE test"), ], ) def test__dialect__ansi_specific_segment_parses( segmentref, raw, caplog, dialect_specific_segment_parses ): """Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested. 
""" dialect_specific_segment_parses("ansi", segmentref, raw, caplog) @pytest.mark.parametrize( "segmentref,raw", [ # Check we don't match empty whitespace as a reference ("ObjectReferenceSegment", "\n ") ], ) def test__dialect__ansi_specific_segment_not_match( segmentref, raw, caplog, dialect_specific_segment_not_match ): """Test that specific segments do not match. NB: We're testing the MATCH function not the PARSE function. This is the opposite to the above. """ dialect_specific_segment_not_match("ansi", segmentref, raw, caplog) @pytest.mark.parametrize( "raw,err_locations", [ # Missing Closing bracket. Error should be raised # on the starting bracket. ("SELECT 1 + (2 ", [(1, 12)]), # Set expression with inappropriate ORDER BY or LIMIT. Error # raised on the UNION. ("SELECT * FROM a ORDER BY 1 UNION SELECT * FROM b", [(1, 28)]), ("SELECT * FROM a LIMIT 1 UNION SELECT * FROM b", [(1, 25)]), ("SELECT * FROM a ORDER BY 1 LIMIT 1 UNION SELECT * FROM b", [(1, 36)]), ], ) def test__dialect__ansi_specific_segment_not_parse(raw, err_locations): """Test queries do not parse, with parsing errors raised properly.""" lnt = Linter(dialect="ansi") parsed = lnt.parse_string(raw) assert len(parsed.violations) > 0 print(parsed.violations) locs = [(v.line_no, v.line_pos) for v in parsed.violations] assert locs == err_locations def test__dialect__ansi_is_whitespace(): """Test proper tagging with is_whitespace.""" lnt = Linter(dialect="ansi") with open("test/fixtures/dialects/ansi/select_in_multiline_comment.sql") as f: parsed = lnt.parse_string(f.read()) # Check all the segments that *should* be whitespace, ARE for raw_seg in parsed.tree.get_raw_segments(): if raw_seg.is_type("whitespace", "newline"): assert raw_seg.is_whitespace @pytest.mark.parametrize( "sql_string, indented_joins, meta_loc", [ ( "select field_1 from my_table as alias_1", True, (1, 4, 8, 11, 15, 16, 17, 18, 19), ), ("select field_1 from my_table as alias_1", False, (1, 4, 8, 11, 15, 16, 17)), ( "select field_1 from my_table as alias_1 join foo using (field_1)", True, (1, 4, 8, 11, 15, 17, 18, 20, 24, 25, 27, 30, 32, 34, 35, 36, 37), ), ( "select field_1 from my_table as alias_1 join foo using (field_1)", False, (1, 4, 8, 11, 15, 17, 19, 23, 24, 26, 29, 31, 33, 34, 35), ), ], ) def test__dialect__ansi_parse_indented_joins(sql_string, indented_joins, meta_loc): """Test parsing of meta segments using Conditional works with indented_joins.""" lnt = Linter( config=FluffConfig( configs={"indentation": {"indented_joins": indented_joins}}, overrides={"dialect": "ansi"}, ) ) parsed = lnt.parse_string(sql_string) tree = parsed.tree # Check that there's nothing unparsable assert "unparsable" not in tree.type_set() # Check all the segments that *should* be metas, ARE. # NOTE: This includes the end of file marker. 
    res_meta_locs = tuple(
        idx for idx, raw_seg in enumerate(tree.get_raw_segments()) if raw_seg.is_meta
    )
    assert res_meta_locs == meta_loc
sqlfluff-3.4.2/test/dialects/bigquery_test.py000066400000000000000000000066431503426445100213660ustar00rootroot00000000000000"""Tests specific to the bigquery dialect."""

import hypothesis.strategies as st
import pytest
from hypothesis import example, given, note, settings

from sqlfluff.core import FluffConfig
from sqlfluff.core.parser import Lexer, Parser


@settings(max_examples=100, deadline=None)
@given(
    st.lists(
        st.tuples(st.sampled_from(["<", "=", ">"]), st.sampled_from(["AND", "OR"])),
        min_size=1,
        max_size=30,
    )
)
@example(data=[("<", "AND")])
@example(data=[(">", "AND")])
@example(data=[("<", "AND"), (">", "AND")])
@example(data=[("<", "AND"), ("=", "AND"), (">", "AND")])
@example(data=[(">", "AND"), ("<", "AND")])
@example(data=[("<", "AND"), ("<", "AND"), (">", "AND")])
@example(data=[(">", "AND"), (">", "AND"), ("<", "AND")])
def test_bigquery_relational_operator_parsing(data):
    """Tests queries with a diverse mixture of relational operators."""
    # Generate a simple SELECT query with relational operators and conjunctions
    # as specified in 'data'. Note the conjunctions are used as separators
    # between comparisons, so the conjunction in the first item is not used.
    filter = []
    for i, (relation, conjunction) in enumerate(data):
        if i:
            filter.append(f" {conjunction} ")
        filter.append(f"a {relation} b")
    raw = f'SELECT * FROM t WHERE {"".join(filter)}'
    note(f"query: {raw}")
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    parsed = Parser(config=config).parse(tokens)
    print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.stringify()}")
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs


@pytest.mark.parametrize(
    "table_reference, reference_parts",
    [
        (
            "bigquery-public-data.pypi.file_downloads",
            ["bigquery-public-data", "pypi", "file_downloads"],
        ),
        (
            "`bigquery-public-data.pypi.file_downloads`",
            ["bigquery-public-data", "pypi", "file_downloads"],
        ),
        ("foo.far.bar", ["foo", "far", "bar"]),
        ("`foo.far.bar`", ["foo", "far", "bar"]),
        ("a-b.c-d.e-f", ["a-b", "c-d", "e-f"]),
    ],
)
def test_bigquery_table_reference_segment_iter_raw_references(
    table_reference, reference_parts
):
    """Tests BigQuery override of TableReferenceSegment.iter_raw_references().
    The BigQuery implementation is more complex, handling:
    - hyphenated table references
    - quoted or not quoted table references
    """
    query = f"SELECT bar.user_id FROM {table_reference}"
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(query)
    parsed = Parser(config=config).parse(tokens)
    for table_reference in parsed.recursive_crawl("table_reference"):
        actual_reference_parts = [
            orp.part for orp in table_reference.iter_raw_references()
        ]
        assert reference_parts == actual_reference_parts
sqlfluff-3.4.2/test/dialects/conftest.py000066400000000000000000000104741503426445100203220ustar00rootroot00000000000000"""Sharing fixtures to test the dialects."""

import logging

import pytest

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.core.parser import BaseSegment, Lexer
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.matchable import Matchable


def lex(raw, config):
    """Basic parsing for the tests below."""
    # Set up the lexer
    lex = Lexer(config=config)
    # Lex the string for matching. For a good test, this would
    # arguably happen as a fixture, but it's easier to pass strings
    # as parameters than pre-lexed segment strings.
    segments, vs = lex.lex(raw)
    assert not vs
    print(segments)
    return segments


def validate_segment(segmentref, config):
    """Get and validate segment for tests below."""
    Seg = config.get("dialect_obj").ref(segmentref)
    if isinstance(Seg, Matchable):
        return Seg
    try:
        if issubclass(Seg, BaseSegment):
            return Seg
    except TypeError:
        pass
    raise TypeError(
        "{} is not of type Segment or Matchable. Test is invalid.".format(segmentref)
    )


def _dialect_specific_segment_parses(dialect, segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function
    although this will be a recursive parse and so the match
    function of SUBSECTIONS will be tested if present. The match
    function of the parent will not be tested.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    # Most segments won't handle the end of file marker. We should strip it.
    if segments[-1].is_type("end_of_file"):
        segments = segments[:-1]
    ctx = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        result = Seg.match(segments, 0, parse_context=ctx)

    assert isinstance(result, MatchResult)
    parsed = result.apply(segments)
    assert len(parsed) == 1
    print(parsed)
    parsed = parsed[0]

    # Check we get a good response
    print(parsed)
    print(type(parsed))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs


def _dialect_specific_segment_not_match(dialect, segmentref, raw, caplog):
    """Test that specific segments do not match.

    NB: We're testing the MATCH function not the PARSE function.
    This is the opposite to the above.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    ctx = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        match = Seg.match(segments, 0, parse_context=ctx)

    assert not match


def _validate_dialect_specific_statements(dialect, segment_cls, raw, stmt_count):
    """This validates one or multiple statements against the specified segment class.

    It also validates that the number of parsed statements matches the
    number of expected statements.
    """
    lnt = Linter(dialect=dialect)
    parsed = lnt.parse_string(raw)
    assert len(parsed.violations) == 0

    # Find any unparsable statements
    typs = parsed.tree.type_set()
    assert "unparsable" not in typs

    # Find the expected type in the parsed segment
    child_segments = [seg for seg in parsed.tree.recursive_crawl(segment_cls.type)]
    assert len(child_segments) == stmt_count

    # Check if all child segments are the correct type
    for c in child_segments:
        assert isinstance(c, segment_cls)


@pytest.fixture()
def dialect_specific_segment_parses():
    """Fixture to check specific segments of a dialect."""
    return _dialect_specific_segment_parses


@pytest.fixture()
def dialect_specific_segment_not_match():
    """Check specific segments of a dialect which will not match to a segment."""
    return _dialect_specific_segment_not_match


@pytest.fixture()
def validate_dialect_specific_statements():
    """This validates one or multiple statements against the specified segment class.

    It also validates that the number of parsed statements matches the
    number of expected statements.
    """
    return _validate_dialect_specific_statements
sqlfluff-3.4.2/test/dialects/dialects_test.py000066400000000000000000000127671503426445100213300ustar00rootroot00000000000000"""Automated tests for all dialects.

Any files in the test/fixtures/dialects/ directory will be picked up
and automatically tested against the appropriate dialect.
"""

from typing import Any, Optional

import pytest

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.core.linter import ParsedString, RenderedFile
from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.core.templaters import TemplatedFile

from ..conftest import (
    compute_parse_tree_hash,
    get_parse_fixtures,
    load_file,
    make_dialect_path,
    parse_example_file,
)

parse_success_examples, parse_structure_examples = get_parse_fixtures(
    fail_on_missing_yml=True
)


def lex_and_parse(config_overrides: dict[str, Any], raw: str) -> Optional[ParsedString]:
    """Performs a Lex and Parse, with cacheable inputs within fixture."""
    # Load the right dialect
    config = FluffConfig(overrides=config_overrides)
    # Construct rendered file (to skip the templater)
    templated_file = TemplatedFile.from_string(raw)
    rendered_file = RenderedFile(
        [templated_file],
        [],
        config,
        {},
        templated_file.fname,
        "utf8",
        raw,
    )
    # Parse (which includes lexing)
    linter = Linter(config=config)
    parsed_file = linter.parse_rendered(rendered_file)
    if not raw:  # Empty file case
        # We're just checking there aren't exceptions in this case.
        return None
    # Check we managed to parse
    assert parsed_file.tree
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in parsed_file.tree.raw_segments) == raw
    # Check we don't have lexing or parsing issues
    assert not parsed_file.violations
    return parsed_file


@pytest.mark.integration
@pytest.mark.parse_suite
@pytest.mark.parametrize("dialect,file", parse_success_examples)
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    config_overrides = dict(dialect=dialect)
    # Use the helper function to avoid parsing twice
    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
    if not parsed:  # Empty file case
        return
    # Check we're all there.
    assert parsed.tree.raw == raw
    # Check that there's nothing unparsable
    types = parsed.tree.type_set()
    assert "unparsable" not in types
    # When testing the validity of fixes we re-parse sections of the file.
    # To ensure this is safe - here we re-parse the unfixed file to ensure
    # it's still valid even in the case that no fixes have been applied.
    assert parsed.tree.validate_segment_with_reparse(parsed.config.get("dialect_obj"))


@pytest.mark.integration
@pytest.mark.fix_suite
@pytest.mark.parametrize("dialect,file", parse_success_examples)
def test__dialect__base_broad_fix(
    dialect, file, raise_critical_errors_after_fix, caplog
):
    """Run a full fix with all rules, in search of critical errors.

    NOTE: This suite does all of the same things as the above test suite
    (the `parse_suite`), but also runs fix. In CI, we run the above tests
    _with_ coverage tracking, but these we run _without_.

    The purpose of this test is as a more stretching run through a wide
    range of test sql examples, and the full range of rules to find any
    potential critical errors raised by any interactions between different
    dialects and rules.

    We also do not use DEBUG logging here because it gets _very_ noisy.
    """
    raw = load_file(dialect, file)
    config_overrides = dict(dialect=dialect)

    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
    if not parsed:  # Empty file case
        return
    print(parsed.tree.stringify())

    config = FluffConfig(overrides=config_overrides)
    linter = Linter(config=config)
    rule_pack = linter.get_rulepack()
    # Due to the "raise_critical_errors_after_fix" fixture, "fix" will now throw.
    linter.lint_parsed(
        parsed,
        rule_pack,
        fix=True,
    )


@pytest.mark.integration
@pytest.mark.parse_suite
@pytest.mark.parametrize("dialect,sqlfile,code_only,yamlfile", parse_structure_examples)
def test__dialect__base_parse_struct(
    dialect,
    sqlfile,
    code_only,
    yamlfile,
    yaml_loader,
):
    """For given test examples, check parsed structure against yaml."""
    parsed: Optional[BaseSegment] = parse_example_file(dialect, sqlfile)
    actual_hash = compute_parse_tree_hash(parsed)
    # Load the YAML
    expected_hash, res = yaml_loader(make_dialect_path(dialect, yamlfile))
    if not parsed:
        assert parsed == res
        return

    # Verify the current parse tree matches the historic parse tree.
    parsed_tree = parsed.to_tuple(code_only=code_only, show_raw=True)
    # The parsed tree consists of a tuple of "File:", followed by the
    # statements. So only compare when there is at least one statement.
    if parsed_tree[1] or res[1]:
        assert parsed_tree == res
    # Verify the current hash matches the historic hash. The main purpose of
    # this check is to force contributors to use the generator script to
    # create these files. New contributors have sometimes been unaware of
    # this tool and have attempted to craft the YAML files manually. This
    # can lead to slight differences, confusion, and errors.
    assert expected_hash == actual_hash, (
        "Parse tree hash does not match. Please run "
        "'python test/generate_parse_fixture_yml.py' to create YAML files "
        "in test/fixtures/dialects."
    )
sqlfluff-3.4.2/test/dialects/exasol_test.py000066400000000000000000000010621503426445100210200ustar00rootroot00000000000000"""Tests specific to the exasol dialect."""

import pytest

TEST_DIALECT = "exasol"

# Develop test to check specific elements against specific grammars.
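

# Illustrative sketch (an assumed example, not itself part of the suite):
# the conftest fixture used below performs roughly this check for each
# (segmentref, raw) pair, but matched against a specific segment class.
def _example_exasol_parse_check(raw="SELECT var1 FROM my_table"):
    """Check a raw exasol snippet parses with no unparsable sections."""
    from sqlfluff.core import Linter  # Local import: this module only imports pytest.

    parsed = Linter(dialect=TEST_DIALECT).parse_string(raw)
    assert "unparsable" not in parsed.tree.type_set()
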
@pytest.mark.parametrize(
    "segmentref,raw",
    [
        ("RangeOperator", ".."),
        ("WalrusOperatorSegment", ":="),
        ("VariableNameSegment", "var1"),
    ],
)
def test_dialect_exasol_specific_segment_parses(
    segmentref, raw, caplog, dialect_specific_segment_parses
):
    """Test exasol specific segments."""
    dialect_specific_segment_parses(TEST_DIALECT, segmentref, raw, caplog)
sqlfluff-3.4.2/test/dialects/flink_test.py000066400000000000000000000236531503426445100206400ustar00rootroot00000000000000"""Tests for the FlinkSQL dialect."""

from sqlfluff.core import FluffConfig, Linter


class TestFlinkSQLDialect:
    """Test FlinkSQL dialect parsing."""

    def test_flink_dialect_basic(self):
        """Test basic FlinkSQL dialect functionality."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        # Test simple SELECT statement
        sql = "SELECT * FROM my_table;\n"
        result = linter.lint_string(sql)
        assert result is not None
        # Check for parsing errors only, ignore style warnings
        parsing_errors = [v for v in result.violations if v.rule.code.startswith("PRS")]
        assert len(parsing_errors) == 0

    def test_flink_create_table_basic(self):
        """Test basic CREATE TABLE statement."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            name STRING,
            age INT
        ) WITH (
            'connector' = 'kafka',
            'topic' = 'my-topic'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None
        # Allow for some parsing issues initially

    def test_flink_row_data_type(self):
        """Test FlinkSQL ROW data type."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            nested_data ROW<name STRING, age INT>
        ) WITH (
            'connector' = 'kafka'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None

    def test_flink_timestamp_with_precision(self):
        """Test FlinkSQL TIMESTAMP with precision."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            event_time TIMESTAMP(3),
            processing_time TIMESTAMP_LTZ(3)
        ) WITH (
            'connector' = 'kafka'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None

    def test_flink_watermark_definition(self):
        """Test FlinkSQL WATERMARK definition."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            event_time TIMESTAMP(3),
            WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
        ) WITH (
            'connector' = 'kafka'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None

    def test_flink_computed_column(self):
        """Test FlinkSQL computed column."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            name STRING,
            full_name AS CONCAT(name, '_suffix')
        ) WITH (
            'connector' = 'kafka'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None

    def test_flink_metadata_column(self):
        """Test FlinkSQL metadata column."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        sql = """
        CREATE TABLE my_table (
            id INT,
            name STRING,
            kafka_offset BIGINT METADATA FROM 'offset'
        ) WITH (
            'connector' = 'kafka'
        )
        """
        result = linter.lint_string(sql)
        assert result is not None

    def test_flink_show_statements(self):
        """Test FlinkSQL SHOW statements."""
        config = FluffConfig(overrides={"dialect": "flink"})
        linter = Linter(config=config)

        statements = [
            "SHOW CATALOGS",
            "SHOW DATABASES",
            "SHOW TABLES",
            "SHOW VIEWS",
            "SHOW FUNCTIONS",
            "SHOW MODULES",
            "SHOW JARS",
            "SHOW JOBS",
        ]
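        # Lint each statement in turn. Only the presence of a result object
        # is asserted below, so style violations are tolerated here.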
for sql in statements: result = linter.lint_string(sql) assert result is not None def test_flink_use_statements(self): """Test FlinkSQL USE statements.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) statements = [ "USE CATALOG my_catalog", "USE my_database", "USE my_catalog.my_database", ] for sql in statements: result = linter.lint_string(sql) assert result is not None def test_flink_describe_statement(self): """Test FlinkSQL DESCRIBE statement.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = "DESCRIBE my_table" result = linter.lint_string(sql) assert result is not None def test_flink_explain_statement(self): """Test FlinkSQL EXPLAIN statement.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = "EXPLAIN SELECT * FROM my_table" result = linter.lint_string(sql) assert result is not None def test_flink_create_catalog(self): """Test FlinkSQL CREATE CATALOG statement.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE CATALOG my_catalog WITH ( 'type' = 'hive', 'hive-conf-dir' = '/path/to/hive/conf' ) """ result = linter.lint_string(sql) assert result is not None def test_flink_create_database(self): """Test FlinkSQL CREATE DATABASE statement.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE DATABASE IF NOT EXISTS my_db COMMENT 'My database' WITH ( 'key1' = 'value1' ) """ result = linter.lint_string(sql) assert result is not None def test_flink_alternative_with_syntax(self): """Test FlinkSQL WITH clause using alternative double equals syntax.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE TABLE test_table ( data_info ROW<`info` STRING>, name STRING, score DOUBLE, total_count DOUBLE, active_count DOUBLE, metadata ROW<`details` STRING>, change_rate DOUBLE, volume DOUBLE, change_percentage DOUBLE, updated_at TIMESTAMP(3), category STRING ) WITH ( connector == 'test-connector', environment == 'development' ) """ result = linter.lint_string(sql) assert result is not None class TestFlinkSQLComplexExamples: """Test FlinkSQL with complex examples covering various features.""" def test_flink_row_datatype_table(self): """Test FlinkSQL table with ROW data types and connector options.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE TABLE table1 ( data_info ROW<`name` STRING>, email STRING, score DOUBLE, total_points DOUBLE, active_points DOUBLE, metadata ROW<`description` STRING>, change_percentage DOUBLE, volume DOUBLE, rate_change_percentage DOUBLE, last_updated TIMESTAMP(3), status STRING ) WITH ( 'connector' = 'test-connector', 'project' = 'test-project', 'dataset' = 'test-dataset' ) """ result = linter.lint_string(sql) assert result is not None def test_flink_complex_table_structure(self): """Test FlinkSQL table with complex structure and multiple data types.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE TABLE table2 ( session_id STRING, session_ts TIMESTAMP(3), source_name STRING, service STRING, category STRING, category_id STRING, type STRING, type_id STRING, identifier STRING, identifier_id STRING, event_type STRING, action_type STRING, resource_type STRING, value DOUBLE, quantity DOUBLE, request_url STRING, is_deleted BOOLEAN, item_count INT, created_ts TIMESTAMP(3), updated_ts TIMESTAMP(3), 
processed_ts TIMESTAMP(3), received_ts TIMESTAMP(3), sequence_ts TIMESTAMP(3) ) WITH ( 'connector' = 'test-connector', 'project' = 'test-project', 'dataset' = 'test-dataset', 'table' = 'test-table' ) """ result = linter.lint_string(sql) assert result is not None def test_flink_simple_record_table(self): """Test FlinkSQL table with simple record structure.""" config = FluffConfig(overrides={"dialect": "flink"}) linter = Linter(config=config) sql = """ CREATE TABLE table3 ( service STRING, type STRING, from_id STRING, to_id STRING, amount DOUBLE, quantity DOUBLE, executed_at TIMESTAMP(3), id STRING, request_url STRING, direction STRING, client_received_timestamp TIMESTAMP(3), job_timestamp TIMESTAMP(3), job_id STRING, processor STRING ) WITH ( 'connector' = 'test-connector', 'project' = 'test-project', 'dataset' = 'test-dataset', 'table' = 'test-records' ) """ result = linter.lint_string(sql) assert result is not None sqlfluff-3.4.2/test/dialects/postgres_test.py000066400000000000000000000115661503426445100214050ustar00rootroot00000000000000"""Tests specific to the postgres dialect.""" from typing import Callable import pytest from _pytest.logging import LogCaptureFixture from sqlfluff.core import FluffConfig, Linter from sqlfluff.dialects.dialect_postgres_keywords import ( get_keywords, priority_keyword_merge, ) @pytest.mark.parametrize( "segment_reference,raw", [ # AT TIME ZONE constructs ("SelectClauseElementSegment", "c_column AT TIME ZONE 'UTC'"), ("SelectClauseElementSegment", "(c_column AT TIME ZONE 'UTC')::time"), ( "SelectClauseElementSegment", "timestamp with time zone '2021-10-01' AT TIME ZONE 'UTC'", ), # Notnull and Isnull ("ExpressionSegment", "c is null"), ("ExpressionSegment", "c is not null"), ("ExpressionSegment", "c isnull"), ("ExpressionSegment", "c notnull"), ("SelectClauseElementSegment", "c is null as c_isnull"), ("SelectClauseElementSegment", "c is not null as c_notnull"), ("SelectClauseElementSegment", "c isnull as c_isnull"), ("SelectClauseElementSegment", "c notnull as c_notnull"), ("ArrayAccessorSegment", "[2:10]"), ("ArrayAccessorSegment", "[:10]"), ("ArrayAccessorSegment", "[2:]"), ("ArrayAccessorSegment", "[2]"), ], ) def test_dialect_postgres_specific_segment_parses( segment_reference: str, raw: str, caplog: LogCaptureFixture, dialect_specific_segment_parses: Callable, ) -> None: """Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested. 
""" dialect_specific_segment_parses("postgres", segment_reference, raw, caplog) @pytest.mark.parametrize( "raw", [ "SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime) AS myepoch FROM t1", "SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime - t1.othertime) AS myepoch " "FROM t1", ], ) def test_epoch_datetime_unit(raw: str) -> None: """Test the EPOCH keyword for postgres dialect.""" # Don't test for new lines or capitalisation cfg = FluffConfig( configs={"core": {"exclude_rules": "LT12,LT05,LT09", "dialect": "postgres"}} ) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert result.num_violations() == 0 @pytest.mark.parametrize( "raw", [ "SELECT foo AS space FROM t1", "SELECT space.something FROM t1 AS space", ], ) def test_space_is_not_reserved(raw: str) -> None: """Ensure that SPACE is not treated as reserved.""" cfg = FluffConfig( configs={"core": {"exclude_rules": "LT12,LT05,AL07", "dialect": "postgres"}} ) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert result.num_violations() == 0 def test_priority_keyword_merge() -> None: """Test merging on keyword lists works as expected.""" kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] result = priority_keyword_merge(kw_list_1, kw_list_2) expected_result = [("A", "reserved"), ("B", "non-reserved"), ("C", "non-reserved")] assert sorted(result) == sorted(expected_result) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] result_2 = priority_keyword_merge(kw_list_2, kw_list_1) expected_result_2 = [ ("A", "not-keyword"), ("B", "non-reserved"), ("C", "non-reserved"), ] assert sorted(result_2) == sorted(expected_result_2) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] kw_list_3 = [("B", "reserved")] result_3 = priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3) expected_result_3 = [("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved")] assert sorted(result_3) == sorted(expected_result_3) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] result_4 = priority_keyword_merge(kw_list_1) expected_result_4 = kw_list_1 assert sorted(result_4) == sorted(expected_result_4) def test_get_keywords() -> None: """Test keyword filtering works as expected.""" kw_list = [ ("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved"), ("D", "not-keyword"), ("E", "non-reserved-(cannot-be-function-or-type)"), ] expected_result = ["A", "D"] assert sorted(get_keywords(kw_list, "not-keyword")) == sorted(expected_result) expected_result_2 = ["C", "E"] assert sorted(get_keywords(kw_list, "non-reserved")) == sorted(expected_result_2) expected_result_3 = ["B"] assert sorted(get_keywords(kw_list, "reserved")) == sorted(expected_result_3) sqlfluff-3.4.2/test/dialects/snowflake_test.py000066400000000000000000000053451503426445100215260ustar00rootroot00000000000000"""Tests specific to the snowflake dialect.""" import pytest from sqlfluff.core import Linter from sqlfluff.core.dialects import dialect_selector # Deprecated: All new tests should be added as .sql and .yml files under # `test/fixtures/dialects/snowflake`. # See test/fixtures/dialects/README.md for more details. 
@pytest.mark.parametrize( "segment_cls,raw", [ ( "CreateCloneStatementSegment", "create table orders_clone_restore clone orders at (timestamp => " "to_timestamp_tz('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss'));", ), ("ShowStatementSegment", "SHOW GRANTS ON ACCOUNT;"), ("ShowStatementSegment", "show tables history in tpch.public;"), ("ShowStatementSegment", "show future grants in schema sales.public;"), ( "ShowStatementSegment", "show replication databases with primary aws_us_west_2.myaccount1.mydb1;", ), ( "ShowStatementSegment", "SHOW TERSE SCHEMAS HISTORY LIKE '%META%' IN DATABASE MYDB STARTS WITH " "'INT' LIMIT 10 FROM 'LAST_SCHEMA';", ), ("ShowStatementSegment", "SHOW GRANTS TO ROLE SECURITYADMIN;"), ("ShowStatementSegment", "SHOW GRANTS OF SHARE MY_SHARE;"), # Testing https://github.com/sqlfluff/sqlfluff/issues/634 ( "SemiStructuredAccessorSegment", "SELECT ID :: VARCHAR as id, OBJ : userId :: VARCHAR as user_id from x", ), ("DropUserStatementSegment", "DROP USER my_user;"), ("AlterSessionStatementSegment", "ALTER SESSION SET TIMEZONE = 'UTC'"), ( "AlterSessionStatementSegment", "ALTER SESSION SET ABORT_DETACHED_QUERY = FALSE", ), ("AlterSessionStatementSegment", "ALTER SESSION SET JSON_INDENT = 5"), ( "AlterSessionStatementSegment", "ALTER SESSION UNSET ERROR_ON_NONDETERMINISTIC_MERGE;", ), ( "AlterSessionStatementSegment", "ALTER SESSION UNSET TIME_OUTPUT_FORMAT, TWO_DIGIT_CENTURY_START;", ), ], ) def test_snowflake_queries(segment_cls, raw, caplog): """Test snowflake specific queries parse.""" lnt = Linter(dialect="snowflake") parsed = lnt.parse_string(raw) print(parsed.violations) assert len(parsed.violations) == 0 # Find any unparsable statements typs = parsed.tree.type_set() assert "unparsable" not in typs # Find the expected type in the parsed segment seg_type = dialect_selector("snowflake").get_segment(segment_cls).type child_segments = [seg for seg in parsed.tree.recursive_crawl(seg_type)] assert len(child_segments) > 0 # If we get here the raw statement was parsed as expected sqlfluff-3.4.2/test/dialects/soql_test.py000066400000000000000000000013571503426445100205120ustar00rootroot00000000000000"""Tests specific to the soql dialect.""" import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLParseError @pytest.mark.parametrize( "raw", [ "ALTER TABLE foo DROP COLUMN bar\n", "CREATE USER my_user\n", "TRUNCATE TABLE foo\n", "EXPLAIN SELECT Id FROM Contact\n", "DROP TABLE foo\n", "DROP USER my_user\n", ], ) def test_non_selects_unparseable(raw: str) -> None: """Test that non-SELECT commands are not parseable.""" cfg = FluffConfig(configs={"core": {"dialect": "soql"}}) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert len(result.violations) == 1 assert isinstance(result.violations[0], SQLParseError) sqlfluff-3.4.2/test/dialects/unparsable_test.py000066400000000000000000000135301503426445100216640ustar00rootroot00000000000000"""Test the behaviour of the unparsable routines.""" from typing import Any, Optional import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.parser import BaseSegment, Lexer, RawSegment from sqlfluff.core.parser.context import ParseContext # NOTE: Being specific on the segment ref helps to avoid crazy nesting. @pytest.mark.parametrize( "segmentref,dialect,raw,structure", [ ( # The first here makes sure all of this works from the outer # segment, but for other tests we should aim to be more specific. 
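            # (Each expected `structure` is the (type, children) tuple form
            # produced by `.to_tuple(show_raw=True)` on the parsed segment,
            # as asserted at the end of the test below.)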
            None,
            "ansi",
            "SELECT 1 1",
            (
                "file",
                (
                    (
                        "statement",
                        (
                            (
                                "select_statement",
                                (
                                    (
                                        "select_clause",
                                        (
                                            ("keyword", "SELECT"),
                                            ("whitespace", " "),
                                            (
                                                "select_clause_element",
                                                (("numeric_literal", "1"),),
                                            ),
                                            ("whitespace", " "),
                                            (
                                                "unparsable",
                                                (("numeric_literal", "1"),),
                                            ),
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
        (
            "SelectClauseSegment",
            "ansi",
            "SELECT 1 1",
            (
                "select_clause",
                (
                    ("keyword", "SELECT"),
                    ("whitespace", " "),
                    (
                        "select_clause_element",
                        (("numeric_literal", "1"),),
                    ),
                    ("whitespace", " "),
                    # We should get a single unparsable section
                    # here at the end.
                    (
                        "unparsable",
                        (("numeric_literal", "1"),),
                    ),
                ),
            ),
        ),
        # This more complex example looks a little strange, but does
        # reflect current unparsable behaviour. During future work
        # on the parser, the structure of this result may change
        # but it should still result in an unparsable section _within_
        # the brackets, and not just a totally unparsable statement.
        (
            "SelectClauseSegment",
            "ansi",
            "SELECT 1 + (2 2 2)",
            (
                "select_clause",
                (
                    ("keyword", "SELECT"),
                    ("whitespace", " "),
                    (
                        "select_clause_element",
                        (
                            (
                                "expression",
                                (
                                    ("numeric_literal", "1"),
                                    ("whitespace", " "),
                                    ("binary_operator", "+"),
                                    ("whitespace", " "),
                                    (
                                        "bracketed",
                                        (
                                            ("start_bracket", "("),
                                            ("expression", (("numeric_literal", "2"),)),
                                            ("whitespace", " "),
                                            (
                                                "unparsable",
                                                (
                                                    ("numeric_literal", "2"),
                                                    ("whitespace", " "),
                                                    ("numeric_literal", "2"),
                                                ),
                                            ),
                                            ("end_bracket", ")"),
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
    ],
)
def test_dialect_unparsable(
    segmentref: Optional[str], dialect: str, raw: str, structure: Any
):
    """Test the structure of unparsables."""
    config = FluffConfig(overrides=dict(dialect=dialect))

    # Get the referenced object (if set, otherwise root)
    if segmentref:
        Seg = config.get("dialect_obj").ref(segmentref)
    else:
        Seg = config.get("dialect_obj").get_root_segment()
    # We only allow BaseSegments as matchables in this test.
    assert issubclass(Seg, BaseSegment)
    assert not issubclass(Seg, RawSegment)

    # Lex the raw string.
    lex = Lexer(config=config)
    segments, vs = lex.lex(raw)
    assert not vs

    # Strip the end of file token if it's there. It will
    # confuse most segments.
    if segmentref and segments[-1].is_type("end_of_file"):
        segments = segments[:-1]

    ctx = ParseContext.from_config(config)
    # Match against the segment.
    match = Seg.match(segments, 0, ctx)
    result = match.apply(segments)
    assert len(result) == 1
    parsed = result[0]
    assert isinstance(parsed, Seg)

    assert parsed.to_tuple(show_raw=True) == structure
sqlfluff-3.4.2/test/diff_quality_plugin_test.py000066400000000000000000000043741503426445100220030ustar00rootroot00000000000000"""Tests for the SQLFluff integration with the "diff-quality" tool."""

import sys
from pathlib import Path

import pytest

from sqlfluff import diff_quality_plugin
from sqlfluff.cli.commands import lint
from sqlfluff.utils.testing.cli import invoke_assert_code


@pytest.mark.parametrize(
    "sql_paths,expected_violations_lines",
    [
        (("linter/indentation_errors.sql",), list(range(2, 7))),
        (("linter/parse_error.sql",), {1}),
        # NB: This version of the file is in a directory configured
        # to ignore parsing errors.
(("linter/diffquality/parse_error.sql",), []), (tuple(), []), ], ) def test_diff_quality_plugin(sql_paths, expected_violations_lines, monkeypatch): """Test the plugin at least finds errors on the expected lines.""" def execute(command, exit_codes): printable_command_parts = [ c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c for c in command ] result = invoke_assert_code( ret_code=1 if expected_violations_lines else 0, args=[ lint, printable_command_parts[2:], ], ) return result.output, "" # Mock the execute function -- this is an attempt to prevent the CircleCI # coverage check from hanging. (We've seen issues in the past where using # subprocesses caused things to occasionally hang.) monkeypatch.setattr(diff_quality_plugin, "execute", execute) monkeypatch.chdir("test/fixtures/") violation_reporter = diff_quality_plugin.diff_cover_report_quality( options="--processes=1" ) assert len(sql_paths) in (0, 1) sql_paths = [str(Path(sql_path)) for sql_path in sql_paths] violations_dict = violation_reporter.violations_batch(sql_paths) assert isinstance(violations_dict, dict) if expected_violations_lines: assert len(violations_dict[sql_paths[0]]) > 0 violations_lines = {v.line for v in violations_dict[sql_paths[0]]} for expected_line in expected_violations_lines: assert expected_line in violations_lines else: assert ( len(violations_dict[sql_paths[0]]) == 0 if sql_paths else len(violations_dict) == 0 ) sqlfluff-3.4.2/test/fixtures/000077500000000000000000000000001503426445100161765ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/.sqlfluff000066400000000000000000000000321503426445100200140ustar00rootroot00000000000000[sqlfluff] dialect = ansi sqlfluff-3.4.2/test/fixtures/api/000077500000000000000000000000001503426445100167475ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/config_dialect/000077500000000000000000000000001503426445100217015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/config_dialect/.sqlfluff000066400000000000000000000000341503426445100235210ustar00rootroot00000000000000[sqlfluff] dialect = duckdb sqlfluff-3.4.2/test/fixtures/api/config_dialect/config_dialect.sql000066400000000000000000000000271503426445100253530ustar00rootroot00000000000000FROM tab WHERE a = 1; sqlfluff-3.4.2/test/fixtures/api/config_dialect/config_dialect_fix.sql000066400000000000000000000000261503426445100262200ustar00rootroot00000000000000FROM tab WHERE a = 1; sqlfluff-3.4.2/test/fixtures/api/config_dialect/config_dialect_lint_lt01.json000066400000000000000000000012621503426445100274150ustar00rootroot00000000000000[ { "code": "LT01", "description": "Expected only single space before naked identifier. 
Found ' '.", "end_file_pos": 6, "end_line_no": 1, "end_line_pos": 7, "fixes": [ { "edit": " ", "end_file_pos": 6, "end_line_no": 1, "end_line_pos": 7, "start_file_pos": 4, "start_line_no": 1, "start_line_pos": 5, "type": "replace" } ], "name": "layout.spacing", "start_file_pos": 4, "start_line_no": 1, "start_line_pos": 5, "warning": false } ] sqlfluff-3.4.2/test/fixtures/api/config_dialect/config_dialect_lint_prs.json000066400000000000000000000005371503426445100274450ustar00rootroot00000000000000[ { "code": "PRS", "description": "Line 1, Position 1: Found unparsable section: 'FROM tab\\nWHERE a = 1;'", "end_file_pos": 22, "end_line_no": 2, "end_line_pos": 13, "name": "", "start_file_pos": 0, "start_line_no": 1, "start_line_pos": 1, "warning": false } ] sqlfluff-3.4.2/test/fixtures/api/config_dialect/config_dialect_parse.json000066400000000000000000000030541503426445100267220ustar00rootroot00000000000000{ "file": { "statement": { "select_statement": { "from_clause": { "keyword": "FROM", "whitespace": " ", "from_expression": { "from_expression_element": { "table_expression": { "table_reference": { "naked_identifier": "tab" } } } } }, "newline": "\n", "where_clause": { "keyword": "WHERE", "whitespace": " ", "expression": [ { "column_reference": { "naked_identifier": "a" } }, { "whitespace": " " }, { "comparison_operator": { "raw_comparison_operator": "=" } }, { "whitespace": " " }, { "numeric_literal": "1" } ] } } }, "statement_terminator": ";", "newline": "\n" } } sqlfluff-3.4.2/test/fixtures/api/config_override/000077500000000000000000000000001503426445100221135ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/config_override/.sqlfluff000066400000000000000000000000451503426445100237350ustar00rootroot00000000000000[sqlfluff] exclude_rules = RF02,RF04 sqlfluff-3.4.2/test/fixtures/api/config_path_test/000077500000000000000000000000001503426445100222675ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/config_path_test/config_path_test.json000066400000000000000000000020071503426445100265010ustar00rootroot00000000000000{ "file": { "statement": { "select_statement": { "select_clause": { "keyword": "SELECT", "whitespace": " ", "select_clause_element": { "column_reference": { "naked_identifier": "foo" } } }, "whitespace": " ", "from_clause": { "keyword": "FROM", "whitespace": " ", "from_expression": { "from_expression_element": { "table_expression": { "table_reference": { "naked_identifier": "bar" } } } } } } }, "statement_terminator": ";", "newline": "\n" } } sqlfluff-3.4.2/test/fixtures/api/config_path_test/config_path_test.sql000066400000000000000000000000421503426445100263240ustar00rootroot00000000000000SELECT foo FROM {{ table_name }}; sqlfluff-3.4.2/test/fixtures/api/config_path_test/extra_configs/000077500000000000000000000000001503426445100251225ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/config_path_test/extra_configs/.sqlfluff000066400000000000000000000000621503426445100267430ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] table_name=bar sqlfluff-3.4.2/test/fixtures/api/parse_test/000077500000000000000000000000001503426445100211205ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/api/parse_test/parse_test.json000066400000000000000000000044761503426445100241770ustar00rootroot00000000000000{ "file": { "statement": { "select_statement": { "select_clause": [ { "keyword": "SeLEct" }, { "whitespace": " " }, { "select_clause_element": { "wildcard_expression": { "wildcard_identifier": { "star": "*" } } } }, { "comma": "," }, { 
"whitespace": " " }, { "select_clause_element": { "numeric_literal": "1" } }, { "comma": "," }, { "whitespace": " " }, { "select_clause_element": { "column_reference": { "naked_identifier": "blah" }, "whitespace": " ", "alias_expression": { "alias_operator": { "keyword": "as" }, "whitespace": " ", "naked_identifier": "fOO" } } } ], "whitespace": " ", "from_clause": { "keyword": "from", "whitespace": " ", "from_expression": { "from_expression_element": { "table_expression": { "table_reference": { "naked_identifier": "myTable" } } } } } } } } } sqlfluff-3.4.2/test/fixtures/cli/000077500000000000000000000000001503426445100167455ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/.gitignore000066400000000000000000000000531503426445100207330ustar00rootroot00000000000000# Results of fixed tests fail_many_fix.sql sqlfluff-3.4.2/test/fixtures/cli/disable_noqa_test.sql000066400000000000000000000005431503426445100231500ustar00rootroot00000000000000-- Test to verify that --disable-noqa CLI option -- allows for inline noqa comments to be ignored. -- NOTE: two noqas so that we can also test --warn-unused-ignores SELECT col_a AS a, --noqa: CP01 col_b as b, --noqa: CP01 UPPER(col_b) as c, --noqa: CP01 lower(col_b) as d, --noqa: CP01, CP03 lower(col_b) as e --noqa: core FROM t; sqlfluff-3.4.2/test/fixtures/cli/encoding_test.sql000066400000000000000000000001321503426445100223070ustar00rootroot00000000000000-- This file is encoded in utf-8-SIG SELECT foo FROM bar; -- utf-8-SIG comment → sqlfluff-3.4.2/test/fixtures/cli/extra_config_tsql.sql000066400000000000000000000001271503426445100232010ustar00rootroot00000000000000-- Some tsql specific sql to test config cli argument. BEGIN SELECT 'Weekend'; END sqlfluff-3.4.2/test/fixtures/cli/extra_configs/000077500000000000000000000000001503426445100216005ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/extra_configs/.sqlfluff000066400000000000000000000000321503426445100234160ustar00rootroot00000000000000[sqlfluff] dialect = tsql sqlfluff-3.4.2/test/fixtures/cli/extra_configs/pyproject.toml000066400000000000000000000000461503426445100245140ustar00rootroot00000000000000[tool.sqlfluff.core] dialect = "tsql" sqlfluff-3.4.2/test/fixtures/cli/fail_many.sql000066400000000000000000000001771503426445100214320ustar00rootroot00000000000000-- File which fails on templating and lexing errors. SELECT {{ something }} as trailing_space , 3 + FROM SELECT FROM sqlfluff-3.4.2/test/fixtures/cli/ignore_local_config/000077500000000000000000000000001503426445100227275ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/ignore_local_config/.sqlfluff000066400000000000000000000000401503426445100245440ustar00rootroot00000000000000[sqlfluff] exclude_rules = AL02 sqlfluff-3.4.2/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql000066400000000000000000000002641503426445100304730ustar00rootroot00000000000000-- This query raises AL02. -- We exclude this rule in the .sqlfluff file and then test -- ignoring this config file via the --ignore-local-config CLI flag. 
SELECT col_a a FROM foo sqlfluff-3.4.2/test/fixtures/cli/jinja_fatal_fail.sql000066400000000000000000000000261503426445100227210ustar00rootroot00000000000000select {{ 1 > "foo"}} sqlfluff-3.4.2/test/fixtures/cli/jinja_variants.sql000066400000000000000000000002471503426445100224730ustar00rootroot00000000000000-- sqlfluff:render_variant_limit:10 select a, {% if 1 > 2 %} B, {% elif 3 > 1 %} C, {% else %} D, {% endif %} d from e sqlfluff-3.4.2/test/fixtures/cli/passing_a.sql000066400000000000000000000000541503426445100214310ustar00rootroot00000000000000SELECT tbl.name, tbl.value FROM tbl sqlfluff-3.4.2/test/fixtures/cli/passing_b.sql000066400000000000000000000004751503426445100214370ustar00rootroot00000000000000SELECT tbl.name, b.value, /* This is a block comment */ d.something, -- With a comment after it tbl.foo, c.val + b.val / -2 AS a_calculation FROM tbl INNER JOIN b ON (tbl.common_id = b.common_id) JOIN c ON (tbl.id = c.id) LEFT JOIN d ON (tbl.id = d.other_id) ORDER BY tbl.name ASC sqlfluff-3.4.2/test/fixtures/cli/passing_timing.sql000066400000000000000000000007651503426445100225070ustar00rootroot00000000000000-- NOTE: This query is duplicated many times so the test -- takes longer and can effectively measure timing routines. {% for i in range(10) %} SELECT tbl.name, b.value, /* This is a block comment */ d.something, -- With a comment after it tbl.foo, d.val + b.val / -2 AS a_calculation FROM tbl INNER JOIN b ON (tbl.common_id = b.common_id) LEFT JOIN d ON (tbl.id = d.other_id) ORDER BY tbl.name ASC; {% endfor %} sqlfluff-3.4.2/test/fixtures/cli/stdin_filename/000077500000000000000000000000001503426445100217265ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/stdin_filename/.sqlfluff000066400000000000000000000000341503426445100235460ustar00rootroot00000000000000[sqlfluff] dialect = duckdb sqlfluff-3.4.2/test/fixtures/cli/unknown_jinja_tag/000077500000000000000000000000001503426445100224525ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/unknown_jinja_tag/.sqlfluff000066400000000000000000000001631503426445100242750ustar00rootroot00000000000000[sqlfluff] dialect = ansi [sqlfluff:templater:jinja] load_macros_from_path = my_macros apply_dbt_builtins = False sqlfluff-3.4.2/test/fixtures/cli/unknown_jinja_tag/my_macros/000077500000000000000000000000001503426445100244435ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/cli/unknown_jinja_tag/my_macros/dbt_test.sql000066400000000000000000000002501503426445100267910ustar00rootroot00000000000000{% test warn_if_odd(model, column_name) %} {{ config(severity = 'warn') }} select * from {{ model }} where ({{ column_name }} % 2) = 1 {% endtest %} sqlfluff-3.4.2/test/fixtures/cli/unknown_jinja_tag/test.sql000066400000000000000000000000111503426445100241420ustar00rootroot00000000000000SELECT 1 sqlfluff-3.4.2/test/fixtures/cli/warning_a.sql000066400000000000000000000002131503426445100214300ustar00rootroot00000000000000-- This file should fail _only_ for spacing around + -- We explicitly configure that rule to only warn. -- sqlfluff:warnings:LT01 SELECT 1+2 sqlfluff-3.4.2/test/fixtures/cli/warning_name_a.sql000066400000000000000000000002251503426445100224320ustar00rootroot00000000000000-- This file should fail _only_ for spacing around + -- We explicitly configure that rule to only warn.
-- sqlfluff:warnings:layout.spacing SELECT 1+2 sqlfluff-3.4.2/test/fixtures/config/000077500000000000000000000000001503426445100174435ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/glob_exclude/000077500000000000000000000000001503426445100220775ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/glob_exclude/.sqlfluff000066400000000000000000000000451503426445100237210ustar00rootroot00000000000000[sqlfluff] exclude_rules = L05*,RF02 sqlfluff-3.4.2/test/fixtures/config/glob_exclude/test.sql000066400000000000000000000004331503426445100235770ustar00rootroot00000000000000 /* Denylist glob test This query violates RF02, AM04, LT13, AM05, and CV06. When we exclude L05*,RF02 in the config we expect RF02, LT13, AM05, and CV06 to be ignored by the linter. - AM05 because its alias is L051 - CV06 because its alias is L052 */ SELECT * FROM bar JOIN baz sqlfluff-3.4.2/test/fixtures/config/glob_include/000077500000000000000000000000001503426445100220715ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/glob_include/.sqlfluff000066400000000000000000000002071503426445100237130ustar00rootroot00000000000000[sqlfluff] rules = L05*,RF02 [sqlfluff:rules:convention.terminator] # Semi-colon formatting approach. require_final_semicolon = True sqlfluff-3.4.2/test/fixtures/config/glob_include/test.sql000066400000000000000000000004401503426445100235670ustar00rootroot00000000000000 /* Allowlist glob test This query violates RF02, AM04, LT13, AM05, and CV06. When we include L05*,RF02 in the config we expect RF02, LT13, AM05, and CV06 only to be raised by the linter. - AM05 because its alias is L051 - CV06 because its alias is L052 */ SELECT * FROM bar JOIN baz sqlfluff-3.4.2/test/fixtures/config/inheritance_a/000077500000000000000000000000001503426445100222345ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/inheritance_a/.sqlfluff000066400000000000000000000001251503426445100240550ustar00rootroot00000000000000[sqlfluff] dialect=mysql testing_val=foobar testing_int=4 [sqlfluff:bar] foo=barbar sqlfluff-3.4.2/test/fixtures/config/inheritance_a/extra/000077500000000000000000000000001503426445100233575ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/inheritance_a/extra/this_can_have_any_name.cfg000066400000000000000000000000371503426445100305020ustar00rootroot00000000000000[sqlfluff:bar] foo=foobarextra sqlfluff-3.4.2/test/fixtures/config/inheritance_a/nested/000077500000000000000000000000001503426445100235165ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/inheritance_a/nested/pyproject.toml000066400000000000000000000000451503426445100264310ustar00rootroot00000000000000[tool.sqlfluff.core] testing_int = 1 sqlfluff-3.4.2/test/fixtures/config/inheritance_a/nested/setup.cfg000066400000000000000000000000531503426445100253350ustar00rootroot00000000000000[sqlfluff] testing_int=5 testing_bar=7.698 sqlfluff-3.4.2/test/fixtures/config/inheritance_a/nested/tox.ini000066400000000000000000000001271503426445100250310ustar00rootroot00000000000000[sqlfluff] testing_int=6 [sqlfluff:bar] foo=foobar [sqlfluff:fnarr:fnarr] foo=foobar sqlfluff-3.4.2/test/fixtures/config/inheritance_a/testing.sql000066400000000000000000000000221503426445100244240ustar00rootroot00000000000000SELECT 1 FROM tbl
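Note on the inheritance_a tree above: it is a config-precedence fixture. The root .sqlfluff sets testing_int=4, while the files in nested/ supply competing values (pyproject.toml 1, setup.cfg 5, tox.ini 6). A minimal sketch of how such a tree can be inspected from Python, assuming the public FluffConfig API in sqlfluff.core; the winning value is printed rather than asserted, since the precedence order is exactly what these fixtures exist to test:

    from sqlfluff.core import FluffConfig

    # Loading config for the nested directory searches parent directories
    # too, so all of the config files above contribute to the final config.
    config = FluffConfig.from_path("test/fixtures/config/inheritance_a/nested")
    print(config.get("testing_int"))  # whichever config source takes precedence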
sqlfluff-3.4.2/test/fixtures/config/inheritance_b/000077500000000000000000000000001503426445100222355ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/inheritance_b/example.sql000066400000000000000000000000161503426445100244060ustar00rootroot00000000000000 SELeCT fOosqlfluff-3.4.2/test/fixtures/config/inheritance_b/nested/000077500000000000000000000000001503426445100235175ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/inheritance_b/nested/.sqlfluff000066400000000000000000000000701503426445100253370ustar00rootroot00000000000000[sqlfluff] rules=LT01,LT02,CP01,CP02 exclude_rules=CP03 sqlfluff-3.4.2/test/fixtures/config/inheritance_b/nested/example.sql000066400000000000000000000000161503426445100256700ustar00rootroot00000000000000 SELeCT fOosqlfluff-3.4.2/test/fixtures/config/inheritance_b/tox.ini000066400000000000000000000000631503426445100235470ustar00rootroot00000000000000[sqlfluff] rules=LT01,CP01,CP02 exclude_rules=CP01 sqlfluff-3.4.2/test/fixtures/config/placeholder/000077500000000000000000000000001503426445100217255ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/placeholder/.sqlfluff-placeholder000066400000000000000000000002341503426445100260270ustar00rootroot00000000000000[sqlfluff] testing_val=foobar testing_int=4 [sqlfluff:bar] foo=barbar [sqlfluff:templater:placeholder] param_style = flyway_var flyway:database = test_db sqlfluff-3.4.2/test/fixtures/config/rules_group_with_exclude/000077500000000000000000000000001503426445100245555ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/rules_group_with_exclude/.sqlfluff000066400000000000000000000000551503426445100264000ustar00rootroot00000000000000[sqlfluff] rules = core exclude_rules = LT04 sqlfluff-3.4.2/test/fixtures/config/rules_group_with_exclude/test.sql000066400000000000000000000004101503426445100262500ustar00rootroot00000000000000 /* Rules group with exclude rules test If some monster wants to run the core rules, but at the same time allow trailing and leading commas, then they can do that now This query should only trigger CP01 */ SELECT field_1, field_2 , field_3 from bar sqlfluff-3.4.2/test/fixtures/config/rules_set_to_none/000077500000000000000000000000001503426445100231715ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/rules_set_to_none/.sqlfluff000066400000000000000000000000301503426445100250050ustar00rootroot00000000000000[sqlfluff] rules = None sqlfluff-3.4.2/test/fixtures/config/rules_set_to_none/test.sql000066400000000000000000000004121503426445100246660ustar00rootroot00000000000000 /* Rules set to none test The previous default setting for rules was 'None' which meant all rules would be run. 
The new default is 'all', but having rules = None should still run all rules, meaning this query will trigger LT13, AM04, and CP01 */ SELECT * from bar sqlfluff-3.4.2/test/fixtures/config/toml/000077500000000000000000000000001503426445100204165ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/config/toml/pyproject.toml000066400000000000000000000005421503426445100233330ustar00rootroot00000000000000[tool.sqlfluff.core] nocolor = true verbose = 2 testing_int = 5 testing_bar = 7.698 testing_bool = false testing_arr = [ "a", "b", "c" ] testing_inline_table = { x = 1 } rules = ["LT03", "LT09"] [tool.sqlfluff.bar] foo = "foobar" [tool.sqlfluff.fnarr.fnarr] foo = "foobar" [tool.sqlfluff.rules.capitalisation.keywords] capitalisation_policy = "upper" sqlfluff-3.4.2/test/fixtures/dialects/000077500000000000000000000000001503426445100177665ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/README.md000066400000000000000000000026461503426445100212540ustar00rootroot00000000000000# Automated parser tests The `parser` directory contains the files for automated parser tests. This is organised first into folders for each `dialect` (e.g. `ansi`, `mysql`) which each then contain both `.sql` files and `.yml` files. The intent for these folders is that each test should be in the _highest_ dialect that it can be in. i.e. If it can be in the `ansi` dialect then it should be in there. Within each folder, any `.sql` files will be tested to check that they parse successfully (i.e. that they do not raise any errors and that the parsed result does not contain any _unparsable_ segments). If there is a `.yml` file with the same filename as the `.sql` file then the _structure_ of the parsed query will also be compared against the structure within that yaml file. ## Adding a new test For best test coverage, add both a `.sql` and `.yml` file. The easiest way to add a `.yml` file is to run: ``` python test/generate_parse_fixture_yml.py [--dialect <dialect>] [--filter <filter>] [--new-only] ``` Or via `tox`: ``` tox -e generate-fixture-yml ``` Or via `tox` with arguments: ``` tox -e generate-fixture-yml -- --dialect <dialect> ``` This will regenerate all the parsed structure yml files, or a subset based on the given filters. ## Running parser tests To avoid running the whole test suite with tox after changing parsers, you can instead run: ``` pytest test/dialects/dialects_test.py ``` to save some time. sqlfluff-3.4.2/test/fixtures/dialects/ansi/000077500000000000000000000000001503426445100207205ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/ansi/.sqlfluff000066400000000000000000000000321503426445100225360ustar00rootroot00000000000000[sqlfluff] dialect = ansi sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_sequence.sql000066400000000000000000000002721503426445100244410ustar00rootroot00000000000000ALTER SEQUENCE foo INCREMENT BY 1; ALTER SEQUENCE foo MAXVALUE 7 NO minvalue; ALTER SEQUENCE foo NOCACHE CYCLE; ALTER SEQUENCE foo NOORDER CACHE 5 NOCYCLE; ALTER SEQUENCE foo ORDER; sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_sequence.yml000066400000000000000000000036221503426445100244450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
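# Example of the README workflow above (illustrative file names only, not a
# real fixture): after adding a new ansi/my_new_test.sql, running
#   python test/generate_parse_fixture_yml.py --dialect ansi --new-only
# generates a matching ansi/my_new_test.yml, including an auto-computed
# _hash line like the one below.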
_hash: 8702692b6970efbc664e56a683b682412dff7f788392da48f53763ce454394ea file: - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: MAXVALUE numeric_literal: '7' - alter_sequence_options_segment: - keyword: 'NO' - keyword: minvalue - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: NOCACHE - alter_sequence_options_segment: keyword: CYCLE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: NOORDER - alter_sequence_options_segment: keyword: CACHE numeric_literal: '5' - alter_sequence_options_segment: keyword: NOCYCLE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: ORDER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_table.sql000066400000000000000000000001721503426445100237170ustar00rootroot00000000000000ALTER TABLE x DROP COLUMN y; ALTER TABLE x DROP y; ALTER TABLE x DROP IF EXISTS y; ALTER TABLE x DROP COLUMN IF EXISTS y; sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_table.yml000066400000000000000000000024741503426445100237300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f3f7e008ac5ac4bd528353dd71d7f92e09e87cae5279907070e712fc6f5f139 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - parameter: DROP - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: IF - keyword: EXISTS - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: y - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_table_rename_to.sql000066400000000000000000000000651503426445100257510ustar00rootroot00000000000000ALTER TABLE old_table_name RENAME TO new_table_name; sqlfluff-3.4.2/test/fixtures/dialects/ansi/alter_table_rename_to.yml000066400000000000000000000012361503426445100257540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6a71f21b89678cde482692a03d2c37005c6266d69de63a55322abcd5b953996c file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: old_table_name - keyword: RENAME - keyword: TO - table_reference: naked_identifier: new_table_name statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.sql000066400000000000000000000021501503426445100270350ustar00rootroot00000000000000-- ansi_cast_with_whitespaces.sql /* Several valid queries where there is whitespace surrounding the ANSI cast operator (::) */ -- query from https://github.com/sqlfluff/sqlfluff/issues/2720 SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: text FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with an arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-3.4.2/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml000066400000000000000000000200641503426445100270430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
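# Note that the varying whitespace around the :: operator in the source SQL
# above does not appear in the structure below: these generated fixtures
# record only the code elements of the parse tree, which is why the
# differently-spaced casts produce identical structures.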
_hash: a204b9c459511a89cc2f1f77d9c46071a44272116c1e1e9f8287692bd96688ed file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: 
ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/arithmetic_a.sql000066400000000000000000000013561503426445100240770ustar00rootroot00000000000000SELECT 1 + (2 * 3) >= 4 + 6+13 as val; SELECT 1 + ~(~2 * 3) >= 4 + ~6+13 as val; SELECT -1; SELECT -1 + 5; SELECT ~1; SELECT -1 + ~5; SELECT 4 & ~8 | 16; SELECT 8 + ~(3); SELECT 8 | ~ ~ ~4; SELECT 1 * -(5); SELECT 1 * -5; SELECT 1 * - - - 5; SELECT 1 * - - - (5); SELECT 1 * + + (5); SELECT 1 * - - - func(5); SELECT 1 * ~ ~ ~ func(5); SELECT 1 * +(5); SELECT 1 * +5; SELECT 1 * + + 5; SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT NOT NOT (TRUE); -- parses middle NOT as column ref SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT func(5); SELECT 'abc' LIKE - - 5; -- PG can parse this ok, and then fail due to data type mismatch SELECT 'abc' LIKE ~ ~ 5; -- PG can parse this ok, and then fail due to data type mismatch sqlfluff-3.4.2/test/fixtures/dialects/ansi/arithmetic_a.yml000066400000000000000000000256061503426445100241050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 14952d9d87f9b9ba951d6f57dcda97a4797a166002c72d1a1d8256d7a2207dd0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: alias_operator: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - tilde: '~' - bracketed: start_bracket: ( expression: - tilde: '~' - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - tilde: '~' - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: alias_operator: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: tilde: '~' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - tilde: '~' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '4' - binary_operator: ampersand: '&' - tilde: '~' - numeric_literal: '8' - binary_operator: pipe: '|' - numeric_literal: '16' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '8' binary_operator: + tilde: '~' bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '8' - binary_operator: pipe: '|' - tilde: '~' - tilde: '~' - tilde: '~' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: '-' bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - 
statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - sign_indicator: + - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - tilde: '~' - tilde: '~' - tilde: '~' - function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: + bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'FALSE' - binary_operator: AND - keyword: NOT - keyword: NOT - keyword: NOT - bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'abc'" keyword: LIKE sign_indicator: '-' numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT 
select_clause_element: expression: - quoted_literal: "'abc'" - keyword: LIKE - tilde: '~' - tilde: '~' - numeric_literal: '5' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/bracket_in_comment.sql000066400000000000000000000000301503426445100252550ustar00rootroot00000000000000select a /* ) */ from b sqlfluff-3.4.2/test/fixtures/dialects/ansi/bracket_in_comment.yml000066400000000000000000000013731503426445100252720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c4b65a8593c78cb67ed7ac4ca38869092f0274857b76255e5a79d0499a1a50fb file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b sqlfluff-3.4.2/test/fixtures/dialects/ansi/bracketed_statement.sql000066400000000000000000000000541503426445100254500ustar00rootroot00000000000000(SELECT 1); ((SELECT 1)); (((SELECT 1))); sqlfluff-3.4.2/test/fixtures/dialects/ansi/bracketed_statement.yml000066400000000000000000000024701503426445100254560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7028eaa159caaa519f3691bc92ad59783a0e192e83b0435f5633b589c6f7847 file: - statement: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: bracketed: start_bracket: ( bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/comments.sql000066400000000000000000000001221503426445100232610ustar00rootroot00000000000000-- This is a comment /* So is this */ /* This is a multiple line comment */ sqlfluff-3.4.2/test/fixtures/dialects/ansi/comments.yml000066400000000000000000000006171503426445100232740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
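# comments.sql above contains only comments, so there is no parsable
# statement to record: the parse structure below is simply 'file: null'.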
_hash: fdda373cd9cd649f82a9c5cf7ba9e290375c0ceae29477b0bad5a25f24a52ae3 file: null sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit.sql000066400000000000000000000000071503426445100227260ustar00rootroot00000000000000commit sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit.yml000066400000000000000000000007101503426445100227310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 524dc2c43882d88047d23a738fdc9a5fa41c1c595e4b2e3dba6e192f7c424244 file: statement: transaction_statement: keyword: commit sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_and_no_chain.sql000066400000000000000000000000241503426445100254050ustar00rootroot00000000000000commit and no chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_and_no_chain.yml000066400000000000000000000010041503426445100254060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d10b732862da9211f19d4b99e56ea99c86b1a5156fc2fabe7b8deaff50c2b78 file: statement: transaction_statement: - keyword: commit - keyword: and - keyword: 'no' - keyword: chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_work.sql000066400000000000000000000000141503426445100237660ustar00rootroot00000000000000commit work sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_work.yml000066400000000000000000000007341503426445100240010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7bb7ca6fe94332bd83031c85e7adc4cf063c66f926f54b6a85e17ba258015462 file: statement: transaction_statement: - keyword: commit - keyword: work sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_work_and_no_chain.sql000066400000000000000000000000311503426445100264450ustar00rootroot00000000000000commit work and no chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/commit_work_and_no_chain.yml000066400000000000000000000010301503426445100264470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
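# As an illustration only (the generator's exact scheme is internal to
# SQLFluff), the _hash values in these files behave like a checksum of the
# generated content, e.g. hashlib.sha256(content).hexdigest() in Python:
# any hand edit leaves the stored value stale, which the test suite detects.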
_hash: 4b90d12608e387d7607439e59f911f13161235e52b413201f43b79e1f264af8c file: statement: transaction_statement: - keyword: commit - keyword: work - keyword: and - keyword: 'no' - keyword: chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_cast.sql000066400000000000000000000014201503426445100237130ustar00rootroot00000000000000CREATE CAST (int AS bool) WITH FUNCTION fname; CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname(); CREATE CAST (int AS bool) WITH FUNCTION fname(bool); CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT; CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2) FOR udt_3; CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2) FOR sch.udt_3; CREATE CAST (int AS bool) WITH ROUTINE fname(); CREATE CAST (int AS bool) WITH PROCEDURE fname(); CREATE CAST (int AS bool) WITH METHOD fname(); CREATE CAST (int AS bool) WITH INSTANCE METHOD fname(); CREATE CAST (int AS bool) WITH STATIC METHOD fname(); CREATE CAST (int AS bool) WITH CONSTRUCTOR METHOD fname(); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_cast.yml000066400000000000000000000174371503426445100237340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54848c7eae3e3c3c50ab04feca39d8798be72e8e24009c02366c2b4083750362 file: - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: bool end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . 
function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: int - comma: ',' - data_type: data_type_identifier: bool - end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - comma: ',' - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: FOR - object_reference: naked_identifier: udt_3 - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - comma: ',' - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: FOR - object_reference: - naked_identifier: sch - dot: . - naked_identifier: udt_3 - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: ROUTINE - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: PROCEDURE - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: INSTANCE - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: STATIC - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - 
statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: CONSTRUCTOR - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_database.yml000066400000000000000000000000001503426445100245200ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_database_a.sql000066400000000000000000000000341503426445100250250ustar00rootroot00000000000000create database my_database sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_database_a.yml000066400000000000000000000010441503426445100250310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e42524c286cd1f2d8fd1e82b6332662d677b62505552f1b2cc0664940f91052 file: statement: create_database_statement: - keyword: create - keyword: database - database_reference: naked_identifier: my_database sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_database_if_not_exists.sql000066400000000000000000000000521503426445100274620ustar00rootroot00000000000000create database if not exists my_database sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_database_if_not_exists.yml000066400000000000000000000011371503426445100274710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7a2ac2ab01722015628232b7c24c213d6332bd20f90d7306ce32faf0694f8a16 file: statement: create_database_statement: - keyword: create - keyword: database - keyword: if - keyword: not - keyword: exists - database_reference: naked_identifier: my_database sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_function.sql000066400000000000000000000001651503426445100246130ustar00rootroot00000000000000CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; DROP FUNCTION add; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_function.yml000066400000000000000000000022761503426445100246220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5efc4a124fb92699b72bbb8a6d21ba92beab827932f80df463538fa3b3abe1a3 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: integer - comma: ',' - data_type: data_type_identifier: integer - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: add - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_function_no_args.sql000066400000000000000000000001211503426445100263130ustar00rootroot00000000000000CREATE FUNCTION add() RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_function_no_args.yml000066400000000000000000000016001503426445100263200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 03144fc2e1352886ee67bca5ca29353f09c06edcc60e429188f092604289873c file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_index_if_not_exists.sql000066400000000000000000000001251503426445100270260ustar00rootroot00000000000000CREATE INDEX IF NOT EXISTS transaction_updated ON transaction_master (last_updated); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_index_if_not_exists.yml000066400000000000000000000015331503426445100270340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fb1585edfd71857739ed70dbec2feaf89c1615bfe41a655c8d05374f9454d739 file: statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: IF - keyword: NOT - keyword: EXISTS - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_index_simple.sql000066400000000000000000000002231503426445100254410ustar00rootroot00000000000000CREATE INDEX transaction_updated ON transaction_master(last_updated); CREATE UNIQUE INDEX transaction_updated ON transaction_master(last_updated); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_index_simple.yml000066400000000000000000000023141503426445100254460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e305bc89e96ea473198a1c092d3f15dd7764269593bfdb0e6b826e7d17f77649 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_model_options.sql000066400000000000000000000003121503426445100256330ustar00rootroot00000000000000CREATE OR REPLACE MODEL model3 OPTIONS ( MODEL_TYPE='LOGISTIC_REG', AUTO_CLASS_WEIGHTS=TRUE, INPUT_LABEL_COLS = ['label_str'] ) AS SELECT a, b FROM table1 WHERE training = 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_model_options.yml000066400000000000000000000037041503426445100256450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 211fd01681c933400ac519c3f5901dcefb5dc7e061009417bf5aed3a98d10094 file: statement: create_model_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MODEL - object_reference: naked_identifier: model3 - keyword: OPTIONS - bracketed: - start_bracket: ( - parameter: MODEL_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'LOGISTIC_REG'" - comma: ',' - parameter: AUTO_CLASS_WEIGHTS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - parameter: INPUT_LABEL_COLS - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'label_str'" end_square_bracket: ']' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: training comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_role.sql000066400000000000000000000000251503426445100237220ustar00rootroot00000000000000CREATE ROLE foo_role sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_role.yml000066400000000000000000000010251503426445100237250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d37ad3c99e87b6413a9653853c23aa004554e27e1e3f7832f6c531a582920c8 file: statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: foo_role sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_schema.yml000066400000000000000000000000001503426445100242140ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_schema_a.sql000066400000000000000000000000301503426445100245150ustar00rootroot00000000000000create schema my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_schema_a.yml000066400000000000000000000010341503426445100245240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b2a34157972c496f8de49616590e4c505278c27cb7f885e6091e323d83f9f630 file: statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_schema_if_not_exists.sql000066400000000000000000000000461503426445100271610ustar00rootroot00000000000000create schema if not exists my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_schema_if_not_exists.yml000066400000000000000000000011271503426445100271640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55fa276eca5e297488dafef7ebdf134e099cc4ea04e210d37c6f1e6c111241ca file: statement: create_schema_statement: - keyword: create - keyword: schema - keyword: if - keyword: not - keyword: exists - schema_reference: naked_identifier: my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_sequence.sql000066400000000000000000000004571503426445100246020ustar00rootroot00000000000000CREATE SEQUENCE foo; CREATE SEQUENCE foo INCREMENT BY 3; CREATE SEQUENCE foo MINVALUE 5 NO MAXVALUE; CREATE SEQUENCE foo NO MINVALUE MAXVALUE 12; CREATE SEQUENCE foo INCREMENT BY 5 START WITH 8 CACHE 4; CREATE SEQUENCE foo NOCACHE; CREATE SEQUENCE foo NOCYCLE ORDER; CREATE SEQUENCE foo NOORDER; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_sequence.yml000066400000000000000000000054051503426445100246020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0ed3f2e276d9585288a10f1422f2a387ea7fe6cc60fe9c82c5c93ecbb44c6cc file: - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '3' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: MINVALUE numeric_literal: '5' - create_sequence_options_segment: - keyword: 'NO' - keyword: MAXVALUE - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: 'NO' - keyword: MINVALUE - create_sequence_options_segment: keyword: MAXVALUE numeric_literal: '12' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '5' - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '8' - create_sequence_options_segment: keyword: CACHE numeric_literal: '4' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOCACHE - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOCYCLE - create_sequence_options_segment: keyword: ORDER - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOORDER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table.sql000066400000000000000000000004171503426445100240550ustar00rootroot00000000000000-- Test various forms of quoted data types CREATE TABLE 
foo ( pk int PRIMARY KEY, quoted_name "custom udt", qualified_name sch.qualified, quoted_qualified "my schema".qualified, more_quoted "my schema"."custom udt", quoted_udt sch."custom udt" ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table.yml000066400000000000000000000034501503426445100240570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4d508e6a17455867f424bf23fbf6c04cd8ec300f8aa8b13b6920ee8199a9944b file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: pk data_type: data_type_identifier: int column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: quoted_name data_type: quoted_identifier: '"custom udt"' - comma: ',' - column_definition: naked_identifier: qualified_name data_type: naked_identifier: sch dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: quoted_qualified data_type: quoted_identifier: '"my schema"' dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: more_quoted data_type: - quoted_identifier: '"my schema"' - dot: . - quoted_identifier: '"custom udt"' - comma: ',' - column_definition: naked_identifier: quoted_udt data_type: naked_identifier: sch dot: . quoted_identifier: '"custom udt"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_c1_c2.sql000066400000000000000000000000531503426445100253200ustar00rootroot00000000000000create table table1 (c1 SMALLINT, c2 DATE) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_c1_c2.yml000066400000000000000000000015321503426445100253250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b259c9e7889ddd71fda8e2d7d3e303d69184a38db1c02676ecdb69415c4e2039 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql000066400000000000000000000012101503426445100303510ustar00rootroot00000000000000create table table1 ( c1 INT NOT NULL, c2 INT NULL DEFAULT 1, c3 INT PRIMARY KEY, c4 INT UNIQUE, c5 INT REFERENCES table2, c6 INT REFERENCES table2 (c6_other), c6 INT REFERENCES table2 (c6_other) MATCH FULL, c6 INT REFERENCES table2 (c6_other) MATCH PARTIAL, c6 INT REFERENCES table2 (c6_other) MATCH SIMPLE, c6 INT REFERENCES table2 (c6_other) ON DELETE NO ACTION, c6 INT REFERENCES table2 (c6_other) ON UPDATE SET NULL, c6 INT REFERENCES table2 (c6_other) ON DELETE RESTRICT ON UPDATE CASCADE, c7 INT NOT NULL DEFAULT 1 UNIQUE REFERENCES table3 (c7_other), c8 INT NOT NULL DEFAULT 1::INT ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml000066400000000000000000000145631503426445100303720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d7769d9eb54abd09aa301054592b03b6258fb68d97f01f894f52b65d67cb820e file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: c2 - data_type: data_type_identifier: INT - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT numeric_literal: '1' - comma: ',' - column_definition: naked_identifier: c3 data_type: data_type_identifier: INT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: c4 data_type: data_type_identifier: INT column_constraint_segment: keyword: UNIQUE - comma: ',' - column_definition: naked_identifier: c5 data_type: data_type_identifier: INT column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table2 - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table2 bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH - keyword: FULL - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH 
- keyword: PARTIAL - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH - keyword: SIMPLE - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: 'NO' - keyword: ACTION - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: SET - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - comma: ',' - column_definition: - naked_identifier: c7 - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT numeric_literal: '1' - column_constraint_segment: keyword: UNIQUE - column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table3 bracketed: start_bracket: ( column_reference: naked_identifier: c7_other end_bracket: ) - comma: ',' - column_definition: - naked_identifier: c8 - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT cast_expression: numeric_literal: '1' casting_operator: '::' data_type: data_type_identifier: INT - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_pk_unique_fk_constraints.sql000066400000000000000000000002351503426445100315420ustar00rootroot00000000000000create table table1 ( c1 INT, c2 INT, c3 INT, PRIMARY KEY (c1), UNIQUE (c2, c3), FOREIGN KEY (c2, c3) REFERENCES table2 (c2_, c3_) ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_a_pk_unique_fk_constraints.yml000066400000000000000000000042011503426445100315410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b3b030f95d03b4e596d31d6c865d1aa67ac01dd005d0a5929bd77ba56946c7d6 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c3 data_type: data_type_identifier: INT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2_ - comma: ',' - column_reference: naked_identifier: c3_ - end_bracket: ) - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as.sql000066400000000000000000000000671503426445100245410ustar00rootroot00000000000000CREATE OR REPLACE TABLE t2 AS SELECT a, b FROM t1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as.yml000066400000000000000000000021021503426445100245330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74ba87761cac0f656c136be0907cf0c68088419fb3524ed5bdc65a4714ca4bf2 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as_select_cte_no_parentheses.sql000066400000000000000000000001551503426445100320260ustar00rootroot00000000000000CREATE TABLE final_rows AS WITH source_table AS ( SELECT * FROM source_data ) SELECT * FROM source_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as_select_cte_no_parentheses.yml000066400000000000000000000033131503426445100320270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a1659f3f5ab4f99e7621bf54f474960ca57dd88541f828ecc8f1d7fcfc88a175 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: final_rows - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: source_table keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_data end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as_select_cte_parentheses.sql000066400000000000000000000002051503426445100313260ustar00rootroot00000000000000CREATE TABLE final_rows AS ( WITH source_table AS ( SELECT * FROM source_data ) SELECT * FROM source_table ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_as_select_cte_parentheses.yml000066400000000000000000000035241503426445100313370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41cc71e536723cf73e74d0c075a76665a111a7de29762ce7ad9a1abba8d9a780 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: final_rows - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: source_table keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_data end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_auto_increment.sql000066400000000000000000000000551503426445100271470ustar00rootroot00000000000000CREATE TABLE a ( id INT AUTO_INCREMENT ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_auto_increment.yml000066400000000000000000000014201503426445100271460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c6defdfc61f69a5f0424a15eae97b42d22a0806c32c0e207417916c7d1370024 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: keyword: AUTO_INCREMENT end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_column_comment.sql000066400000000000000000000001001503426445100271410ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) COMMENT 'Column comment' ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_column_comment.yml000066400000000000000000000017751503426445100271660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 091626b64511bb95a40b4231d3a84444a64d598f4a0feae993269df647b964fa file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'Column comment'" end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_column_constraint.sql000066400000000000000000000005471503426445100277020ustar00rootroot00000000000000CREATE TABLE users ( username TEXT, age INT CHECK(age > 18) ); CREATE TABLE users ( username TEXT, age INT CHECK(age IS NOT NULL) ); CREATE TABLE Persons ( ID int NOT NULL, LastName varchar(255) NOT NULL, FirstName varchar(255), Age int, City varchar(255), CONSTRAINT CHK_Person CHECK (Age>=18 AND City='Sandnes') ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_column_constraint.yml000066400000000000000000000105751503426445100277060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f2ec6c3de951ea1b764a10ac67bddbe60a46994bcfcc6539af64f0fd3b8cd827 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: username data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '>' numeric_literal: '18' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: username data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: age - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Persons - bracketed: - start_bracket: ( - column_definition: naked_identifier: ID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: LastName data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: FirstName data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - comma: ',' - column_definition: naked_identifier: Age data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: City data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - comma: ',' - column_definition: naked_identifier: CONSTRAINT data_type: data_type_identifier: CHK_Person column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: Age - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '18' - binary_operator: AND - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Sandnes'" end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_constraint_default.sql000066400000000000000000000001641503426445100300240ustar00rootroot00000000000000BEGIN TRANSACTION; CREATE TABLE IF NOT EXISTS "tbl" ( "col" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); COMMIT; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_constraint_default.yml000066400000000000000000000022651503426445100300320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 149db629af0164d27dc37ef59df84f570101734b0f14157185888321791fddb1 file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '"tbl"' - bracketed: start_bracket: ( column_definition: - quoted_identifier: '"col"' - data_type: keyword: TIMESTAMP - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_constraint_reference_option.sql000066400000000000000000000005141503426445100317250ustar00rootroot00000000000000CREATE TABLE b ( b INT NOT NULL, c INT NOT NULL, d INT NOT NULL, CONSTRAINT c_b FOREIGN KEY (b) REFERENCES a(b) ON DELETE RESTRICT ON UPDATE NO ACTION, CONSTRAINT c_d FOREIGN KEY (d) REFERENCES a(d) ON UPDATE CASCADE ON DELETE SET NULL, CONSTRAINT c_c FOREIGN KEY (c) REFERENCES a(c) ON DELETE SET DEFAULT ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_constraint_reference_option.yml000066400000000000000000000064621503426445100317370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4dcc841678a21a7fe42a1604833bdc7089d1f015e3041884d035b203d3e11b32 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: b - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_b - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: 'NO' - keyword: ACTION - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_d - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: d end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: d end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_c - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c end_bracket: ) - keyword: REFERENCES - table_reference: 
naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: c end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: DEFAULT - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_default_function.sql000066400000000000000000000000661503426445100274660ustar00rootroot00000000000000CREATE TABLE a ( ts TIMESTAMP DEFAULT GETDATE() ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_default_function.yml000066400000000000000000000017471503426445100274770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a748f6572b04eff5adc3ef22bb760e00df0d38c20b3f6d5c98ee38c4b4a53e24 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: ts data_type: keyword: TIMESTAMP column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_double_precision.sql000066400000000000000000000000561503426445100274610ustar00rootroot00000000000000CREATE TABLE test ( angle double precision ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_double_precision.yml000066400000000000000000000013741503426445100274670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 922fa2632db9df21e02a75ae5a704665060b2f2726e5c5c703b0550430da3250 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test - bracketed: start_bracket: ( column_definition: naked_identifier: angle data_type: - keyword: double - keyword: precision end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_table_comment.sql000066400000000000000000000000771503426445100267500ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) ) COMMENT 'Table comment' sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_table_comment.yml000066400000000000000000000017051503426445100267510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 79d67e660908fa5e76b07b41b6cfd4156850b94e42255abd6b89c011b770bfd8 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: "'Table comment'" sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_varchar.sql000066400000000000000000000000471503426445100255620ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_table_varchar.yml000066400000000000000000000015541503426445100255700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddbfbeee0227954476881c7b0236fdc0ca7fa47d1401c2911ed5508aff69f55f file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_trigger.sql000066400000000000000000000006611503426445100244320ustar00rootroot00000000000000CREATE TRIGGER foo BEFORE INSERT ON bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo BEFORE INSERT on bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo AFTER UPDATE OF bar, baz ON bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo BEFORE INSERT ON bar WHEN (a=b) EXECUTE PROCEDURE proc(args); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_trigger.yml000066400000000000000000000073151503426445100244370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e2b3ea5295e570d17ef161b60fb9e039007e50fb60c2c3caf15f4c58491fbef8 file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'on' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_user.sql000066400000000000000000000000251503426445100237370ustar00rootroot00000000000000CREATE USER foo_user sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_user.yml000066400000000000000000000010251503426445100237420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 04fed1ca81b7b7e7a80eb72cffecba963fe5218190b64a4347770e44dd839146 file: statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: foo_user sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_view_a.sql000066400000000000000000000004611503426445100242370ustar00rootroot00000000000000CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); CREATE OR REPLACE VIEW vw_appt_latest AS ( WITH most_current as ( SELECT da.* FROM dim_appt da WHERE da.current_appt_id IS NULL ) SELECT * from most_current ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_view_a.yml000066400000000000000000000073641503426445100242520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 828d3386d75173425688561ed663eaed5b254a2b06a3288cafd28f329128f049 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: a - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vw_appt_latest - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: most_current keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: da dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dim_appt alias_expression: naked_identifier: da where_clause: keyword: WHERE expression: column_reference: - naked_identifier: da - dot: . - naked_identifier: current_appt_id keyword: IS null_literal: 'NULL' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: most_current end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_view_if_not_exists.sql000066400000000000000000000001651503426445100266750ustar00rootroot00000000000000CREATE VIEW IF NOT EXISTS a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_view_if_not_exists.yml000066400000000000000000000034341503426445100267010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6932759b92f812b45c0a8c035862e8dfaf8b3055b433de9b17e2e20604d7d93c file: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: a - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: id end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_zero_argument_function.sql000066400000000000000000000002321503426445100275470ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION a() RETURNS integer AS ' SELECT 1; ' LANGUAGE SQL; CREATE FUNCTION a() RETURNS integer AS ' SELECT 1; ' LANGUAGE SQL; sqlfluff-3.4.2/test/fixtures/dialects/ansi/create_zero_argument_function.yml000066400000000000000000000026371503426445100275640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a34b50baf434a3c5d92187e85a5fbecb00d8f1695deec76a38ce613ff964436b file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: a - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'\n SELECT 1;\n'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: a - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'\n SELECT 1;\n'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/delete_from.sql000066400000000000000000000000441503426445100237240ustar00rootroot00000000000000DELETE FROM table_name WHERE a > 0; sqlfluff-3.4.2/test/fixtures/dialects/ansi/delete_from.yml000066400000000000000000000016161503426445100237340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9d32ff56cb5f997f61ef9026e4322d824482c3e189b70583070cb106cfeb6a1b file: statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/describe_table.sql000066400000000000000000000000331503426445100243640ustar00rootroot00000000000000describe table "my_table"; sqlfluff-3.4.2/test/fixtures/dialects/ansi/describe_table.yml000066400000000000000000000010771503426445100243770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c5ded8a79782ebe613a1e38b6e94c82aafd938c8f45edb5aa950939e7444626 file: statement: describe_statement: keyword: describe naked_identifier: table object_reference: quoted_identifier: '"my_table"' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/double_dot.sql000066400000000000000000000002741503426445100235640ustar00rootroot00000000000000-- Snowflake Double-Dot Notation -- https://docs.snowflake.com/en/sql-reference/name-resolution.html#resolution-when-schema-omitted-double-dot-notation SELECT * FROM my_database..my_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/double_dot.yml000066400000000000000000000015721503426445100235700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e87c7ed014c2b8ec15e4fe464f02b12cc5608766a1b970e657ce20b5977042f0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: my_database - dot: . - dot: . - naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/double_quote_escapes.sql000066400000000000000000000002571503426445100256370ustar00rootroot00000000000000select 1 as foo, 2 as "foo", 3 as """foo""", 4 as """""foo""""", bar, "bar", """bar""", """""bar""""" from """""a"""""."""""b"""""."""""c""""" sqlfluff-3.4.2/test/fixtures/dialects/ansi/double_quote_escapes.yml000066400000000000000000000040641503426445100256410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 73ef8ab95d643074317bc5120d0805192913a9905afc31b2db4e49a72da8a9d0 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: foo - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as quoted_identifier: '"foo"' - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: as quoted_identifier: '"""foo"""' - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: alias_operator: keyword: as quoted_identifier: '"""""foo"""""' - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"bar"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"""bar"""' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"""""bar"""""' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"""""a"""""' - dot: . - quoted_identifier: '"""""b"""""' - dot: . - quoted_identifier: '"""""c"""""' sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_cast.sql000066400000000000000000000002401503426445100234130ustar00rootroot00000000000000DROP CAST (int AS bool); DROP CAST (int AS bool) RESTRICT; DROP CAST (int AS bool) CASCADE; DROP CAST (udt_1 AS udt_2); DROP CAST (sch.udt_1 AS sch.udt_2); sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_cast.yml000066400000000000000000000037751503426445100234350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9ab44c21d3a8f7594f50924ffe73aaf87716721475787ac5558e197378caf83c file: - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . 
data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_index_if_exists.sql000066400000000000000000000000521503426445100256460ustar00rootroot00000000000000DROP INDEX IF EXISTS transaction_updated; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_index_if_exists.yml000066400000000000000000000011411503426445100256500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3634f62eee0607dad56faa2de91f83077f514dd94ddbb22de9c6cb3e5e9cff0f file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: transaction_updated statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_index_simple.sql000066400000000000000000000000401503426445100251370ustar00rootroot00000000000000DROP INDEX transaction_updated; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_index_simple.yml000066400000000000000000000010711503426445100251460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 463c6a46c26e0890aca73e7444abf5f6ff40309a08d248cdb5d6deab9aac69ec file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: transaction_updated statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_model.sql000066400000000000000000000000341503426445100235620ustar00rootroot00000000000000DROP MODEL IF EXISTS model3 sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_model.yml000066400000000000000000000010721503426445100235670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2c02c6b91dd87146c7d0e271966beea6473289a4ae243637cd8742d72f5aa1ed file: statement: drop_MODELstatement: - keyword: DROP - keyword: MODEL - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: model3 sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_schema.sql000066400000000000000000000003121503426445100237210ustar00rootroot00000000000000drop schema my_schema; drop schema my_schema cascade; drop schema my_schema restrict; drop schema if exists my_schema; drop schema if exists my_schema cascade; drop schema if exists my_schema restrict; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_schema.yml000066400000000000000000000031201503426445100237230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 54f1225c5247d92c341517d4b0626dd3b5b9f6df4fe91e4fb61e27c12f81b68a file: - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: cascade - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: restrict - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - keyword: cascade - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - keyword: restrict - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_sequence.sql000066400000000000000000000000531503426445100242730ustar00rootroot00000000000000DROP SEQUENCE foo; DROP SEQUENCE foo.foo; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_sequence.yml000066400000000000000000000014071503426445100243010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad101c01a6b1de9948242d88c6100a9d96553b768c416aeabda25f24162545a2 file: - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: foo - dot: . - naked_identifier: foo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a.sql000066400000000000000000000000151503426445100240500ustar00rootroot00000000000000drop table a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a.yml000066400000000000000000000010151503426445100240530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1cda24832a0538f65fc2459b57806a1e2212dfd66c59e3a338e297caa0e8ccd file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a_cascade.sql000066400000000000000000000000251503426445100255140ustar00rootroot00000000000000drop table a cascade sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a_cascade.yml000066400000000000000000000010441503426445100255200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e6c446ca08215c9e601ab3ab20521d1dadecdc90f059c8ad2d2063d835ae81ae file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a - keyword: cascade sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a_restrict.sql000066400000000000000000000000261503426445100257710ustar00rootroot00000000000000drop table a restrict sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_a_restrict.yml000066400000000000000000000010451503426445100257750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 181c25001520158bcef4025a06fdccd3400ec29e57a5f29667ed4f953bcbbf14 file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a - keyword: restrict sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_if_exists_a.sql000066400000000000000000000000271503426445100261300ustar00rootroot00000000000000drop table if exists a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_table_if_exists_a.yml000066400000000000000000000010651503426445100261350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c0ff0ea789008c5781f680d2c2203bd6b17aa8e0467434a795d8fb2f96f56abe file: statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_trigger.sql000066400000000000000000000000221503426445100241220ustar00rootroot00000000000000DROP TRIGGER foo; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_trigger.yml000066400000000000000000000010451503426445100241320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5a6c840481533f56dbc53131a0b3635f48d417d2581a5971178265d9fc44768c file: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_type.sql000066400000000000000000000001561503426445100234500ustar00rootroot00000000000000DROP TYPE typename; DROP TYPE IF EXISTS typename; DROP TYPE typename CASCADE; DROP TYPE typename RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_type.yml000066400000000000000000000021551503426445100234530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: af0af8bb9b926a9826a74fd76cace2e475ea1c3747ccbf0245b2fc47180ed5a5 file: - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: typename - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a.sql000066400000000000000000000000141503426445100237320ustar00rootroot00000000000000drop view a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a.yml000066400000000000000000000010131503426445100237340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc09d9bc6e480190a8c283a36082f8b11104b89247d2ed552a4e1768ce9b0cb6 file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a_cascade.sql000066400000000000000000000000241503426445100253760ustar00rootroot00000000000000drop view a cascade sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a_cascade.yml000066400000000000000000000010421503426445100254010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b0e62b90e366c4740da190f473bda08e67af21745183790557383213e3e9be01 file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a - keyword: cascade sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a_restrict.sql000066400000000000000000000000251503426445100256530ustar00rootroot00000000000000drop view a restrict sqlfluff-3.4.2/test/fixtures/dialects/ansi/drop_view_a_restrict.yml000066400000000000000000000010431503426445100256560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bd605eda6485ce2c878fd84d2fee268e9c78b528e1204357ba90dd575035ebcb file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a - keyword: restrict sqlfluff-3.4.2/test/fixtures/dialects/ansi/empty_file.sql000066400000000000000000000000001503426445100235640ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/ansi/empty_file.yml000066400000000000000000000006171503426445100236040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdda373cd9cd649f82a9c5cf7ba9e290375c0ceae29477b0bad5a25f24a52ae3 file: null sqlfluff-3.4.2/test/fixtures/dialects/ansi/escape.sql000066400000000000000000000000561503426445100227020ustar00rootroot00000000000000SELECT * FROM x WHERE z LIKE '^_f' ESCAPE '^' sqlfluff-3.4.2/test/fixtures/dialects/ansi/escape.yml000066400000000000000000000020061503426445100227010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 50d87cdcda13faaf46c03e07cd5f5248275cdec5d199d0abf3e2def49aff112d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: WHERE expression: - column_reference: naked_identifier: z - keyword: LIKE - quoted_literal: "'^_f'" - keyword: ESCAPE - quoted_literal: "'^'" sqlfluff-3.4.2/test/fixtures/dialects/ansi/escaped_quotes.sql000066400000000000000000000001631503426445100244450ustar00rootroot00000000000000select case when "Spec\"s 23" like 'Spec\'s%' then 'boop' end as field; select 'This shouldn''t fail' as success; sqlfluff-3.4.2/test/fixtures/dialects/ansi/escaped_quotes.yml000066400000000000000000000026131503426445100244510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f62c9944f6d33e61ade2d7907bd9c4101e8af605080475da5c276ea54fadbd69 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: quoted_identifier: '"Spec\"s 23"' keyword: like quoted_literal: "'Spec\\'s%'" - keyword: then - expression: quoted_literal: "'boop'" - keyword: end alias_expression: alias_operator: keyword: as naked_identifier: field - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'This shouldn''t fail'" alias_expression: alias_operator: keyword: as naked_identifier: success - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/expression_recursion.sql000066400000000000000000000046761503426445100257460ustar00rootroot00000000000000 -- This test checks for recursion errors. If the expression -- is not parsed correctly it can lead to very deep recursion. -- If this test is failing, then check the structure of expression -- parsing. 
select 1 from test_table where test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' --5 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 10 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 15 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 20 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' --30 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 40 sqlfluff-3.4.2/test/fixtures/dialects/ansi/expression_recursion.yml000066400000000000000000000232501503426445100257350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2af3c174ee9c4ac29110809b8c6b6b716e07fa0b9953e2f83ee88c2920b7529c file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table where_clause: keyword: where expression: - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . 
- naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . 
- naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" sqlfluff-3.4.2/test/fixtures/dialects/ansi/expression_recursion_2.sql000066400000000000000000000010671503426445100261560ustar00rootroot00000000000000 -- This test checks for recursion errors. If the expression -- is not parsed correctly it can lead to very deep recursion. -- If this test is failing, then check the structure of expression -- parsing. SELECT * FROM t WHERE a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b sqlfluff-3.4.2/test/fixtures/dialects/ansi/expression_recursion_2.yml000066400000000000000000000202211503426445100261510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ec768aae77d8a2ecce149b275de13c7ca322a6d1fc8a2d249ca6ed00a7445d15 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - 
column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b sqlfluff-3.4.2/test/fixtures/dialects/ansi/from_fetch.sql000066400000000000000000000000571503426445100235570ustar00rootroot00000000000000SELECT * FROM counter FETCH FIRST 10 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/from_fetch.yml000066400000000000000000000016511503426445100235620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dc15eef87b4a1e04131c0c5059abe5c561cb4e8a656934f74aec336cfe33f1e0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '10' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/functions_a.sql000066400000000000000000000002071503426445100237500ustar00rootroot00000000000000SELECT DATE(t), ROUND(b, 2), LEFT(right(s, 5), LEN(s + 6)) as compound FROM tbl_b; SELECT _custom_function(5) as test_column; sqlfluff-3.4.2/test/fixtures/dialects/ansi/functions_a.yml000066400000000000000000000067121503426445100237610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8adb57b106b17117d895ece47b07a5a0ea042e499d557eb44e0e5ce3a8636fd file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROUND function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEFT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: right function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: s - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: LEN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: s binary_operator: + numeric_literal: '6' end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: compound from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: _custom_function function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: test_column - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/functions_b.sql000066400000000000000000000002051503426445100237470ustar00rootroot00000000000000-- Thanks @mrshu for this query, it tests nested functions SELECT SPLIT(LOWER(text), ' ') AS text FROM "database"."sample_table" sqlfluff-3.4.2/test/fixtures/dialects/ansi/functions_b.yml000066400000000000000000000031531503426445100237560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 75a360d9882ca76c8acb74f87e8bf5b0c2fcbe5d85eeb31f15c1ece499928256 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SPLIT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: text end_bracket: ) - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"database"' - dot: . - quoted_identifier: '"sample_table"' sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_on_mytable_to_role.sql000066400000000000000000000000371503426445100273400ustar00rootroot00000000000000grant all on mytable to public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_on_mytable_to_role.yml000066400000000000000000000011541503426445100273430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8305d724d1da5414030fc847f2dd138ab8af97040c8366e510a96d6f1f77735 file: statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_on_table_mytable_to_role.sql000066400000000000000000000000451503426445100305060ustar00rootroot00000000000000grant all on table mytable to myrole sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_on_table_mytable_to_role.yml000066400000000000000000000012011503426445100305030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 97e7798a4e3aac6fbb05d2025e8ac0ae6be6be806ce036119eedd9103ca3b26a file: statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: myrole sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_privileges_on_mytable_to_role.sql000066400000000000000000000000521503426445100315660ustar00rootroot00000000000000grant all privileges on mytable to myrole sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_all_privileges_on_mytable_to_role.yml000066400000000000000000000012061503426445100315720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 722e222a729b789a73aa7a7c29b5181a7f048377b5741b29810476a4b2f341ea file: statement: access_statement: - keyword: grant - keyword: all - keyword: privileges - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: myrole sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_multiple_tables.sql000066400000000000000000000007401503426445100260220ustar00rootroot00000000000000GRANT INSERT ON my_table, my_table2 TO public; GRANT INSERT ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE ON my_table, my_table2 TO public; GRANT INSERT, UPDATE ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE, DELETE ON my_table, my_table2 TO public; GRANT INSERT, UPDATE, DELETE ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE, DELETE, SELECT ON my_table, my_table2 TO public; GRANT INSERT, UPDATE, DELETE, SELECT ON my_table, my_table2 TO "public"; sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_multiple_tables.yml000066400000000000000000000067321503426445100260330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1c12c3673e61617399ed12ec453a59f43e5bf00d2b38e6117f3ef08efb25ddc3 file: - statement: access_statement: - keyword: GRANT - keyword: INSERT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: 
naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_col1_col2_update_col1_on_mytable_to_public.sql000066400000000000000000000000761503426445100344250ustar00rootroot00000000000000grant select (col1, col2), update (col1) on mytable to public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_col1_col2_update_col1_on_mytable_to_public.yml000066400000000000000000000017341503426445100344310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7723482a8f471de4ce6b952aead403d556fd48795cad2d947d68d5d7baeee290 file: statement: access_statement: - keyword: grant - keyword: select - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: update - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public.sql000066400000000000000000000000421503426445100303600ustar00rootroot00000000000000grant select on mytable to public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public.yml000066400000000000000000000011571503426445100303720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 29c7fafdbdad2321a36d17f804f86e5b3fd472049786028fd1310097c14c6c45 file: statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public_with_grant_option.sql000066400000000000000000000000641503426445100342020ustar00rootroot00000000000000grant select on mytable to public with grant option sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public_with_grant_option.yml000066400000000000000000000012561503426445100342100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 74f8012e4befe77e9c842c77b0dbf29035a7f04c5dedd0eba48c352539d91932 file: statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public - keyword: with - keyword: grant - keyword: option sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_update_insert_on_mytable_to_public.sql000066400000000000000000000000621503426445100333100ustar00rootroot00000000000000grant select, update, insert on mytable to public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_select_update_insert_on_mytable_to_public.yml000066400000000000000000000012751503426445100333210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efa558aa4843d6c812b49728b5c5644ffb6f79a126233ff013fc6a8b30b89ebe file: statement: access_statement: - keyword: grant - keyword: select - comma: ',' - keyword: update - comma: ',' - keyword: insert - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_update_on_all_tables_in_schema_a_to_public.sql000066400000000000000000000000611503426445100333370ustar00rootroot00000000000000grant update on all tables in schema a to public sqlfluff-3.4.2/test/fixtures/dialects/ansi/grant_update_on_all_tables_in_schema_a_to_public.yml000066400000000000000000000012721503426445100333460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d44ae0355d61b36f7b20519f4b7c29abba67ad1c63d2a25cec001c882ba3c0f file: statement: access_statement: - keyword: grant - keyword: update - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: naked_identifier: a - keyword: to - role_reference: naked_identifier: public sqlfluff-3.4.2/test/fixtures/dialects/ansi/group_by_fetch.sql000066400000000000000000000001161503426445100244360ustar00rootroot00000000000000SELECT status FROM orders GROUP BY status FETCH FIRST 3 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/group_by_fetch.yml000066400000000000000000000020161503426445100244410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 379ba05fd8fd1bd3e4c5030df27c24869bb5ac75ece660e7da1a45dc715634c6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: status from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: status fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/having_fetch.sql000066400000000000000000000001761503426445100240720ustar00rootroot00000000000000SELECT house_id, COUNT (person_id) FROM persons GROUP BY house_id HAVING COUNT (person_id) > 10 FETCH FIRST 30 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/having_fetch.yml000066400000000000000000000035711503426445100240760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 51ddcc4a3d1b117bcd12b7828ca5486b2760468cc3231604b132c9b782cd4f20 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: house_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: person_id end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: persons groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: house_id having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: person_id end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '30' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_a.sql000066400000000000000000000001341503426445100232430ustar00rootroot00000000000000INSERT into tbl_b (col1) values (123); INSERT INTO tbl_c ( SELECT * FROM table1 ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_a.yml000066400000000000000000000027251503426445100232550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 039eff37d4ce25fec8d2a99a5e11fc8145d03925916ccfa9ef22203887299592 file: - statement: insert_statement: - keyword: INSERT - keyword: into - table_reference: naked_identifier: tbl_b - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl_c - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_default_values.sql000066400000000000000000000000441503426445100260260ustar00rootroot00000000000000INSERT INTO mytable DEFAULT VALUES; sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_default_values.yml000066400000000000000000000011271503426445100260330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5635732edc5f38069bfe20809107f8b9f1d88a8415966d1daf88ee0afcf8572 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - keyword: DEFAULT - keyword: VALUES statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_using_subquery.sql000066400000000000000000000001531503426445100261100ustar00rootroot00000000000000INSERT INTO foo SELECT 0 AS bar; INSERT INTO foo (SELECT 1 AS bar); INSERT INTO foo ((SELECT 1 AS bar)); sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_using_subquery.yml000066400000000000000000000035621503426445100261210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5d474fcb20db5ca949b49b7cff0c9bea2db09cc971507bc4356b98f5ff31135f file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' alias_expression: alias_operator: keyword: AS naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: bar end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_with_statement.sql000066400000000000000000000002451503426445100260650ustar00rootroot00000000000000INSERT INTO table2 (column1, column2, column3) WITH mycte AS ( SELECT foo, bar FROM mytable1 ) SELECT foo, bar, baz FROM mycte; sqlfluff-3.4.2/test/fixtures/dialects/ansi/insert_with_statement.yml000066400000000000000000000044501503426445100260710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aa63e81316e7735b7b77672357284c7905d78729e2ec8fec5c205a1544fd459b file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/like_operators.sql000066400000000000000000000004561503426445100244700ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/828 -- https://github.com/sqlfluff/sqlfluff/issues/842 -- https://www.postgresql.org/docs/9.0/functions-matching.html#FUNCTIONS-LIKE SELECT * FROM my_tbl WHERE a !~ '[a-z]' AND d !~~* '[a-z]' AND b LIKE 'Spec\'s%' AND c !~* '^([0-9]){1,}(\.)([0-9]{1,})$' sqlfluff-3.4.2/test/fixtures/dialects/ansi/like_operators.yml000066400000000000000000000027111503426445100244660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 186842fd07c44c4d6d6dbc939d992e74dbf1096b3d6022a705bc27557c97c4ae file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - like_operator: '!~' - quoted_literal: "'[a-z]'" - binary_operator: AND - column_reference: naked_identifier: d - like_operator: '!~~*' - quoted_literal: "'[a-z]'" - binary_operator: AND - column_reference: naked_identifier: b - keyword: LIKE - quoted_literal: "'Spec\\'s%'" - binary_operator: AND - column_reference: naked_identifier: c - like_operator: '!~*' - quoted_literal: "'^([0-9]){1,}(\\.)([0-9]{1,})$'" sqlfluff-3.4.2/test/fixtures/dialects/ansi/merge_into.sql000066400000000000000000000014531503426445100235740ustar00rootroot00000000000000-- Merge using Table MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Select MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Delete MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN MATCHED THEN DELETE; -- Merge using multiple operations MERGE INTO t USING u ON (a = b) WHEN MATCHED AND a > b THEN UPDATE SET a = b WHEN MATCHED AND ( a < b AND c < d ) THEN DELETE WHEN NOT MATCHED THEN INSERT (a, c) VALUES (b, d); -- Merge using CTE WITH source AS ( SELECT * FROM u ) MERGE INTO t USING source AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); sqlfluff-3.4.2/test/fixtures/dialects/ansi/merge_into.yml000066400000000000000000000254051503426445100236010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6ffafae7c50e3146bbd2d040d1b6788d28e23866a9c0fcdac8aaa2015c0c3b63 file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: 
start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: b - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: d - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: source keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: source - alias_expression: alias_operator: keyword: AS naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/modulo.sql000066400000000000000000000002731503426445100227420ustar00rootroot00000000000000SELECT CASE WHEN (year_number % 400 = 0) OR (year_number % 4 = 0 AND year_number % 100 != 0) THEN TRUE ELSE FALSE END AS is_leap_year FROM mytable 
sqlfluff-3.4.2/test/fixtures/dialects/ansi/modulo.yml000066400000000000000000000050661503426445100227470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 32ab1e024dcd9a27f1127abd3b4ee84258d7bf27bae0045f7bfbe862d5d83c0b file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '400' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' end_bracket: ) - binary_operator: OR - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '4' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - binary_operator: AND - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '100' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - numeric_literal: '0' end_bracket: ) - keyword: THEN - expression: boolean_literal: 'TRUE' - else_clause: keyword: ELSE expression: boolean_literal: 'FALSE' - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: is_leap_year from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_a.sql000066400000000000000000000001501503426445100251530ustar00rootroot00000000000000select a from tbl1 /*comment here*/ ; /*and here*/ select b from tbl2; -- trailing ending comment sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_a.yml000066400000000000000000000022461503426445100251650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 2fa662a2e8867bec6a62bd9077d5d583802675ef5ea6c2bd07acc0bedacbe7f3 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_b.sql000066400000000000000000000004461503426445100251640ustar00rootroot00000000000000# COMMENT -- Another Comment Select A from Sys.dual where a -- inline comment in ('RED', /* Inline */ 'GREEN','BLUE'); select * from tbl_b; # as another comment insert into sch.tbl_b (col1) values (123); with tmp as ( select * from blah ) select a, b from tmp; # And that's the end sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_b.yml000066400000000000000000000064241503426445100251700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 056e3d1311c24d3fd42bc09cc503bc3ba11f2aaa9b9307b04ea0ff3f2d032dce file: - statement: select_statement: select_clause: keyword: Select select_clause_element: column_reference: naked_identifier: A from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sys - dot: . - naked_identifier: dual where_clause: keyword: where expression: column_reference: naked_identifier: a keyword: in bracketed: - start_bracket: ( - quoted_literal: "'RED'" - comma: ',' - quoted_literal: "'GREEN'" - comma: ',' - quoted_literal: "'BLUE'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: - naked_identifier: sch - dot: . 
- naked_identifier: tbl_b - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: tmp keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_c.sql000066400000000000000000000001321503426445100251550ustar00rootroot00000000000000select * from boo; WITH blah AS (select x,y,4.567 FROM foo) select z, y, x from blah; sqlfluff-3.4.2/test/fixtures/dialects/ansi/multi_statement_c.yml000066400000000000000000000044621503426445100251710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8b80ac837c1d2f5e4dede56754cabd62664e3d88e0359515f5f0e4ae7a5f4431 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: blah keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: numeric_literal: '4.567' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: z - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/naked_identifiers.sql000066400000000000000000000005311503426445100251070ustar00rootroot00000000000000-- A valid identifier is alphanumeric and contains at least one letter. 
select "a" as 0_is_a_legal_identifier; select "a" as 00_is_a_legal_identifier; select 123_is_a_legal_identifier.456_is_a_legal_identifier; select "a" as 0is_a_legal_identifier; select _s00.45_is_a_legal_identifier from sdf9_._234awdf; select "a" as is_a_legal_identifier0; sqlfluff-3.4.2/test/fixtures/dialects/ansi/naked_identifiers.yml000066400000000000000000000050271503426445100251160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fff0ff9569dc6f5cb95b5aea91a1b165d283ab4c67917b503b5babdbe5d3c17 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: alias_operator: keyword: as naked_identifier: 0_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: alias_operator: keyword: as naked_identifier: 00_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: 123_is_a_legal_identifier - dot: . - naked_identifier: 456_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: alias_operator: keyword: as naked_identifier: 0is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: _s00 - dot: . - naked_identifier: 45_is_a_legal_identifier from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sdf9_ - dot: . - naked_identifier: _234awdf - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: alias_operator: keyword: as naked_identifier: is_a_legal_identifier0 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/non_breaking_space.sql000066400000000000000000000001101503426445100252400ustar00rootroot00000000000000#space before from is non-breaking space SELECT a,b, c from sch."blah" sqlfluff-3.4.2/test/fixtures/dialects/ansi/non_breaking_space.yml000066400000000000000000000020341503426445100252510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41944fdfe4253c4020bed83e05d0bd14c2539fd2155859af01458fda50d4a7bb file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sch dot: . 
quoted_identifier: '"blah"' sqlfluff-3.4.2/test/fixtures/dialects/ansi/numeric_literal.sql000066400000000000000000000001741503426445100246210ustar00rootroot00000000000000SELECT 1, 1.2, 1., .2, 1e3, 1E3, 1.2e+3, 1.2E+3, 1.e-3, 1.E-3, .2e3, .2E3 ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/numeric_literal.yml000066400000000000000000000027051503426445100246250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0533292dcf978bb1a20080d5df81ca71fdebc7a769b09e9b9f8c3b72f8e030e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '1.2' - comma: ',' - select_clause_element: numeric_literal: '1.' - comma: ',' - select_clause_element: numeric_literal: '.2' - comma: ',' - select_clause_element: numeric_literal: 1e3 - comma: ',' - select_clause_element: numeric_literal: 1E3 - comma: ',' - select_clause_element: numeric_literal: '1.2e+3' - comma: ',' - select_clause_element: numeric_literal: '1.2E+3' - comma: ',' - select_clause_element: numeric_literal: '1.e-3' - comma: ',' - select_clause_element: numeric_literal: '1.E-3' - comma: ',' - select_clause_element: numeric_literal: .2e3 - comma: ',' - select_clause_element: numeric_literal: .2E3 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/obevo.sql000066400000000000000000000004541503426445100225560ustar00rootroot00000000000000//// CHANGE name=alter1 ALTER TABLE table1 ADD COLUMN colY NUMBER; ALTER TABLE table1 ADD COLUMN colZ NUMBER; //// METADATA test //// CHANGE name="init" CREATE TABLE OrigTable ( Field1 int, Field2 int ) //// CHANGE name="dropOld" DROP_TABLE dependencies="OldToNewTableMigration.migration" sqlfluff-3.4.2/test/fixtures/dialects/ansi/obevo.yml000066400000000000000000000027351503426445100225640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e79b63404b6e6ffb1a2e316ffcc5612c691ad07f393496d97bc7758eb8f30541 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: colY data_type: data_type_identifier: NUMBER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: colZ data_type: data_type_identifier: NUMBER - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: OrigTable - bracketed: - start_bracket: ( - column_definition: naked_identifier: Field1 data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: Field2 data_type: data_type_identifier: int - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/revoke_select_on_table_a_from_group_b.sql000066400000000000000000000000461503426445100311760ustar00rootroot00000000000000revoke select on table a from group b sqlfluff-3.4.2/test/fixtures/dialects/ansi/revoke_select_on_table_a_from_group_b.yml000066400000000000000000000012231503426445100311760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8f0fcfb3080b3ecc20c562740c3f682929cf8be7ea60f2b4b54fc46c1fdd371f file: statement: access_statement: - keyword: revoke - keyword: select - keyword: 'on' - keyword: table - object_reference: naked_identifier: a - keyword: from - keyword: group - object_reference: naked_identifier: b sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback.sql000066400000000000000000000000111503426445100232220ustar00rootroot00000000000000rollback sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback.yml000066400000000000000000000007121503426445100232340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6935c614cdd8c2ff4f31d203e1c6c3e810992659d1780aae96bd4969b6457d24 file: statement: transaction_statement: keyword: rollback sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_and_no_chain.sql000066400000000000000000000000261503426445100257100ustar00rootroot00000000000000rollback and no chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_and_no_chain.yml000066400000000000000000000010061503426445100257110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 86784a218718ec1870247ec1a3b586b7bdef8ba4fad2f464d3e89da0e455e594 file: statement: transaction_statement: - keyword: rollback - keyword: and - keyword: 'no' - keyword: chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_work.sql000066400000000000000000000000161503426445100242710ustar00rootroot00000000000000rollback work sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_work.yml000066400000000000000000000007361503426445100243040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5ca7dcd8037e150a8f4b5e125f52ea9a337ada16beabff610feb32d92a02139 file: statement: transaction_statement: - keyword: rollback - keyword: work sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_work_and_no_chain.sql000066400000000000000000000000331503426445100267500ustar00rootroot00000000000000rollback work and no chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/rollback_work_and_no_chain.yml000066400000000000000000000010321503426445100267520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c14181da00acf0aadc77c0de0522c7c49123f94fd96f2d0aa0283289b15d2c9 file: statement: transaction_statement: - keyword: rollback - keyword: work - keyword: and - keyword: 'no' - keyword: chain sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_a.sql000066400000000000000000000000361503426445100232170ustar00rootroot00000000000000select a,b, c from sch."blah" sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_a.yml000066400000000000000000000020341503426445100232210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 264966256d79237298c65904f5e9b688beb292b2a0c39d20a879467f4fac2efe file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sch dot: . quoted_identifier: '"blah"' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_a_and_not_b.sql000066400000000000000000000001221503426445100252160ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/827 SELECT a AND NOT i.b FROM i sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_a_and_not_b.yml000066400000000000000000000017061503426445100252310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b4ece8e4414be9539e889fbb3876c9e1046c7ea8e4dbbef100ed3ffb536cea38 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - binary_operator: AND - keyword: NOT - column_reference: - naked_identifier: i - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: i sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_b.sql000066400000000000000000000000561503426445100232220ustar00rootroot00000000000000select * from foo JOIN bar ON (foo.a = bar.a) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_b.yml000066400000000000000000000030021503426445100232160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0df936d2bcdda936bef477103ec7616d96829041deedb0f20cee737779892555 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: bar join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: foo - dot: . - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bar - dot: . - naked_identifier: a end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_bitwise_operators.sql000066400000000000000000000001121503426445100265360ustar00rootroot00000000000000select c1 & c2, c3 | c4, c5 ^ c6, c7 << c8, c9 >> c10 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_bitwise_operators.yml000066400000000000000000000034521503426445100265520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7c5075062f6d6da74e8109008486152d5135e26ba31c9d725ad4bac17ee3dfeb file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - column_reference: naked_identifier: c1 - binary_operator: ampersand: '&' - column_reference: naked_identifier: c2 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c3 - binary_operator: pipe: '|' - column_reference: naked_identifier: c4 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c5 - binary_operator: ^ - column_reference: naked_identifier: c6 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c7 - binary_operator: - raw_comparison_operator: < - raw_comparison_operator: < - column_reference: naked_identifier: c8 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c9 - binary_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '>' - column_reference: naked_identifier: c10 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_c.sql000066400000000000000000000004051503426445100232210ustar00rootroot00000000000000-- Thanks @mrshu for this query, it tests functions and order by SELECT col_a, col_b, date_col_a, date_col_b FROM "database"."sample_table" WHERE DATE(date_col_b) >= current_date AND length(col_a) = 4 ORDER BY date_col_a DESC NULLS LAST sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_c.yml000066400000000000000000000045361503426445100232340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 967bf2de259367305d5ba63678c6dbc6d2e3b43a08e9f40f5b0b73dd60c314c0 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: col_b - comma: ',' - select_clause_element: column_reference: naked_identifier: date_col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: date_col_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"database"' - dot: . 
- quoted_identifier: '"sample_table"' where_clause: keyword: WHERE expression: - function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: date_col_b end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - bare_function: current_date - binary_operator: AND - function: function_name: function_name_identifier: length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col_a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: date_col_a - keyword: DESC - keyword: NULLS - keyword: LAST sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_a.sql000066400000000000000000000002341503426445100242120ustar00rootroot00000000000000SELECT CASE WHEN 1 = 2 THEN 3 WHEN 4 > 3 THEN 5 + 2 WHEN some_var IN (1,2,3) then "nothing" ELSE "boo" END as a_case_statement FROM boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_a.yml000066400000000000000000000047151503426445100242240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 49770c21a5b91749226f86efb49aab3f64070a4b0616e6346817b1d94fcdf138 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: THEN - expression: numeric_literal: '3' - when_clause: - keyword: WHEN - expression: - numeric_literal: '4' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '3' - keyword: THEN - expression: - numeric_literal: '5' - binary_operator: + - numeric_literal: '2' - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: some_var keyword: IN bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - keyword: then - expression: column_reference: quoted_identifier: '"nothing"' - else_clause: keyword: ELSE expression: column_reference: quoted_identifier: '"boo"' - keyword: END alias_expression: alias_operator: keyword: as naked_identifier: a_case_statement from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_b.sql000066400000000000000000000001671503426445100242200ustar00rootroot00000000000000SELECT CASE some_var WHEN 'hello' THEN 3 WHEN 'hi' THEN 12 ELSE 0 END as a_case_statement FROM boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_b.yml000066400000000000000000000031231503426445100242150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fb563749ff883d3eb2e9b1bd077bab014aa99f60f79eea25f8fd056dc4a78dfc file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - expression: column_reference: naked_identifier: some_var - when_clause: - keyword: WHEN - expression: quoted_literal: "'hello'" - keyword: THEN - expression: numeric_literal: '3' - when_clause: - keyword: WHEN - expression: quoted_literal: "'hi'" - keyword: THEN - expression: numeric_literal: '12' - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END alias_expression: alias_operator: keyword: as naked_identifier: a_case_statement from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_c.sql000066400000000000000000000007141503426445100242170ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::text as mycol from table1; select col0, case when col1 then col2 else col3 end::int::float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as text) as mycol from table1; SELECT column1, CASE WHEN 'test' THEN 12 ELSE 0 END >= 0 FROM boo; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_case_c.yml000066400000000000000000000131561503426445100242250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 91367baca14317eab5f578d87517e6b09e6117f75e8bda8b7617c4d4d234df85 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end casting_operator: '::' data_type: data_type_identifier: text alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: float alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - 
select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: data_type_identifier: text end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: quoted_literal: "'test'" - keyword: THEN - expression: numeric_literal: '12' - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '0' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_cross_join.sql000066400000000000000000000004141503426445100251470ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/871 WITH constants AS ( SELECT 8760 AS hours_per_year ) SELECT table1.name, foo.name, foo.value * constants.hours_per_year AS some_value FROM table1 CROSS JOIN constants JOIN table2 AS foo USING (id) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_cross_join.yml000066400000000000000000000054031503426445100251540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 05963e58706a5e52eea76ffffe933867b3d39ba7ee3323435bd3c964e29c0a9c file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: constants keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '8760' alias_expression: alias_operator: keyword: AS naked_identifier: hours_per_year end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: foo - dot: . - naked_identifier: name - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: foo - dot: . - naked_identifier: value - binary_operator: '*' - column_reference: - naked_identifier: constants - dot: . 
- naked_identifier: hours_per_year alias_expression: alias_operator: keyword: AS naked_identifier: some_value from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table1 - join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: constants - join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: alias_operator: keyword: AS naked_identifier: foo - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_d.sql000066400000000000000000000001631503426445100232230ustar00rootroot00000000000000SELECT col_a, col_b FROM some_table WHERE col_a IS NOT NULL AND col_b NOT IN (SELECT c FROM another_table) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_d.yml000066400000000000000000000033661503426445100232350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d954262ab58fdf0d7eaec4c6fcec951e8ff789a701dc1acaabe5e7f8ac66d675 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: col_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col_a - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: col_b - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_e.sql000066400000000000000000000003421503426445100232230ustar00rootroot00000000000000-- Union expressions SELECT col_a as foo FROM some_table UNION SELECT col_b as foo FROM another_table UNION ALL SELECT col_c as foo FROM this_other_table INTERSECT SELECT col_d as foo FROM the_last_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_e.yml000066400000000000000000000051561503426445100232350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3aeb0b5ca772593ef5b180ecc72ad1f6afb381bc8de9658f67d85b603477c2a7 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a alias_expression: alias_operator: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_b alias_expression: alias_operator: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_c alias_expression: alias_operator: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_other_table - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_d alias_expression: alias_operator: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: the_last_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_except.sql000066400000000000000000000000731503426445100242700ustar00rootroot00000000000000SELECT 1 EXCEPT SELECT 2 ; SELECT 1 EXCEPT ALL SELECT 2 ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_except.yml000066400000000000000000000022311503426445100242700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 164b93288c1ad3ddb071071686b3c34e1fe2e93f29f6feaa4c068e458d3701b5 file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: EXCEPT - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_f.sql000066400000000000000000000001271503426445100232250ustar00rootroot00000000000000-- Test the parser on complex maths SELECT COS(2*ACOS(-1)*2*y/53) AS c2 FROM t sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_f.yml000066400000000000000000000034711503426445100232340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 10acc9b413a3f5b996af4b70d477cbee33b44e57594953d87578415ce13907af file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COS function_contents: bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - function: function_name: function_name_identifier: ACOS function_contents: bracketed: start_bracket: ( expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' end_bracket: ) - binary_operator: '*' - numeric_literal: '2' - binary_operator: '*' - column_reference: naked_identifier: y - binary_operator: / - numeric_literal: '53' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: c2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_fetch.sql000066400000000000000000000001041503426445100240640ustar00rootroot00000000000000SELECT EMPLOYEE.EMPNO FROM EMPLOYEE FETCH FIRST 3 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_fetch.yml000066400000000000000000000017131503426445100240750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8019caf6295be540dd5bef757766ba8f960d1425d6cb0d1db4779b1816c11f2e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: EMPLOYEE - dot: . - naked_identifier: EMPNO from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EMPLOYEE fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_fn_square_bracket_array_parameter.sql000066400000000000000000000000271503426445100317130ustar00rootroot00000000000000SELECT myfn([1, 2, 3]) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_fn_square_bracket_array_parameter.yml000066400000000000000000000020351503426445100317160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2d7409f5c3e98851f7c40a4a0d6978d7d84d38ea26f7c14c5ba6b81481fb376d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: myfn function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_from_where_overlaps.sql000066400000000000000000000001031503426445100270420ustar00rootroot00000000000000SELECT column_name FROM table_name WHERE period1 OVERLAPS period2; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_from_where_overlaps.yml000066400000000000000000000020301503426445100270450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9cedfe94bf4b39aa6ab3800f1ff478ea2e253d375df4aea21e254523c2923827 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: period1 overlaps_clause: keyword: OVERLAPS column_reference: naked_identifier: period2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_function_in_group_by.sql000066400000000000000000000001041503426445100272140ustar00rootroot00000000000000SELECT COALESCE(id, 1) FROM some_table GROUP BY COALESCE(id, 1) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_function_in_group_by.yml000066400000000000000000000031331503426445100272230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4edc6ec5391158a725ad8711e68239250701a5e66816ff78276c96c632669253 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table groupby_clause: - keyword: GROUP - keyword: BY - expression: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_g.sql000066400000000000000000000001641503426445100232270ustar00rootroot00000000000000-- More complex select clause without from clause SELECT NULL::INT AS user_id, NULL::INT AS is_paid LIMIT 0 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_g.yml000066400000000000000000000023371503426445100232350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7dd6c896c0d8ffcf139fec11fddd4a6381ec13a8dc8718acb5743b03d0c7cf07 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: alias_operator: keyword: AS naked_identifier: user_id - comma: ',' - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: alias_operator: keyword: AS naked_identifier: is_paid limit_clause: keyword: LIMIT numeric_literal: '0' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_g_fetch.sql000066400000000000000000000002041503426445100243730ustar00rootroot00000000000000-- More complex select clause without from clause SELECT NULL::INT AS user_id, NULL::INT AS is_paid FETCH FIRST 0 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_g_fetch.yml000066400000000000000000000024421503426445100244030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0563d992f61b313c54c3e5f5bbf28ddebdce61a6bf92711cfd796699e9fc6f63 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: alias_operator: keyword: AS naked_identifier: user_id - comma: ',' - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: alias_operator: keyword: AS naked_identifier: is_paid fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '0' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_h.sql000066400000000000000000000003321503426445100232250ustar00rootroot00000000000000SELECT DATE(zendesk.created_at, 'America/New_York') AS date, COUNT( CASE WHEN zendesk.support_team IN ('tech support', 'taskus', 'onc') THEN 1 END ) AS tech_support FROM zendesk sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_h.yml000066400000000000000000000051251503426445100232340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bb6cdaa9b29e62d36093086e3b53e65eea9d9c74d1a4c65d24dd12c0089bc014 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: zendesk - dot: . - naked_identifier: created_at - comma: ',' - expression: quoted_literal: "'America/New_York'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: zendesk - dot: . - naked_identifier: support_team keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'tech support'" - comma: ',' - quoted_literal: "'taskus'" - comma: ',' - quoted_literal: "'onc'" - end_bracket: ) - keyword: THEN - expression: numeric_literal: '1' - keyword: END end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tech_support from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: zendesk sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_in_multiline_comment.sql000066400000000000000000000001661503426445100272150ustar00rootroot00000000000000with a as ( select * from table_a ), /* select */ b as ( select * from a ) select * from b sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_in_multiline_comment.yml000066400000000000000000000041171503426445100272170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 63dd7e7ed5683b256493aa01bcebb3091cb0058c3c4246230200a506d4b5eecb
file:
  statement:
    with_compound_statement:
    - keyword: with
    - common_table_expression:
        naked_identifier: a
        keyword: as
        bracketed:
          start_bracket: (
          select_statement:
            select_clause:
              keyword: select
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: from
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: table_a
          end_bracket: )
    - comma: ','
    - common_table_expression:
        naked_identifier: b
        keyword: as
        bracketed:
          start_bracket: (
          select_statement:
            select_clause:
              keyword: select
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: from
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: a
          end_bracket: )
    - select_statement:
        select_clause:
          keyword: select
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: b
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_intersect.sql000066400000000000000000000001011503426445100247700ustar00rootroot00000000000000SELECT 1
INTERSECT
SELECT 2
;

SELECT 1
INTERSECT ALL
SELECT 2
;
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_intersect.yml000066400000000000000000000022371503426445100250060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a2de94a03cd14dd4823bc2a5043a3418e8994b289c1013ec32aa4df7a832cc65
file:
- statement:
    set_expression:
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '1'
    - set_operator:
        keyword: INTERSECT
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '2'
- statement_terminator: ;
- statement:
    set_expression:
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '1'
    - set_operator:
      - keyword: INTERSECT
      - keyword: ALL
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '2'
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_j.sql000066400000000000000000000004221503426445100232270ustar00rootroot00000000000000-- Aliasing without AS
-- https://github.com/sqlfluff/sqlfluff/issues/149
SELECT
    (POW(sd2,2) + POW(sd3,2) + POW(sd4,2) + POW(sd4,2)) w1
FROM dat;

-- Another Aliasing without AS
SELECT
    CASE WHEN order_month = max_month THEN 1 ELSE 0 END churn
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_j.yml000066400000000000000000000073651503426445100232440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6d0d616d431d59cdba64b181a682b7c62e757aa1530d4ff7d9abe9109b06345e
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
            bracketed:
              start_bracket: (
              expression:
              - function:
                  function_name:
                    function_name_identifier: POW
                  function_contents:
                    bracketed:
                    - start_bracket: (
                    - expression:
                        column_reference:
                          naked_identifier: sd2
                    - comma: ','
                    - expression:
                        numeric_literal: '2'
                    - end_bracket: )
              - binary_operator: +
              - function:
                  function_name:
                    function_name_identifier: POW
                  function_contents:
                    bracketed:
                    - start_bracket: (
                    - expression:
                        column_reference:
                          naked_identifier: sd3
                    - comma: ','
                    - expression:
                        numeric_literal: '2'
                    - end_bracket: )
              - binary_operator: +
              - function:
                  function_name:
                    function_name_identifier: POW
                  function_contents:
                    bracketed:
                    - start_bracket: (
                    - expression:
                        column_reference:
                          naked_identifier: sd4
                    - comma: ','
                    - expression:
                        numeric_literal: '2'
                    - end_bracket: )
              - binary_operator: +
              - function:
                  function_name:
                    function_name_identifier: POW
                  function_contents:
                    bracketed:
                    - start_bracket: (
                    - expression:
                        column_reference:
                          naked_identifier: sd4
                    - comma: ','
                    - expression:
                        numeric_literal: '2'
                    - end_bracket: )
              end_bracket: )
          alias_expression:
            naked_identifier: w1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dat
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
            case_expression:
            - keyword: CASE
            - when_clause:
              - keyword: WHEN
              - expression:
                - column_reference:
                    naked_identifier: order_month
                - comparison_operator:
                    raw_comparison_operator: '='
                - column_reference:
                    naked_identifier: max_month
              - keyword: THEN
              - expression:
                  numeric_literal: '1'
            - else_clause:
                keyword: ELSE
                expression:
                  numeric_literal: '0'
            - keyword: END
          alias_expression:
            naked_identifier: churn
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_l.sql000066400000000000000000000001701503426445100232310ustar00rootroot00000000000000-- Nested scalar query
-- https://github.com/sqlfluff/sqlfluff/issues/147
SELECT
    a
FROM dat
WHERE c >= (SELECT 1)
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_l.yml000066400000000000000000000024011503426445100232320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d73dada9ab50d1658f9be0779284a5152fe088c3625647d3d44a4ec701730684 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dat where_clause: keyword: WHERE expression: column_reference: naked_identifier: c comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_m.sql000066400000000000000000000005701503426445100232360ustar00rootroot00000000000000-- On clause without brackets -- https://github.com/sqlfluff/sqlfluff/issues/146 SELECT a FROM zendesk LEFT JOIN ticket ON zendesk.ticket_id = ticket.id; SELECT low_user_counts FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type) AND (business_type = low_business_type OR size_label = low_size_label); sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_m.yml000066400000000000000000000064231503426445100232430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2eea1fa41db10c8151633f3fd9f63f4bd1c2849ca484e69fc3eefaccdfcd08b2 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: zendesk join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: ticket - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: zendesk - dot: . - naked_identifier: ticket_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ticket - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: low_user_counts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: acceptable_buckets join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: small_buckets join_on_condition: keyword: 'ON' expression: - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_business_type end_bracket: ) - binary_operator: AND - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_business_type - binary_operator: OR - column_reference: naked_identifier: size_label - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_size_label end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_many_join.sql000066400000000000000000000016261503426445100247700ustar00rootroot00000000000000SELECT * FROM a as foo JOIN b JOIN c as foobar JOIN d, e as bar JOIN f JOIN g('blah') as tbl_func JOIN h, baz as buzz; SELECT c.f1 as f1 , co.f2 as f2 , po.f3 as f3 , c2c.f4 as f4 , c_ph.f5 as f5 FROM t1 AS c LEFT JOIN t2 AS co ON c.f1 = co.f1 LEFT JOIN t3 AS po ON c.f1 = po.f1 LEFT JOIN ( SELECT t._tmp as _tmp FROM (SELECT * FROM t4) AS t ) AS l_ccc ON c.f1 = l_ccc._tmp LEFT JOIN t5 AS cc ON l_ccc._tmp = cc.f1 LEFT JOIN ( ( SELECT t._tmp AS _tmp FROM (SELECT * FROM t6) AS t ) AS l_c2c_c LEFT JOIN ( SELECT a1._tmp AS _tmp , h.id , h.f1 FROM ( SELECT t.id , t.f4 FROM (SELECT * FROM t7) AS t) AS h LEFT JOIN (SELECT * FROM t8) AS a1 ON a1.id = h.id ) AS c2c ON l_c2c_c._tmp = c2c.id ) ON c.f1 = l_c2c_c._tmp LEFT JOIN t9 AS c_ph ON c.f1 = c_ph.f1; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_many_join.yml000066400000000000000000000506741503426445100250010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6b675edb84da45893d181be8d3ed15a1e761bc7f0cff3a32062b2218fc599435 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: a alias_expression: alias_operator: keyword: as naked_identifier: foo - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: b - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: c alias_expression: alias_operator: keyword: as naked_identifier: foobar - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: d - comma: ',' - from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: e alias_expression: alias_operator: keyword: as naked_identifier: bar - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: f - join_clause: keyword: JOIN from_expression_element: table_expression: function: function_name: function_name_identifier: g function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'blah'" end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: tbl_func - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: h - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz alias_expression: alias_operator: keyword: as naked_identifier: buzz - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 alias_expression: alias_operator: keyword: as naked_identifier: f1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: co - dot: . - naked_identifier: f2 alias_expression: alias_operator: keyword: as naked_identifier: f2 - comma: ',' - select_clause_element: column_reference: - naked_identifier: po - dot: . - naked_identifier: f3 alias_expression: alias_operator: keyword: as naked_identifier: f3 - comma: ',' - select_clause_element: column_reference: - naked_identifier: c2c - dot: . - naked_identifier: f4 alias_expression: alias_operator: keyword: as naked_identifier: f4 - comma: ',' - select_clause_element: column_reference: - naked_identifier: c_ph - dot: . - naked_identifier: f5 alias_expression: alias_operator: keyword: as naked_identifier: f5 from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: alias_operator: keyword: AS naked_identifier: c - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 alias_expression: alias_operator: keyword: AS naked_identifier: co - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: co - dot: . 
- naked_identifier: f1 - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 alias_expression: alias_operator: keyword: AS naked_identifier: po - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: po - dot: . - naked_identifier: f1 - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: _tmp alias_expression: alias_operator: keyword: as naked_identifier: _tmp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t4 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: l_ccc - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: l_ccc - dot: . - naked_identifier: _tmp - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t5 alias_expression: alias_operator: keyword: AS naked_identifier: cc - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: l_ccc - dot: . - naked_identifier: _tmp - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: cc - dot: . - naked_identifier: f1 - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: bracketed: start_bracket: ( table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: _tmp alias_expression: alias_operator: keyword: AS naked_identifier: _tmp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t6 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: l_c2c_c join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a1 - dot: . - naked_identifier: _tmp alias_expression: alias_operator: keyword: AS naked_identifier: _tmp - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . 
- naked_identifier: f1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: f4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t7 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: h join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t8 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a1 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: h - dot: . - naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: c2c - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: l_c2c_c - dot: . - naked_identifier: _tmp - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c2c - dot: . - naked_identifier: id end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: l_c2c_c - dot: . - naked_identifier: _tmp - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t9 alias_expression: alias_operator: keyword: AS naked_identifier: c_ph - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c_ph - dot: . - naked_identifier: f1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_multiple_named_windows.sql000066400000000000000000000003411503426445100275470ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (d) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_multiple_named_windows.yml000066400000000000000000000064311503426445100275570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f5ce4da54f1f881b7d10d32d06486a714f6ff32d900218c7ea8323f41c4dddeb file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: d end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: b frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: d keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_n.sql000066400000000000000000000005111503426445100232320ustar00rootroot00000000000000-- Full Join -- https://github.com/sqlfluff/sqlfluff/issues/144 SELECT exists_left.business_type AS business_type_left, exists_right.business_type AS business_type_right FROM benchmark_summaries AS exists_left FULL JOIN business_types AS exists_right ON exists_left.business_type = exists_right.business_type sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_n.yml000066400000000000000000000043561503426445100232470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 552a1824901cd1a298125560d4f3a138ae64a2d2408e54e2f345ee3f99caa910 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: exists_left - dot: . - naked_identifier: business_type alias_expression: alias_operator: keyword: AS naked_identifier: business_type_left - comma: ',' - select_clause_element: column_reference: - naked_identifier: exists_right - dot: . 
- naked_identifier: business_type alias_expression: alias_operator: keyword: AS naked_identifier: business_type_right from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_summaries alias_expression: alias_operator: keyword: AS naked_identifier: exists_left join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: business_types alias_expression: alias_operator: keyword: AS naked_identifier: exists_right - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: exists_left - dot: . - naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: exists_right - dot: . - naked_identifier: business_type sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window.sql000066400000000000000000000001071503426445100254510ustar00rootroot00000000000000SELECT AVG(col) OVER(win) FROM table WINDOW win AS (PARTITION BY date) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window.yml000066400000000000000000000032321503426445100254550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1927f81fb17b972b82b8383bc371b34a6c15fb5549bc25d21ec47d4f38934b9f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: win end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: date end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window_no_parentheses.sql000066400000000000000000000002131503426445100305440ustar00rootroot00000000000000SELECT NTH_VALUE(bar, 1) OVER w1 AS baz FROM t WINDOW w1 AS ( PARTITION BY x, y, z ORDER BY abc DESC ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window_no_parentheses.yml000066400000000000000000000042551503426445100305600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2428658bc2293c9aefd795c0a4b8c1a06782af7a491997d363e47f54f1a69474 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER naked_identifier: w1 alias_expression: alias_operator: keyword: AS naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t named_window: keyword: WINDOW named_window_expression: naked_identifier: w1 keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - comma: ',' - expression: column_reference: naked_identifier: z orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: abc - keyword: DESC end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window_with_parentheses.sql000066400000000000000000000002611503426445100311060ustar00rootroot00000000000000SELECT NTH_VALUE(bar, 1) OVER(w1) AS baz, NTH_VALUE(bar, 1) OVER() AS foo FROM t WINDOW w1 AS ( PARTITION BY x, y, z ORDER BY abc DESC ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_window_with_parentheses.yml000066400000000000000000000060051503426445100311120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9858cd1c474eaa68d3ec679ab4b88fde27c98ed28c6f70158ba0a2f29dfb5ec1 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: w1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: baz - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t named_window: keyword: WINDOW named_window_expression: naked_identifier: w1 keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - comma: ',' - expression: column_reference: naked_identifier: z orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: abc - keyword: DESC end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_windows_each_window_specification.sql000066400000000000000000000003211503426445100331010ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (c) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (ORDER BY purchases), c AS (ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_named_windows_each_window_specification.yml000066400000000000000000000057341503426445100331200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 201952e76374d0a871d6122d889c17af69dcf37384f2a770fee1a6f9de657420 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_natural_join.sql000066400000000000000000000006171503426445100254710ustar00rootroot00000000000000SELECT * FROM table1 NATURAL JOIN table2; SELECT * FROM table1 NATURAL INNER JOIN table2; SELECT * FROM table1 NATURAL LEFT JOIN table2; SELECT * FROM table1 NATURAL LEFT OUTER JOIN table2; SELECT * FROM table1 NATURAL RIGHT JOIN table2; SELECT * FROM table1 NATURAL RIGHT OUTER JOIN table2; SELECT * FROM table1 NATURAL FULL JOIN table2; SELECT * FROM table1 NATURAL FULL OUTER JOIN table2; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_natural_join.yml000066400000000000000000000133541503426445100254750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 83e3312a773ee3e3dee22bd7341b922507c1aaee51ef41c02c3ea414ef6b8c82 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - 
from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_nested_join.sql000066400000000000000000000036161503426445100253070ustar00rootroot00000000000000-- query with no brackets select orders.order_id AS order_id, customers.email AS email from orders join customers on( ( customers.customer_id = orders.customer_id ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 1 levels select orders.order_id AS order_id, customers.email AS email from ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 2 levels select orders.order_id AS order_id, customers.email AS email from ( ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) join products on( (products.product_id = orders.product_id) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 3 levels select orders.order_id AS order_id, customers.email AS email from ( ( ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) join products on( (products.product_id = orders.product_id) ) ) join random on( (random.product_id = products.product_id) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_nested_join.yml000066400000000000000000000360621503426445100253120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61d9b77e97d00d4c8d80182e1b184a084793a528f7519595a86a0dd67f08deca file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: alias_operator: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: alias_operator: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . 
- naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: alias_operator: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: alias_operator: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: alias_operator: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: alias_operator: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . 
- naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: products join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: alias_operator: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: alias_operator: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: products join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: random join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: random - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . 
- naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_numeric_literal_exponential_format.sql000066400000000000000000000001761503426445100321400ustar00rootroot00000000000000SELECT 1e-9, -12.345e12, .0123E-6, 25, +6.34, 0.5, 25e-03, -1, 25, -col1, -+-col2 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_numeric_literal_exponential_format.yml000066400000000000000000000034011503426445100321340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e84987da703b2565d55268ce089edd8ec71b9e94963f4e7216f0c82dfe3d7107 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: 1e-9 - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: 12.345e12 - comma: ',' - select_clause_element: numeric_literal: '.0123E-6' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: numeric_literal: sign_indicator: + numeric_literal: '6.34' - comma: ',' - select_clause_element: numeric_literal: '0.5' - comma: ',' - select_clause_element: numeric_literal: 25e-03 - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: expression: sign_indicator: '-' column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: expression: - sign_indicator: '-' - sign_indicator: + - sign_indicator: '-' - column_reference: naked_identifier: col2 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_o.sql000066400000000000000000000006311503426445100232360ustar00rootroot00000000000000-- Between and Not Between -- https://github.com/sqlfluff/sqlfluff/issues/142 -- https://github.com/sqlfluff/sqlfluff/issues/478 -- https://github.com/sqlfluff/sqlfluff/issues/2845 SELECT business_type FROM benchmark_summaries WHERE avg_click_rate NOT BETWEEN 0 and 1 + 1 + some_value AND some_other_thing BETWEEN 0 - 1 * another_value and 1 AND another_thing BETWEEN -another_value and 0 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_o.yml000066400000000000000000000035431503426445100232450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 27f7510e3029d7c72b72d88565fe738581add837efc8e0a16900e17670aec6f3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: business_type from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_summaries where_clause: keyword: WHERE expression: - column_reference: naked_identifier: avg_click_rate - keyword: NOT - keyword: BETWEEN - numeric_literal: '0' - keyword: and - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - binary_operator: + - column_reference: naked_identifier: some_value - binary_operator: AND - column_reference: naked_identifier: some_other_thing - keyword: BETWEEN - numeric_literal: '0' - binary_operator: '-' - numeric_literal: '1' - binary_operator: '*' - column_reference: naked_identifier: another_value - keyword: and - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: another_thing - keyword: BETWEEN - sign_indicator: '-' - column_reference: naked_identifier: another_value - keyword: and - numeric_literal: '0' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_offset.sql000066400000000000000000000002301503426445100242610ustar00rootroot00000000000000SELECT * FROM test OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY; SELECT Name, Address FROM Employees ORDER BY Salary OFFSET 2 ROWS FETCH FIRST 2 ROWS ONLY; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_offset.yml000066400000000000000000000036241503426445100242750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da4b9b6e658e58cd26d488995a5131018009258c61a5533f97015a806c661dd9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test offset_clause: - keyword: OFFSET - numeric_literal: '10' - keyword: ROWS fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '10' - keyword: ROWS - keyword: ONLY - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: Name - comma: ',' - select_clause_element: column_reference: naked_identifier: Address from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employees orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Salary offset_clause: - keyword: OFFSET - numeric_literal: '2' - keyword: ROWS fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '2' - keyword: ROWS - keyword: ONLY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_order_fetch.sql000066400000000000000000000001351503426445100252630ustar00rootroot00000000000000SELECT EMPLOYEE.EMPNO FROM EMPLOYEE ORDER BY SALARY DESC FETCH FIRST 3 ROWS ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_order_fetch.yml000066400000000000000000000021371503426445100252710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2f9ffd79819b655896c0464ea8f489882038a7ca53e79331662bb85d9a830cd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: EMPLOYEE - dot: . - naked_identifier: EMPNO from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EMPLOYEE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: SALARY - keyword: DESC fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_overlaps.sql000066400000000000000000000001371503426445100246340ustar00rootroot00000000000000SELECT (DATE '2001-02-16', DATE '2001-12-21') OVERLAPS (DATE '2001-10-30', DATE '2002-10-30'); sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_overlaps.yml000066400000000000000000000021361503426445100246370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 71dca941f86bf8ba3aac81e95eab24273aac6c958b5f9276e4cd2fd7c080ded8 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: - start_bracket: ( - keyword: DATE - date_constructor_literal: "'2001-02-16'" - comma: ',' - keyword: DATE - date_constructor_literal: "'2001-12-21'" - end_bracket: ) overlaps_clause: keyword: OVERLAPS bracketed: - start_bracket: ( - keyword: DATE - date_constructor_literal: "'2001-10-30'" - comma: ',' - keyword: DATE - date_constructor_literal: "'2002-10-30'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_p.sql000066400000000000000000000003501503426445100232350ustar00rootroot00000000000000-- Case and Extract Expressions -- https://github.com/sqlfluff/sqlfluff/issues/143 SELECT CAST(25.65 AS int), SAFE_CAST(NULL AS STRING) AS age_label, EXTRACT(day FROM end_time) AS day FROM benchmark_with_performance sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_p.yml000066400000000000000000000042551503426445100232470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ef37a2924556ffa5c75e02ecde51fedf309dab23c0ee4dd2d587596c337a90bd file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '25.65' keyword: AS data_type: data_type_identifier: int end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SAFE_CAST function_contents: bracketed: start_bracket: ( expression: null_literal: 'NULL' keyword: AS data_type: data_type_identifier: STRING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: age_label - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: day keyword: FROM expression: column_reference: naked_identifier: end_time end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: day from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_with_performance sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_q.sql000066400000000000000000000003601503426445100232370ustar00rootroot00000000000000-- Unexpected Join Fail -- https://github.com/sqlfluff/sqlfluff/issues/163 SELECT data.id FROM data JOIN data_max ON data.event_id = data_max.event_id LEFT JOIN "other_db"."other_data" AS od ON od.fid = data_max.fid sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_q.yml000066400000000000000000000044761503426445100232550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 652f61a7adf86fd255f39e8d969589b068bede11b2f6632d9db7d0a889225227 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: data - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: data - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: data_max join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: data - dot: . - naked_identifier: event_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: data_max - dot: . - naked_identifier: event_id - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: - quoted_identifier: '"other_db"' - dot: . - quoted_identifier: '"other_data"' alias_expression: alias_operator: keyword: AS naked_identifier: od - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: od - dot: . - naked_identifier: fid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: data_max - dot: . 
- naked_identifier: fid sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_r.sql000066400000000000000000000003501503426445100232370ustar00rootroot00000000000000-- Union All in a With -- https://github.com/sqlfluff/sqlfluff/issues/162 WITH result AS ( SELECT customer FROM sales_eu AS s UNION ALL SELECT customer FROM sales_us AS s2 ) SELECT * FROM result sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_r.yml000066400000000000000000000045311503426445100232460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 723322574908c867a45279e1a536f61c1f1eed4d0e0010e575295380bd4aa361 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: result keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_eu alias_expression: alias_operator: keyword: AS naked_identifier: s - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_us alias_expression: alias_operator: keyword: AS naked_identifier: s2 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: result sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_right.sql000066400000000000000000000001341503426445100241130ustar00rootroot00000000000000SELECT column_name FROM table1 RIGHT JOIN table2 ON table1.column_name = table2.column_name sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_right.yml000066400000000000000000000026741503426445100241300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3fc1e70bdaf4d47492cf7f44f0786aff1532c440338f40f26d7b9611b3e63d0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . 
- naked_identifier: column_name sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_s.sql000066400000000000000000000006411503426445100232430ustar00rootroot00000000000000-- Array notation (BigQuery and Postgres) -- https://github.com/sqlfluff/sqlfluff/issues/59 SELECT user_id, list_id, (count_18_24 * bits[OFFSET(0)] + count_25_34 * bits[OFFSET(1)] + count_35_44 * bits[OFFSET(2)] + count_45_54 * bits[OFFSET(3)] + count_55_64 * bits[OFFSET(4)] + count_65_plus * bits[OFFSET(5)]) / audience_size AS relative_abundance FROM gcp_project.dataset.audience_counts_gender_age sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_s.yml000066400000000000000000000134361503426445100232530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74ac6db224086dd7e8d316b209ff2d8a6f453c1cfa18a4c00d7bfd0ecd8fcfef file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: user_id - comma: ',' - select_clause_element: column_reference: naked_identifier: list_id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: count_18_24 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_25_34 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_35_44 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_45_54 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_55_64 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_65_plus - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: 
numeric_literal: '5' end_bracket: ) end_square_bracket: ']' end_bracket: ) binary_operator: / column_reference: naked_identifier: audience_size alias_expression: alias_operator: keyword: AS naked_identifier: relative_abundance from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: gcp_project - dot: . - naked_identifier: dataset - dot: . - naked_identifier: audience_counts_gender_age sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10.sql000066400000000000000000000000551503426445100266340ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE BERNOULLI (10) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10.yml000066400000000000000000000017731503426445100266460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f43ddfc65bea81777b3409c1dfbc58879c76dd5c10217ac854b6315fd23fa6e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10_aliased.sql000066400000000000000000000000721503426445100303150ustar00rootroot00000000000000SELECT f.colname FROM foo AS f TABLESAMPLE BERNOULLI (10) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10_aliased.yml000066400000000000000000000022221503426445100303160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0fa6454fe9cbdd6e52029cad9545c7a7b8b3064757486fc9e7794406ce9e4c6e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: f - dot: . - naked_identifier: colname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo alias_expression: alias_operator: keyword: AS naked_identifier: f sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10_repeatable.sql000066400000000000000000000000761503426445100310230ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE BERNOULLI (10) REPEATABLE (100) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_10_repeatable.yml000066400000000000000000000022351503426445100310240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 91865cea4612ace8ed9777150a6122d2cf5c307fa5a15b4a8e55048c16b7d74d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_order_limit.sql000066400000000000000000000001161503426445100307230ustar00rootroot00000000000000SELECT col1, col2 FROM foo TABLESAMPLE BERNOULLI (10) ORDER BY col1 LIMIT 100 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_bernoulli_order_limit.yml000066400000000000000000000024311503426445100307270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f21525a5906c37fc87da1b4790b90701a283c29277b569a3f542beaa477d661e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 limit_clause: keyword: LIMIT numeric_literal: '100' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10.sql000066400000000000000000000000521503426445100261620ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE SYSTEM (10) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10.yml000066400000000000000000000017701503426445100261740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7478fb95270d15dd89cfcbed39cfc04de7a90854b40acdc7d240910dbf0d322d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10_aliased.sql000066400000000000000000000000671503426445100276520ustar00rootroot00000000000000SELECT f.colname FROM foo AS f TABLESAMPLE SYSTEM (10) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10_aliased.yml000066400000000000000000000022171503426445100276530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 75e800cdaeb73f78420dc507139629498f50b20a493e92c53dcf436fc4c2bc15 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: f - dot: . - naked_identifier: colname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo alias_expression: alias_operator: keyword: AS naked_identifier: f sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10_repeatable.sql000066400000000000000000000000731503426445100303510ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE SYSTEM (10) REPEATABLE (100) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_sample_system_10_repeatable.yml000066400000000000000000000022321503426445100303520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7da96fb9cdc292e207aaf5fb2ca24010332d4aae5187fb19abb6e55dc4b7ed97 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_a.sql000066400000000000000000000000111503426445100245610ustar00rootroot00000000000000select 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_a.yml000066400000000000000000000010301503426445100245650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2d5b2a76d63773e6faa9f682b2b616e05a1cead7f3ae1232e86cbcf87e7c1ce1 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_b.sql000066400000000000000000000000231503426445100245650ustar00rootroot00000000000000select * from blah sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_b.yml000066400000000000000000000014321503426445100245740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 322fb1e6da9414c65b760e1c0d87ea0b579ba73cdbca3365afa79b1eba267475 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_c.sql000066400000000000000000000000271503426445100245720ustar00rootroot00000000000000select * from foo, bar sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_c.yml000066400000000000000000000017131503426445100245770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4173e890171054d33cb16b159a27866bc73734bedeea1605b2023aab0ce13fea file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_d.sql000066400000000000000000000000401503426445100245660ustar00rootroot00000000000000 select 12 -- ends with comment sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_d.yml000066400000000000000000000010311503426445100245710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b45317c5e006ec28acb8a50d078c16d3036b90f0e8c266793d85ee615368f21e file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '12' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_e.sql000066400000000000000000000001361503426445100245750ustar00rootroot00000000000000SELECT my_var::date as casted_variable, 123::bigint as another_casted_number FROM boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_e.yml000066400000000000000000000026411503426445100246020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 85cef2229ff9027b4bfbf253c06a2805bf7768f1e224d426b6c09a55d56f1e2f file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: my_var casting_operator: '::' data_type: data_type_identifier: date alias_expression: alias_operator: keyword: as naked_identifier: casted_variable - comma: ',' - select_clause_element: expression: cast_expression: numeric_literal: '123' casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: alias_operator: keyword: as naked_identifier: another_casted_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_f.sql000066400000000000000000000001401503426445100245710ustar00rootroot00000000000000-- test some more advanced constructs SELECT a, b FROM boo GROUP BY 1 ORDER BY b, 1 LIMIT 5 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_f.yml000066400000000000000000000022721503426445100246030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dfaa42a3cdec07d3ade8545214ad52704e0ff48d62b8ae1d85f48dece573cc9b file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - comma: ',' - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '5' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_g.sql000066400000000000000000000001251503426445100245750ustar00rootroot00000000000000-- Having Clause SELECT id FROM test WHERE id >= 4 GROUP BY id HAVING id < 5 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_g.yml000066400000000000000000000025501503426445100246030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 046e4b32715f69c01155d2d5909f6322d3ed4ec353ef7cff62f341c194733540 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '4' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id having_clause: keyword: HAVING expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: < numeric_literal: '5' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_h.sql000066400000000000000000000002211503426445100245730ustar00rootroot00000000000000-- test window functions in functions with casting SELECT DATEADD(DAY, ROW_NUMBER() OVER (ORDER BY DateCD ASC), '2014-01-01') AS dt FROM boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_h.yml000066400000000000000000000037121503426445100246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: edbb120691c09f820c566b1d76cdb31c7f7877987824509c9d7585fbac87f829 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: DAY - comma: ',' - expression: function: function_name: function_name_identifier: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DateCD - keyword: ASC end_bracket: ) - comma: ',' - expression: quoted_literal: "'2014-01-01'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_i.sql000066400000000000000000000006751503426445100246110ustar00rootroot00000000000000-- test aliasing SELECT raw_column, raw_sch.raw_col, simple_explicit as aliased_column_1, simple_implicit aliased_column_2, an_unaliased + calculation, -- We know that the following one doesn't parse... -- an_implicitly + aliased calculation, an_explicitly - aliased as calculation, 'an unalised string', 123.6, -786 as aliased_column3 FROM unaliased JOIN aliased_1 as foo JOIN aliased_2 bar USING(b) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_i.yml000066400000000000000000000062341503426445100246100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0744957d16eb3bda89b7216791006755fcdbd5d2e738b3d24c1f41ad0e6de1c3 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: raw_column - comma: ',' - select_clause_element: column_reference: - naked_identifier: raw_sch - dot: . - naked_identifier: raw_col - comma: ',' - select_clause_element: column_reference: naked_identifier: simple_explicit alias_expression: alias_operator: keyword: as naked_identifier: aliased_column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: simple_implicit alias_expression: naked_identifier: aliased_column_2 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: an_unaliased - binary_operator: + - column_reference: naked_identifier: calculation - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: an_explicitly - binary_operator: '-' - column_reference: naked_identifier: aliased alias_expression: alias_operator: keyword: as naked_identifier: calculation - comma: ',' - select_clause_element: quoted_literal: "'an unalised string'" - comma: ',' - select_clause_element: numeric_literal: '123.6' - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '786' alias_expression: alias_operator: keyword: as naked_identifier: aliased_column3 from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: unaliased - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: aliased_1 alias_expression: alias_operator: keyword: as naked_identifier: foo - join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: aliased_2 alias_expression: naked_identifier: bar - keyword: USING - bracketed: start_bracket: ( naked_identifier: b end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_j.sql000066400000000000000000000002721503426445100246030ustar00rootroot00000000000000-- test parsing of cross join and outer join SELECT count_correctly_substituted FROM correctly_substituted CROSS JOIN needs_substitution LEFT OUTER JOIN some_other_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_simple_j.yml000066400000000000000000000024231503426445100246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cda21556c8c2d93b02b3817f5e0eb3616d69740b706d026b8f7f6fb2a4477da5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: count_correctly_substituted from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: correctly_substituted - join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: needs_substitution - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: some_other_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_t.sql000066400000000000000000000004301503426445100232400ustar00rootroot00000000000000SELECT * FROM TABLE_1 FULL OUTER JOIN -- comment1 ( SELECT * FROM Table_B WHERE COL_2 = 'B' UNION ALL SELECT * FROM TABLE_C WHERE COL_1 = 0 ) ON TABLE_1.A = TABLE_2.A sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_t.yml000066400000000000000000000070171503426445100232520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e7e2a19499cb4875ea5e86b4b9a3ef52564d1cb0fe3ee88e4da65c8f29287ec0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE_1 join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Table_B where_clause: keyword: WHERE expression: column_reference: naked_identifier: COL_2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'B'" - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE_C where_clause: keyword: WHERE expression: column_reference: naked_identifier: COL_1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: TABLE_1 - dot: . - naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: TABLE_2 - dot: . - naked_identifier: A sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_table_named_group.sql000066400000000000000000000000531503426445100264450ustar00rootroot00000000000000select 1 from group; select 1 from groups; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_table_named_group.yml000066400000000000000000000021571503426445100264560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a290676e6fee30d9f4fb94419484ce3a2c078833f421d3db14f26e776593a5d4 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: group - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: groups - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_trailing_comma_column_list.sql000066400000000000000000000000431503426445100303720ustar00rootroot00000000000000SELECT user_id, FROM table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_trailing_comma_column_list.yml000066400000000000000000000014301503426445100303750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 000c55713505ad198a4c74a474808e536cdc1ae9010c0ba276ac0a524f82d63c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_true_and_not_false.sql000066400000000000000000000002361503426445100266340ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/874 SELECT TRUE AND NOT FALSE; SELECT TRUE; SELECT TRUE AND FALSE; SELECT NOT TRUE; SELECT NOT TRUE AND FALSE; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_true_and_not_false.yml000066400000000000000000000031361503426445100266400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1eab200cd987526c057e1c1be42cf2bfdba7609dadf9e6b155fbd91b01860942 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'TRUE' - binary_operator: AND - keyword: NOT - boolean_literal: 'FALSE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: boolean_literal: 'TRUE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'TRUE' - binary_operator: AND - boolean_literal: 'FALSE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: keyword: NOT boolean_literal: 'TRUE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: NOT - boolean_literal: 'TRUE' - binary_operator: AND - boolean_literal: 'FALSE' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_u.sql000066400000000000000000000000661503426445100232460ustar00rootroot00000000000000select substring(a from 'abc') as b from my_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_u.yml000066400000000000000000000023351503426445100232510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2bc57e72cc197235fe46dc68d914169c33d370f1396bc20ee9900bd691954dae file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - keyword: from - expression: quoted_literal: "'abc'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union.sql000066400000000000000000000000341503426445100241250ustar00rootroot00000000000000SELECT 0 UNION SELECT 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union.yml000066400000000000000000000013541503426445100241350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 763e7cd57e8af9de1c189ad32fe63f08dae1344a36e08ba4166fc9b61b549427 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_all.sql000066400000000000000000000000401503426445100247520ustar00rootroot00000000000000SELECT 0 UNION ALL SELECT 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_all.yml000066400000000000000000000014011503426445100247560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d8c79f2b1ea2ff055e8214c02fedae7d34c5d1a726a51fdfd37a7a571cc182d4 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_bracketed.sql000066400000000000000000000003011503426445100261260ustar00rootroot00000000000000(SELECT 0) UNION (SELECT 1); ((SELECT 0)) UNION ((SELECT 1)); ( SELECT * FROM tbl1 EXCEPT SELECT * FROM tbl2 ) UNION ( SELECT * FROM tbl2 EXCEPT SELECT * FROM tbl1 ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_bracketed.yml000066400000000000000000000074541503426445100261500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e29da8ed6a23db5e70371eb566600c1025a510eed0833af74284d5ddafe41aa9 file: - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' end_bracket: ) end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_distinct.sql000066400000000000000000000000451503426445100260300ustar00rootroot00000000000000SELECT 0 UNION DISTINCT SELECT 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_union_distinct.yml000066400000000000000000000014061503426445100260340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 795fef86836ffa436be22add911aa3cbeeb458ef7b309514ce28639f926f0ef0 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: - keyword: UNION - keyword: DISTINCT - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_v.sql000066400000000000000000000002101503426445100232360ustar00rootroot00000000000000-- Test Nested WITH WITH counter AS ( WITH ladder AS ( SELECT 1 ) SELECT * FROM ladder ORDER BY 1 ) SELECT * FROM counter sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_v.yml000066400000000000000000000040621503426445100232510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 80d0bbf478f03f9f206c631fc99854a4f644a68936073e9e5744ecee82394cec file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: counter keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: ladder keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ladder orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_where_in_unnest.sql000066400000000000000000000000711503426445100261720ustar00rootroot00000000000000SELECT user_id FROM t WHERE 1 IN UNNEST(t.c) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_where_in_unnest.yml000066400000000000000000000023661503426445100262050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3c0baaaf29082228cf7043d04a729b396064ea9ac1720b3c6e102fe1662d0e8 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: numeric_literal: '1' keyword: IN function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: c end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_a.sql000066400000000000000000000000631503426445100242520ustar00rootroot00000000000000WITH cte as (select a from tbla) select a from cte sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_a.yml000066400000000000000000000026251503426445100242620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1298ca21b1fa3bc9eedfd815d67ba202ec5342bbc767d995aac381df569fb4a9 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbla end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_b.sql000066400000000000000000000000771503426445100242600ustar00rootroot00000000000000WITH blah AS (select x,y,z FROM foo) select z, y, x from blah; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_b.yml000066400000000000000000000036501503426445100242620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d30f0d1b4b6f309151e12d5aa38d0a47c9cd50518fa65e4f4634e17c573ad731 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: blah keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: z from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: z - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_brackets.sql000066400000000000000000000001721503426445100256310ustar00rootroot00000000000000select * from (my_table); select * from (my_table tt); select * from ((my_table tt)); select * from (((my_table tt))); sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_brackets.yml000066400000000000000000000056751503426445100256500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: faeb9fee63d6841e1780c14f91fccd944590d1b3cdcec3c24db9d80e2e81e069 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_limit_and_offset.sql000066400000000000000000000000511503426445100273350ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10 OFFSET 10 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_limit_and_offset.yml000066400000000000000000000016341503426445100273470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e87a966441c7e2091e72dcbfec0b7999c9a79b610855befa30fd945e874126bd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: - keyword: LIMIT - numeric_literal: '10' - keyword: OFFSET - numeric_literal: '10' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_offset_limit.sql000066400000000000000000000000431503426445100265140ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10, 10 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_offset_limit.yml000066400000000000000000000016271503426445100265270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bc4f9329c02c258f49e16d548f61579b65ad37eb64b34da9b1ca062aef44b5ff file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: - keyword: LIMIT - numeric_literal: '10' - comma: ',' - numeric_literal: '10' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_recursive.sql000066400000000000000000000001411503426445100260360ustar00rootroot00000000000000WITH RECURSIVE cte(a) AS (SELECT 1 UNION ALL SELECT a+1 FROM cte WHERE a < 5) SELECT a FROM cte; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_recursive.yml000066400000000000000000000044551503426445100260540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f38848372150f3c627659cbd7c834658cfb3b43acd04f3bde9143764e21d5944 file: statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < numeric_literal: '5' end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_simple_limit.sql000066400000000000000000000000371503426445100265220ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10 sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_simple_limit.yml000066400000000000000000000015461503426445100265320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a5003ef6c80948fdbb87c1249b0f4265d93037606edf5722d991576dcd822075 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: keyword: LIMIT numeric_literal: '10' sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_where_clause_functions.sql000066400000000000000000000001441503426445100305700ustar00rootroot00000000000000select t.column1 from sch.table1 as t where t.b_year in (year(getdate()) , year(getdate()) - 1); sqlfluff-3.4.2/test/fixtures/dialects/ansi/select_with_where_clause_functions.yml000066400000000000000000000046631503426445100306040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54c22dba7dfd692e748f7cef7b0bd617cfb816401654e0f7e277ec14b7f73031 file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: table1 alias_expression: alias_operator: keyword: as naked_identifier: t where_clause: keyword: where expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: b_year keyword: in bracketed: - start_bracket: ( - function: function_name: function_name_identifier: year function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: getdate function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: year function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: getdate function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - binary_operator: '-' - numeric_literal: '1' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/semicolon_delimiters.sql000066400000000000000000000002051503426445100256470ustar00rootroot00000000000000-- It's possible to have multiple semicolons between statements. SELECT foo FROM bar;; SELECT foo FROM bar; ; ; SELECT foo FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/ansi/semicolon_delimiters.yml000066400000000000000000000032051503426445100256540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0726a750be417f4364e1de4f4378a1d0c23c0ab9b1c6c524bc6d83f5dafd80b8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_order_by.sql000066400000000000000000000001441503426445100241200ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/852 SELECT 1 AS a UNION ALL SELECT 1 AS a ORDER BY a sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_order_by.yml000066400000000000000000000021561503426445100241270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 97b5054a52c1e8871b0f0567f7daddeddd3f3dabcd6eb918d5792f7550b0e3d0 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_order_by_complex.sql000066400000000000000000000003331503426445100256470ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/852 -- ORDER BY and LIMIT are allowed when bracketed. Otherwise not. (SELECT * FROM a ORDER BY 1 LIMIT 1) UNION ALL (SELECT * FROM b ORDER BY 1 LIMIT 1) ORDER BY 1 LIMIT 1 sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_order_by_complex.yml000066400000000000000000000040071503426445100256530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f7aadd671fed70160b3eda465490c09c32a77fe77145b3a8bd37bcfbf532cd0 file: statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - limit_clause: keyword: LIMIT numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_schema_a.sql000066400000000000000000000000251503426445100240510ustar00rootroot00000000000000set schema my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/set_schema_a.yml000066400000000000000000000010261503426445100240550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7f948d86ae8ddb413b44c667a3dee6096cf7db4569f3ab72d981b230bf87d3a2 file: statement: set_schema_statement: - keyword: set - keyword: schema - schema_reference: naked_identifier: my_schema sqlfluff-3.4.2/test/fixtures/dialects/ansi/shorthand_cast.sql000066400000000000000000000001041503426445100244400ustar00rootroot00000000000000select '1' :: INT as id1, '2'::int as id2 from table_a sqlfluff-3.4.2/test/fixtures/dialects/ansi/shorthand_cast.yml000066400000000000000000000025351503426445100244540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 471619df1b1745b7e99d49e3f28850a3c5258b4b7b928b43b0e80610ac8b1406 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: quoted_literal: "'1'" casting_operator: '::' data_type: data_type_identifier: INT alias_expression: alias_operator: keyword: as naked_identifier: id1 - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'2'" casting_operator: '::' data_type: data_type_identifier: int alias_expression: alias_operator: keyword: as naked_identifier: id2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a sqlfluff-3.4.2/test/fixtures/dialects/ansi/table_expression.sql000066400000000000000000000003071503426445100250070ustar00rootroot00000000000000SELECT y AS woy FROM UNNEST(GENERATE_ARRAY(1, 53)) AS y; SELECT id, name FROM UNNEST([1, 2, 3]) id WITH OFFSET pos1, UNNEST(['a', 'b', 'c']) name WITH OFFSET pos2 WHERE pos1 = pos2; sqlfluff-3.4.2/test/fixtures/dialects/ansi/table_expression.yml000066400000000000000000000101231503426445100250060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5f31201d2f2ff1bcb930d0cfe0ede2433174a1cfef691939d3eb27877d30dd81 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: woy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GENERATE_ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '53' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: y - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: - keyword: FROM - from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - alias_expression: naked_identifier: id - keyword: WITH - keyword: OFFSET - alias_expression: naked_identifier: pos1 - comma: ',' - from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' end_bracket: ) - alias_expression: naked_identifier: name - keyword: WITH - keyword: OFFSET - alias_expression: naked_identifier: pos2 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pos1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: pos2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/transactions.sql000066400000000000000000000001121503426445100241430ustar00rootroot00000000000000BEGIN TRANSACTION; UPDATE tbl SET foo = 1 WHERE bar = 2; END TRANSACTION; sqlfluff-3.4.2/test/fixtures/dialects/ansi/transactions.yml000066400000000000000000000022751503426445100241610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7ae293181f9a25549b65004c3536b6f0101395df493dfb722b17f9bccf0a3480 file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: tbl set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: - keyword: END - keyword: TRANSACTION - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/trim_functions.sql000066400000000000000000000005261503426445100245070ustar00rootroot00000000000000SELECT trim(' SparkSQL '); SELECT trim(BOTH FROM ' SparkSQL '); SELECT trim(LEADING FROM ' SparkSQL '); SELECT trim(TRAILING FROM ' SparkSQL '); SELECT trim('SL' FROM 'SSparkSQLS'); SELECT trim(BOTH 'SL' FROM 'SSparkSQLS'); SELECT trim(LEADING 'SL' FROM 'SSparkSQLS'); SELECT trim(TRAILING 'SL' FROM 'SSparkSQLS'); sqlfluff-3.4.2/test/fixtures/dialects/ansi/trim_functions.yml000066400000000000000000000106261503426445100245130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6b45e2b9722a62bc1e9a593c90cc04d034b2c166295a26623696ab39016230f4 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: start_bracket: ( expression: quoted_literal: "' SparkSQL '" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: BOTH - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: LEADING - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: TRAILING - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: BOTH - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: LEADING - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: - start_bracket: ( - keyword: TRAILING - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/truncate_a.sql000066400000000000000000000000131503426445100235600ustar00rootroot00000000000000truncate a sqlfluff-3.4.2/test/fixtures/dialects/ansi/truncate_a.yml000066400000000000000000000007661503426445100236010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d4f2f7b0b94694925f4ed333c4b00f0cb6d50b58ccbd037a9bbfb8657ba833e file: statement: truncate_table: keyword: truncate table_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/truncate_table_a.sql000066400000000000000000000000211503426445100247260ustar00rootroot00000000000000truncate table a sqlfluff-3.4.2/test/fixtures/dialects/ansi/truncate_table_a.yml000066400000000000000000000010131503426445100247320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1b1d7fc327f208104f290374b0813024d71f9d97f340558a45007ed81a0c6eef file: statement: truncate_table: - keyword: truncate - keyword: table - table_reference: naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/ansi/unaliased_using_subquery.sql000066400000000000000000000001341503426445100265500ustar00rootroot00000000000000SELECT * FROM A_TABLE INNER JOIN ( SELECT margin FROM B_TABLE ) USING (SOME_COLUMN) sqlfluff-3.4.2/test/fixtures/dialects/ansi/unaliased_using_subquery.yml000066400000000000000000000033141503426445100265550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2475b577af1c428903288f5ab85677960322310c8fd1b7a4e48734431c703e68 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A_TABLE join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: margin from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: B_TABLE end_bracket: ) - keyword: USING - bracketed: start_bracket: ( naked_identifier: SOME_COLUMN end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/ansi/update.sql000066400000000000000000000001041503426445100227160ustar00rootroot00000000000000UPDATE table_name SET column1 = value1, column2 = value2 WHERE a=1; sqlfluff-3.4.2/test/fixtures/dialects/ansi/update.yml000066400000000000000000000023701503426445100227270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 97f0f9ca17728f0b84dace4787d5c607fdc080b9e8f0bb2214229bbf290140d3 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value1 - comma: ',' - set_clause: - column_reference: naked_identifier: column2 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_set_case.sql000066400000000000000000000002321503426445100245660ustar00rootroot00000000000000UPDATE table1 SET a = CASE WHEN t2.column = 'T' THEN TRUE WHEN t2.column = 'F' THEN FALSE ELSE NULL END FROM table2 t2; sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_set_case.yml000066400000000000000000000041501503426445100245730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9a80463bd722a8feccaa88d702e5fbe88eb34e922b5379831463cc2b3a8f3f97 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: column comparison_operator: raw_comparison_operator: '=' quoted_literal: "'T'" - keyword: THEN - expression: boolean_literal: 'TRUE' - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: column comparison_operator: raw_comparison_operator: '=' quoted_literal: "'F'" - keyword: THEN - expression: boolean_literal: 'FALSE' - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: naked_identifier: t2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_with_from_clause.sql000066400000000000000000000001731503426445100263360ustar00rootroot00000000000000UPDATE my_table SET my_table.days=other_table.days FROM other_table WHERE my_table.po_number=other_table.po_number sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_with_from_clause.yml000066400000000000000000000027201503426445100263400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c5d4563686f1cda32dd215be7a532e24aa7f86300c59bf96319b1f4247a4419d file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: my_table set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: my_table - dot: . - naked_identifier: days - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: other_table - dot: . - naked_identifier: days from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: my_table - dot: . - naked_identifier: po_number - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: other_table - dot: . - naked_identifier: po_number sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_with_table_alias.sql000066400000000000000000000000631503426445100262750ustar00rootroot00000000000000UPDATE my_table AS tttd SET tttd.days=ttu.days sqlfluff-3.4.2/test/fixtures/dialects/ansi/update_with_table_alias.yml000066400000000000000000000017231503426445100263030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a28a744c27fcae9cde0db4c8ff78962f7a1309abc3ffb74e3f9809d3448b0458 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: my_table alias_expression: alias_operator: keyword: AS naked_identifier: tttd set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: tttd - dot: . - naked_identifier: days - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ttu - dot: . 
- naked_identifier: days sqlfluff-3.4.2/test/fixtures/dialects/ansi/where_fetch.sql000066400000000000000000000001061503426445100237210ustar00rootroot00000000000000SELECT * FROM Persons WHERE Country='France' FETCH FIRST 5 ROWS ONLY; sqlfluff-3.4.2/test/fixtures/dialects/ansi/where_fetch.yml000066400000000000000000000022601503426445100237260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6415723d932beb559b60f37c6bd6c879a1a6c36ba35910cfd69ba1b5cbdc206c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Persons where_clause: keyword: WHERE expression: column_reference: naked_identifier: Country comparison_operator: raw_comparison_operator: '=' quoted_literal: "'France'" fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '5' - keyword: ROWS - keyword: ONLY statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_compound_select_union.sql000066400000000000000000000001611503426445100270650ustar00rootroot00000000000000SELECT 0 UNION (WITH t AS (SELECT 1) SELECT * FROM t) ; (WITH t AS (SELECT 0) SELECT * FROM t) UNION SELECT 1 ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_compound_select_union.yml000066400000000000000000000053121503426445100270720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 121502f6e6f032202e6ab7c9050b3b3843e8a7d9699592bf6bcad501d2129760 file: - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' set_operator: keyword: UNION bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: set_expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) set_operator: keyword: UNION select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_bracketed_with_statement.sql000066400000000000000000000004731503426445100311270ustar00rootroot00000000000000WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) INSERT INTO table2 (column1, column2, column3) ( WITH mycte2 AS ( SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2 ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_bracketed_with_statement.yml000066400000000000000000000071751503426445100311370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 21d731739663b535b09c9ea09b64a89ee8c60cb29a01e8d7ac6221153d2a32ca file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_statement_a.sql000066400000000000000000000006501503426445100263650ustar00rootroot00000000000000-- with insert statement with `AS` keyword WITH mycte AS ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) VALUES ('value1', 'value2', 'value3'); -- with statement without `AS` keyword WITH mycte ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) VALUES ('value1', 'value2', 'value3'); sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_statement_a.yml000066400000000000000000000072441503426445100263750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 27bc5d9a88b703fd7e8f1e135b3811f8a0a432c5f04cb24cbe3a2e5928d85008 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'value2'" - comma: ',' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'value2'" - comma: ',' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_statement_b.sql000066400000000000000000000002621503426445100263650ustar00rootroot00000000000000WITH mycte AS ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) SELECT foo, bar, baz FROM mycte; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_statement_b.yml000066400000000000000000000046361503426445100264000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 33da5bd44e3638d1708630829459d10c3c04684fb9e15d802c75f8d186da1de6 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_with_statement.sql000066400000000000000000000004101503426445100271120ustar00rootroot00000000000000WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) INSERT INTO table2 (column1, column2, column3) WITH mycte2 AS ( SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_insert_with_statement.yml000066400000000000000000000067221503426445100271300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5c5586f9471e934aaae4230cd18a11da243eeb04fd581eeced85fe9e7061841e file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_nested_in_with_statement.sql000066400000000000000000000005031503426445100275610ustar00rootroot00000000000000( WITH mycte2 AS ( WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2 ); sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_nested_in_with_statement.yml000066400000000000000000000064021503426445100275670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 949eef4c8f91534769e7a2f739a5e760e42f6fb6e22d66f63cc2b57cd75b3d29 file: statement: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_no_schema_binding.sql000066400000000000000000000001241503426445100261170ustar00rootroot00000000000000create view my_schema.my_view as select * from schema.table with no schema binding; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_no_schema_binding.yml000066400000000000000000000023321503426445100261240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7ea812fc14414f5738321bfb716f97ea4e58cdf87c32d5726d981a0bf1ebcc38 file: statement: create_view_statement: - keyword: create - keyword: view - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: table - with_no_schema_binding_clause: - keyword: with - keyword: 'no' - keyword: schema - keyword: binding statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_update_statement.sql000066400000000000000000000002141503426445100260370ustar00rootroot00000000000000WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; sqlfluff-3.4.2/test/fixtures/dialects/ansi/with_update_statement.yml000066400000000000000000000036261503426445100260530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e7bacd843605728db15379d32320886ef0a608c3fbfcae64d08a14fd592df72e file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) update_statement: keyword: UPDATE table_reference: naked_identifier: sometable set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: sometable - dot: . - naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: mycte - dot: . - naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/000077500000000000000000000000001503426445100212265ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/athena/.sqlfluff000066400000000000000000000000341503426445100230460ustar00rootroot00000000000000[sqlfluff] dialect = athena sqlfluff-3.4.2/test/fixtures/dialects/athena/alter_table.sql000066400000000000000000000000631503426445100242240ustar00rootroot00000000000000ALTER TABLE x DROP COLUMN y; ALTER TABLE x DROP y; sqlfluff-3.4.2/test/fixtures/dialects/athena/alter_table.yml000066400000000000000000000014741503426445100242350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 94a2396ae7ae37cab4cef6ee26d888c14d3aa9c1d9557453c09c8f04ef59b2ee file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - parameter: DROP - naked_identifier: y - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_array_table.sql000066400000000000000000000001531503426445100255560ustar00rootroot00000000000000CREATE TABLE array_table (c1 array) LOCATION '...'; INSERT INTO array_table values(ARRAY[1,2,3]); sqlfluff-3.4.2/test/fixtures/dialects/athena/create_array_table.yml000066400000000000000000000033171503426445100255650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ee5ac7cd56307785349aeb5e1747f5769fc03aa8355f481ac382e9f8c18451e5 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: array_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: array_table - values_clause: keyword: values expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_database.sql000066400000000000000000000000351503426445100250340ustar00rootroot00000000000000create database my_database; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_database.yml000066400000000000000000000010761503426445100250440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 92367f186eab8a420d5599e66571f75d5d5b92592afc9599a887b2be6bd5769a file: statement: create_database_statement: - keyword: create - keyword: database - database_reference: naked_identifier: my_database statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_database_if_not_exists.sql000066400000000000000000000000531503426445100277710ustar00rootroot00000000000000create database if not exists my_database; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_database_if_not_exists.yml000066400000000000000000000011711503426445100277750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 48281e65f2141e81f9e67a84ce552e21c5c731039e29cdd35a38d8cf7e17de87 file: statement: create_database_statement: - keyword: create - keyword: database - keyword: if - keyword: not - keyword: exists - database_reference: naked_identifier: my_database statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table.sql000066400000000000000000000023321503426445100262630ustar00rootroot00000000000000create external table my_database.my_table( field_1 string, field_2 int, field_3 float ) PARTITIONED BY (field_partition string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' LINES TERMINATED BY '\n' LOCATION 's3://athena-examples-myregion/flight/csv/'; CREATE TABLE bucketed_table WITH ( bucketed_by = ARRAY[column_name], bucket_count = 30, format = 'PARQUET', external_location ='s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/' ) AS SELECT * FROM table_name; CREATE EXTERNAL TABLE `tpch100.lineitem_parq_partitioned`( `l_orderkey` int, `l_partkey` int, `l_suppkey` int, `l_linenumber` int, `l_quantity` double, `l_extendedprice` double, `l_discount` double, `l_tax` double, `l_returnflag` string, `l_linestatus` string, `l_commitdate` string, `l_receiptdate` string, `l_shipinstruct` string, `l_comment` string) PARTITIONED BY ( `l_shipdate` string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' LOCATION 's3:///lineitem/' sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table.yml000066400000000000000000000152631503426445100262740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ff2a7e34890b9b31fa6b988a66eb82060240c1259c3c6fcfe2d012fd7d579f3 file: - statement: create_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: my_database - dot: . 
- naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: field_2 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: field_3 data_type: primitive_type: keyword: float - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: bucketed_table - keyword: WITH - bracketed: - start_bracket: ( - keyword: bucketed_by - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: naked_identifier: column_name end_square_bracket: ']' - comma: ',' - keyword: bucket_count - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - comma: ',' - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - keyword: external_location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: quoted_identifier: '`tpch100.lineitem_parq_partitioned`' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`l_orderkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_partkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_suppkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_linenumber`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_quantity`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_extendedprice`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_discount`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_tax`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_returnflag`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_linestatus`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_commitdate`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_receiptdate`' data_type: 
primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_shipinstruct`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_comment`' data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: quoted_identifier: '`l_shipdate`' data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'" - keyword: LOCATION - quoted_literal: "'s3:///lineitem/'" sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table_input_format.sql000066400000000000000000000005661503426445100310610ustar00rootroot00000000000000CREATE EXTERNAL TABLE my_table( col_1 string, col_2 boolean, col_3 bigint, col_4 string, col_5 string ) PARTITIONED BY (field_partition string) ROW FORMAT SERDE 'some row format' STORED AS INPUTFORMAT 'some input format' OUTPUTFORMAT 'some output format' LOCATION 's3://athena-examples-myregion/some_data/' TBLPROPERTIES ('has_encrypted_data' = 'true'); sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table_input_format.yml000066400000000000000000000044461503426445100310640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
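# The workflow described above, spelled out as commands (a minimal sketch,
# assuming you are at the root of a sqlfluff checkout with the test
# dependencies installed; only the generator script is taken verbatim from
# this header — the pytest invocation is an assumed example):
#
#   python test/generate_parse_fixture_yml.py
#   pytest test/   # fixture hashes are then checked as part of the test run
#
# Editing the neighbouring .sql fixture and re-running the generator keeps
# the "_hash" field below consistent with the regenerated YAML.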
_hash: c0112e4b1374744bea18f7bc4cee2caf76d052a8e188a6b5a88d1f668a728e47 file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col_1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col_2 data_type: primitive_type: keyword: boolean - comma: ',' - column_definition: naked_identifier: col_3 data_type: primitive_type: keyword: bigint - comma: ',' - column_definition: naked_identifier: col_4 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col_5 data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'some row format'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'some input format'" - keyword: OUTPUTFORMAT - quoted_literal: "'some output format'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/some_data/'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'has_encrypted_data'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'true'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table_struct.sql000066400000000000000000000021701503426445100276670ustar00rootroot00000000000000create external table my_database.my_table( `date` string, campaignId string, campaignName string, deleted string, campaignStatus string, app struct, servingStatus string, servingStateReasons string, countriesOrRegions array, modificationTime string, totalBudget struct, dailyBudget struct, displayStatus string, supplySources array, adChannelType string, orgId string, billingEvent string, countryOrRegionServingStateReasons string, other boolean, impressions int, taps int, installs int, newDownloads int, redownloads int, latOnInstalls int, latOffInstalls int, ttr int, avgCPA struct, avgCPT struct, avgCPM struct, localSpend struct, conversionRate int ) PARTITIONED BY (field_partition string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' LINES TERMINATED BY '\n' LOCATION 's3://athena-examples-myregion/flight/csv/'; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_external_table_struct.yml000066400000000000000000000241421503426445100276740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c60781b1f9ce3b7db9a2144cbbc28c14fcbb7141fc0df394cd0d444fa50138c3 file: statement: create_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: my_database - dot: . 
- naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`date`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignId data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignName data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: deleted data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: app data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: appName - colon: ':' - data_type: primitive_type: keyword: string - comma: ',' - naked_identifier: adamId - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: servingStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: servingStateReasons data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: countriesOrRegions data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: string end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: modificationTime data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: totalBudget data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: dailyBudget data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: displayStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: supplySources data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: string end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: adChannelType data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: orgId data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: billingEvent data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: countryOrRegionServingStateReasons data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: other data_type: primitive_type: keyword: boolean - comma: ',' - column_definition: naked_identifier: impressions data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: taps data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: installs data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: newDownloads data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: redownloads data_type: 
primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: latOnInstalls data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: latOffInstalls data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: ttr data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: avgCPA data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: avgCPT data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: avgCPM data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: localSpend data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: conversionRate data_type: primitive_type: keyword: int - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_map_table.sql000066400000000000000000000002051503426445100252130ustar00rootroot00000000000000CREATE TABLE map_table(c1 map) LOCATION '...'; INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2])); sqlfluff-3.4.2/test/fixtures/dialects/athena/create_map_table.yml000066400000000000000000000050021503426445100252150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2858043276625507fcfe667d2a0d8cfd326a8c923b6ffc95e3756d2ea487b299 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: map_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: map_type: keyword: map map_type_schema: start_angle_bracket: < primitive_type: keyword: string comma: ',' data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: map_table - values_clause: keyword: values expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: MAP function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_partitioned_table.sql000066400000000000000000000015321503426445100267640ustar00rootroot00000000000000CREATE table my_lineitem_parq_partitioned WITH (partitioned_by = ARRAY['l_shipdate']) AS SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_comment, l_shipdate FROM tpch100.lineitem_parq_partitioned WHERE cast(l_shipdate as timestamp) < DATE('1992-02-01'); CREATE TABLE db.ctas_iceberg WITH ( table_type = 'ICEBERG', format = 'PARQUET', location = 's3://my_athena_results/ctas_iceberg_parquet/', is_external = false, partitioning = ARRAY['month(dt)'], vacuum_min_snapshots_to_keep = 10, vacuum_max_snapshot_age_seconds = 259200 ) AS SELECT key1, name1, 'date' FROM db.table1; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_partitioned_table.yml000066400000000000000000000145171503426445100267750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3498f71e1d7ea6840645cc414479dd2074b58164fe31a46fb3df6e9470d7c02d file: - statement: create_table_statement: - keyword: CREATE - keyword: table - table_reference: naked_identifier: my_lineitem_parq_partitioned - keyword: WITH - bracketed: start_bracket: ( keyword: partitioned_by comparison_operator: raw_comparison_operator: '=' typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' quoted_literal: "'l_shipdate'" end_square_bracket: ']' end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: l_orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_partkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_suppkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_linenumber - comma: ',' - select_clause_element: column_reference: naked_identifier: l_quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: l_extendedprice - comma: ',' - select_clause_element: column_reference: naked_identifier: l_discount - comma: ',' - select_clause_element: column_reference: naked_identifier: l_tax - comma: ',' - select_clause_element: column_reference: naked_identifier: l_returnflag - comma: ',' - select_clause_element: column_reference: naked_identifier: l_linestatus - comma: ',' - select_clause_element: column_reference: naked_identifier: l_commitdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_receiptdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipinstruct - comma: ',' - select_clause_element: column_reference: naked_identifier: l_comment - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tpch100 - dot: . - naked_identifier: lineitem_parq_partitioned where_clause: keyword: WHERE expression: - function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: l_shipdate keyword: as data_type: primitive_type: keyword: timestamp end_bracket: ) - comparison_operator: raw_comparison_operator: < - function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1992-02-01'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: ctas_iceberg - keyword: WITH - bracketed: - start_bracket: ( - keyword: table_type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ICEBERG'" - comma: ',' - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - keyword: location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://my_athena_results/ctas_iceberg_parquet/'" - comma: ',' - keyword: is_external - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - comma: ',' - keyword: partitioning - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' quoted_literal: "'month(dt)'" end_square_bracket: ']' - comma: ',' - keyword: vacuum_min_snapshots_to_keep - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: vacuum_max_snapshot_age_seconds - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '259200' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: key1 - comma: ',' - select_clause_element: column_reference: naked_identifier: name1 - comma: ',' - select_clause_element: quoted_literal: "'date'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_struct_table.sql000066400000000000000000000002611503426445100257640ustar00rootroot00000000000000CREATE TABLE struct_table(c1 struct) LOCATION '...'; INSERT INTO struct_table SELECT CAST(ROW('Bob', 38) AS ROW(name VARCHAR(10), age INTEGER)); sqlfluff-3.4.2/test/fixtures/dialects/athena/create_struct_table.yml000066400000000000000000000063331503426445100257740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0d582e98441f7d59557a013f0ba031af9560bf47be07face2254c4c83b747830 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: struct_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: name - colon: ':' - data_type: primitive_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: age - colon: ':' - data_type: primitive_type: keyword: integer - end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: struct_table - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Bob'" - comma: ',' - expression: numeric_literal: '38' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: name - data_type: primitive_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: age - data_type: primitive_type: keyword: INTEGER - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_table_as_select.sql000066400000000000000000000003341503426445100264030ustar00rootroot00000000000000CREATE TABLE my_ctas WITH ( format='Parquet', external_location='s3://my-bucket/my-path-level-1/my-path-level-2', partitioned_by=array['load_date'] ) AS SELECT field_1, field_2, field_3 from my_table; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_table_as_select.yml000066400000000000000000000036621503426445100264140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 22fa674be23f5ccea3ac46cf23b246375654df63fa579095b1378e0b931438aa file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_ctas - keyword: WITH - bracketed: - start_bracket: ( - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Parquet'" - comma: ',' - keyword: external_location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://my-bucket/my-path-level-1/my-path-level-2'" - comma: ',' - keyword: partitioned_by - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: array array_literal: start_square_bracket: '[' quoted_literal: "'load_date'" end_square_bracket: ']' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_table_iceberg.sql000066400000000000000000000010361503426445100260410ustar00rootroot00000000000000create table my_database.my_table( field_1 string, field_2 int, field_3 float ) PARTITIONED BY (field_1) LOCATION 's3://athena-examples-myregion/my_table/' TBLPROPERTIES ( 'table_type' = 'ICEBERG' ); -- Example from Athena Docs: -- https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html CREATE TABLE iceberg_table (id bigint, data string, category string) PARTITIONED BY (category, bucket(16, id)) LOCATION 's3://amzn-s3-demo-bucket/your-folder/' TBLPROPERTIES ( 'table_type' = 'ICEBERG' ); sqlfluff-3.4.2/test/fixtures/dialects/athena/create_table_iceberg.yml000066400000000000000000000061571503426445100260540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 429e9192d92993c21efe9631e7184683efd4d061a3af75894201d4f0df4c0f45 file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: my_database - dot: . 
- naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: field_2 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: field_3 data_type: primitive_type: keyword: float - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( naked_identifier: field_1 end_bracket: ) - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/my_table/'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'table_type'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ICEBERG'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: iceberg_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: bigint - comma: ',' - column_definition: naked_identifier: data data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: category data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( naked_identifier: category comma: ',' function: function_name: function_name_identifier: bucket function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: column_reference: naked_identifier: id - end_bracket: ) end_bracket: ) - keyword: LOCATION - quoted_literal: "'s3://amzn-s3-demo-bucket/your-folder/'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'table_type'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ICEBERG'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_view.sql000066400000000000000000000002731503426445100242460ustar00rootroot00000000000000CREATE VIEW test AS SELECT orderkey, orderstatus, totalprice / 2 AS half FROM orders; CREATE OR REPLACE VIEW test AS SELECT orderkey, orderstatus, totalprice / 4 AS quarter FROM orders; sqlfluff-3.4.2/test/fixtures/dialects/athena/create_view.yml000066400000000000000000000047521503426445100242560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4b9f327cd36317b0514ab31fba38d44f4416e2578e095ee9952aac4f2289f9b0 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: test - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: orderstatus - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: totalprice binary_operator: / numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: half from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: test - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: orderstatus - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: totalprice binary_operator: / numeric_literal: '4' alias_expression: alias_operator: keyword: AS naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/element_at.sql000066400000000000000000000003671503426445100240720ustar00rootroot00000000000000SELECT COALESCE( element_at(rq.hiring_managers, 1), element_at(rq.hiring_managers, 2), rq.creator_id ) AS part1, element_at(pl.hiring_managers, 1).id AS part2, element_at(pl.hiring_managers, 2).id AS part3; sqlfluff-3.4.2/test/fixtures/dialects/athena/element_at.yml000066400000000000000000000076341503426445100241000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3537c3d2aec0168d791402cafdef85909a9589574e34c675acf4e0cd25436cb6 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: element_at function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: rq - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: element_at function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: rq - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - expression: column_reference: - naked_identifier: rq - dot: . 
- naked_identifier: creator_id - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: part1 - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: element_at function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: pl - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) dot: . object_reference: naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: part2 - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: element_at function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: pl - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) dot: . object_reference: naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: part3 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/insert_into.sql000066400000000000000000000005571503426445100243130ustar00rootroot00000000000000INSERT INTO canada_pageviews SELECT * FROM vancouver_pageviews WHERE pageview_date BETWEEN date '2019-07-01' AND date '2019-07-31'; INSERT INTO cities VALUES (1,'Lansing','MI','Si quaeris peninsulam amoenam circumspice'); INSERT INTO cities VALUES (1,'Lansing','MI','Si quaeris peninsulam amoenam circumspice'), (3,'Boise','ID','Esto perpetua'); sqlfluff-3.4.2/test/fixtures/dialects/athena/insert_into.yml000066400000000000000000000053471503426445100243170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
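# These YAML fixtures record the parse tree that SQLFluff produces for the
# neighbouring .sql file. A quick way to inspect such a tree interactively
# (a usage sketch, assuming sqlfluff is installed on your PATH) is:
#
#   sqlfluff parse test/fixtures/dialects/athena/insert_into.sql --dialect athena
#
# The printed tree corresponds to the structure recorded in this file.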
_hash: fcc2e94a157cee2033e8e1c4c434cbc9791c4e28d4b41afb2df6c097263a5b36 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: canada_pageviews - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: vancouver_pageviews where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pageview_date - keyword: BETWEEN - keyword: date - date_constructor_literal: "'2019-07-01'" - keyword: AND - keyword: date - date_constructor_literal: "'2019-07-31'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: cities - values_clause: keyword: VALUES expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - quoted_literal: "'Lansing'" - comma: ',' - quoted_literal: "'MI'" - comma: ',' - quoted_literal: "'Si quaeris peninsulam amoenam circumspice'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: cities - values_clause: - keyword: VALUES - expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - quoted_literal: "'Lansing'" - comma: ',' - quoted_literal: "'MI'" - comma: ',' - quoted_literal: "'Si quaeris peninsulam amoenam circumspice'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - quoted_literal: "'Boise'" - comma: ',' - quoted_literal: "'ID'" - comma: ',' - quoted_literal: "'Esto perpetua'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/listagg.sql000066400000000000000000000020541503426445100234020ustar00rootroot00000000000000SELECT composition.material_number AS material_number_composition, LISTAGG( composition.composition, ' || ' ON OVERFLOW ERROR ) WITHIN GROUP ( ORDER BY composition ASC ) AS composition FROM article_composition AS composition GROUP BY composition.material_number; SELECT country, LISTAGG(city, ',') WITHIN GROUP ( ORDER BY population DESC ) FILTER (WHERE population >= 10_000_000) AS megacities FROM ( VALUES ('India', 'Bangalore', 13_700_000), ('India', 'Chennai', 12_200_000), ('India', 'Ranchi', 1_547_000), ('Austria', 'Vienna', 1_897_000), ('Poland', 'Warsaw', 1_765_000) ) AS t (country, city, population) GROUP BY country ORDER BY country; SELECT LISTAGG(value, ',' ON OVERFLOW TRUNCATE '.....' WITH COUNT) WITHIN GROUP ( ORDER BY value ) FROM (VALUES 'a', 'b', 'c') AS t (value); SELECT LISTAGG( value, ',' ON OVERFLOW TRUNCATE '.....' WITHOUT COUNT ) WITHIN GROUP ( ORDER BY value ) FROM (VALUES 'a', 'b', 'c') AS t (value); sqlfluff-3.4.2/test/fixtures/dialects/athena/listagg.yml000066400000000000000000000260531503426445100234110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f42c3532ac9e21473d48ac1b81e8c867135e4517e7e04202778ea746e4fd39fa file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: composition - dot: . 
- naked_identifier: material_number alias_expression: alias_operator: keyword: AS naked_identifier: material_number_composition - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: composition - dot: . - naked_identifier: composition - comma: ',' - expression: quoted_literal: "' || '" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: ERROR - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: composition - keyword: ASC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: composition from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: article_composition alias_expression: alias_operator: keyword: AS naked_identifier: composition groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: composition - dot: . - naked_identifier: material_number - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: country - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: city - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: population - keyword: DESC end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: - column_reference: naked_identifier: population - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: '10_000_000' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: megacities from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: - start_bracket: ( - quoted_literal: "'India'" - comma: ',' - quoted_literal: "'Bangalore'" - comma: ',' - column_reference: naked_identifier: '13_700_000' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'India'" - comma: ',' - quoted_literal: "'Chennai'" - comma: ',' - column_reference: naked_identifier: '12_200_000' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'India'" - comma: ',' - quoted_literal: "'Ranchi'" - comma: ',' - column_reference: naked_identifier: '1_547_000' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'Austria'" - comma: ',' - quoted_literal: "'Vienna'" - comma: ',' - column_reference: naked_identifier: '1_897_000' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'Poland'" - comma: ',' - quoted_literal: "'Warsaw'" - comma: ',' - column_reference: naked_identifier: '1_765_000' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: country - comma: ',' - naked_identifier: city - comma: ',' - 
naked_identifier: population end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: country orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: country - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: TRUNCATE - quoted_literal: "'.....'" - keyword: WITH - keyword: COUNT - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: value end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: quoted_literal: "'c'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: TRUNCATE - quoted_literal: "'.....'" - keyword: WITHOUT - keyword: COUNT - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: value end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: quoted_literal: "'c'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: value end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/msck_repair_table.sql000066400000000000000000000000341503426445100254120ustar00rootroot00000000000000MSCK REPAIR TABLE my_table; sqlfluff-3.4.2/test/fixtures/dialects/athena/msck_repair_table.yml000066400000000000000000000011131503426445100254130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aadaff42833cff5f19411fa8875d9468514306cdb9448f96c00d03fe7815e21a file: statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/prepared_statements.sql000066400000000000000000000010651503426445100260220ustar00rootroot00000000000000PREPARE my_select1 FROM SELECT * FROM nation; PREPARE my_select2 FROM SELECT * FROM "my_database"."my_table" WHERE year = ?; PREPARE my_select3 FROM SELECT 'order' FROM orders WHERE productid = ? and quantity < ?; PREPARE my_insert FROM INSERT INTO cities_usa (city, state) SELECT city, state FROM cities_world WHERE country = ?; PREPARE my_unload FROM UNLOAD (SELECT * FROM table1 WHERE productid < ?) TO 's3://my_output_bucket/' WITH (format='PARQUET'); EXECUTE statement_name; EXECUTE statement_name USING 'value'; EXECUTE statement_name USING 'value', 10; sqlfluff-3.4.2/test/fixtures/dialects/athena/prepared_statements.yml000066400000000000000000000137401503426445100260270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cfd7ab01c93d5e69da42578d477561782550c6935dcd9450b5470d064ffbea20 file: - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select1 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nation - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select2 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"my_database"' - dot: . - quoted_identifier: '"my_table"' where_clause: keyword: WHERE expression: column_reference: naked_identifier: year comparison_operator: raw_comparison_operator: '=' parameter: '?' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select3 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'order'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: naked_identifier: productid - comparison_operator: raw_comparison_operator: '=' - parameter: '?' - binary_operator: and - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - parameter: '?' 
- statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_insert - keyword: FROM - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: cities_usa - bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: state - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cities_world where_clause: keyword: WHERE expression: column_reference: naked_identifier: country comparison_operator: raw_comparison_operator: '=' parameter: '?' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_unload - keyword: FROM - unload_statement: - keyword: UNLOAD - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: productid comparison_operator: raw_comparison_operator: < parameter: '?' end_bracket: ) - keyword: TO - quoted_literal: "'s3://my_output_bucket/'" - keyword: WITH - bracketed: start_bracket: ( keyword: format comparison_operator: raw_comparison_operator: '=' quoted_literal: "'PARQUET'" end_bracket: ) - statement_terminator: ; - statement: execute_statement: keyword: EXECUTE table_reference: naked_identifier: statement_name - statement_terminator: ; - statement: execute_statement: - keyword: EXECUTE - table_reference: naked_identifier: statement_name - keyword: USING - quoted_literal: "'value'" - statement_terminator: ; - statement: execute_statement: - keyword: EXECUTE - table_reference: naked_identifier: statement_name - keyword: USING - quoted_literal: "'value'" - comma: ',' - numeric_literal: '10' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_a.sql000066400000000000000000000001301503426445100235200ustar00rootroot00000000000000SELECT field_1 , field_2 , field_3 , time , date , timestamp FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_a.yml000066400000000000000000000025421503426445100235330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 287e4f998b4695a1d733c87a6a1ecad0735c474c0d5ed78422173e0d823736c5 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_3 - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: date - comma: ',' - select_clause_element: column_reference: naked_identifier: timestamp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_array_of_rows.sql000066400000000000000000000000611503426445100261570ustar00rootroot00000000000000SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x.y; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_array_of_rows.yml000066400000000000000000000041061503426445100261650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 229ad331db0574e38536d0ed149fd6bc032240c093a546a9565ae8261568e4ab file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: x data_type: primitive_type: keyword: INT end_bracket: ) end_bracket: ) end_square_bracket: ']' array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' dot: . object_reference: - naked_identifier: x - dot: . - naked_identifier: y statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_b.sql000066400000000000000000000003231503426445100235250ustar00rootroot00000000000000WITH with_query as ( select field_1, field_2 from table_1) select field_1, field_2, count(1) from with_query where field_1 = 'value' group by 1, 2 having count(1) > 10 order by 1 DESC limit 10; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_b.yml000066400000000000000000000061671503426445100235430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4dfd6d5fe802d39ad1c438d48f81d491a48c176757133f3700fbc86bd95ef349 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: with_query keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: with_query where_clause: keyword: where expression: column_reference: naked_identifier: field_1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' having_clause: keyword: having expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: DESC limit_clause: keyword: limit numeric_literal: '10' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_base_operators.sql000066400000000000000000000003211503426445100263120ustar00rootroot00000000000000SELECT CAST(null AS boolean) OR CAST(null AS boolean); -- null SELECT CAST(null AS boolean) OR false; -- null SELECT CAST(null AS boolean) OR true; -- true SELECT least(1, 2, 3); SELECT greatest(1, 2, 3); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_base_operators.yml000066400000000000000000000076111503426445100263250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
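# How the "_hash" guard above could work, as a conceptual sketch in Python.
# This is an illustration only and NOT SQLFluff's actual implementation —
# the function name and hashing scheme are assumptions; the real check lives
# in SQLFluff's test suite alongside the generator script named above.
#
#   import hashlib
#
#   def fixture_hash(yaml_text: str) -> str:
#       # Hash everything except the stored _hash line itself, so the
#       # recorded value can be compared against freshly generated content.
#       lines = [ln for ln in yaml_text.splitlines()
#                if not ln.startswith("_hash:")]
#       return hashlib.sha256("\n".join(lines).encode("utf-8")).hexdigest()
#
# If the stored value and the recomputed value disagree, the fixture was
# edited by hand and should be regenerated instead.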
_hash: b8e9e7bcece289dac9df11a55010a2ba5fcccdcc38e9f456d1ebd02dadb74f7b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) - binary_operator: OR - function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) binary_operator: OR boolean_literal: 'false' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) binary_operator: OR boolean_literal: 'true' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: least function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_cast_withtimezone.sql000066400000000000000000000002571503426445100270520ustar00rootroot00000000000000SELECT cast(field_1 as time with time zone), cast(field_2 as timestamp with time zone), CAST(CURRENT_TIMESTAMP AS TIMESTAMP(6) WITH TIME ZONE) AS _log_time, FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_cast_withtimezone.yml000066400000000000000000000051441503426445100270540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6eb889e5d4ceda3522300f927ea2cdb667e1d0850d401b72a698933d17ad30d3 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: field_1 keyword: as data_type: - keyword: time - keyword: with - keyword: time - keyword: zone end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: field_2 keyword: as data_type: - keyword: timestamp - keyword: with - keyword: time - keyword: zone end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP keyword: AS data_type: - keyword: TIMESTAMP - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: WITH - keyword: TIME - keyword: ZONE end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: _log_time - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_datetime_functions.sql000066400000000000000000000021071503426445100271720ustar00rootroot00000000000000-- https://prestodb.io/docs/0.217/functions/datetime.html select date '2012-08-08' + interval '2' day; select time '01:00' + interval '3' hour; select timestamp '2012-08-08 01:00' + interval '29' hour; select timestamp '2012-10-31 01:00' + interval '1' month; select interval '2' day + interval '3' hour; select interval '3' year + interval '5' month; select date '2012-08-08' - interval '2' day; select time '01:00' - interval '3' hour; select timestamp '2012-08-08 01:00' - interval '29' hour; select timestamp '2012-10-31 01:00' - interval '1' month; select interval '2' day - interval '3' hour; select interval '3' year - interval '5' month; select current_time; select current_date; select current_timestamp; select current_timezone(); select date('1970-01-01'); select cast('1970-01-01' as date); select from_iso8601_timestamp('2019-09-07T-15:50+00'); select from_iso8601_date('2019-09-07T-15:50+00'); select from_unixtime(1556285138); select localtime; select localtimestamp; select now(); select to_iso8601('1970-01-01'); select to_unixtime(current_timestamp); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_datetime_functions.yml000066400000000000000000000240531503426445100272000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b8b4aa43d196d9687ef2b428cb602f8c77af196cbc18fe01f300724d86ae6fe6 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: date date_constructor_literal: "'2012-08-08'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: time date_constructor_literal: "'01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-08-08 01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'29'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-10-31 01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'1'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - binary_operator: + - interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'3'" date_part: year - binary_operator: + - interval_expression: keyword: interval quoted_literal: "'5'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: date date_constructor_literal: "'2012-08-08'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: time date_constructor_literal: "'01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-08-08 01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'29'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-10-31 01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'1'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - binary_operator: '-' - interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'3'" date_part: year - binary_operator: '-' - interval_expression: keyword: interval quoted_literal: "'5'" date_part: month - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: select select_clause_element: bare_function: current_time - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: bare_function: current_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: bare_function: current_timestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: current_timezone function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" keyword: as data_type: primitive_type: keyword: date end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_iso8601_timestamp function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2019-09-07T-15:50+00'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_iso8601_date function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2019-09-07T-15:50+00'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_unixtime function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1556285138' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: localtime - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: localtimestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: now function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: to_iso8601 function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: to_unixtime function_contents: bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_filter.sql000066400000000000000000000003201503426445100245660ustar00rootroot00000000000000SELECT ARRAY [5, NULL, 7, NULL]; SELECT filter(ARRAY [], x -> true); -- [] SELECT filter(ARRAY [5, -6, NULL, 7], x -> x > 0); -- [5, 7] SELECT filter(ARRAY [5, NULL, 7, NULL], x -> x 
IS NOT NULL); -- [5, 7] sqlfluff-3.4.2/test/fixtures/dialects/athena/select_filter.yml000066400000000000000000000103621503426445100245770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2060d6fb0af498fab949db55ac646fd8b6460cc477396196e86dea8a80182326 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - comma: ',' - null_literal: 'NULL' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - expression: column_reference: naked_identifier: x binary_operator: -> boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '6' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - end_square_bracket: ']' - comma: ',' - expression: - column_reference: naked_identifier: x - binary_operator: -> - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - comma: ',' - null_literal: 'NULL' - end_square_bracket: ']' - comma: ',' - expression: - column_reference: naked_identifier: x - binary_operator: -> - column_reference: naked_identifier: x - keyword: IS - keyword: NOT - null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_group_by.sql000066400000000000000000000013631503426445100251370ustar00rootroot00000000000000select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by cube (as_of_date, channel); select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by rollup (as_of_date, channel); select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by grouping sets (as_of_date, channel); -- complex sets select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by grouping sets ((as_of_date, channel), (as_of_date), ()); -- "weird" cases select as_of_date, channel, platform, sum(total_count) as cnt from 
agg.aggregate_total group by as_of_date, grouping sets ((platform, channel), channel, ()); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_group_by.yml000066400000000000000000000217351503426445100251460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83514a0915632a2c850321b3d61197b25008f6e4f95342bb1df5de8af668e27a file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . 
- naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: as_of_date end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: column_reference: naked_identifier: platform - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . 
- naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: as_of_date - comma: ',' - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: platform - comma: ',' - column_reference: naked_identifier: channel - end_bracket: ) - comma: ',' - column_reference: naked_identifier: channel - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_map_function.sql000066400000000000000000000007701503426445100257740ustar00rootroot00000000000000SELECT map(); WITH dataset AS ( SELECT map( ARRAY['first', 'last', 'age'], ARRAY['Bob', 'Smith', '35'] ) AS a_map ) SELECT a_map FROM dataset; SELECT map_filter(map(ARRAY[], ARRAY[]), (k, v) -> true); -- -- {} SELECT map_filter( map( ARRAY[10, 20, 30], ARRAY['a', null, 'c'] ), (k, v) -> v IS NOT NULL ); -- -- {10 -> a, 30 -> c} SELECT map_filter( map( ARRAY['k1', 'k2', 'k3'], ARRAY[20, 3, 15] ), (k, v) -> v > 10 ); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_map_function.yml000066400000000000000000000224651503426445100260030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bfb068b1284cf0064d84486f515d8f29f3be3ca4e2f1b4402bf824690fa4215d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: dataset keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'first'" - comma: ',' - quoted_literal: "'last'" - comma: ',' - quoted_literal: "'age'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'Bob'" - comma: ',' - quoted_literal: "'Smith'" - comma: ',' - quoted_literal: "'35'" - end_square_bracket: ']' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a_map end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_map from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dataset - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - 
comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) binary_operator: -> boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10' - comma: ',' - numeric_literal: '20' - comma: ',' - numeric_literal: '30' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - null_literal: 'null' - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: v - keyword: IS - keyword: NOT - null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'k1'" - comma: ',' - quoted_literal: "'k2'" - comma: ',' - quoted_literal: "'k3'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '20' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '15' - end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) binary_operator: -> column_reference: naked_identifier: v comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_map_type.sql000066400000000000000000000003761503426445100251300ustar00rootroot00000000000000SELECT CAST( JSON_PARSE(table_name.column_name) AS MAP<VARCHAR, VARCHAR> ) AS json_map FROM table_name; CREATE TABLE map_table(c1 map<string, integer>) LOCATION '...'; INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2])); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_map_type.yml000066400000000000000000000102221503426445100251230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 1a34fc6e064ef4478f41bfed0c5d6079dd62b82372af971371e70a0ab2d152d5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_PARSE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: table_name - dot: . - naked_identifier: column_name end_bracket: ) keyword: AS data_type: map_type: keyword: MAP map_type_schema: start_angle_bracket: < primitive_type: keyword: VARCHAR comma: ',' data_type: primitive_type: keyword: VARCHAR end_angle_bracket: '>' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: json_map from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: map_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: map_type: keyword: map map_type_schema: start_angle_bracket: < primitive_type: keyword: string comma: ',' data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: map_table - values_clause: keyword: values expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: MAP function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_reduce.sql000066400000000000000000000014061503426445100245560ustar00rootroot00000000000000SELECT reduce(ARRAY [], 0, (s, x) -> s + x, s -> s); -- 0 SELECT reduce(ARRAY [5, 20, 50], 0, (s, x) -> s + x, s -> s); -- 75 SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> s + x, s -> s); -- NULL SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> s + COALESCE(x, 0), s -> s); -- 75 SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> IF(x IS NULL, s, s + x), s -> s); -- 75 SELECT reduce(ARRAY [2147483647, 1], CAST (0 AS BIGINT), (s, x) -> s + x, s -> s); -- 2147483648 SELECT reduce(ARRAY [5, 6, 10, 20], -- calculates arithmetic average: 10.25 CAST(ROW(0.0, 0) AS ROW(sum DOUBLE, count INTEGER)), (s, x) -> CAST(ROW(x + s.sum, s.count + 1) AS ROW(sum DOUBLE, count INTEGER)), s -> IF(s.count = 0, NULL, s.sum / s.count)); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_reduce.yml000066400000000000000000000444131503426445100245650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 34cbb85c483c657c4c5b987e7586661081361e78cd178c0ef54f7733d3940281 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - 
binary_operator: + - function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: IF function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x keyword: IS null_literal: 'NULL' - comma: ',' - expression: column_reference: naked_identifier: s - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '2147483647' - comma: ',' - numeric_literal: '1' - end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' keyword: AS data_type: primitive_type: keyword: BIGINT end_bracket: ) - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '20' - end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '0.0' - comma: ',' - expression: numeric_literal: '0' 
- end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: sum - data_type: primitive_type: keyword: DOUBLE - comma: ',' - naked_identifier: count - data_type: primitive_type: keyword: INTEGER - end_bracket: ) end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: sum - comma: ',' - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: count binary_operator: + numeric_literal: '1' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: sum - data_type: primitive_type: keyword: DOUBLE - comma: ',' - naked_identifier: count - data_type: primitive_type: keyword: INTEGER - end_bracket: ) end_bracket: ) - comma: ',' - expression: column_reference: naked_identifier: s binary_operator: -> function: function_name: function_name_identifier: IF function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: count comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - comma: ',' - expression: null_literal: 'NULL' - comma: ',' - expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: sum - binary_operator: / - column_reference: - naked_identifier: s - dot: . - naked_identifier: count - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_row.sql000066400000000000000000000007241503426445100241200ustar00rootroot00000000000000SELECT ROW(1, 2.0); SELECT CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE)); SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x; SELECT CAST( ROW( ARRAY[ CAST(ROW('') AS ROW(id varchar)) ], CAST(ROW('') AS ROW(id varchar)), 'Approved' ) AS ROW( approvers ARRAY<ROW(id varchar)>, performer ROW(id varchar), approvalStatus varchar ) ) as test; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_row.yml000066400000000000000000000225071503426445100241250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: cd32310b386344b8154e7ee01f358d4d3bbe279e4121c4ab694e2a4bd14aa94b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2.0' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2.0' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: x - data_type: primitive_type: keyword: BIGINT - comma: ',' - naked_identifier: y - data_type: primitive_type: keyword: DOUBLE - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: x data_type: primitive_type: keyword: INT end_bracket: ) end_bracket: ) end_square_bracket: ']' array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' dot: . 
object_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: start_bracket: ( expression: quoted_literal: "''" end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_bracket: ) end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: start_bracket: ( expression: quoted_literal: "''" end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'Approved'" - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: approvers - data_type: array_type: keyword: ARRAY array_type_schema: start_angle_bracket: < data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_angle_bracket: '>' - comma: ',' - naked_identifier: performer - data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) - comma: ',' - naked_identifier: approvalStatus - data_type: primitive_type: keyword: varchar - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: test - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_underscore.sql000066400000000000000000000002261503426445100254570ustar00rootroot00000000000000SELECT 1 AS _; SELECT 1 AS __; SELECT 1 AS __TEST; SELECT a FROM ( VALUES ('a'), ('b') ) AS _(a); SELECT a FROM ( VALUES ('a'), ('b') ) AS __(a); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_underscore.yml000066400000000000000000000071551503426445100254710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 346a9d6b86cda9a349279affb10c93bf89d502848497d6b57b46ba98ad42256b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: _ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: __ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: __TEST - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: _ bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: __ bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_unnest.sql000066400000000000000000000004211503426445100246170ustar00rootroot00000000000000SELECT field_1, field_2, column_value FROM my_table CROSS JOIN UNNEST(array_field) AS my_unnested_table(column_value); SELECT numbers, n, a FROM ( VALUES (ARRAY[2, 5]), (ARRAY[7, 8, 9]) ) AS x (numbers) CROSS JOIN UNNEST(numbers) WITH ORDINALITY AS t (n, a); sqlfluff-3.4.2/test/fixtures/dialects/athena/select_unnest.yml000066400000000000000000000120241503426445100246230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5bb19bc20bc221086f838009eb2abcb68c2aeaabdb6dbb256449440176d9a6e5 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_field end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_unnested_table bracketed: start_bracket: ( identifier_list: naked_identifier: column_value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: numbers - comma: ',' - select_clause_element: column_reference: naked_identifier: n - comma: ',' - select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '2' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '7' - comma: ',' - numeric_literal: '8' - comma: ',' - numeric_literal: '9' - end_square_bracket: ']' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: naked_identifier: numbers end_bracket: ) join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: - function_name: function_name_identifier: UNNEST - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: numbers end_bracket: ) - keyword: WITH - keyword: ORDINALITY alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: n - comma: ',' - naked_identifier: a end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_widow_functions.sql000066400000000000000000000005631503426445100265330ustar00rootroot00000000000000SELECT orderkey, clerk, totalprice, rank() OVER (PARTITION BY clerk ORDER BY totalprice DESC) AS rnk FROM orders ORDER BY clerk, rnk; SELECT clerk, orderdate, orderkey, totalprice, sum(totalprice) OVER (PARTITION BY clerk ORDER BY orderdate) AS rolling_sum FROM orders ORDER BY clerk, orderdate, orderkey; sqlfluff-3.4.2/test/fixtures/dialects/athena/select_widow_functions.yml000066400000000000000000000104471503426445100265370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fceb478f9d7f168db57ff9346ae3970cddd5034004857f5cb3b9ed79392a9741 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: clerk - comma: ',' - select_clause_element: column_reference: naked_identifier: totalprice - comma: ',' - select_clause_element: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: clerk orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: totalprice - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rnk from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: clerk - comma: ',' - column_reference: naked_identifier: rnk - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: clerk - comma: ',' - select_clause_element: column_reference: naked_identifier: orderdate - comma: ',' - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: totalprice - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: clerk orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rolling_sum from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: clerk - comma: ',' - column_reference: naked_identifier: orderdate - comma: ',' - column_reference: naked_identifier: orderkey - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/athena/show_columns.sql000066400000000000000000000002271503426445100244700ustar00rootroot00000000000000SHOW COLUMNS FROM dbname.tablename; SHOW COLUMNS IN dbname.tablename; SHOW COLUMNS FROM tablename FROM dbname; SHOW COLUMNS IN tablename IN dbname; sqlfluff-3.4.2/test/fixtures/dialects/athena/show_columns.yml000066400000000000000000000025261503426445100244760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4cf7849864cf63b656aaf25f20f39df68107810001b8716f702864a4d105f9b6
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: FROM
    - table_reference:
      - naked_identifier: dbname
      - dot: .
      - naked_identifier: tablename
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: IN
    - table_reference:
      - naked_identifier: dbname
      - dot: .
      - naked_identifier: tablename
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: FROM
    - table_reference:
        naked_identifier: tablename
    - keyword: FROM
    - database_reference:
        naked_identifier: dbname
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: IN
    - table_reference:
        naked_identifier: tablename
    - keyword: IN
    - database_reference:
        naked_identifier: dbname
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_create_table.sql
SHOW CREATE TABLE tablename;
SHOW CREATE TABLE dbname.tablename;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_create_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 93acdb802aed4a1977116a63a9695421637970b639b23d173e06f365d2419409
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: tablename
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: dbname
      - dot: .
      - naked_identifier: tablename
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_create_view.sql
SHOW CREATE VIEW viewname;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_create_view.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ca2d569ef7d4217449e981c819684f9eb31297df4430c689d6bc85b66276247b
file:
  statement:
    show_statement:
    - keyword: SHOW
    - keyword: CREATE
    - keyword: VIEW
    - table_reference:
        naked_identifier: viewname
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_databases.sql
SHOW DATABASES;
SHOW SCHEMAS LIKE 'regex';
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_databases.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2b402e7a38e2300dc028c982bb7f226dc6fee4af9962071f1245a848a1bac9b4
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: DATABASES
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SCHEMAS
    - keyword: LIKE
    - quoted_literal: "'regex'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_partitions.sql
SHOW PARTITIONS tablename;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_partitions.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 724b6a64dbad1cd0cdb0dbdfcf2668db8b880d6da57a2e8723b9d4f8372d2ed6
file:
  statement:
    show_statement:
    - keyword: SHOW
    - keyword: PARTITIONS
    - table_reference:
        naked_identifier: tablename
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_tables.sql
SHOW TABLES;
SHOW TABLES IN sampledb;
SHOW TABLES '*myregex*';
SHOW TABLES IN sampledb '*myregex*';
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_tables.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 052f063d136c2d361b1fe9017d4941c28cf64c2a8ed26aa9d148cf6bf7efb576
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: IN
    - database_reference:
        naked_identifier: sampledb
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - quoted_literal: "'*myregex*'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: IN
    - database_reference:
        naked_identifier: sampledb
    - quoted_literal: "'*myregex*'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_tblproperties.sql
SHOW TBLPROPERTIES tablename;
SHOW TBLPROPERTIES tablename('tblname');
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_tblproperties.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4d4b5610757cd71d4dd154f53a8b3bfe011d810ec03c3d79d079458af43f3ce9
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TBLPROPERTIES
    - table_reference:
        naked_identifier: tablename
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TBLPROPERTIES
    - table_reference:
        naked_identifier: tablename
    - bracketed:
        start_bracket: (
        quoted_literal: "'tblname'"
        end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_views.sql
SHOW VIEWS;
SHOW VIEWS IN sampledb;
SHOW VIEWS LIKE 'regex*';
SHOW VIEWS IN sampledb LIKE 'regex*';
sqlfluff-3.4.2/test/fixtures/dialects/athena/show_views.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 949666fd152de72d5efd5cf723472e4a046a11283a8e4694c8a2750efb768f19
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: VIEWS
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: VIEWS
    - keyword: IN
    - database_reference:
        naked_identifier: sampledb
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: VIEWS
    - keyword: LIKE
    - quoted_literal: "'regex*'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: VIEWS
    - keyword: IN
    - database_reference:
        naked_identifier: sampledb
    - keyword: LIKE
    - quoted_literal: "'regex*'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/unload_select.sql
UNLOAD (SELECT field_1, field_2 FROM my_table)
TO 's3://my_athena_data_location/my_folder/'
WITH (format='CSV', compression='gzip', field_delimiter=',', partitioned_by=ARRAY[field_2]);
sqlfluff-3.4.2/test/fixtures/dialects/athena/unload_select.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0b7b6bddcb121899fc43e2267dc5effd92040e2e6121585928a4fd8b5c3afced
file:
  statement:
    unload_statement:
    - keyword: UNLOAD
    - bracketed:
        start_bracket: (
        select_statement:
          select_clause:
          - keyword: SELECT
          - select_clause_element:
              column_reference:
                naked_identifier: field_1
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: field_2
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: my_table
        end_bracket: )
    - keyword: TO
    - quoted_literal: "'s3://my_athena_data_location/my_folder/'"
    - keyword: WITH
    - bracketed:
      - start_bracket: (
      - keyword: format
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'CSV'"
      - comma: ','
      - keyword: compression
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'gzip'"
      - comma: ','
      - keyword: field_delimiter
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "','"
      - comma: ','
      - keyword: partitioned_by
      - comparison_operator:
          raw_comparison_operator: '='
      - typed_array_literal:
          array_type:
            keyword: ARRAY
          array_literal:
            start_square_bracket: '['
            column_reference:
              naked_identifier: field_2
            end_square_bracket: ']'
      - end_bracket: )
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/athena/values.sql
VALUES 1, 2, 3;
VALUES (1, 'a'), (2, 'b'), (3, 'c');
SELECT * FROM (
    VALUES (1, 'a'), (2, 'b'), (3, 'c')
) AS t (id, name);
CREATE TABLE customers AS
SELECT * FROM (
    VALUES (1, 'a'), (2, 'b'), (3, 'c')
) AS t (id, name);
sqlfluff-3.4.2/test/fixtures/dialects/athena/values.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9c483330ecfe3ae7e9b90078ebfff8905ab4b4d7a325dcc953ee91fbdb4b3329 file: - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'c'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'c'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: name end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: customers - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'c'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: name end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/000077500000000000000000000000001503426445100216155ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/bigquery/.sqlfluff000066400000000000000000000000361503426445100234370ustar00rootroot00000000000000[sqlfluff] dialect = bigquery sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_materialized_view_set_options.sql000066400000000000000000000003201503426445100316520ustar00rootroot00000000000000ALTER MATERIALIZED VIEW mydataset.my_mv SET OPTIONS ( enable_refresh=false ); ALTER MATERIALIZED VIEW mydataset.my_mv SET OPTIONS ( friendly_name="my_mv", labels=[("org_unit", "development")] ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_materialized_view_set_options.yml000066400000000000000000000036231503426445100316650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f920358a40ae793dac230cb79235d4b77daac481e600270d82c487e513493ed0 file: - statement: alter_materialized_view_set_options_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: enable_refresh comparison_operator: raw_comparison_operator: '=' boolean_literal: 'false' end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_set_options_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: friendly_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"my_mv"' - comma: ',' - parameter: labels - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' expression: bracketed: - start_bracket: ( - quoted_literal: '"org_unit"' - comma: ',' - quoted_literal: '"development"' - end_bracket: ) end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_schema.sql000066400000000000000000000003561503426445100247710ustar00rootroot00000000000000ALTER SCHEMA example_dataset SET DEFAULT COLLATE "und:ci"; ALTER SCHEMA example_dataset SET OPTIONS(description=""); ALTER SCHEMA example_dataset ADD REPLICA `EU` OPTIONS(location=`eu`); ALTER SCHEMA example_dataset DROP REPLICA `EU`; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_schema.yml000066400000000000000000000036771503426445100250040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2c88c9247c171e647f853cd704b69e76dd9aa1a0dcb86ad26f6280a9e89c8a0b file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - table_reference: naked_identifier: example_dataset - keyword: SET - default_collate: - keyword: DEFAULT - keyword: COLLATE - quoted_literal: '"und:ci"' - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - table_reference: naked_identifier: example_dataset - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '""' end_bracket: ) - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - table_reference: naked_identifier: example_dataset - keyword: ADD - keyword: REPLICA - column_reference: quoted_identifier: '`EU`' - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: location comparison_operator: raw_comparison_operator: '=' column_reference: quoted_identifier: '`eu`' end_bracket: ) - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - table_reference: naked_identifier: example_dataset - keyword: DROP - keyword: REPLICA - column_reference: quoted_identifier: '`EU`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_add_column.sql000066400000000000000000000005641503426445100270060ustar00rootroot00000000000000ALTER TABLE mydataset.mytable ADD COLUMN A STRING, ADD COLUMN IF NOT EXISTS B GEOGRAPHY, ADD COLUMN C ARRAY, ADD COLUMN D DATE OPTIONS(description="my description"); ALTER TABLE mydataset.mytable ADD COLUMN A STRUCT< B GEOGRAPHY, C ARRAY, D INT64 NOT NULL, E TIMESTAMP OPTIONS(description="creation time") >; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_add_column.yml000066400000000000000000000064061503426445100270110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11ca5f74cdd2c59f465e6e8a9e7a57d7717e10129695dcec229045391f266f4a file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: A data_type: data_type_identifier: STRING - comma: ',' - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: naked_identifier: B data_type: data_type_identifier: GEOGRAPHY - comma: ',' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: C data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: NUMERIC end_angle_bracket: '>' - comma: ',' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: D data_type: data_type_identifier: DATE options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"my description"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: A data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: B - data_type: data_type_identifier: GEOGRAPHY - comma: ',' - parameter: C - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' - comma: ',' - parameter: D - data_type: data_type_identifier: INT64 - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - parameter: E - data_type: data_type_identifier: TIMESTAMP - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"creation time"' end_bracket: ) - end_angle_bracket: '>' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_add_key.sql000066400000000000000000000007011503426445100262720ustar00rootroot00000000000000ALTER TABLE example_dataset.example_table2 ADD CONSTRAINT my_fk_name FOREIGN KEY (x) REFERENCES example_dataset.example_table(x) NOT ENFORCED; ALTER TABLE `example_dataset.example_table` ADD PRIMARY KEY (`x`) NOT ENFORCED; ALTER TABLE fk_table ADD PRIMARY KEY (x,y) NOT ENFORCED, ADD CONSTRAINT fk FOREIGN KEY (u, v) REFERENCES pk_table(x, y) NOT ENFORCED, ADD CONSTRAINT `fk2` FOREIGN KEY (`i`, `j`) REFERENCES `pk_table`(`x`, `y`) NOT ENFORCED; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_add_key.yml000066400000000000000000000057271503426445100263110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e3f74ec53c1b37238306efed683b139d7a014b2d315032fb6aab8da8952f161 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table2 - keyword: ADD - keyword: CONSTRAINT - naked_identifier: my_fk_name - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( naked_identifier: x end_bracket: ) - keyword: REFERENCES - table_reference: - naked_identifier: example_dataset - dot: . 
- naked_identifier: example_table - bracketed: start_bracket: ( naked_identifier: x end_bracket: ) - keyword: NOT - keyword: ENFORCED - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`example_dataset.example_table`' - keyword: ADD - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( quoted_identifier: '`x`' end_bracket: ) - keyword: NOT - keyword: ENFORCED - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: fk_table - keyword: ADD - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - naked_identifier: x - comma: ',' - naked_identifier: y - end_bracket: ) - keyword: NOT - keyword: ENFORCED - comma: ',' - keyword: ADD - keyword: CONSTRAINT - naked_identifier: fk - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - naked_identifier: u - comma: ',' - naked_identifier: v - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: pk_table - bracketed: - start_bracket: ( - naked_identifier: x - comma: ',' - naked_identifier: y - end_bracket: ) - keyword: NOT - keyword: ENFORCED - comma: ',' - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '`fk2`' - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - quoted_identifier: '`i`' - comma: ',' - quoted_identifier: '`j`' - end_bracket: ) - keyword: REFERENCES - table_reference: quoted_identifier: '`pk_table`' - bracketed: - start_bracket: ( - quoted_identifier: '`x`' - comma: ',' - quoted_identifier: '`y`' - end_bracket: ) - keyword: NOT - keyword: ENFORCED - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_alter_column.sql000066400000000000000000000005261503426445100273630ustar00rootroot00000000000000ALTER TABLE mydataset.mytable ALTER COLUMN IF EXISTS A SET OPTIONS ( description='some description here' ), ALTER COLUMN IF EXISTS B DROP NOT NULL, ALTER COLUMN IF EXISTS C DROP DEFAULT, ALTER COLUMN IF EXISTS D SET DATA TYPE FLOAT64, ALTER COLUMN IF EXISTS E SET DEFAULT 0, ALTER COLUMN IF EXISTS F SET DEFAULT CURRENT_TIMESTAMP() ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_alter_column.yml000066400000000000000000000041721503426445100273660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6734bce50d79e2600d1ac66ef061549ec1e1770d93b6f0d8a8260127d517469f file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: A - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some description here'" end_bracket: ) - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B - keyword: DROP - keyword: NOT - keyword: 'NULL' - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: C - keyword: DROP - keyword: DEFAULT - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: D - keyword: SET - keyword: DATA - keyword: TYPE - data_type: data_type_identifier: FLOAT64 - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: E - keyword: SET - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: F - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_column.sql000066400000000000000000000001121503426445100272070ustar00rootroot00000000000000ALTER TABLE mydataset.mytable DROP COLUMN A, DROP COLUMN IF EXISTS B; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_column.yml000066400000000000000000000014431503426445100272210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aeacd5c9b123877e49a7d44a88ed9f7b000f317f00b87f018609c220839906ab file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: DROP - keyword: COLUMN - naked_identifier: A - comma: ',' - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_constraint.sql000066400000000000000000000002271503426445100301050ustar00rootroot00000000000000ALTER TABLE example_dataset.example_table DROP CONSTRAINT x; ALTER TABLE `example-project.example_dataset.example_table` DROP CONSTRAINT IF EXISTS x; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_constraint.yml000066400000000000000000000017661503426445100301200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 445077d18e2a7bae420bfe4d94b8023f1f5046d052c5eba77761550fe7e63886 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . 
- naked_identifier: example_table - keyword: DROP - keyword: CONSTRAINT - naked_identifier: x - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`example-project.example_dataset.example_table`' - keyword: DROP - keyword: CONSTRAINT - keyword: IF - keyword: EXISTS - naked_identifier: x - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_key.sql000066400000000000000000000002251503426445100265070ustar00rootroot00000000000000ALTER TABLE example_dataset.example_table DROP PRIMARY KEY; ALTER TABLE `example-project.example_dataset.example_table` DROP PRIMARY KEY IF EXISTS; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_drop_key.yml000066400000000000000000000017421503426445100265160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9997aaefc952eb9b0bf6a7b4fb603e8249cd40f4563c324036c459cc56f6407f file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - keyword: DROP - keyword: PRIMARY - keyword: KEY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`example-project.example_dataset.example_table`' - keyword: DROP - keyword: PRIMARY - keyword: KEY - keyword: IF - keyword: EXISTS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_rename_column.sql000066400000000000000000000003511503426445100275170ustar00rootroot00000000000000ALTER TABLE mydataset.mytable RENAME COLUMN A TO columnA, RENAME COLUMN IF EXISTS B TO columnB; ALTER TABLE mydataset.mytable RENAME COLUMN columnA TO temp, RENAME COLUMN columnB TO columnA, RENAME COLUMN temp TO columnB; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_rename_column.yml000066400000000000000000000027711503426445100275310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4aa26e86554f6b3078f8555a0121be2fd24f333e52d54d9f7ef94346ca7d0e1e file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: RENAME - keyword: COLUMN - naked_identifier: A - keyword: TO - naked_identifier: columnA - comma: ',' - keyword: RENAME - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B - keyword: TO - naked_identifier: columnB - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: RENAME - keyword: COLUMN - naked_identifier: columnA - keyword: TO - naked_identifier: temp - comma: ',' - keyword: RENAME - keyword: COLUMN - naked_identifier: columnB - keyword: TO - naked_identifier: columnA - comma: ',' - keyword: RENAME - keyword: COLUMN - naked_identifier: temp - keyword: TO - naked_identifier: columnB - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_rename_to.sql000066400000000000000000000000641503426445100266450ustar00rootroot00000000000000ALTER TABLE mydataset.mytable RENAME TO mynewtable; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_rename_to.yml000066400000000000000000000013061503426445100266470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ead70684bf792fdc289833285129002f41a92bb5f4c930fb4279bb04aed655c file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: RENAME - keyword: TO - table_reference: naked_identifier: mynewtable statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_set_default_collate.sql000066400000000000000000000001101503426445100306660ustar00rootroot00000000000000ALTER TABLE example_dataset.example_table SET DEFAULT COLLATE "und:ci"; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_set_default_collate.yml000066400000000000000000000013551503426445100307040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ee9007f6e23966d7c7ec32afc2cdc3ffaa1e220b4485d1908ad5828c656ce98 file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - keyword: SET - default_collate: - keyword: DEFAULT - keyword: COLLATE - quoted_literal: '"und:ci"' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_set_options.sql000066400000000000000000000003601503426445100272410ustar00rootroot00000000000000ALTER TABLE mydataset.mytable SET OPTIONS ( expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY), description="Table that expires seven days from now" ); ALTER TABLE table SET OPTIONS (expiration_timestamp = NULL) ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_table_set_options.yml000066400000000000000000000043431503426445100272500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ea6d8cd6ca9c4c0f0a0982254ba1bf4569586ee494b7f2d31e945c0f473c37fe file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '7' date_part: DAY - end_bracket: ) - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"Table that expires seven days from now"' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' null_literal: 'NULL' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_various_resources.sql000066400000000000000000000007011503426445100273050ustar00rootroot00000000000000ALTER ORGANIZATION SET OPTIONS (`region-us.default_time_zone`="Asia/Tokyo"); ALTER PROJECT `example-project` SET OPTIONS (`region-us.default_time_zone`="Asia/Tokyo"); ALTER BI_CAPACITY `example-project.region-us.default` SET OPTIONS(size_gb = 250); ALTER CAPACITY `example-project.region-us.example_commitment` SET OPTIONS (plan = "THREE_YEAR"); ALTER RESERVATION `example-project.region-us.example_reservation` SET OPTIONS (slot_capacity=123); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_various_resources.yml000066400000000000000000000053261503426445100273170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7fff21ba6a9cd5d9e5a1d6f20bd3affd6ffc4f49eb548fbbcfafa9946a9c4481 file: - statement: alter_organization_statement: - keyword: ALTER - keyword: ORGANIZATION - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: '`region-us.default_time_zone`' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Asia/Tokyo"' end_bracket: ) - statement_terminator: ; - statement: alter_project_statement: - keyword: ALTER - keyword: PROJECT - table_reference: quoted_identifier: '`example-project`' - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: '`region-us.default_time_zone`' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Asia/Tokyo"' end_bracket: ) - statement_terminator: ; - statement: alter_bi_capacity_statement: - keyword: ALTER - keyword: BI_CAPACITY - table_reference: quoted_identifier: '`example-project.region-us.default`' - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: size_gb comparison_operator: raw_comparison_operator: '=' numeric_literal: '250' end_bracket: ) - statement_terminator: ; - statement: alter_capacity_statement: - keyword: ALTER - keyword: CAPACITY - table_reference: quoted_identifier: '`example-project.region-us.example_commitment`' - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: plan comparison_operator: raw_comparison_operator: '=' quoted_literal: '"THREE_YEAR"' end_bracket: ) - statement_terminator: ; - statement: alter_reservation_statement: - keyword: ALTER - keyword: RESERVATION - table_reference: quoted_identifier: '`example-project.region-us.example_reservation`' - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: slot_capacity comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_view_alter_column.sql000066400000000000000000000003351503426445100272440ustar00rootroot00000000000000ALTER VIEW example_dataset.example_view ALTER COLUMN x SET OPTIONS(description="example"); ALTER VIEW IF EXISTS `example-project.example_dataset.example_view` ALTER COLUMN IF EXISTS x SET OPTIONS(description="example"); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_view_alter_column.yml000066400000000000000000000031041503426445100272430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c167b04f3ded45c772a70e6508643a28f1105cda17d6be72967b9e07d8ce1c97 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: example_dataset - dot: . 
- naked_identifier: example_view - keyword: ALTER - keyword: COLUMN - naked_identifier: x - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: quoted_identifier: '`example-project.example_dataset.example_view`' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: x - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_view_set_options.sql000066400000000000000000000002561503426445100271300ustar00rootroot00000000000000ALTER VIEW mydataset.myview SET OPTIONS ( expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY), description="View that expires seven days from now" ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/alter_view_set_options.yml000066400000000000000000000034411503426445100271310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: caad76b6f6b44d0581aa60caa9e087fd345504e0f8cec4bfdaf5d0ca01295cc1 file: statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: myview - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '7' date_part: DAY - end_bracket: ) - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"View that expires seven days from now"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/any_value_having.sql000066400000000000000000000002041503426445100256510ustar00rootroot00000000000000SELECT ANY_VALUE(foo HAVING MIN bar) AS any_value_having_min, ANY_VALUE(foo HAVING MAX bar) AS any_value_having_max, FROM t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/any_value_having.yml000066400000000000000000000037421503426445100256650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a7daeaf6fa9363f22916bf2558609be2b6c3a4395039e5f0d927ae6ae0be04e8 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ANY_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: foo - keyword: HAVING - keyword: MIN - expression: column_reference: naked_identifier: bar - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: any_value_having_min - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ANY_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: foo - keyword: HAVING - keyword: MAX - expression: column_reference: naked_identifier: bar - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: any_value_having_max - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_distinct_ignore_nulls.sql000066400000000000000000000000761503426445100307560ustar00rootroot00000000000000SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS) AS array_agg FROM t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_distinct_ignore_nulls.yml000066400000000000000000000023331503426445100307560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 582ee3e771c213837221b8d45a4351f0c8fd8c2dab776be5fb5ad855a8931181 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: - start_bracket: ( - keyword: DISTINCT - expression: column_reference: naked_identifier: x - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_ignore_nulls.sql000066400000000000000000000000651503426445100270530ustar00rootroot00000000000000SELECT ARRAY_AGG(x IGNORE NULLS) AS array_agg FROM t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_ignore_nulls.yml000066400000000000000000000022711503426445100270560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d84ea4b7940651a8ba4b048d2feedda401b2c99aa093cb3290f5c2bc7ff76a4e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_respect_nulls.sql000066400000000000000000000000661503426445100272360ustar00rootroot00000000000000SELECT ARRAY_AGG(x RESPECT NULLS) AS array_agg FROM t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/array_agg_respect_nulls.yml000066400000000000000000000022721503426445100272410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5a539e0e6745c23a5e1d817db3f9d11109e6da03225f346989b19132719bc63e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - keyword: RESPECT - keyword: NULLS - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/bigquery/assert.sql000066400000000000000000000004041503426445100236350ustar00rootroot00000000000000ASSERT ( (SELECT COUNT(*) FROM UNNEST([1, 2, 3, 4, 5, 6])) > 5 ) AS 'Table must contain more than 5 rows.'; ASSERT EXISTS( SELECT X FROM UNNEST([7877, 7879, 7883, 7901, 7907]) AS X WHERE X = 7919 ) AS 'Column X must contain the value 7919'; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/assert.yml000066400000000000000000000110411503426445100236360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ff77247983387146d9b814e25bef3c1adb9fa4da89f2090acc2daa4f1e1c2764
file:
- statement:
    assert_statement:
    - keyword: ASSERT
    - expression:
        bracketed:
          start_bracket: (
          expression:
            bracketed:
              start_bracket: (
              expression:
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      function:
                        function_name:
                          function_name_identifier: COUNT
                        function_contents:
                          bracketed:
                            start_bracket: (
                            star: '*'
                            end_bracket: )
                  from_clause:
                    keyword: FROM
                    from_expression:
                      from_expression_element:
                        table_expression:
                          function:
                            function_name:
                              function_name_identifier: UNNEST
                            function_contents:
                              bracketed:
                                start_bracket: (
                                expression:
                                  array_literal:
                                  - start_square_bracket: '['
                                  - numeric_literal: '1'
                                  - comma: ','
                                  - numeric_literal: '2'
                                  - comma: ','
                                  - numeric_literal: '3'
                                  - comma: ','
                                  - numeric_literal: '4'
                                  - comma: ','
                                  - numeric_literal: '5'
                                  - comma: ','
                                  - numeric_literal: '6'
                                  - end_square_bracket: ']'
                                end_bracket: )
              end_bracket: )
            comparison_operator:
              raw_comparison_operator: '>'
            numeric_literal: '5'
          end_bracket: )
    - keyword: AS
    - quoted_literal: "'Table must contain more than 5 rows.'"
- statement_terminator: ;
- statement:
    assert_statement:
    - keyword: ASSERT
    - expression:
        keyword: EXISTS
        bracketed:
          start_bracket: (
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                column_reference:
                  naked_identifier: X
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    function:
                      function_name:
                        function_name_identifier: UNNEST
                      function_contents:
                        bracketed:
                          start_bracket: (
                          expression:
                            array_literal:
                            - start_square_bracket: '['
                            - numeric_literal: '7877'
                            - comma: ','
                            - numeric_literal: '7879'
                            - comma: ','
                            - numeric_literal: '7883'
                            - comma: ','
                            - numeric_literal: '7901'
                            - comma: ','
                            - numeric_literal: '7907'
                            - end_square_bracket: ']'
                          end_bracket: )
                  alias_expression:
                    alias_operator:
                      keyword: AS
                    naked_identifier: X
            where_clause:
              keyword: WHERE
              expression:
                column_reference:
                  naked_identifier: X
                comparison_operator:
                  raw_comparison_operator: '='
                numeric_literal: '7919'
          end_bracket: )
    - keyword: AS
    - quoted_literal: "'Column X must contain the value 7919'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/bigquery/begin.sql
BEGIN
    BEGIN;
    INSERT INTO `my_project.my_dataset.my_table`
    SELECT
        col1,
        col2,
        CASE WHEN col1 > col2 THEN False ELSE True END AS col3,
    FROM `my_project.my_dataset.my_other_table`;
    COMMIT TRANSACTION;
END;
BEGIN
    BEGIN TRANSACTION;
    INSERT INTO `my_project.my_dataset.my_table`
    SELECT
        col1,
        col2,
        CASE WHEN col1 > col2 THEN False ELSE True END AS col3,
    FROM `my_project.my_dataset.my_other_table`;
    COMMIT TRANSACTION;
END;
BEGIN
    BEGIN;
    INSERT INTO `my_project.my_dataset.my_table`
    SELECT
        col1,
        col2,
        CASE WHEN col1 > col2 THEN 1 ELSE 2 END AS col3,
        CASE WHEN x > 5 THEN 5 END; -- case with a statement delimiter after
    COMMIT;
END;
mylabel: BEGIN
    BEGIN;
    INSERT INTO `my_project.my_dataset.my_table`
    SELECT 1;
    COMMIT;
END mylabel;
sqlfluff-3.4.2/test/fixtures/dialects/bigquery/begin.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
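#
# A minimal sketch of how a fixture digest like the one below could be
# recomputed by hand, assuming it is a plain SHA-256 of the companion .sql
# file (an assumption -- the exact bytes SQLFluff feeds into the hash are
# defined by its test helpers, which are not shown in this archive):
#
#     import hashlib
#     from pathlib import Path
#
#     sql_path = Path("test/fixtures/dialects/bigquery/begin.sql")
#     print(hashlib.sha256(sql_path.read_bytes()).hexdigest())
#
# If the printed digest does not match the "_hash" value, the YAML is stale
# and should be regenerated with the script named above.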
_hash: f1ee3d3bd1412cf9a14caa66370f39f9b3fad182de2bd963ed65a7576a2c2552 file: - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`my_project.my_dataset.my_table`' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: col2 - keyword: THEN - expression: boolean_literal: 'False' - else_clause: keyword: ELSE expression: boolean_literal: 'True' - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: col3 - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`my_project.my_dataset.my_other_table`' - statement_terminator: ; - statement: transaction_statement: - keyword: COMMIT - keyword: TRANSACTION - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`my_project.my_dataset.my_table`' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: col2 - keyword: THEN - expression: boolean_literal: 'False' - else_clause: keyword: ELSE expression: boolean_literal: 'True' - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: col3 - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`my_project.my_dataset.my_other_table`' - statement_terminator: ; - statement: transaction_statement: - keyword: COMMIT - keyword: TRANSACTION - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`my_project.my_dataset.my_table`' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: col2 - keyword: THEN - expression: numeric_literal: '1' - else_clause: keyword: ELSE expression: numeric_literal: '2' - keyword: END 
alias_expression: alias_operator: keyword: AS naked_identifier: col3 - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: THEN - expression: numeric_literal: '5' - keyword: END - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - naked_identifier: mylabel - colon: ':' - keyword: BEGIN - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`my_project.my_dataset.my_table`' - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - naked_identifier: mylabel - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/call_procedure.sql000066400000000000000000000003061503426445100253200ustar00rootroot00000000000000CALL mydataset.create_customer(); DECLARE retCode INT64; -- Procedure signature: (IN account_id STRING, OUT retCode INT64) CALL mySchema.UpdateSomeTables('someAccountId', retCode); SELECT retCode; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/call_procedure.yml000066400000000000000000000026761503426445100253360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e28f62302ed1b3da15e497481789ab28d097fd5ac197d5c31b84fcd2b553691e file: - statement: call_statement: keyword: CALL procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: DECLARE naked_identifier: retCode data_type: data_type_identifier: INT64 - statement_terminator: ; - statement: call_statement: keyword: CALL procedure_name: naked_identifier: mySchema dot: . procedure_name_identifier: UpdateSomeTables bracketed: - start_bracket: ( - expression: quoted_literal: "'someAccountId'" - comma: ',' - expression: column_reference: naked_identifier: retCode - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: retCode - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_assignment.sql000066400000000000000000000002251503426445100260300ustar00rootroot00000000000000CREATE ASSIGNMENT `example_project.region-us.example-reservation.example-assignment` OPTIONS (assignee = 'projects/my_project', job_type = 'QUERY'); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_assignment.yml000066400000000000000000000020471503426445100260360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6094022f9b62cd472544b14bafbce1a2317d4b55e9e4735800b2b9f043812bbf file: statement: create_assignment_statement: - keyword: CREATE - keyword: ASSIGNMENT - table_reference: quoted_identifier: '`example_project.region-us.example-reservation.example-assignment`' - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: assignee - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'projects/my_project'" - comma: ',' - parameter: job_type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'QUERY'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_capacity.sql000066400000000000000000000001311503426445100254510ustar00rootroot00000000000000CREATE CAPACITY `example_project.region-us.example-commitment` OPTIONS (slot_count=123); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_capacity.yml000066400000000000000000000015411503426445100254610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c947e2477b9f911076c1bd1a99c9d5ef815413681c1d964cb9275a6c9ced8872 file: statement: create_capacity_statement: - keyword: CREATE - keyword: CAPACITY - table_reference: quoted_identifier: '`example_project.region-us.example-commitment`' - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: slot_count comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_external_table.sql000066400000000000000000000021031503426445100266460ustar00rootroot00000000000000CREATE EXTERNAL TABLE dataset.CsvTable OPTIONS ( format = 'CSV', uris = ['gs://bucket/path1.csv', 'gs://bucket/path2.csv'] ); CREATE OR REPLACE EXTERNAL TABLE dataset.CsvTable ( x INT64, y STRING ) OPTIONS ( format = 'CSV', uris = ['gs://bucket/path1.csv'], field_delimiter = '|', max_bad_records = 5 ); CREATE EXTERNAL TABLE dataset.AutoHivePartitionedTable WITH PARTITION COLUMNS OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ); CREATE EXTERNAL TABLE dataset.CustomHivePartitionedTable WITH PARTITION COLUMNS ( field_1 STRING, -- column order must match the external path field_2 INT64 ) OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ); -- Test arbitrary ordering of optional arguments CREATE EXTERNAL TABLE dataset.CustomHivePartitionedTable OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ) WITH PARTITION COLUMNS ( field_1 STRING, -- column order must match the external path field_2 INT64 ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_external_table.yml000066400000000000000000000146751503426445100266550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
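# Illustrative sketch (an editorial addition, not part of the generated fixture):
# the header above describes the convention used throughout these files. Each
# `.sql` fixture is parsed with the BigQuery dialect and the resulting parse
# tree is dumped to the neighbouring `.yml` file. Assuming SQLFluff's documented
# simple Python API is installed, one of the fixture statements above can be
# checked like this (the variable name and the print call are ours):
#
#   import sqlfluff
#
#   sql = "CREATE CAPACITY `example_project.region-us.example-commitment` OPTIONS (slot_count=123);"
#   # An empty list means the statement lints cleanly under the BigQuery dialect.
#   print(sqlfluff.lint(sql, dialect="bigquery"))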
_hash: d3e6a88d0bf00afa9605bbe26e2dc61b43e6cb3dc9d09bd20e42fcbc19465f64 file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: CsvTable - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: - start_square_bracket: '[' - quoted_literal: "'gs://bucket/path1.csv'" - comma: ',' - quoted_literal: "'gs://bucket/path2.csv'" - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: CsvTable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path1.csv'" end_square_bracket: ']' - comma: ',' - parameter: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'|'" - comma: ',' - parameter: max_bad_records - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: AutoHivePartitionedTable - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: CustomHivePartitionedTable - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: field_2 data_type: data_type_identifier: INT64 - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: CustomHivePartitionedTable - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: field_2 data_type: data_type_identifier: INT64 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_function_no_args.sql000066400000000000000000000005351503426445100272210ustar00rootroot00000000000000CREATE FUNCTION add() RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; CREATE FUNCTION example_dataset.exampleFunction() RETURNS STRING AS ("example") OPTIONS(description="example"); CREATE TEMP FUNCTION exampleFunction() RETURNS FLOAT64 AS (1.234 * 5.678); CREATE TEMPORARY FUNCTION exampleFunction() RETURNS BOOL AS (TRUE) OPTIONS(); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_function_no_args.yml000066400000000000000000000060351503426445100272240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7535c132b78563198e54e600a955a26b59d33e9b3298a06ddd5c80bbeee1c3c1 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - udf_body: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: naked_identifier: example_dataset dot: . 
function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: STRING - function_definition: keyword: AS bracketed: start_bracket: ( expression: quoted_literal: '"example"' end_bracket: ) options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT64 - function_definition: keyword: AS bracketed: start_bracket: ( expression: - numeric_literal: '1.234' - binary_operator: '*' - numeric_literal: '5.678' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name: function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: BOOL - function_definition: keyword: AS bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) options_segment: keyword: OPTIONS bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_function_with_args.sql000066400000000000000000000006231503426445100275560ustar00rootroot00000000000000CREATE FUNCTION example_dataset.exampleFunction(x FLOAT64) RETURNS FLOAT64 AS (x * x); CREATE OR REPLACE FUNCTION `example-project.example_dataset.exampleFunction`(x INTEGER, y INTEGER) RETURNS INTEGER AS (x * y) OPTIONS(description="foo"); CREATE TEMPORARY FUNCTION exampleFunction(x BIGNUMERIC) AS (x + x); CREATE TEMP FUNCTION exampleFunction(x STRING) RETURNS STRING AS (CONCAT(x, x)) OPTIONS(); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_function_with_args.yml000066400000000000000000000103621503426445100275610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c628b3819952808793e5bb6b35858ed6e21342eafbb251c9d063b3c7c3869d26 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: naked_identifier: example_dataset dot: . 
function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: FLOAT64 end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT64 - function_definition: keyword: AS bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - binary_operator: '*' - column_reference: naked_identifier: x end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: '`example-project.example_dataset.exampleFunction`' - function_parameter_list: bracketed: - start_bracket: ( - parameter: x - data_type: data_type_identifier: INTEGER - comma: ',' - parameter: y - data_type: data_type_identifier: INTEGER - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INTEGER - function_definition: keyword: AS bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - binary_operator: '*' - column_reference: naked_identifier: y end_bracket: ) options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"foo"' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name: function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: BIGNUMERIC end_bracket: ) - function_definition: keyword: AS bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: naked_identifier: x end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: exampleFunction - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: STRING - function_definition: keyword: AS bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: CONCAT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: x - end_bracket: ) end_bracket: ) options_segment: keyword: OPTIONS bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_index.sql000066400000000000000000000007151503426445100247730ustar00rootroot00000000000000CREATE SEARCH INDEX my_index ON example_dataset.example_table(ALL COLUMNS); CREATE SEARCH INDEX IF NOT EXISTS my_index ON example_dataset.example_table(x, y, z) OPTIONS (analyzer = 'NO_OP_ANALYZER'); CREATE VECTOR INDEX my_index ON example_dataset.example_table(example_column) OPTIONS(index_type = 'IVF'); CREATE OR REPLACE VECTOR INDEX IF NOT EXISTS my_index ON example_dataset.example_table(x, y, z) OPTIONS(index_type = 'IVF', distance_type = 'COSINE'); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_index.yml000066400000000000000000000071421503426445100247760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5b43eae86a84edfdbb9dfdbaf664af036471b7bff1a24bc8772c1993a5b8007c file: - statement: create_search_index_statement: - keyword: CREATE - keyword: SEARCH - keyword: INDEX - index_reference: naked_identifier: my_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - bracketed: - start_bracket: ( - keyword: ALL - keyword: COLUMNS - end_bracket: ) - statement_terminator: ; - statement: create_search_index_statement: - keyword: CREATE - keyword: SEARCH - keyword: INDEX - keyword: IF - keyword: NOT - keyword: EXISTS - index_reference: naked_identifier: my_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: x - comma: ',' - index_column_definition: naked_identifier: y - comma: ',' - index_column_definition: naked_identifier: z - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: analyzer comparison_operator: raw_comparison_operator: '=' quoted_literal: "'NO_OP_ANALYZER'" end_bracket: ) - statement_terminator: ; - statement: create_vector_index_statement: - keyword: CREATE - keyword: VECTOR - keyword: INDEX - index_reference: naked_identifier: my_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - bracketed: start_bracket: ( index_column_definition: naked_identifier: example_column end_bracket: ) - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: index_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IVF'" end_bracket: ) - statement_terminator: ; - statement: create_vector_index_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VECTOR - keyword: INDEX - keyword: IF - keyword: NOT - keyword: EXISTS - index_reference: naked_identifier: my_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: x - comma: ',' - index_column_definition: naked_identifier: y - comma: ',' - index_column_definition: naked_identifier: z - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: index_type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'IVF'" - comma: ',' - parameter: distance_type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'COSINE'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_complex_types.sql000066400000000000000000000004161503426445100311560ustar00rootroot00000000000000CREATE TEMP FUNCTION qs( foo1 INT64, foo2 ARRAY<STRING>, foo3 STRUCT<x INT64>, foo4 STRUCT<x INT64, y INT64>, foo5 STRUCT<a ARRAY<FLOAT>, b STRUCT<x INT64, y INT64>> ) RETURNS STRUCT<product_id ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml000066400000000000000000000066601503426445100311670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
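# Illustrative sketch (an editorial addition, not part of the generated fixture):
# the SQL above exercises nested ARRAY/STRUCT parameter and return types, and
# the tree below records each angle bracket as its own start_angle_bracket /
# end_angle_bracket node. A minimal way to reproduce such a tree, assuming the
# documented simple API is installed (the example function signature is ours):
#
#   import sqlfluff
#
#   sql = "CREATE TEMP FUNCTION f(x STRUCT<a ARRAY<FLOAT>>) RETURNS INT64 AS (1)"
#   parsed = sqlfluff.parse(sql, dialect="bigquery")  # raises if the SQL is unparsable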
_hash: 8f953523c623c2f0f692913757e4e5b6b3166da87fe5b8f571796a8e326977cb file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: - start_bracket: ( - parameter: foo1 - data_type: data_type_identifier: INT64 - comma: ',' - parameter: foo2 - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: STRING end_angle_bracket: '>' - comma: ',' - parameter: foo3 - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: x data_type: data_type_identifier: INT64 end_angle_bracket: '>' - comma: ',' - parameter: foo4 - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: y - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' - comma: ',' - parameter: foo5 - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: a - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: FLOAT end_angle_bracket: '>' - comma: ',' - parameter: b - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: y - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' - end_angle_bracket: '>' - end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: product_id data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_deterministic.sql000066400000000000000000000001161503426445100311230ustar00rootroot00000000000000CREATE FUNCTION qs( y STRING ) DETERMINISTIC LANGUAGE js AS " return y; " sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_deterministic.yml000066400000000000000000000016001503426445100311240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5215816c5689dccbcbf896f437850b2dc91b0efde7362006d1db916fd58fab59 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - function_definition: - keyword: DETERMINISTIC - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: '" return y; "' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_options_library_array.sql000066400000000000000000000003641503426445100327020ustar00rootroot00000000000000CREATE TEMP FUNCTION parseTopSellers(arr_str STRING) RETURNS ARRAY<STRUCT<product_id INT64, rating FLOAT64>> LANGUAGE js OPTIONS ( library=["gs://my-bucket/path/to/lib1.js", "gs://my-bucket/path/to/lib2.js"] ) AS """ CODE GOES HERE """ sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml000066400000000000000000000035751503426445100327110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61c3e66ff3de5079c9134b67ac6d4eb08d90ccbeeb79ef35a01d5588eee8a2a5 file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: parseTopSellers - function_parameter_list: bracketed: start_bracket: ( parameter: arr_str data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: product_id - data_type: data_type_identifier: INT64 - comma: ',' - parameter: rating - data_type: data_type_identifier: FLOAT64 - end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: OPTIONS - bracketed: - start_bracket: ( - parameter: library - comparison_operator: raw_comparison_operator: '=' - start_square_bracket: '[' - double_quote: '"gs://my-bucket/path/to/lib1.js"' - comma: ',' - double_quote: '"gs://my-bucket/path/to/lib2.js"' - end_square_bracket: ']' - end_bracket: ) - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_quoted_name.sql000066400000000000000000000001641503426445100305640ustar00rootroot00000000000000CREATE TEMP FUNCTION qs( y STRING ) RETURNS STRUCT<`$=` ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml000066400000000000000000000025121503426445100305650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 4a4cb38f3e6f423ed83783a235c1c7ac5c0a268cfd2a4903b20ccba8352d5faa file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: '`$=`' data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_simple.sql000066400000000000000000000001001503426445100275420ustar00rootroot00000000000000CREATE FUNCTION qs( y STRING ) LANGUAGE js AS " return y; " sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_simple.yml000066400000000000000000000015411503426445100275560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cd6b2acb838c5b14165ae5ff3d733beed753ba1cf49ed4f8c057ff0c60c1e41d file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: '" return y; "' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_underscore_name.sql000066400000000000000000000001741503426445100314350ustar00rootroot00000000000000CREATE TEMP FUNCTION _qs( y STRING ) RETURNS STRUCT<_product_id ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml000066400000000000000000000025201503426445100314340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
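# Illustrative sketch (an editorial addition, not part of the generated fixture):
# in the JS UDF fixtures above, SQLFluff does not attempt to parse the JavaScript
# itself; the entire quoted body is captured as a single opaque `udf_body` node,
# which is why the YAML stores it as one escaped string. A sketch with our own
# example body, assuming the documented simple API is installed:
#
#   import sqlfluff
#
#   sql = 'CREATE TEMP FUNCTION js_id(y STRING) RETURNS STRING LANGUAGE js AS "return y;"'
#   parsed = sqlfluff.parse(sql, dialect="bigquery")  # JS body stays a single udf_body literal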
_hash: dd3ec6684af84a81c35279fd471dd61e0f8e6b4947388c123b76bd6d21cb80ea file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: _qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: _product_id data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_materialized_view.sql000066400000000000000000000006731503426445100273730ustar00rootroot00000000000000CREATE MATERIALIZED VIEW mydataset.my_mv AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW IF NOT EXISTS mydataset.my_mv AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW mydataset.my_mv OPTIONS( friendly_name="my_mv" ) AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW mydataset.my_mv PARTITION BY DATE(x) CLUSTER BY y AS SELECT x, y FROM anotherdataset.mv_base_table; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_materialized_view.yml000066400000000000000000000105511503426445100273710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9a995a851203a44d5b9d9986c5b1d6884af1e9746fc84fe1888bcd70c3eaf45f file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: my_mv - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: friendly_name comparison_operator: raw_comparison_operator: '=' quoted_literal: '"my_mv"' end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_materialized_view_as_replica_of.sql000066400000000000000000000003431503426445100322330ustar00rootroot00000000000000CREATE MATERIALIZED VIEW mydataset.my_mv AS REPLICA OF mydataset.my_original_mv; CREATE MATERIALIZED VIEW my-project.mydataset.my_mv OPTIONS(replication_interval_seconds=900) AS REPLICA OF my-project.mydataset.my_original_mv; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_materialized_view_as_replica_of.yml000066400000000000000000000033221503426445100322350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1aaeff374dcc712894928545f6560bc8f91555ab61ce4c327d133baad9b877eb file: - statement: create_materialized_view_as_replica_of_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: AS - keyword: REPLICA - keyword: OF - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_original_mv - statement_terminator: ; - statement: create_materialized_view_as_replica_of_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: my - dash: '-' - naked_identifier: project - dot: . - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: replication_interval_seconds comparison_operator: raw_comparison_operator: '=' numeric_literal: '900' end_bracket: ) - keyword: AS - keyword: REPLICA - keyword: OF - table_reference: - naked_identifier: my - dash: '-' - naked_identifier: project - dot: . - naked_identifier: mydataset - dot: . 
- naked_identifier: my_original_mv - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_or_replace_sql_function_any_type.sql000066400000000000000000000003751503426445100324750ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION qs( y ANY TYPE ) AS ( CASE WHEN y = 1 THEN 'low' WHEN y = 2 THEN 'midlow' WHEN y = 3 THEN 'mid' WHEN y = 4 THEN 'midhigh' WHEN y = 5 THEN 'high' ELSE "unknown" END ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_or_replace_sql_function_any_type.yml000066400000000000000000000057621503426445100325040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5534ffaec68f122b75ad2ca2d715003fb32932ed69e7d51d6869b1a046439379 file: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: - start_bracket: ( - parameter: y - keyword: ANY - keyword: TYPE - end_bracket: ) - function_definition: keyword: AS bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'low'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - keyword: THEN - expression: quoted_literal: "'midlow'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - keyword: THEN - expression: quoted_literal: "'mid'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '4' - keyword: THEN - expression: quoted_literal: "'midhigh'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - keyword: THEN - expression: quoted_literal: "'high'" - else_clause: keyword: ELSE expression: quoted_literal: '"unknown"' - keyword: END end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_or_replace_table_options_as.sql000066400000000000000000000001451503426445100314010ustar00rootroot00000000000000CREATE OR REPLACE TABLE foo OPTIONS (description = 'copy of bar') AS ( SELECT * from bar ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_or_replace_table_options_as.yml000066400000000000000000000025231503426445100314050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1686dd9b1ec59d5a8c95a44e8ea5914dc79e291fc688ce56fa9cf8814a1108d6 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_procedure.sql000066400000000000000000000077311503426445100256610ustar00rootroot00000000000000CREATE OR REPLACE PROCEDURE `mfrm_working_temp_dataset.temp` (incremental INT64) BEGIN SELECT CURRENT_DATETIME(); END; CREATE PROCEDURE myProject.myDataset.QueryTable() BEGIN SELECT * FROM anotherDataset.myTable; END; CREATE PROCEDURE mydataset.AddDelta(INOUT x INT64, delta INT64) BEGIN SET x = x + delta; END; CREATE PROCEDURE mydataset.SelectFromTablesAndAppend( target_date DATE, OUT rows_added INT64) BEGIN CREATE TEMP TABLE DataForTargetDate AS SELECT t1.id, t1.x, t2.y FROM dataset.partitioned_table1 AS t1 JOIN dataset.partitioned_table2 AS t2 ON t1.id = t2.id WHERE t1.date = target_date AND t2.date = target_date; SET rows_added = (SELECT COUNT(*) FROM DataForTargetDate); SELECT id, x, y, target_date -- note that target_date is a parameter FROM DataForTargetDate; DROP TABLE DataForTargetDate; END; CREATE OR REPLACE PROCEDURE mydataset.create_customer() BEGIN DECLARE id STRING; SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id) VALUES(id); SELECT FORMAT("Created customer %s", id); END; CREATE OR REPLACE PROCEDURE mydataset.create_customer(name STRING) BEGIN DECLARE id STRING; SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id, name) VALUES(id, name); SELECT FORMAT("Created customer %s (%s)", id, name); END; CREATE OR REPLACE PROCEDURE mydataset.create_customer(name STRING, OUT id STRING) BEGIN SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id, name) VALUES(id, name); SELECT FORMAT("Created customer %s (%s)", id, name); END; CREATE OR REPLACE PROCEDURE mydataset.test_raise_return(error_message STRING) BEGIN RETURN; RAISE; RAISE USING MESSAGE = "Test"; RAISE USING MESSAGE = error_message; END; CREATE OR REPLACE PROCEDURE place_name.proc_name ( _log STRUCT< Process_ID STRING, Debug INT64 > ) BEGIN -- Declaration DECLARE _source_table STRING; CREATE OR REPLACE TEMP TABLE _session.__calendar_target_buffer ( some_id INT64 ); --try BEGIN SET _metric_id = 1001; -- define metric details CALL some_place.some_name1 ( _some_id ); --end try END; END; CREATE OR REPLACE PROCEDURE place_name.proc_name ( _log STRUCT< Process_ID STRING, Debug INT64 > ) BEGIN -- Declaration DECLARE _source_table STRING; CREATE OR REPLACE TEMP TABLE _session.__calendar_target_buffer ( some_id INT64 ); --try BEGIN SET _metric_id = 1001; -- define metric details CALL some_place.some_name1 ( _some_id ); FOR x IN ( SELECT 1 ) DO FOR y IN ( SELECT x ) DO BEGIN; SELECT y; COMMIT TRANSACTION; END FOR; END FOR; --end try END; END; CREATE OR REPLACE PROCEDURE place_name.proc_name ( _log STRUCT< Process_ID STRING, Debug INT64 > ) OPTIONS ( strict_mode = TRUE, description = ''' Author: ''' ) BEGIN -- Declaration DECLARE 
_source_table STRING; CREATE OR REPLACE TEMP TABLE _session.__calendar_target_buffer ( some_id INT64 ); --try BEGIN -- Initialization: SET _metric_id = 1001; -- define metric details CALL some_place.some_name1 ( _some_id ); EXCEPTION WHEN ERROR THEN RAISE; --end try END; END; CREATE OR REPLACE PROCEDURE `kkk.dynamic_table_creator` ( IN table_suffix STRING, IN num_rows INT64 ) BEGIN DECLARE table_name STRING; SET table_name = 'my_table_' || table_suffix; EXECUTE IMMEDIATE ''' CREATE OR REPLACE TABLE `my_dataset.''' || table_name || '''` AS SELECT GENERATE_UUID() AS id, RAND() AS random_value FROM UNNEST(GENERATE_ARRAY(1, ''' || num_rows || ''')) AS num '''; END; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_procedure.yml000066400000000000000000000773071503426445100256710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b5dfecf743200d0813d4bac9997313367da50a6853befb0e6ebae63bde3b244 file: - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: procedure_name_identifier: '`mfrm_working_temp_dataset.temp`' - procedure_parameter_list: bracketed: start_bracket: ( parameter: incremental data_type: data_type_identifier: INT64 end_bracket: ) - begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATETIME function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: - naked_identifier: myProject - dot: . - naked_identifier: myDataset - dot: . - procedure_name_identifier: QueryTable - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherDataset - dot: . - naked_identifier: myTable - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: AddDelta - procedure_parameter_list: bracketed: - start_bracket: ( - keyword: INOUT - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: delta - data_type: data_type_identifier: INT64 - end_bracket: ) - begin_statement: - keyword: BEGIN - statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: naked_identifier: delta - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . 
procedure_name_identifier: SelectFromTablesAndAppend - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: target_date - data_type: data_type_identifier: DATE - comma: ',' - keyword: OUT - parameter: rows_added - data_type: data_type_identifier: INT64 - end_bracket: ) - begin_statement: - keyword: BEGIN - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: DataForTargetDate - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: x - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: partitioned_table1 alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: partitioned_table2 alias_expression: alias_operator: keyword: AS naked_identifier: t2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: target_date - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: target_date - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: rows_added comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DataForTargetDate end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: target_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DataForTargetDate - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: DataForTargetDate - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . 
procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: id data_type: data_type_identifier: STRING - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: customers - bracketed: start_bracket: ( column_reference: naked_identifier: customer_id end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s"' - comma: ',' - expression: column_reference: naked_identifier: id - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: start_bracket: ( parameter: name data_type: data_type_identifier: STRING end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: id data_type: data_type_identifier: STRING - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: customers - bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s (%s)"' - comma: ',' - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . 
procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: name - data_type: data_type_identifier: STRING - comma: ',' - keyword: OUT - parameter: id - data_type: data_type_identifier: STRING - end_bracket: ) - begin_statement: - keyword: BEGIN - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: customers - bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s (%s)"' - comma: ',' - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: test_raise_return - procedure_parameter_list: bracketed: start_bracket: ( parameter: error_message data_type: data_type_identifier: STRING end_bracket: ) - begin_statement: - keyword: BEGIN - statement: return_statement: keyword: RETURN - statement_terminator: ; - statement: raise_statement: keyword: RAISE - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: '"Test"' - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: error_message - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: place_name dot: . procedure_name_identifier: proc_name - procedure_parameter_list: bracketed: start_bracket: ( parameter: _log data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: Process_ID - data_type: data_type_identifier: STRING - comma: ',' - parameter: Debug - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: _source_table data_type: data_type_identifier: STRING - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMP - keyword: TABLE - table_reference: - naked_identifier: _session - dot: . 
- naked_identifier: __calendar_target_buffer - bracketed: start_bracket: ( column_definition: naked_identifier: some_id data_type: data_type_identifier: INT64 end_bracket: ) - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: set_segment: keyword: SET naked_identifier: _metric_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1001' - statement_terminator: ; - statement: call_statement: keyword: CALL procedure_name: naked_identifier: some_place dot: . procedure_name_identifier: some_name1 bracketed: start_bracket: ( expression: column_reference: naked_identifier: _some_id end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: place_name dot: . procedure_name_identifier: proc_name - procedure_parameter_list: bracketed: start_bracket: ( parameter: _log data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: Process_ID - data_type: data_type_identifier: STRING - comma: ',' - parameter: Debug - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: _source_table data_type: data_type_identifier: STRING - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMP - keyword: TABLE - table_reference: - naked_identifier: _session - dot: . - naked_identifier: __calendar_target_buffer - bracketed: start_bracket: ( column_definition: naked_identifier: some_id data_type: data_type_identifier: INT64 end_bracket: ) - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: set_segment: keyword: SET naked_identifier: _metric_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1001' - statement_terminator: ; - statement: call_statement: keyword: CALL procedure_name: naked_identifier: some_place dot: . 
procedure_name_identifier: some_name1 bracketed: start_bracket: ( expression: column_reference: naked_identifier: _some_id end_bracket: ) - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: x - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - keyword: DO - for_in_statements: multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: y - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x end_bracket: ) - keyword: DO - for_in_statements: - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: y - statement_terminator: ; - statement: transaction_statement: - keyword: COMMIT - keyword: TRANSACTION - statement_terminator: ; - keyword: END - keyword: FOR statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: place_name dot: . procedure_name_identifier: proc_name - procedure_parameter_list: bracketed: start_bracket: ( parameter: _log data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: Process_ID - data_type: data_type_identifier: STRING - comma: ',' - parameter: Debug - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: strict_mode - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'''\nAuthor:\n'''" - end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: _source_table data_type: data_type_identifier: STRING - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMP - keyword: TABLE - table_reference: - naked_identifier: _session - dot: . - naked_identifier: __calendar_target_buffer - bracketed: start_bracket: ( column_definition: naked_identifier: some_id data_type: data_type_identifier: INT64 end_bracket: ) - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: set_segment: keyword: SET naked_identifier: _metric_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1001' - statement_terminator: ; - statement: call_statement: keyword: CALL procedure_name: naked_identifier: some_place dot: . 
procedure_name_identifier: some_name1 bracketed: start_bracket: ( expression: column_reference: naked_identifier: _some_id end_bracket: ) - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: raise_statement: keyword: RAISE - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: procedure_name_identifier: '`kkk.dynamic_table_creator`' - procedure_parameter_list: bracketed: - start_bracket: ( - keyword: IN - parameter: table_suffix - data_type: data_type_identifier: STRING - comma: ',' - keyword: IN - parameter: num_rows - data_type: data_type_identifier: INT64 - end_bracket: ) - begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: table_name data_type: data_type_identifier: STRING - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: table_name comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'my_table_'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: table_suffix - statement_terminator: ; - statement: execute_immediate: - keyword: EXECUTE - keyword: IMMEDIATE - expression: - quoted_literal: "'''\n CREATE OR REPLACE TABLE `my_dataset.'''" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: table_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'''` AS\n SELECT\n GENERATE_UUID() AS id,\n\ \ RAND() AS random_value\n FROM\n UNNEST(GENERATE_ARRAY(1,\ \ '''" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: num_rows - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "''')) AS num\n '''" - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_reservation.sql000066400000000000000000000001401503426445100262150ustar00rootroot00000000000000CREATE RESERVATION `example_project.region-us.example-reservation` OPTIONS (slot_capacity=123); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_reservation.yml000066400000000000000000000015531503426445100262300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ded9a2511b94a6b4660096fba408c58b8db2edef2b96ae5607d0fb9d1272354b file: statement: create_reservation_statement: - keyword: CREATE - keyword: RESERVATION - table_reference: quoted_identifier: '`example_project.region-us.example-reservation`' - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: slot_capacity comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_row_access_policy.sql000066400000000000000000000015741503426445100273770ustar00rootroot00000000000000CREATE ROW ACCESS POLICY row_access_policy_name ON example_dataset.example_table FILTER USING (TRUE); CREATE OR REPLACE ROW ACCESS POLICY row_access_policy_name ON example_dataset.example_table GRANT TO ("user:someone@example.com") FILTER USING (x = y); CREATE ROW ACCESS POLICY IF NOT EXISTS row_access_policy_name ON example_dataset.example_table GRANT TO ( "serviceAccount:example@example-project.iam.gserviceaccount.com", "group:some_group@example.com", "domain:example.com" ) FILTER USING (email_column_name = SESSION_USER()); CREATE OR REPLACE ROW ACCESS POLICY IF NOT EXISTS row_access_policy_name ON example_dataset.example_table GRANT TO ("allAuthenticatedUsers") FILTER USING (SESSION_USER() IN ("foo", "bar")); CREATE ROW ACCESS POLICY row_access_policy_name ON example_dataset.example_table GRANT TO ("allUsers") FILTER USING (example_dataset.exampleFunction(x, y)); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_row_access_policy.yml000066400000000000000000000125311503426445100273740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fcd45e4bfc926fc7964ac237184ab221afd81ffb7c16348f340df940cc927106 file: - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: row_access_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - keyword: FILTER - keyword: USING - bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: row_access_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - grant_to_segment: - keyword: GRANT - keyword: TO - bracketed: start_bracket: ( quoted_literal: '"user:someone@example.com"' end_bracket: ) - keyword: FILTER - keyword: USING - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: y end_bracket: ) - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: row_access_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . 
- naked_identifier: example_table - grant_to_segment: - keyword: GRANT - keyword: TO - bracketed: - start_bracket: ( - quoted_literal: '"serviceAccount:example@example-project.iam.gserviceaccount.com"' - comma: ',' - quoted_literal: '"group:some_group@example.com"' - comma: ',' - quoted_literal: '"domain:example.com"' - end_bracket: ) - keyword: FILTER - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: email_column_name comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: SESSION_USER function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: row_access_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - grant_to_segment: - keyword: GRANT - keyword: TO - bracketed: start_bracket: ( quoted_literal: '"allAuthenticatedUsers"' end_bracket: ) - keyword: FILTER - keyword: USING - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SESSION_USER function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: IN bracketed: - start_bracket: ( - quoted_literal: '"foo"' - comma: ',' - quoted_literal: '"bar"' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: row_access_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - grant_to_segment: - keyword: GRANT - keyword: TO - bracketed: start_bracket: ( quoted_literal: '"allUsers"' end_bracket: ) - keyword: FILTER - keyword: USING - bracketed: start_bracket: ( expression: function: function_name: naked_identifier: example_dataset dot: . function_name_identifier: exampleFunction function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_schema.sql000066400000000000000000000002141503426445100251160ustar00rootroot00000000000000CREATE SCHEMA dataset_name; CREATE SCHEMA IF NOT EXISTS project_name.dataset_name DEFAULT COLLATE 'und:ci' OPTIONS(description="example"); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_schema.yml000066400000000000000000000023111503426445100251200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a56bcb12bdbbc3a6a71e6fb7f8ea94fa8b7a57eee9edef046d2115d2650ad41e file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - table_reference: naked_identifier: dataset_name - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: project_name - dot: . 
- naked_identifier: dataset_name - default_collate: - keyword: DEFAULT - keyword: COLLATE - quoted_literal: "'und:ci'" - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_snapshot_table.sql000066400000000000000000000010471503426445100266710ustar00rootroot00000000000000CREATE SNAPSHOT TABLE `example-project.example_dataset.example_table_snapshot_20240101` CLONE `example-project.example_dataset.example_table`; CREATE SNAPSHOT TABLE IF NOT EXISTS `example-project.example_dataset.example_table_snapshot_20240101` CLONE `example-project.example_dataset.example_table` FOR SYSTEM_TIME AS OF TIMESTAMP("2024-01-01 12:00:00") OPTIONS ( expiration_timestamp=TIMESTAMP("2024-02-01 12:00:00"), friendly_name="my_table_snapshot", description="example description", labels=[("example_key", "example_value")] ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_snapshot_table.yml000066400000000000000000000057051503426445100267000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c9954d49c4baf332c6be5453ae00590c60c05141a1f2d0102c1e1a5718da9144 file: - statement: create_snapshot_table_statement: - keyword: CREATE - keyword: SNAPSHOT - keyword: TABLE - table_reference: quoted_identifier: '`example-project.example_dataset.example_table_snapshot_20240101`' - keyword: CLONE - table_reference: quoted_identifier: '`example-project.example_dataset.example_table`' - statement_terminator: ; - statement: create_snapshot_table_statement: - keyword: CREATE - keyword: SNAPSHOT - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '`example-project.example_dataset.example_table_snapshot_20240101`' - keyword: CLONE - table_reference: quoted_identifier: '`example-project.example_dataset.example_table`' - for_system_time_as_of_segment: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: TIMESTAMP function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2024-01-01 12:00:00"' end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2024-02-01 12:00:00"' end_bracket: ) - comma: ',' - parameter: friendly_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"my_table_snapshot"' - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"example description"' - comma: ',' - parameter: labels - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' expression: bracketed: - start_bracket: ( - quoted_literal: '"example_key"' - comma: ',' - quoted_literal: '"example_value"' - end_bracket: ) end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_column_options.sql000066400000000000000000000006341503426445100301030ustar00rootroot00000000000000CREATE TABLE t_table1 ( x INT64 OPTIONS(description="An INTEGER field") ); CREATE TABLE t_table1 ( x INT64 NOT NULL OPTIONS(description="An INTEGER field that is NOT NULL") ); CREATE TABLE t_table1 ( x STRUCT< col1 INT64 OPTIONS(description="An INTEGER field in a STRUCT") >, y ARRAY<STRUCT<col1 INT64 OPTIONS(description="An INTEGER field in a REPEATED STRUCT")>> ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_column_options.yml000066400000000000000000000072401503426445100301050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7c6425d4b2c260947fda76839d8c852def220cf4d503f57679b1e8a198e0dd73 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 column_constraint_segment: - keyword: NOT - keyword: 'NULL' options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field that is NOT NULL"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: col1 data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field in a STRUCT"' end_bracket: ) end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: y data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: col1 data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field in a REPEATED STRUCT"' end_bracket: ) end_angle_bracket: '>' end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_columns_partition_options.sql000066400000000000000000000001671503426445100323600ustar00rootroot00000000000000CREATE TABLE newtable ( x TIMESTAMP, y INT64 ) PARTITION BY DATE(x) CLUSTER BY x, y OPTIONS(description="foo") sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_columns_partition_options.yml000066400000000000000000000033631503426445100323630ustar00rootroot00000000000000# YML test files are
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d7212cc0702be19c03fe7d36ba84048d3bd4a728b6e965cdf51533e09a3b3a3c file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: TIMESTAMP - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"foo"' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_default_collate.sql000066400000000000000000000004661503426445100301650ustar00rootroot00000000000000CREATE TABLE example_dataset.example_table (x INT64) DEFAULT COLLATE 'und:ci'; CREATE OR REPLACE TABLE example-project.example_dataset.example_table ( x INT64 OPTIONS(description="example"), y INT64 OPTIONS(description="example") ) DEFAULT COLLATE 'und:ci' CLUSTER BY x, y OPTIONS(description="example"); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_default_collate.yml000066400000000000000000000054071503426445100301670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 23a31b8c43b1058987c0db2c34869023ec7cf34b2639281bd1698ce89c703467 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 end_bracket: ) - default_collate: - keyword: DEFAULT - keyword: COLLATE - quoted_literal: "'und:ci'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: example - dash: '-' - naked_identifier: project - dot: . - naked_identifier: example_dataset - dot: . 
- naked_identifier: example_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - end_bracket: ) - default_collate: - keyword: DEFAULT - keyword: COLLATE - quoted_literal: "'und:ci'" - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_function.sql000066400000000000000000000006241503426445100266570ustar00rootroot00000000000000-- From BigQuery Docs: -- https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement CREATE OR REPLACE TABLE FUNCTION mydataset.names_by_year(y INT64) RETURNS TABLE<name STRING, year INT64, total INT64> AS SELECT year, name, SUM(number) AS total FROM `bigquery-public-data.usa_names.usa_1910_current` WHERE year = y GROUP BY year, name sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_function.yml000066400000000000000000000053611503426445100266640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3541bc1696a889a9129eb78128793c2d1b2436ee9d415f1af8f8799dc6406cf file: statement: create_table_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - keyword: FUNCTION - table_reference: - naked_identifier: mydataset - dot: .
- naked_identifier: names_by_year - bracketed: start_bracket: ( column_definition: naked_identifier: y data_type: data_type_identifier: INT64 end_bracket: ) - keyword: RETURNS - keyword: TABLE - start_angle_bracket: < - parameter: name - data_type: data_type_identifier: STRING - comma: ',' - parameter: year - data_type: data_type_identifier: INT64 - comma: ',' - parameter: total - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`bigquery-public-data.usa_names.usa_1910_current`' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: year - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: y groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: year - comma: ',' - column_reference: naked_identifier: name sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_hyphen_project.sql000066400000000000000000000002231503426445100300460ustar00rootroot00000000000000CREATE OR REPLACE TABLE project-name.dataset_name.table_name ( x INT64 OPTIONS(description="An INTEGER field") ) PARTITION BY DATE(import_ts); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_hyphen_project.yml000066400000000000000000000032021503426445100300500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6dfec2ec505c10a34b8a79dfbccc6728b4720355714e0dce7b73f6790a674a14 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: project - dash: '-' - naked_identifier: name - dot: . - naked_identifier: dataset_name - dot: . 
- naked_identifier: table_name - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field"' end_bracket: ) end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: import_ts end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_keys.sql000066400000000000000000000007671503426445100260150ustar00rootroot00000000000000CREATE TABLE t_table1 ( x INT64, PRIMARY KEY (x) NOT ENFORCED ) ; CREATE TABLE t_table1 ( y STRING, FOREIGN KEY (y) REFERENCES t_table2(y) NOT ENFORCED, ) ; CREATE TABLE t_table1 ( x INT64, PRIMARY KEY (x) NOT ENFORCED, y STRING, FOREIGN KEY (y) REFERENCES t_table2(y) NOT ENFORCED, _other STRING ) ; CREATE TABLE `some_dataset.some_table` ( id STRING NOT NULL PRIMARY KEY NOT ENFORCED, other_field STRING REFERENCES other_table(other_field) NOT ENFORCED ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_keys.yml000066400000000000000000000106051503426445100260070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e4af7f29c06a83c2190d5fd2588bfe8558c9438902ff2587fdc5194747f96e62 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: x end_bracket: ) - keyword: NOT - keyword: ENFORCED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: y end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t_table2 - bracketed: start_bracket: ( column_reference: naked_identifier: y end_bracket: ) - keyword: NOT - keyword: ENFORCED - comma: ',' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: x end_bracket: ) - keyword: NOT - keyword: ENFORCED - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: y end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: 
t_table2 - bracketed: start_bracket: ( column_reference: naked_identifier: y end_bracket: ) - keyword: NOT - keyword: ENFORCED - comma: ',' - column_definition: naked_identifier: _other data_type: data_type_identifier: STRING - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`some_dataset.some_table`' - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: STRING - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: NOT - keyword: ENFORCED - comma: ',' - column_definition: naked_identifier: other_field data_type: data_type_identifier: STRING column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: other_table - bracketed: start_bracket: ( column_reference: naked_identifier: other_field end_bracket: ) - keyword: NOT - keyword: ENFORCED - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_like_copy_clone.sql000066400000000000000000000007021503426445100301650ustar00rootroot00000000000000CREATE TABLE mydataset.newtable LIKE mydataset.sourcetable ; CREATE TABLE mydataset.newtable LIKE mydataset.sourcetable AS SELECT * FROM mydataset.myothertable ; CREATE TABLE mydataset.newtable COPY mydataset.sourcetable ; CREATE TABLE mydataset.newtable_clone CLONE mydataset.sourcetable ; CREATE TABLE IF NOT EXISTS mydataset.newtable_clone CLONE mydataset.sourcetable FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP() OPTIONS(description="example") ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_like_copy_clone.yml000066400000000000000000000064501503426445100301750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 19ecf25d54274fb487ca055881a05fea3fa16e59c7f5d66296535a409ffcbf5b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: LIKE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: LIKE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: myothertable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: COPY - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: newtable_clone - keyword: CLONE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable_clone - keyword: CLONE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - for_system_time_as_of_segment: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"example"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_options.sql000066400000000000000000000005351503426445100265260ustar00rootroot00000000000000CREATE TABLE table_1 OPTIONS( expiration_timestamp = TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 24 HOUR) ); CREATE TABLE table_1 OPTIONS( expiration_timestamp = TIMESTAMP("2023-01-01 00:00:00 UTC") ); CREATE TABLE table_1 OPTIONS( description = "Test mixed options", expiration_timestamp = TIMESTAMP("2023-01-01 00:00:00 UTC") ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_options.yml000066400000000000000000000061251503426445100265310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ebb8de208720a0f853759c17cec759342982625db0071480577efca9413d8ed file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: TIMESTAMP_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '24' date_part: HOUR - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: TIMESTAMP function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2023-01-01 00:00:00 UTC"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"Test mixed options"' - comma: ',' - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2023-01-01 00:00:00 UTC"' end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_options_as.sql000066400000000000000000000001311503426445100272010ustar00rootroot00000000000000CREATE OR REPLACE TABLE foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_options_as.yml000066400000000000000000000025231503426445100272120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1686dd9b1ec59d5a8c95a44e8ea5914dc79e291fc688ce56fa9cf8814a1108d6 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_partition_by_as.sql000066400000000000000000000001511503426445100302130ustar00rootroot00000000000000CREATE TABLE newtable ( x INT64, y INT64 ) PARTITION BY y AS SELECT x, y FROM table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_partition_by_as.yml000066400000000000000000000027761503426445100302340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9efd71e7018e932691fde021d67c9ad35319421490736d07e46c78483b4c28b8 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_partition_by_cluster_by_as.sql000066400000000000000000000001711503426445100324500ustar00rootroot00000000000000CREATE TABLE newtable ( x INT64, y INT64 ) PARTITION BY y CLUSTER BY x, y AS SELECT x, y FROM table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_partition_by_cluster_by_as.yml000066400000000000000000000033701503426445100324560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c4cacf9cdeb2b07a2f67ebf103013783bbffae4977f85e1043aaa02042153e6e file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: y - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_trailing_comma.sql000066400000000000000000000003601503426445100300140ustar00rootroot00000000000000-- Basic example of trailing comma CREATE TABLE t_table ( col1 STRING, ); -- Complex example with other variants CREATE TABLE t_table ( col1 STRING, x INT64 NOT NULL OPTIONS(description="An INTEGER field that is NOT NULL"), ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_table_trailing_comma.yml000066400000000000000000000032331503426445100300200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 018364a69b948f44088a1b9f4dd3793605a728e0f7404b540764dd923a4f9917 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table - bracketed: start_bracket: ( column_definition: naked_identifier: col1 data_type: data_type_identifier: STRING comma: ',' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 column_constraint_segment: - keyword: NOT - keyword: 'NULL' options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field that is NOT NULL"' end_bracket: ) - comma: ',' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_temp_function_with_select.sql000066400000000000000000000002551503426445100311270ustar00rootroot00000000000000CREATE TEMP FUNCTION URLDECODE ( url STRING ) RETURNS STRING AS (( SELECT 1 FROM UNNEST(REGEXP_EXTRACT_ALL(url, r"%[0-9a-fA-F]{2}|[^%]+")) AS y WITH OFFSET AS i )) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_temp_function_with_select.yml000066400000000000000000000057021503426445100311330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 19fea6b18d1b3a099ccc486ef23154e06d9235c137d14ddbec970df876046c88 file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: URLDECODE - function_parameter_list: bracketed: start_bracket: ( parameter: url data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: STRING - function_definition: keyword: AS bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: REGEXP_EXTRACT_ALL function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: url - comma: ',' - expression: quoted_literal: r"%[0-9a-fA-F]{2}|[^%]+" - end_bracket: ) end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: y - keyword: WITH - keyword: OFFSET - alias_expression: alias_operator: keyword: AS naked_identifier: i end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_view_options_as.sql000066400000000000000000000006411503426445100270720ustar00rootroot00000000000000CREATE OR REPLACE VIEW foo AS (SELECT * from bar); CREATE OR REPLACE VIEW foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar); CREATE OR REPLACE VIEW IF NOT EXISTS foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar); CREATE OR REPLACE VIEW foo ( x OPTIONS (description = 'x'), y OPTIONS (description = 'y') ) OPTIONS (description = 'view_column_name_list') AS (SELECT x, y from bar); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/create_view_options_as.yml000066400000000000000000000116651503426445100271040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 190e6652d19c04d7533421dcc0f335f59e010157713bb565844366c5d562bc33 file: - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: foo - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: x options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'x'" end_bracket: ) - comma: ',' - column_definition: naked_identifier: y options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'y'" end_bracket: ) - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'view_column_name_list'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/dateparts.sql000066400000000000000000000013231503426445100243240ustar00rootroot00000000000000SELECT col1, EXTRACT(MICROSECOND FROM col1), EXTRACT(MILLISECOND FROM col1), EXTRACT(SECOND FROM col1), EXTRACT(MINUTE FROM col1), EXTRACT(HOUR FROM col1), EXTRACT(DAY FROM col1), EXTRACT(DAYOFWEEK FROM col1), EXTRACT(DAYOFYEAR FROM col1), EXTRACT(WEEK FROM 
col1), EXTRACT(ISOWEEK FROM col1), EXTRACT(MONTH FROM col1), EXTRACT(QUARTER FROM col1), EXTRACT(YEAR FROM col1), EXTRACT(ISOYEAR FROM col1), EXTRACT(WEEK(SUNDAY) FROM col1), EXTRACT(WEEK FROM col1) AS week, EXTRACT(DATE FROM col1) AS week, EXTRACT(DATE FROM TIMESTAMP_SECONDS(1651135778)), LAST_DAY(col1, MONTH), LAST_DAY(col1, WEEK), LAST_DAY(col1, WEEK(SUNDAY)), FROM tbl1; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/dateparts.yml000066400000000000000000000251121503426445100243300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ced353a83fea5af92f6ac7677f723672e5a6bcef2b9ffacb4d236e0c31f05b69 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MICROSECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MILLISECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: SECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MINUTE keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAYOFWEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAYOFYEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: WEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ISOWEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: 
function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MONTH keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: QUARTER keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: YEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ISOYEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part_week: keyword: WEEK bracketed: start_bracket: ( keyword: SUNDAY end_bracket: ) keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: WEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DATE keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DATE keyword: FROM expression: function: function_name: function_name_identifier: TIMESTAMP_SECONDS function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1651135778' end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part: MONTH end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part: WEEK end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part_week: keyword: WEEK bracketed: start_bracket: ( keyword: SUNDAY end_bracket: ) end_bracket: ) - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/declare_variable.sql000066400000000000000000000015501503426445100256030ustar00rootroot00000000000000declare var1 int64; declare var2, var3 string; declare var4 default 'value'; declare var5 int64 default 1 + 2; declare var6 string(10); declare var7 numeric(5, 2); declare arr1 array<string>; declare arr2 default ['one', 'two'];
declare arr3 default []; declare arr4 array default ['one', 'two']; declare arr5 array; declare str1 struct; declare str2 struct default struct('one', 'two'); declare str3 default struct('one', 'two'); declare str4 struct default ('one', 'two'); declare str5 struct; -- Defining variables in quoted names declare `var1` string; declare `var1` string default 'value'; declare `var1`, `var1` string; -- Defining variables mixing quoted and unquoted names declare var1, `var2` string; declare var1, `var2` string default 'value'; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/declare_variable.yml000066400000000000000000000175431503426445100256160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d872a9abf3a1a7aac95f9150f2d37c9fcae749718e2ed61f6f8c11228db78caa file: - statement: declare_segment: keyword: declare naked_identifier: var1 data_type: data_type_identifier: int64 - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var2 - comma: ',' - naked_identifier: var3 - data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var4 - keyword: default - quoted_literal: "'value'" - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var5 - data_type: data_type_identifier: int64 - keyword: default - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var6 data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var7 data_type: data_type_identifier: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: arr1 data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr2 - keyword: default - array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr3 - keyword: default - array_literal: start_square_bracket: '[' end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr4 - data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' - keyword: default - array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: arr5 data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) 
end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: str1 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: str2 - data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - keyword: default - expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: str3 - keyword: default - expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: str4 - data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - keyword: default - tuple: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: str5 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - parameter: f2 - data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: keyword: declare quoted_identifier: '`var1`' data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - quoted_identifier: '`var1`' - data_type: data_type_identifier: string - keyword: default - quoted_literal: "'value'" - statement_terminator: ; - statement: declare_segment: - keyword: declare - quoted_identifier: '`var1`' - comma: ',' - quoted_identifier: '`var1`' - data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var1 comma: ',' quoted_identifier: '`var2`' data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var1 - comma: ',' - quoted_identifier: '`var2`' - data_type: data_type_identifier: string - keyword: default - quoted_literal: "'value'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/declare_variable_with_default.sql000066400000000000000000000001621503426445100303400ustar00rootroot00000000000000declare var5 date default CURRENT_DATE(); declare var4 int64 default 1; declare var3 string default (SELECT "x"); 
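-- For reference: a variable declared with a DEFAULT expression, as above, can
-- be read and reassigned later in the same BigQuery script. A minimal sketch
-- (the table, column and variable names are illustrative, not part of this
-- fixture's parse test):
--
--   declare run_date date default current_date();
--   set run_date = date_sub(run_date, interval 1 day);
--   select * from my_dataset.my_table where partition_date = run_date;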
sqlfluff-3.4.2/test/fixtures/dialects/bigquery/declare_variable_with_default.yml000066400000000000000000000025601503426445100303460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e4b4a480102572044feaa359999d2ccf6d39d832b425da22d67f695076c91ed8 file: - statement: declare_segment: - keyword: declare - naked_identifier: var5 - data_type: data_type_identifier: date - keyword: default - function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var4 - data_type: data_type_identifier: int64 - keyword: default - numeric_literal: '1' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var3 - data_type: data_type_identifier: string - keyword: default - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"x"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/delete.sql000066400000000000000000000005141503426445100236000ustar00rootroot00000000000000DELETE dataset.Inventory WHERE quantity = 0; DELETE dataset.Inventory i WHERE i.product NOT IN (SELECT product from dataset.NewArrivals); DELETE dataset.Inventory WHERE NOT EXISTS (SELECT * from dataset.NewArrivals WHERE Inventory.product = NewArrivals.product); DELETE FROM `project_id.dataset_id.target_name` WHERE TRUE ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/delete.yml000066400000000000000000000071221503426445100236040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d0d792ccee74c7d010d7bc94eb4b3b7ac9f5f7c052ce3eb4de5401307396aa62 file: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . - naked_identifier: Inventory where_clause: keyword: WHERE expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . - naked_identifier: Inventory alias_expression: naked_identifier: i where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: i - dot: . - naked_identifier: product - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: product from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: Inventory where_clause: keyword: WHERE expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: Inventory - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: NewArrivals - dot: . - naked_identifier: product end_bracket: ) - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: quoted_identifier: '`project_id.dataset_id.target_name`' - where_clause: keyword: WHERE expression: boolean_literal: 'TRUE' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_function.sql000066400000000000000000000001611503426445100252050ustar00rootroot00000000000000DROP FUNCTION myproject.mydataset.addfunc; DROP TABLE FUNCTION IF EXISTS example_dataset.example_table_function; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_function.yml000066400000000000000000000017231503426445100252140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 44f1419fcbbb552e97a30e133cb19c55f83cdf84900d961fc83803e0153c856e file: - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: - naked_identifier: myproject - dot: . - naked_identifier: mydataset - dot: . - function_name_identifier: addfunc - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: TABLE - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: naked_identifier: example_dataset dot: . function_name_identifier: example_table_function - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_materialized_view.sql000066400000000000000000000001331503426445100270630ustar00rootroot00000000000000DROP MATERIALIZED VIEW mydataset.my_mv; DROP MATERIALIZED VIEW IF EXISTS mydataset.my_mv; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_materialized_view.yml000066400000000000000000000016501503426445100270720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 023f2a8fc6cf7288d13cc3d91ec757da95570d47cfc90e3dca3f5abf0d2fff63 file: - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: my_mv - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_procedure.sql000066400000000000000000000001671503426445100253560ustar00rootroot00000000000000DROP PROCEDURE example_dataset.example_procedure; DROP PROCEDURE IF EXISTS `other-project`.sample_dataset.myprocedure; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_procedure.yml000066400000000000000000000017231503426445100253570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 87164a4ac06e8f40de7e78798efdec097cdcfb7b3a1a0a8000be4197e3b92d2f file: - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - procedure_name: naked_identifier: example_dataset dot: . procedure_name_identifier: example_procedure - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - procedure_name: - quoted_identifier: '`other-project`' - dot: . - naked_identifier: sample_dataset - dot: . - procedure_name_identifier: myprocedure - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_schema.sql000066400000000000000000000002601503426445100246200ustar00rootroot00000000000000DROP SCHEMA example_dataset; DROP SCHEMA IF EXISTS example_project.example_dataset; UNDROP SCHEMA example_dataset; UNDROP SCHEMA IF NOT EXISTS example_project.example_dataset; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_schema.yml000066400000000000000000000024371503426445100246320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 101426fe84e836494b6a5e883cae5dbadf87f473aa8eaabbee00587b9e76ca63 file: - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: example_dataset - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: - naked_identifier: example_project - dot: . - naked_identifier: example_dataset - statement_terminator: ; - statement: undrop_schema_statement: - keyword: UNDROP - keyword: SCHEMA - schema_reference: naked_identifier: example_dataset - statement_terminator: ; - statement: undrop_schema_statement: - keyword: UNDROP - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: - naked_identifier: example_project - dot: . - naked_identifier: example_dataset - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_table.sql000066400000000000000000000002541503426445100244520ustar00rootroot00000000000000DROP TABLE IF EXISTS example_dataset.example_table; DROP SNAPSHOT TABLE example_dataset.example_snapshot_table; DROP EXTERNAL TABLE example_dataset.example_external_table; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_table.yml000066400000000000000000000022321503426445100244520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dba098cfb0b440b8a33220083580654145b276fcfd7c0c8abaa2204141383e3c file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: SNAPSHOT - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_snapshot_table - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_external_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_table_function.sql000066400000000000000000000001541503426445100263560ustar00rootroot00000000000000DROP TABLE FUNCTION mydataset.my_table_function; DROP TABLE FUNCTION IF EXISTS mydataset.my_table_function; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_table_function.yml000066400000000000000000000016641503426445100263670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 095ace4cc6272142d35ae22f75393fdae7da639d48ba94e9b077c3fddcab6c3b file: - statement: drop_function_statement: - keyword: DROP - keyword: TABLE - keyword: FUNCTION - function_name: naked_identifier: mydataset dot: . function_name_identifier: my_table_function - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: TABLE - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: naked_identifier: mydataset dot: . function_name_identifier: my_table_function - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_various_resources.sql000066400000000000000000000007101503426445100271420ustar00rootroot00000000000000DROP ROW ACCESS POLICY IF EXISTS example_policy_name ON example_dataset.example_table; DROP CAPACITY `example-project.region-us.example_commitment`; DROP RESERVATION IF EXISTS `example-project.region-us.example_reservation`; DROP ASSIGNMENT `example-project.region-us.example_reservation.example_assignment`; DROP SEARCH INDEX IF EXISTS example_index ON example_dataset.example_table; DROP VECTOR INDEX example_index ON example_dataset.example_table; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/drop_various_resources.yml000066400000000000000000000042261503426445100271520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
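# The SQL fixture above exercises BigQuery's resource-level DROP statements.
# Each one removes an object created by a matching DDL statement; a minimal
# paired sketch for the search-index case (object names are illustrative, not
# part of this fixture):
#
#   CREATE SEARCH INDEX example_index
#   ON example_dataset.example_table (ALL COLUMNS);
#   DROP SEARCH INDEX IF EXISTS example_index ON example_dataset.example_table;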
_hash: 5f317b1b68df983826c410d4622980da88329aac869915c6c74c15064d3890a9 file: - statement: drop_row_access_policy_statement: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: example_policy_name - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - statement_terminator: ; - statement: drop_capacity_statement: - keyword: DROP - keyword: CAPACITY - table_reference: quoted_identifier: '`example-project.region-us.example_commitment`' - statement_terminator: ; - statement: drop_reservation_statement: - keyword: DROP - keyword: RESERVATION - keyword: IF - keyword: EXISTS - table_reference: quoted_identifier: '`example-project.region-us.example_reservation`' - statement_terminator: ; - statement: drop_assignment_statement: - keyword: DROP - keyword: ASSIGNMENT - table_reference: quoted_identifier: '`example-project.region-us.example_reservation.example_assignment`' - statement_terminator: ; - statement: drop_search_index_statement: - keyword: DROP - keyword: SEARCH - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: example_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - statement_terminator: ; - statement: drop_vector_index_statement: - keyword: DROP - keyword: VECTOR - keyword: INDEX - index_reference: naked_identifier: example_index - keyword: 'ON' - table_reference: - naked_identifier: example_dataset - dot: . - naked_identifier: example_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/execute_immediate.sql000066400000000000000000000017471503426445100260270ustar00rootroot00000000000000declare var_1 string; declare var_2 string; set var_1 = ''' insert into project.data_set.table select "%s" as var_1, count(*) as column_1 from ( select column_3 from %s.column_2 group by var_3 having count(var_3) > 1 ) '''; create or replace table project.data_set.table (var_1 string, column_1 int64) options ( expiration_timestamp = timestamp_add(current_timestamp(), interval 3 hour) ); for m in (select var_1 from project.data_set.table) do set var_1 = m[0]; execute immediate 'select 1;'; execute immediate var_1; execute immediate format(var_2, var_1, var_1); execute immediate case when x then format(var_1, var_2) else format(var_1, m) end; execute immediate (select format(var_2, var_1, var_1)); execute immediate 'SELECT 2 + 3' into y; execute immediate 'SELECT 2 + 3, 6' into y, z; execute immediate 'SELECT ? * (? + 2)' into y using 1, 3; execute immediate 'SELECT @a * (@b + 2)' into y using 1 as a, x as b; end for; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/execute_immediate.yml000066400000000000000000000224141503426445100260230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
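# The SQL fixture above covers both parameter styles accepted by EXECUTE
# IMMEDIATE: positional `?` placeholders bound in order via USING, and named
# `@name` placeholders bound with AS. A minimal sketch (values illustrative):
#
#   DECLARE y INT64;
#   EXECUTE IMMEDIATE 'SELECT @a * (@b + 2)' INTO y USING 5 AS a, 1 AS b;
#   -- y is now 15; the positional equivalent is
#   -- EXECUTE IMMEDIATE 'SELECT ? * (? + 2)' INTO y USING 5, 1;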
_hash: 963e6eafc7150d4699481e26b4dfd579004c039b584b4493b0da81b674e9c5f6 file: - statement: declare_segment: keyword: declare naked_identifier: var_1 data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var_2 data_type: data_type_identifier: string - statement_terminator: ; - statement: set_segment: keyword: set naked_identifier: var_1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'''\ninsert into project.data_set.table\nselect\n \"%s\" as\ \ var_1,\n count(*) as column_1\nfrom (\n select column_3\n from %s.column_2\n\ \ group by var_3 having count(var_3) > 1\n)\n'''" - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: - naked_identifier: project - dot: . - naked_identifier: data_set - dot: . - naked_identifier: table - bracketed: - start_bracket: ( - column_definition: naked_identifier: var_1 data_type: data_type_identifier: string - comma: ',' - column_definition: naked_identifier: column_1 data_type: data_type_identifier: int64 - end_bracket: ) - options_segment: keyword: options bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: timestamp_add function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: interval expression: numeric_literal: '3' date_part: hour - end_bracket: ) end_bracket: ) - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: for - naked_identifier: m - keyword: in - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: var_1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: project - dot: . - naked_identifier: data_set - dot: . 
- naked_identifier: table end_bracket: ) - keyword: do - for_in_statements: - statement: set_segment: keyword: set naked_identifier: var_1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: m array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - quoted_literal: "'select 1;'" - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - naked_identifier: var_1 - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - function: function_name: function_name_identifier: format function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: var_2 - comma: ',' - expression: column_reference: naked_identifier: var_1 - comma: ',' - expression: column_reference: naked_identifier: var_1 - end_bracket: ) - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: x - keyword: then - expression: function: function_name: function_name_identifier: format function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: var_1 - comma: ',' - expression: column_reference: naked_identifier: var_2 - end_bracket: ) - else_clause: keyword: else expression: function: function_name: function_name_identifier: format function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: var_1 - comma: ',' - expression: column_reference: naked_identifier: m - end_bracket: ) - keyword: end - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: format function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: var_2 - comma: ',' - expression: column_reference: naked_identifier: var_1 - comma: ',' - expression: column_reference: naked_identifier: var_1 - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - quoted_literal: "'SELECT 2 + 3'" - keyword: into - naked_identifier: y - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - quoted_literal: "'SELECT 2 + 3, 6'" - keyword: into - naked_identifier: y - comma: ',' - naked_identifier: z - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - quoted_literal: "'SELECT ? * (? 
+ 2)'" - keyword: into - naked_identifier: y - keyword: using - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - statement_terminator: ; - statement: execute_immediate: - keyword: execute - keyword: immediate - quoted_literal: "'SELECT @a * (@b + 2)'" - keyword: into - naked_identifier: y - keyword: using - numeric_literal: '1' - keyword: as - naked_identifier: a - comma: ',' - column_reference: naked_identifier: x - keyword: as - naked_identifier: b - statement_terminator: ; - keyword: end - keyword: for - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/export_statement.sql000066400000000000000000000032011503426445100257370ustar00rootroot00000000000000EXPORT DATA WITH CONNECTION PROJECT_ID.LOCATION.CONNECTION_ID OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1; EXPORT DATA WITH CONNECTION `PROJECT_ID.LOCATION.CONNECTION_ID` OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri="gs://bucket/folder/*.csv", format="CSV", overwrite=true, header=true, field_delimiter=';') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*', format='AVRO', compression='SNAPPY') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*', format='PARQUET', overwrite=true) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';') AS WITH cte AS ( SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10 ) SELECT * FROM cte; EXPORT DATA OPTIONS (uri=CONCAT("gs://bucket/","/file_*.csv"), format='CSV', overwrite=true, header=true, field_delimiter=',') as SELECT col1,col2 FROM thetable; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/export_statement.yml000066400000000000000000000351351503426445100257540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 80bb12f52440a5a21237531e1a15693f0f0684ce77bfb4473b045f6243b208a6 file: - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: WITH - keyword: CONNECTION - object_reference: - naked_identifier: PROJECT_ID - dot: . - naked_identifier: LOCATION - dot: . 
- naked_identifier: CONNECTION_ID - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: WITH - keyword: CONNECTION - object_reference: quoted_identifier: '`PROJECT_ID.LOCATION.CONNECTION_ID`' - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"gs://bucket/folder/*.csv"' - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"CSV"' - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - export_option: compression - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SNAPPY'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: CONCAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"gs://bucket/"' - comma: ',' - expression: quoted_literal: '"/file_*.csv"' - end_bracket: ) - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "','" - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: thetable - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/for_in.sql000066400000000000000000000010751503426445100236150ustar00rootroot00000000000000 -- For statement FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count; END FOR; -- Multiple statements FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count; SELECT record.word, record.word_count; SELECT record.word, record.word_count; END FOR; -- With Assert FOR user IN ( SELECT group1, count(*) as count FROM `database.user` ) DO ASSERT (COUNT > 0); END FOR; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/for_in.yml000066400000000000000000000146711503426445100236210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3429c16ac7108bbcbda31c9c6edbe698708f86807a6c6255172217815250fbf file: - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: record - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: word - comma: ',' - select_clause_element: column_reference: naked_identifier: word_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bigquery - dash: '-' - naked_identifier: public - dash: '-' - naked_identifier: data - dot: . - naked_identifier: samples - dot: .
- naked_identifier: shakespeare limit_clause: keyword: LIMIT numeric_literal: '5' end_bracket: ) - keyword: DO - for_in_statements: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: record - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: word - comma: ',' - select_clause_element: column_reference: naked_identifier: word_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bigquery - dash: '-' - naked_identifier: public - dash: '-' - naked_identifier: data - dot: . - naked_identifier: samples - dot: . - naked_identifier: shakespeare limit_clause: keyword: LIMIT numeric_literal: '5' end_bracket: ) - keyword: DO - for_in_statements: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . 
- naked_identifier: word_count - statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: user - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: group1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`database.user`' end_bracket: ) - keyword: DO - for_in_statements: statement: assert_statement: keyword: ASSERT expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: COUNT comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/handle_exception.sql000066400000000000000000000016671503426445100256610ustar00rootroot00000000000000-- Basic block BEGIN SELECT * FROM one_table; END; -- Block showcasing use of in-scope variables DECLARE x INT64 DEFAULT 10; BEGIN DECLARE y INT64; SET y = x; SELECT y; END; SELECT x; -- Basic exception block BEGIN SELECT 1/0; EXCEPTION WHEN ERROR THEN RAISE USING MESSAGE = "An error happened"; END; -- Exception block utilising @error BEGIN SELECT 100/0; EXCEPTION WHEN ERROR THEN RAISE USING MESSAGE = FORMAT("Something went wrong: %s", @@error.message); END; -- More complicated block with multiple statements BEGIN EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS ( SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10 ); SELECT 1/0; EXCEPTION WHEN ERROR THEN DELETE FROM mydataset.table1 WHERE field1 = '1'; RAISE USING MESSAGE = FORMAT("Something went wrong: %s", @@error.message); RETURN; END; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/handle_exception.yml000066400000000000000000000200201503426445100256430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
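# The SQL fixture above demonstrates BigQuery's scripting error handler: when a
# statement in the BEGIN section raises, control transfers to the EXCEPTION
# WHEN ERROR THEN section, where the @@error system variable describes the
# failure. A minimal sketch that inspects the error instead of re-raising
# (illustrative only, not part of this fixture):
#
#   BEGIN
#     SELECT 1/0;
#   EXCEPTION WHEN ERROR THEN
#     SELECT @@error.message, @@error.statement_text;
#   END;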
_hash: 31ef385307d5070f629949797cf0fbebb6f29f5051032ba265a517155f228eab file: - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: one_table - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - naked_identifier: x - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '10' - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: y data_type: data_type_identifier: INT64 - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: y comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: y - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: '"An error happened"' - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '100' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: FORMAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"Something went wrong: %s"' - comma: ',' - expression: system_variable: double_at_sign_literal: '@@error.message' - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS 
- bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: field1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: FORMAT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"Something went wrong: %s"' - comma: ',' - expression: system_variable: double_at_sign_literal: '@@error.message' - end_bracket: ) - statement_terminator: ; - statement: return_statement: keyword: RETURN - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/insert.sql000066400000000000000000000003571503426445100236470ustar00rootroot00000000000000INSERT INTO books (title, author) VALUES ('The Great Gatsby', 'F. Scott Fitzgerald'); INSERT books (title, author) VALUES ('The Great Gatsby', 'F. Scott Fitzgerald'); INSERT INTO `project.dataset.table` ( SELECT * FROM table1 ); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/insert.yml000066400000000000000000000043211503426445100236440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8928e9050413e40a066027dd73870c53e91da1044ffc5c6651c53304f68047ad file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: books - bracketed: - start_bracket: ( - column_reference: naked_identifier: title - comma: ',' - column_reference: naked_identifier: author - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'The Great Gatsby'" - comma: ',' - quoted_literal: "'F. Scott Fitzgerald'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: keyword: INSERT table_reference: naked_identifier: books bracketed: - start_bracket: ( - column_reference: naked_identifier: title - comma: ',' - column_reference: naked_identifier: author - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'The Great Gatsby'" - comma: ',' - quoted_literal: "'F. 
Scott Fitzgerald'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`project.dataset.table`' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/interval_function.sql000066400000000000000000000004741503426445100260740ustar00rootroot00000000000000SELECT TIMESTAMP_TRUNC(TIMESTAMP_ADD(session_start.eventTimestamp, INTERVAL cast(TIMESTAMP_DIFF(session_end.eventTimestamp, session_start.eventTimestamp, SECOND)/2 AS int64) second), HOUR) AS avgAtHour, TIME_ADD(time1, INTERVAL 10 MINUTE) AS after, DATE_SUB(time2, INTERVAL 5 YEAR) AS before FROM dummy; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/interval_function.yml000066400000000000000000000123331503426445100260730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f312e2aa7c3e6fafb9174c9130603940836ac4de2b5e8dbb910c4121e8e96d4c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: TIMESTAMP_TRUNC function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TIMESTAMP_ADD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: session_start - dot: . - naked_identifier: eventTimestamp - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TIMESTAMP_DIFF function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: session_end - dot: . - naked_identifier: eventTimestamp - comma: ',' - expression: column_reference: - naked_identifier: session_start - dot: . 
- naked_identifier: eventTimestamp - comma: ',' - date_part: SECOND - end_bracket: ) binary_operator: / numeric_literal: '2' keyword: AS data_type: data_type_identifier: int64 end_bracket: ) date_part: second - end_bracket: ) comma: ',' date_part: HOUR end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: avgAtHour - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIME_ADD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time1 - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '10' date_part: MINUTE - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: after - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_SUB function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time2 - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '5' date_part: YEAR - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: before from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dummy statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/is_not_distinct.sql000066400000000000000000000020131503426445100255260ustar00rootroot00000000000000-- 1. Column distinctness in SELECT expression -- TODO allow this to work without brackets SELECT (a_column IS DISTINCT FROM b_column) FROM t_table; SELECT (b_column IS NOT DISTINCT FROM c_column) FROM t_table; -- 2. Column distinctness in WHERE expression SELECT a_column FROM t_table WHERE a_column IS DISTINCT FROM b_column; SELECT a_column FROM t_table WHERE a_column IS NOT DISTINCT FROM b_column; -- 3. Column distinctness in JOIN expression SELECT t_table_1.a_column FROM t_table_1 INNER JOIN t_table_2 ON t_table_1.a_column IS DISTINCT FROM t_table_2.a_column; SELECT t_table_1.a_column FROM t_table_1 INNER JOIN t_table_2 ON t_table_1.a_column IS NOT DISTINCT FROM t_table_2.a_column; -- 4. Column distinctness in MERGE expression MERGE INTO t_table_1 USING t_table_2 ON t_table_1.a_column IS DISTINCT FROM t_table_2.a_column WHEN NOT MATCHED THEN INSERT (a) VALUES (b); MERGE INTO t_table_1 USING t_table_2 ON t_table_1.a_column IS NOT DISTINCT FROM t_table_2.a_column WHEN NOT MATCHED THEN INSERT (a) VALUES (b); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/is_not_distinct.yml000066400000000000000000000177101503426445100255420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
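# Editor's note (an illustrative sketch, not generated fixture content): the
# paired is_not_distinct.sql above exercises BigQuery's IS [NOT] DISTINCT FROM
# in SELECT, WHERE, JOIN and MERGE positions. Reusing the fixture's own
# t_table/a_column/b_column names, the predicate is NULL-safe equality,
# equivalent to the COALESCE spelling below (brackets kept around the
# SELECT-list usage, matching the fixture's TODO about bracket-free parsing):
SELECT
    (a_column IS NOT DISTINCT FROM b_column) AS null_safe_eq,
    -- Equivalent long form: COALESCE guards the NULL-vs-value case, which
    -- would otherwise yield NULL rather than FALSE under three-valued logic.
    (COALESCE(a_column = b_column, FALSE)
        OR (a_column IS NULL AND b_column IS NULL)) AS spelled_out
FROM t_table;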
_hash: 0d9ed9cabb7b1dedcb113c5b3e1e927f2224ba7002a18e9108d5c67388db5f05 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: b_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: c_column end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . 
- naked_identifier: a_column - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t_table_1 - keyword: USING - table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t_table_1 - keyword: USING - table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/literals_with_data_type_and_quoted.sql000066400000000000000000000027371503426445100314560ustar00rootroot00000000000000-- https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical?hl=en SELECT NUMERIC '0'; SELECT NUMERIC '0' = NUMERIC '0'; SELECT NUMERIC "0"; SELECT NUMERIC "0" = NUMERIC "0"; SELECT BIGNUMERIC '0'; SELECT BIGNUMERIC '0' = BIGNUMERIC '0'; SELECT BIGNUMERIC "0"; SELECT BIGNUMERIC "0" = BIGNUMERIC "0"; SELECT DATE '2014-09-27'; SELECT DATE '2014-09-27' = DATE '2014-09-27'; SELECT DATE "2014-09-27"; SELECT DATE "2014-09-27" = DATE "2014-09-27"; SELECT TIME '12:30:00.45'; SELECT TIME '12:30:00.45' = TIME '12:30:00.45'; SELECT TIME "12:30:00.45"; SELECT TIME "12:30:00.45" = TIME "12:30:00.45"; SELECT DATETIME '2014-09-27 12:30:00.45'; SELECT DATETIME '2014-09-27 12:30:00.45' = DATETIME '2014-09-27 12:30:00.45'; SELECT DATETIME "2014-09-27 12:30:00.45"; SELECT DATETIME "2014-09-27 12:30:00.45" = DATETIME "2014-09-27 12:30:00.45"; SELECT TIMESTAMP '2014-09-27 12:30:00.45-08'; SELECT TIMESTAMP '2014-09-27 12:30:00.45-08' = TIMESTAMP '2014-09-27 12:30:00.45-08'; SELECT TIMESTAMP "2014-09-27 12:30:00.45-08"; SELECT TIMESTAMP "2014-09-27 12:30:00.45-08" = TIMESTAMP "2014-09-27 12:30:00.45-08"; SELECT INTERVAL '10:20:30.52' HOUR TO SECOND; SELECT INTERVAL '10:20:30.52' HOUR TO SECOND = INTERVAL '10:20:30.52' HOUR TO SECOND; SELECT INTERVAL "10:20:30.52" HOUR TO SECOND; SELECT INTERVAL "10:20:30.52" HOUR TO SECOND = INTERVAL "10:20:30.52" HOUR TO SECOND; SELECT JSON '{}'; SELECT JSON '{}' IS NOT NULL; SELECT JSON "{}"; SELECT JSON "{}" IS NOT NULL; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/literals_with_data_type_and_quoted.yml000066400000000000000000000273101503426445100314520ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6c96894e3d1f73464d5f3d36063824bc2c56e58247c845ada848fc389791ba30 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: NUMERIC quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: NUMERIC - quoted_literal: "'0'" - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: NUMERIC - quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: NUMERIC quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: NUMERIC - quoted_literal: '"0"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: NUMERIC - quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: BIGNUMERIC quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: "'0'" - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: BIGNUMERIC quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: '"0"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATE date_constructor_literal: "'2014-09-27'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: DATE - date_constructor_literal: "'2014-09-27'" - comparison_operator: raw_comparison_operator: '=' - keyword: DATE - date_constructor_literal: "'2014-09-27'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: DATE quoted_literal: '"2014-09-27"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: DATE - quoted_literal: '"2014-09-27"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: DATE - quoted_literal: '"2014-09-27"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIME date_constructor_literal: "'12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: 
keyword: SELECT select_clause_element: expression: - keyword: TIME - date_constructor_literal: "'12:30:00.45'" - comparison_operator: raw_comparison_operator: '=' - keyword: TIME - date_constructor_literal: "'12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: TIME quoted_literal: '"12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: TIME - quoted_literal: '"12:30:00.45"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: TIME - quoted_literal: '"12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATETIME date_constructor_literal: "'2014-09-27 12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: DATETIME - date_constructor_literal: "'2014-09-27 12:30:00.45'" - comparison_operator: raw_comparison_operator: '=' - keyword: DATETIME - date_constructor_literal: "'2014-09-27 12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: DATETIME quoted_literal: '"2014-09-27 12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: DATETIME - quoted_literal: '"2014-09-27 12:30:00.45"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: DATETIME - quoted_literal: '"2014-09-27 12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: TIMESTAMP - date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - comparison_operator: raw_comparison_operator: '=' - keyword: TIMESTAMP - date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: TIMESTAMP quoted_literal: '"2014-09-27 12:30:00.45-08"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: TIMESTAMP - quoted_literal: '"2014-09-27 12:30:00.45-08"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: TIMESTAMP - quoted_literal: '"2014-09-27 12:30:00.45-08"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" - date_part: HOUR - keyword: TO - date_part: SECOND - comparison_operator: raw_comparison_operator: '=' - interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" 
- date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - comparison_operator: raw_comparison_operator: '=' - interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: JSON quoted_literal: "'{}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: JSON - quoted_literal: "'{}'" - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: JSON quoted_literal: '"{}"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: JSON - quoted_literal: '"{}"' - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/load_data_statement.sql000066400000000000000000000053651503426445100263430ustar00rootroot00000000000000LOAD DATA INTO mydataset.table1 FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA INTO `myproject.mydataset.table1` FROM FILES( format='CSV', uris = ['gs://bucket/path/file1.csv', 'gs://bucket/path/file2.csv'] ); LOAD DATA INTO mydataset.table1(x INT64, y STRING) FROM FILES( skip_leading_rows=1, format='CSV', uris = ['gs://bucket/path/file.csv'] ); LOAD DATA INTO mydataset.table1 OPTIONS( description="my table", expiration_timestamp="2025-01-01 00:00:00 UTC" ) FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA OVERWRITE mydataset.table1 FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA INTO TEMP TABLE mydataset.table1 FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA INTO TEMP TABLE my_tmp_table FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA INTO mydataset.table1 PARTITION BY transaction_date CLUSTER BY customer_id OPTIONS( partition_expiration_days=3 ) FROM FILES( format='AVRO', uris = ['gs://bucket/path/file.avro'] ); LOAD DATA INTO mydataset.table1 PARTITIONS(_PARTITIONTIME = TIMESTAMP '2016-01-01') PARTITION BY _PARTITIONTIME FROM FILES( format = 'AVRO', uris = ['gs://bucket/path/file.avro'] ) LOAD DATA INTO mydataset.table1 FROM FILES( format='AVRO', uris = ['gs://bucket/path/*'], hive_partition_uri_prefix='gs://bucket/path' ) WITH PARTITION COLUMNS( field_1 STRING, -- column order must match the external path field_2 INT64 ) LOAD DATA INTO mydataset.table1 FROM FILES( format='AVRO', uris = ['gs://bucket/path/*'], hive_partition_uri_prefix='gs://bucket/path' ) WITH PARTITION COLUMNS -- This query returns an error in BigQuery. 
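-- (Editor's sketch: the corrected form of the failing statement below would
-- name the partition columns explicitly, as the earlier valid example in
-- this file does:
--     WITH PARTITION COLUMNS (field_1 STRING, field_2 INT64)
-- A bare WITH PARTITION COLUMNS is only usable when no column list is given,
-- since BigQuery can then infer the partitioning schema itself.)
-- The statement flagged by the comment above: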
LOAD DATA INTO mydataset.table1 ( x INT64, -- column_list is given but the partition column list is missing y STRING ) FROM FILES( format='AVRO', uris = ['gs://bucket/path/*'], hive_partition_uri_prefix='gs://bucket/path' ) WITH PARTITION COLUMNS LOAD DATA INTO mydataset.testparquet FROM FILES ( uris = ['s3://test-bucket/sample.parquet'], format = 'PARQUET' ) WITH CONNECTION `aws-us-east-1.test-connection` LOAD DATA INTO mydataset.test_csv (Number INT64, Name STRING, Time DATE) PARTITION BY Time FROM FILES ( format = 'CSV', uris = ['azure://test.blob.core.windows.net/container/sampled*'], skip_leading_rows=1 ) WITH CONNECTION `azure-eastus2.test-connection` LOAD DATA OVERWRITE mydataset.testparquet FROM FILES ( uris = ['s3://test-bucket/sample.parquet'], format = 'PARQUET' ) WITH CONNECTION `aws-us-east-1.test-connection` sqlfluff-3.4.2/test/fixtures/dialects/bigquery/load_data_statement.yml000066400000000000000000000364671503426445100263540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dac77d355d4ada518bfd8897bcd5df2ca46f5ac07a31af105120f67734ccc9f8 file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: quoted_identifier: '`myproject.mydataset.table1`' - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: - start_square_bracket: '[' - quoted_literal: "'gs://bucket/path/file1.csv'" - comma: ',' - quoted_literal: "'gs://bucket/path/file2.csv'" - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - end_bracket: ) - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: skip_leading_rows - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.csv'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"my table"' - comma: ',' - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"2025-01-01 00:00:00 UTC"' - end_bracket: ) - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: OVERWRITE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - keyword: TEMP - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: my_tmp_table - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: transaction_date - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: customer_id - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: partition_expiration_days comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' end_bracket: ) - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - keyword: PARTITIONS - bracketed: start_bracket: ( parameter: _PARTITIONTIME comparison_operator: raw_comparison_operator: '=' keyword: TIMESTAMP date_constructor_literal: "'2016-01-01'" end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: _PARTITIONTIME - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/file.avro'" end_square_bracket: ']' - end_bracket: ) - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - bracketed: - start_bracket: ( - naked_identifier: field_1 - data_type: data_type_identifier: STRING - comma: ',' - naked_identifier: field_2 - data_type: data_type_identifier: INT64 - end_bracket: ) - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - end_bracket: ) - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: testparquet - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'s3://test-bucket/sample.parquet'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - end_bracket: ) - keyword: WITH - keyword: CONNECTION - object_reference: quoted_identifier: '`aws-us-east-1.test-connection`' - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: test_csv - bracketed: - start_bracket: ( - column_definition: naked_identifier: Number data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: Name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: Time data_type: data_type_identifier: DATE - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: Time - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'azure://test.blob.core.windows.net/container/sampled*'" end_square_bracket: ']' - comma: ',' - parameter: skip_leading_rows - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - end_bracket: ) - keyword: WITH - keyword: CONNECTION - object_reference: quoted_identifier: '`azure-eastus2.test-connection`' - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: OVERWRITE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: testparquet - keyword: FROM - keyword: FILES - bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'s3://test-bucket/sample.parquet'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - end_bracket: ) - keyword: WITH - keyword: CONNECTION - object_reference: quoted_identifier: '`aws-us-east-1.test-connection`' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/merge_into.sql000066400000000000000000000050061503426445100244670ustar00rootroot00000000000000MERGE dataset.detailedinventory t USING dataset.inventory s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); -- optional INTO MERGE INTO dataset.detailedinventory t USING dataset.inventory s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity WHEN NOT MATCHED THEN INSERT (product, quantity) VALUES(product, quantity); MERGE dataset.newarrivals t USING (SELECT * FROM dataset.newarrivals WHERE warehouse != 'warehouse #2') s ON t.product = s.product WHEN MATCHED AND t.warehouse = 'warehouse #1' THEN UPDATE SET quantity = t.quantity + 20 WHEN MATCHED THEN DELETE; MERGE dataset.inventory t USING (SELECT product, quantity, state FROM dataset.newarrivals INNER JOIN dataset.warehouse ON dataset.newarrivals.warehouse = dataset.warehouse.warehouse) s ON t.product = s.product WHEN MATCHED AND state = 'CA' THEN UPDATE SET quantity = t.quantity + s.quantity WHEN MATCHED THEN DELETE; MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; -- INSERT ROW MERGE dataset.inventory t USING 
dataset.newarrivals s ON t.product = s.product WHEN NOT MATCHED THEN INSERT ROW; -- Optional BY TARGET MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN NOT MATCHED BY TARGET AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED BY TARGET THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); -- NOT MATCHED BY SOURCE MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN NOT MATCHED BY SOURCE THEN UPDATE SET quantity = t.quantity + s.quantity; -- Merge using Select without alias MERGE dataset.NewArrivals USING (SELECT * FROM dataset.NewArrivals WHERE warehouse <> 'warehouse #2') ON FALSE WHEN MATCHED THEN DELETE sqlfluff-3.4.2/test/fixtures/dialects/bigquery/merge_into.yml000066400000000000000000000601331503426445100244730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a5d58da0b69423ce75e2e066d6be1d1339f7b2a87233182e627984d9e0895979 file: - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: detailedinventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: detailedinventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: quantity not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals where_clause: keyword: WHERE expression: column_reference: naked_identifier: warehouse comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' quoted_literal: "'warehouse #2'" end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: warehouse comparison_operator: raw_comparison_operator: '=' quoted_literal: "'warehouse #1'" - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity binary_operator: + numeric_literal: '20' - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: warehouse - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - dot: . - naked_identifier: warehouse - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dataset - dot: . - naked_identifier: warehouse - dot: . 
- naked_identifier: warehouse end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'CA'" - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - keyword: ROW - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: TARGET - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: TARGET - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: SOURCE - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: NewArrivals where_clause: keyword: WHERE expression: column_reference: naked_identifier: warehouse comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "'warehouse #2'" end_bracket: ) - join_on_condition: keyword: 'ON' expression: boolean_literal: 'FALSE' - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE sqlfluff-3.4.2/test/fixtures/dialects/bigquery/normalize_function.sql000066400000000000000000000021201503426445100262360ustar00rootroot00000000000000SELECT col1, NORMALIZE('\u00ea', NFD) AS a, NORMALIZE('\u0065\u0302', NFD) AS b, NORMALIZE_AND_CASEFOLD('\u00ea', NFD) AS c, NORMALIZE_AND_CASEFOLD('\u0065\u0302', NFD) AS d; SELECT a, b, a = b as normalized FROM (SELECT NORMALIZE('\u00ea') as a, NORMALIZE('\u0065\u0302') as b); WITH EquivalentNames AS ( SELECT name FROM UNNEST([ 'Jane\u2004Doe', 'John\u2004Smith', 'Jane\u2005Doe', 'Jane\u2006Doe', 'John Smith']) AS name ) SELECT NORMALIZE(name, NFKC) AS normalized_name, COUNT(*) AS name_count FROM EquivalentNames GROUP BY 1; SELECT a, b, NORMALIZE(a) = NORMALIZE(b) as normalized, NORMALIZE_AND_CASEFOLD(a) = NORMALIZE_AND_CASEFOLD(b) as normalized_with_case_folding FROM (SELECT 'The red barn' AS a, 'The Red Barn' AS b); SELECT a, b, NORMALIZE_AND_CASEFOLD(a, NFD)=NORMALIZE_AND_CASEFOLD(b, NFD) AS nfd, NORMALIZE_AND_CASEFOLD(a, NFC)=NORMALIZE_AND_CASEFOLD(b, NFC) AS nfc, NORMALIZE_AND_CASEFOLD(a, NFKD)=NORMALIZE_AND_CASEFOLD(b, NFKD) AS nkfd, NORMALIZE_AND_CASEFOLD(a, NFKC)=NORMALIZE_AND_CASEFOLD(b, NFKC) AS nkfc sqlfluff-3.4.2/test/fixtures/dialects/bigquery/normalize_function.yml000066400000000000000000000370031503426445100262500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
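# Editor's note (an illustrative sketch, not generated fixture content): the
# paired normalize_function.sql above relies on the fact that visually
# identical strings can differ in code points. NORMALIZE defaults to NFC,
# which is why the fixture's a = b comparison over normalized values is TRUE:
SELECT
    '\u00ea' = '\u0065\u0302' AS raw_equal,        -- FALSE: precomposed ê vs e + combining circumflex
    NORMALIZE('\u00ea') = NORMALIZE('\u0065\u0302')
        AS nfc_equal;                              -- TRUE: both collapse to the same NFC form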
_hash: a5f145a268db45a6932b6b5736fcad3a78175fe0125ed665112c362765909abc file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" comma: ',' keyword: NFD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" comma: ',' keyword: NFD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" comma: ',' keyword: NFD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: c - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" comma: ',' keyword: NFD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b alias_expression: alias_operator: keyword: as naked_identifier: normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: EquivalentNames keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'Jane\\u2004Doe'" - comma: ',' - quoted_literal: "'John\\u2004Smith'" - comma: ',' - quoted_literal: "'Jane\\u2005Doe'" - comma: ',' - quoted_literal: "'Jane\\u2006Doe'" - comma: ',' - quoted_literal: "'John Smith'" - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: name end_bracket: ) select_statement: select_clause: - keyword: SELECT 
- select_clause_element: function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: name comma: ',' keyword: NFKC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: normalized_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: name_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EquivalentNames groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: normalized - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: normalized_with_case_folding from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'The red barn'" alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'The Red Barn'" alias_expression: alias_operator: keyword: AS naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFD end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: nfd - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: 
naked_identifier: a comma: ',' keyword: NFC end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: nfc - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFKD end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFKD end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: nkfd - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFKC end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFKC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: nkfc sqlfluff-3.4.2/test/fixtures/dialects/bigquery/parameters.sql000066400000000000000000000011001503426445100244710ustar00rootroot00000000000000--bigquery allows for named params like @param or ordered params in ? select "1" from x where y = @z_test1; select datetime_trunc(@z2, week); select datetime_trunc(@_ab, week); select datetime_trunc(@a, week); select parse_date("%Y%m", year); -- this should parse year as an identifier select "1" from x where y = ?; select concat("1", ?); select id, datetime_trunc(@z2, week), sum(something) over( partition by some_id order by some_date rows BETWEEN @query_parameter PRECEDING AND CURRENT ROW) as some_sum from some_table where some_column = @query_parameter2; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/parameters.yml000066400000000000000000000153461503426445100245140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
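# A short sketch of the workflow the note above describes (the script path is
# taken verbatim from that note; running it from the repository root, and the
# assumption that it also refreshes the _hash field, are inferences, not
# documented behaviour):
#
#   1. Add or edit the sibling SQL fixture (here, parameters.sql).
#   2. Regenerate the parse-tree fixtures:
#        python test/generate_parse_fixture_yml.py
#   3. The rewritten YAML, _hash included, should then pass the tests.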
_hash: 8c54cfecb9e82789d21a903c1597a099d07f8d290574d9b70d891d49a51329a5 file: - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"1"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: where expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' parameterized_expression: at_sign_literal: '@z_test1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc function_contents: bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@z2' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc function_contents: bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@_ab' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc function_contents: bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@a' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: parse_date function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"%Y%m"' comma: ',' date_part: year end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"1"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: where expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' parameterized_expression: question_mark: '?' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: concat function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"1"' - comma: ',' - expression: parameterized_expression: question_mark: '?' 
- end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: datetime_trunc function_contents: bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@z2' comma: ',' date_part: week end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: something end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: some_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: some_date frame_clause: - keyword: rows - keyword: BETWEEN - parameterized_expression: at_sign_literal: '@query_parameter' - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: some_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: where expression: column_reference: naked_identifier: some_column comparison_operator: raw_comparison_operator: '=' parameterized_expression: at_sign_literal: '@query_parameter2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/pipe_statement.sql000066400000000000000000000065151503426445100253660ustar00rootroot00000000000000FROM Produce; FROM (SELECT 'apples' AS item, 2 AS sales) |> SELECT item AS fruit_name; FROM (SELECT 'apples' AS item, 2 AS sales) |> EXTEND item IN ('carrots', 'oranges') AS is_orange; FROM (SELECT 2 AS x, 3 AS y) AS t |> SET x = x * x, y = 8 |> SELECT t.x AS original_x, x, y; FROM (SELECT 1 AS x, 2 AS y) AS t |> DROP x |> SELECT t.x AS original_x, y; FROM (SELECT 1 AS x, 2 AS y) AS t |> RENAME y AS renamed_y |> SELECT *, t.y AS t_y; FROM (SELECT 1 AS x, 2 AS y) |> AS t |> RENAME y AS renamed_y |> SELECT *, t.y AS t_y; FROM foo |> WHERE sales >= 3 |> LIMIT 10 OFFSET 4 |> ORDER BY sales DESC; FROM Produce |> AGGREGATE SUM(sales) AS total_sales GROUP AND ORDER BY category, item DESC; FROM Produce |> AGGREGATE SUM(sales) AS total_sales GROUP BY category, item |> ORDER BY category, item DESC; FROM Produce |> AGGREGATE SUM(sales) AS total_sales ASC GROUP BY item, category DESC; FROM Produce |> AGGREGATE SUM(sales) AS total_sales GROUP BY item, category |> ORDER BY category DESC, total_sales; FROM foo |> UNION ALL (SELECT 1), (SELECT 2); FROM foo |> UNION DISTINCT (SELECT 1), (SELECT 2); FROM foo |> UNION ALL BY NAME (SELECT 20 AS two_digit, 2 AS one_digit); FROM foo |> INTERSECT DISTINCT (SELECT 1), (SELECT 2); FROM foo |> INTERSECT DISTINCT (SELECT * FROM UNNEST(ARRAY[2, 3, 3, 5]) AS number), (SELECT * FROM UNNEST(ARRAY[3, 3, 4, 5]) AS number); FROM foo |> EXCEPT DISTINCT (SELECT 1), (SELECT 2); FROM foo |> EXCEPT DISTINCT (SELECT * FROM UNNEST(ARRAY[1, 2]) AS number), (SELECT * FROM UNNEST(ARRAY[1, 4]) AS number); FROM foo |> EXCEPT DISTINCT BY NAME (SELECT 10 AS two_digit, 1 AS one_digit); FROM foo |> LEFT JOIN ( SELECT "apples" AS item, 123 AS id ) AS produce_data ON produce_sales.item = produce_data.item |> SELECT produce_sales.item, sales, id; FROM input_table |> CALL tvf1(arg1) |> CALL tvf2(arg2, arg3); FROM 
mydataset.Produce |> CALL APPENDS(NULL, NULL); FROM LargeTable |> TABLESAMPLE SYSTEM (1 PERCENT); FROM foo |> PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2')); FROM foo |> UNPIVOT(sales FOR quarter IN (Q1, Q2)); SELECT * FROM UNNEST(ARRAY[1, 2, 3]) AS number |> UNION DISTINCT (SELECT 1), (SELECT 2); WITH NumbersTable AS ( SELECT 1 AS one_digit, 10 AS two_digit UNION ALL SELECT 2, 20 UNION ALL SELECT 3, 30 ) SELECT one_digit, two_digit FROM NumbersTable |> INTERSECT DISTINCT BY NAME (SELECT 10 AS two_digit, 1 AS one_digit); ( SELECT 'apples' AS item, 2 AS sales UNION ALL SELECT 'bananas' AS item, 5 AS sales UNION ALL SELECT 'apples' AS item, 7 AS sales ) |> AGGREGATE COUNT(*) AS num_items, SUM(sales) AS total_sales; ( SELECT "000123" AS id, "apples" AS item, 2 AS sales UNION ALL SELECT "000456" AS id, "bananas" AS item, 5 AS sales ) AS sales_table |> AGGREGATE SUM(sales) AS total_sales GROUP BY id, item -- The sales_table alias is now out of scope. We must introduce a new one. |> AS t1 |> JOIN (SELECT 456 AS id, "yellow" AS color) AS t2 ON CAST(t1.id AS INT64) = t2.id |> SELECT t2.id, total_sales, color; SELECT 1 AS x, 2 AS y, 3 AS z |> AS t |> RENAME y AS renamed_y |> SELECT *, t.y AS t_y; ( SELECT 'apples' AS item, 2 AS sales UNION ALL SELECT 'carrots' AS item, 8 AS sales ) |> EXTEND item IN ('carrots', 'oranges') AS is_orange; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/pipe_statement.yml000066400000000000000000001532531503426445100253720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b264b656bc34690e3553435a09549823a1f14646fe46fd36c89b6131aacc0509 file: - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'apples'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: sales end_bracket: ) pipe_operator_clause: pipe_operator: '|>' select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: item alias_expression: alias_operator: keyword: AS naked_identifier: fruit_name - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'apples'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: sales end_bracket: ) pipe_operator_clause: pipe_operator: '|>' extend_clause: keyword: EXTEND expression: column_reference: naked_identifier: item keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'carrots'" - comma: ',' - quoted_literal: "'oranges'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_orange - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: AS naked_identifier: y end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t - pipe_operator_clause: pipe_operator: '|>' set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: x - binary_operator: '*' - column_reference: naked_identifier: x - comma: ',' - set_clause: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '8' - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: x alias_expression: alias_operator: keyword: AS naked_identifier: original_x - comma: ',' - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: y end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t - pipe_operator_clause: pipe_operator: '|>' drop_column_clause: keyword: DROP column_reference: naked_identifier: x - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: x alias_expression: alias_operator: keyword: AS naked_identifier: original_x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: y end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t - pipe_operator_clause: pipe_operator: '|>' rename_column_clause: keyword: RENAME column_reference: naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: renamed_y - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: t_y - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: y end_bracket: ) - pipe_operator_clause: pipe_operator: '|>' alias_expression: alias_operator: keyword: AS naked_identifier: t - pipe_operator_clause: pipe_operator: '|>' rename_column_clause: keyword: RENAME column_reference: naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: renamed_y - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: t_y - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - pipe_operator_clause: pipe_operator: '|>' where_clause: keyword: WHERE expression: column_reference: naked_identifier: sales comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '3' - pipe_operator_clause: pipe_operator: '|>' limit_clause: - keyword: LIMIT - numeric_literal: '10' - keyword: OFFSET - numeric_literal: '4' - pipe_operator_clause: pipe_operator: '|>' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: sales - keyword: DESC - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce pipe_operator_clause: pipe_operator: '|>' aggregate_clause: keyword: AGGREGATE function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_sales group_and_orderby_clause: - keyword: GROUP - keyword: AND - keyword: ORDER - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: item - keyword: DESC - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce - pipe_operator_clause: pipe_operator: '|>' aggregate_clause: keyword: AGGREGATE function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_sales group_and_orderby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: item - pipe_operator_clause: pipe_operator: '|>' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: item - keyword: DESC - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce pipe_operator_clause: pipe_operator: '|>' aggregate_clause: - keyword: AGGREGATE - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total_sales - keyword: ASC - group_and_orderby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: item - comma: ',' - column_reference: naked_identifier: category - keyword: DESC - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce - pipe_operator_clause: pipe_operator: '|>' aggregate_clause: keyword: AGGREGATE function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) 
alias_expression: alias_operator: keyword: AS naked_identifier: total_sales group_and_orderby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: item - comma: ',' - column_reference: naked_identifier: category - pipe_operator_clause: pipe_operator: '|>' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: category - keyword: DESC - comma: ',' - column_reference: naked_identifier: total_sales - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '20' alias_expression: alias_operator: keyword: AS naked_identifier: two_digit - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: one_digit end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: 
typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: number end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: number end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: EXCEPT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: EXCEPT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: number end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: 
number end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' set_operator_clause: set_operator: - keyword: EXCEPT - keyword: DISTINCT - keyword: BY - keyword: NAME bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: two_digit - comma: ',' - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: one_digit end_bracket: ) - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - pipe_operator_clause: pipe_operator: '|>' join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: '"apples"' alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '123' alias_expression: alias_operator: keyword: AS naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: produce_data - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: produce_sales - dot: . - naked_identifier: item - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: produce_data - dot: . - naked_identifier: item - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: produce_sales - dot: . - naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: id - statement_terminator: ; - statement: pipe_statement: - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: input_table - pipe_operator_clause: pipe_operator: '|>' call_operator: call_statement: keyword: CALL procedure_name: procedure_name_identifier: tvf1 bracketed: start_bracket: ( expression: column_reference: naked_identifier: arg1 end_bracket: ) - pipe_operator_clause: pipe_operator: '|>' call_operator: call_statement: keyword: CALL procedure_name: procedure_name_identifier: tvf2 bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: arg2 - comma: ',' - expression: column_reference: naked_identifier: arg3 - end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: Produce pipe_operator_clause: pipe_operator: '|>' call_operator: call_statement: keyword: CALL procedure_name: procedure_name_identifier: APPENDS bracketed: - start_bracket: ( - expression: null_literal: 'NULL' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: LargeTable pipe_operator_clause: pipe_operator: '|>' sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '1' keyword: PERCENT end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' pivot_operator: from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: pipe_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo pipe_operator_clause: pipe_operator: '|>' unpivot_operator: from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: pipe_statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: number pipe_operator_clause: pipe_operator: '|>' set_operator_clause: - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: pipe_statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: NumbersTable keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: one_digit - comma: ',' - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS 
naked_identifier: two_digit - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '2' - comma: ',' - select_clause_element: numeric_literal: '20' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '3' - comma: ',' - select_clause_element: numeric_literal: '30' end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: one_digit - comma: ',' - select_clause_element: column_reference: naked_identifier: two_digit from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: NumbersTable pipe_operator_clause: pipe_operator: '|>' set_operator_clause: set_operator: - keyword: INTERSECT - keyword: DISTINCT - keyword: BY - keyword: NAME bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: two_digit - comma: ',' - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: one_digit end_bracket: ) - statement_terminator: ; - statement: pipe_statement: bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'apples'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: sales - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'bananas'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '5' alias_expression: alias_operator: keyword: AS naked_identifier: sales - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'apples'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '7' alias_expression: alias_operator: keyword: AS naked_identifier: sales end_bracket: ) pipe_operator_clause: pipe_operator: '|>' aggregate_clause: - keyword: AGGREGATE - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: num_items - comma: ',' - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total_sales - statement_terminator: ; - statement: pipe_statement: - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: '"000123"' alias_expression: alias_operator: keyword: AS naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: '"apples"' alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: sales - set_operator: - keyword: UNION - keyword: ALL - 
select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: '"000456"' alias_expression: alias_operator: keyword: AS naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: '"bananas"' alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '5' alias_expression: alias_operator: keyword: AS naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: sales_table - pipe_operator_clause: pipe_operator: '|>' aggregate_clause: keyword: AGGREGATE function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_sales group_and_orderby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: item - pipe_operator_clause: pipe_operator: '|>' alias_expression: alias_operator: keyword: AS naked_identifier: t1 - pipe_operator_clause: pipe_operator: '|>' join_clause: keyword: JOIN from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '456' alias_expression: alias_operator: keyword: AS naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: '"yellow"' alias_expression: alias_operator: keyword: AS naked_identifier: color end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t2 join_on_condition: keyword: 'ON' expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id keyword: AS data_type: data_type_identifier: INT64 end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: total_sales - comma: ',' - select_clause_element: column_reference: naked_identifier: color - statement_terminator: ; - statement: pipe_statement: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: y - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: AS naked_identifier: z - pipe_operator_clause: pipe_operator: '|>' alias_expression: alias_operator: keyword: AS naked_identifier: t - pipe_operator_clause: pipe_operator: '|>' rename_column_clause: keyword: RENAME column_reference: naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: renamed_y - pipe_operator_clause: pipe_operator: '|>' select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: y alias_expression: alias_operator: keyword: AS naked_identifier: t_y - statement_terminator: ; - statement: pipe_statement: bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'apples'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: sales - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'carrots'" alias_expression: alias_operator: keyword: AS naked_identifier: item - comma: ',' - select_clause_element: numeric_literal: '8' alias_expression: alias_operator: keyword: AS naked_identifier: sales end_bracket: ) pipe_operator_clause: pipe_operator: '|>' extend_clause: keyword: EXTEND expression: column_reference: naked_identifier: item keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'carrots'" - comma: ',' - quoted_literal: "'oranges'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_orange - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/procedural_statements.sql000066400000000000000000000012661503426445100267520ustar00rootroot00000000000000DECLARE x INT64 DEFAULT 0; REPEAT SET x = x + 1; SELECT x; UNTIL x >= 3 END REPEAT; WHILE true DO SELECT 1; CONTINUE; END WHILE; IF x >= 10 THEN SELECT x; END IF; IF x >= 10 THEN SET x = x - 1; ELSEIF x < 0 THEN SET x = x + 1; ELSEIF x = 0 THEN SET x = x + 1; ELSE SELECT x; END IF; LOOP SET x = x + 1; IF x >= 10 THEN LEAVE; ELSE CONTINUE; END IF; END LOOP; SELECT x; DECLARE heads BOOL; DECLARE heads_count INT64 DEFAULT 0; LOOP SET heads = RAND() < 0.5; IF heads THEN SELECT 'Heads!'; SET heads_count = heads_count + 1; CONTINUE; END IF; SELECT 'Tails!'; BREAK; END LOOP; SELECT CONCAT(CAST(heads_count AS STRING), ' heads in a row'); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/procedural_statements.yml000066400000000000000000000236541503426445100267610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9fce8c162bcaeb4150cca7216bf038c3a37e044d65f1d99f5956c06fb175afb3 file: - statement: declare_segment: - keyword: DECLARE - naked_identifier: x - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '0' - statement_terminator: ; - multi_statement_segment: repeat_statement: - keyword: REPEAT - repeat_statements: - statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - keyword: UNTIL - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '3' - keyword: END - keyword: REPEAT - statement_terminator: ; - multi_statement_segment: while_statement: - keyword: WHILE - expression: boolean_literal: 'true' - keyword: DO - while_statements: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: continue_statement: keyword: CONTINUE - statement_terminator: ; - keyword: END - keyword: WHILE - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: '-' numeric_literal: '1' statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: < numeric_literal: '0' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: ELSE - if_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - multi_statement_segment: loop_statement: - keyword: LOOP - loop_statements: - statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: 
'1' - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: leave_statement: keyword: LEAVE statement_terminator: ; - keyword: ELSE - if_statements: statement: continue_statement: keyword: CONTINUE statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - statement: declare_segment: keyword: DECLARE naked_identifier: heads data_type: data_type_identifier: BOOL - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - naked_identifier: heads_count - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '0' - statement_terminator: ; - multi_statement_segment: loop_statement: - keyword: LOOP - loop_statements: - statement: set_segment: keyword: SET naked_identifier: heads comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: RAND function_contents: bracketed: start_bracket: ( end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '0.5' - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: heads - keyword: THEN - if_statements: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Heads!'" - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: heads_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: heads_count binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: continue_statement: keyword: CONTINUE - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Tails!'" - statement_terminator: ; - statement: break_statement: keyword: BREAK - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONCAT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: heads_count keyword: AS data_type: data_type_identifier: STRING end_bracket: ) - comma: ',' - expression: quoted_literal: "' heads in a row'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select.sql000066400000000000000000000000431503426445100236120ustar00rootroot00000000000000SELECT 'metadata' AS key from foo; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select.yml000066400000000000000000000015661503426445100236270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0cb963b53f2fa31526a616181c1f190315dc4c1c0e8ba540fa8cfdc9463f0286 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'metadata'" alias_expression: alias_operator: keyword: AS naked_identifier: key from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_1_gt_0.sql000066400000000000000000000000171503426445100247440ustar00rootroot00000000000000SELECT (1 > 0) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_1_gt_0.yml000066400000000000000000000014321503426445100247500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 51a44bc3e8acaeffbccf25c241dff6ac626037ba22b0d7d1bb369a967ee210bd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_1_lt_0.sql000066400000000000000000000000171503426445100247510ustar00rootroot00000000000000SELECT (1 < 0) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_1_lt_0.yml000066400000000000000000000014301503426445100247530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5734ec8350f121daf28d0b5b6fd95c469e24f54648fff9c274fd016aae92976f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '0' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_case.sql000066400000000000000000000002021503426445100246020ustar00rootroot00000000000000select case fruit_code when 0 then 'apple' when 1 then 'banana' when 2 then 'cashew' end as fruit from some_table sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_case.yml000066400000000000000000000032731503426445100246170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d5dde713271d10a898c0c50db02310a221c81996aa8c099d3049b3a7f409440 file: statement: select_statement: select_clause: keyword: select select_clause_element: expression: case_expression: - keyword: case - expression: column_reference: naked_identifier: fruit_code - when_clause: - keyword: when - expression: numeric_literal: '0' - keyword: then - expression: quoted_literal: "'apple'" - when_clause: - keyword: when - expression: numeric_literal: '1' - keyword: then - expression: quoted_literal: "'banana'" - when_clause: - keyword: when - expression: numeric_literal: '2' - keyword: then - expression: quoted_literal: "'cashew'" - keyword: end alias_expression: alias_operator: keyword: as naked_identifier: fruit from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_column_object_with_keyword.sql000066400000000000000000000003141503426445100313150ustar00rootroot00000000000000-- current is a reserved word but keywords are allowed as part of a nested object name SELECT table1.current.column, table1.object.current.column, table1.object.nested.current.column, FROM table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_column_object_with_keyword.yml000066400000000000000000000030051503426445100313170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8320edbbddda2850279525aa5140e648fbcd0e3d164bcadb11a5194ebfee66f8 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: object - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: object - dot: . - naked_identifier: nested - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_datetime.sql000066400000000000000000000004061503426445100254710ustar00rootroot00000000000000-- Test BigQuery specific date identifiers. SELECT gmv._merchant_key, gmv.order_created_at, EXTRACT(DAY FROM gmv.order_created_at) AS order_day FROM my_table as gmv WHERE gmv.datetime >= DATE_TRUNC(DATE_SUB(CURRENT_DATE(), INTERVAL 2 YEAR), year) LIMIT 1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_datetime.yml000066400000000000000000000064711503426445100255030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e36e8a48b09b527a5e82f21b9e5975e81bee0424bda07deb38b46f35e3a9d789 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: _merchant_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: order_created_at - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: order_created_at end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: order_day from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: alias_operator: keyword: as naked_identifier: gmv where_clause: keyword: WHERE expression: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: datetime comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' function: function_name: function_name_identifier: DATE_TRUNC function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATE_SUB function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '2' date_part: YEAR - end_bracket: ) comma: ',' date_part: year end_bracket: ) limit_clause: keyword: LIMIT numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_empty_array_literal.sql000066400000000000000000000000121503426445100277360ustar00rootroot00000000000000SELECT [] sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_empty_array_literal.yml000066400000000000000000000011341503426445100277460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f45e520f24c3363bbc9a99cb0361bdbd9ef1959f5a177a1152b88479a635d45 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: start_square_bracket: '[' end_square_bracket: ']' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_example.sql000066400000000000000000000016361503426445100253360ustar00rootroot00000000000000-- This query should also parse in ANSI, but as a bigquery example -- it probably lives here. In particular it has an un-bracketed -- select clause within a function, and array notation which -- makes it a useful test case. 
WITH age_buckets_bit_array AS ( SELECT bucket_id, num_ranges, min_age, ARRAY(SELECT CAST(num AS INT64) FROM UNNEST(SPLIT(binary, '')) AS num) AS bits, age_label FROM age_buckets ), bucket_abundance AS ( SELECT bucket_id (count_18_24 * bits[OFFSET(0)] + count_25_34 * bits[OFFSET(1)] + count_35_44 * bits[OFFSET(2)] + count_45_54 * bits[OFFSET(3)] + count_55_64 * bits[OFFSET(4)] + count_65_plus * bits[OFFSET(5)]) / audience_size AS relative_abundance FROM audience_counts_gender_age CROSS JOIN age_buckets_bit_array ) SELECT * FROM age_buckets_bit_array JOIN bucket_abundance USING (bucket_id) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_example.yml000066400000000000000000000302141503426445100253320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95dbd1022c1d7a5656672b99f96cb09f1b1bb9f33f7e056362d8aaebbe2aa3bb file: statement: with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: age_buckets_bit_array keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: bucket_id - comma: ',' - select_clause_element: column_reference: naked_identifier: num_ranges - comma: ',' - select_clause_element: column_reference: naked_identifier: min_age - comma: ',' - select_clause_element: expression: array_expression: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: num keyword: AS data_type: data_type_identifier: INT64 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SPLIT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: binary - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: bits - comma: ',' - select_clause_element: column_reference: naked_identifier: age_label from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: age_buckets end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: bucket_abundance keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: bucket_id function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: count_18_24 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' - binary_operator: 
+ - column_reference: naked_identifier: count_25_34 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_35_44 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_45_54 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_55_64 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_65_plus - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) end_square_bracket: ']' end_bracket: ) binary_operator: / column_reference: naked_identifier: audience_size alias_expression: alias_operator: keyword: AS naked_identifier: relative_abundance from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: audience_counts_gender_age join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: age_buckets_bit_array end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: age_buckets_bit_array join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: bucket_abundance - keyword: USING - bracketed: start_bracket: ( naked_identifier: bucket_id end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_except.sql000066400000000000000000000007351503426445100251720ustar00rootroot00000000000000SELECT * EXCEPT (seqnum) REPLACE (foo as bar, baz as foobar) FROM my_tbl; -- Catch potential bugs in unions select * except (foo) from some_table union all select * from another_table; -- Except is allowed after other fields select 1 + 2 as calculated, * except (irrelevant) from my_tbl; -- This might be redundant with the example above. -- Demonstrates using multiple except clauses. 
select foo.* except (some_column), bar.* except (other_column) from my_tbl; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_except.yml000066400000000000000000000110511503426445100251650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2a75600fe8b482cc8901bff063da812fa022262d426181aa1619a9b917d0f09c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: EXCEPT bracketed: start_bracket: ( naked_identifier: seqnum end_bracket: ) select_replace_clause: keyword: REPLACE bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - keyword: as - naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - keyword: as - naked_identifier: foobar - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' alias_expression: alias_operator: keyword: as naked_identifier: calculated - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: irrelevant end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: foo dot: . star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: some_column end_bracket: ) - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: bar dot: . 
star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: other_column end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_except_replace.sql000066400000000000000000000002051503426445100266550ustar00rootroot00000000000000-- We can call functions when replacing a field select * except(foo) replace (concat(fruit, 'berry') as fruit) from some_table sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_except_replace.yml000066400000000000000000000032611503426445100266640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8f677c4c98fd1853db8ffcdefbd1a38f69ccc6bfda06bb795efb62b40fc0ea8d file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: foo end_bracket: ) select_replace_clause: keyword: replace bracketed: start_bracket: ( function: function_name: function_name_identifier: concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: fruit - comma: ',' - expression: quoted_literal: "'berry'" - end_bracket: ) keyword: as naked_identifier: fruit end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_extract.sql000066400000000000000000000006131503426445100253470ustar00rootroot00000000000000SELECT EXTRACT(HOUR FROM some_timestamp AT TIME ZONE "UTC"); SELECT EXTRACT(HOUR FROM some_timestamp AT TIME ZONE timezone_column); WITH Input AS (SELECT TIMESTAMP("2008-12-25 05:30:00+00") AS timestamp_value) SELECT EXTRACT(DAY FROM timestamp_value AT TIME ZONE "UTC") AS the_day_utc, EXTRACT(DAY FROM timestamp_value AT TIME ZONE "America/Los_Angeles") AS the_day_california FROM Input; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_extract.yml000066400000000000000000000112371503426445100253550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9fe4d4a2759bee3ca0992e794a9c49bba97abac1f8e6fd1aa57f5efcdd891e2c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: some_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"UTC"' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: some_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: column_reference: naked_identifier: timezone_column end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Input keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMP function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2008-12-25 05:30:00+00"' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: timestamp_value end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: timestamp_value time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"UTC"' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: the_day_utc - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: timestamp_value time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"America/Los_Angeles"' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: the_day_california from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Input - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_for_system_time.sql000066400000000000000000000006011503426445100271020ustar00rootroot00000000000000SELECT user_id FROM lists_emails AS list_emails FOR SYSTEM_TIME AS OF CAST('2019-12-02T20:52:34+00:00' AS TIMESTAMP); SELECT user_id FROM `project.dataset.table1` FOR SYSTEM_TIME AS OF CAST('2020-05-11T14:02:52+00:00' AS TIMESTAMP); SELECT user_id FROM `project.dataset.table1` FOR SYSTEM TIME AS OF CAST('2020-05-11T14:02:52+00:00' AS TIMESTAMP) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_for_system_time.yml000066400000000000000000000073051503426445100271140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 613a892af8a6f78a574a2b7cd42e443f7a8d5966af513d5031ced82ca9e1a83c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: lists_emails alias_expression: alias_operator: keyword: AS naked_identifier: list_emails for_system_time_as_of_segment: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2019-12-02T20:52:34+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.table1`' for_system_time_as_of_segment: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2020-05-11T14:02:52+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.table1`' for_system_time_as_of_segment: - keyword: FOR - keyword: SYSTEM - keyword: TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2020-05-11T14:02:52+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_function_object_fields.sql000066400000000000000000000006551503426445100304040ustar00rootroot00000000000000SELECT testFunction(a).b AS field, testFunction(a).* AS wildcard, testFunction(a).b.c AS field_with_field, testFunction(a).b.* AS field_with_wildcard, testFunction(a)[OFFSET(0)].* AS field_with_offset_wildcard, testFunction(a)[SAFE_OFFSET(0)].* AS field_with_safe_offset_wildcard, testFunction(a)[ORDINAL(1)].* AS field_with_ordinal_wildcard, testFunction(a)[ORDINAL(1)].a AS field_with_ordinal_field FROM table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_function_object_fields.yml000066400000000000000000000163551503426445100304120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7e5f99b0d9bbe4f8e15aa356b0abb5965da26b9e09dab2da4f11e9fb7616e90 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: dot: . 
naked_identifier: b alias_expression: alias_operator: keyword: AS naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: dot: . star: '*' alias_expression: alias_operator: keyword: AS naked_identifier: wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: - dot: . - naked_identifier: b - dot: . - naked_identifier: c alias_expression: alias_operator: keyword: AS naked_identifier: field_with_field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: - dot: . - naked_identifier: b - dot: . - star: '*' alias_expression: alias_operator: keyword: AS naked_identifier: field_with_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: alias_operator: keyword: AS naked_identifier: field_with_offset_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: SAFE_OFFSET function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: alias_operator: keyword: AS naked_identifier: field_with_safe_offset_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: alias_operator: keyword: AS naked_identifier: field_with_ordinal_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . 
naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: field_with_ordinal_field from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_function_parameter_order_by_multiple_columns.sql000066400000000000000000000000541503426445100350420ustar00rootroot00000000000000SELECT STRING_AGG(a ORDER BY b, c) FROM foo sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_function_parameter_order_by_multiple_columns.yml000066400000000000000000000025131503426445100350460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f99d5b8d98e4aa517dc4cd8039e4965407b8c33d9dea9d7f795ad24fe4bcf09 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: STRING_AGG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a aggregate_order_by: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_function_with_named_arguments.sql000066400000000000000000000001431503426445100320040ustar00rootroot00000000000000SELECT ST_GEOGFROMGEOJSON('{"type":"LineString","coordinates":[[1,2],[4,5]]}', make_valid => true)
_hash: a3cf7d8cbc3f0818051569deb9853222ce6bc0da4a9b09cc0f3359544701c40c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ST_GEOGFROMGEOJSON function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{\"type\":\"LineString\",\"coordinates\":[[1,2],[4,5]]}'" comma: ',' named_argument: naked_identifier: make_valid right_arrow: => expression: boolean_literal: 'true' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_group_by.sql000066400000000000000000000020431503426445100255220ustar00rootroot00000000000000select foo, count(*) from bar group by all; select letter, sum(num) as sum_num from table_test group by grouping sets (1, ()); select letter, sum(num) as sum_num from table_test group by grouping sets (letter, ()); select product_type, product_name, sum(product_count) as product_sum from products group by grouping sets ( product_type, rollup(product_type, product_name) ) order by product_type, product_name; select product_type, product_name, sum(product_count) as product_sum from products group by grouping sets ( product_type, cube(product_type, product_name) ) order by product_type, product_name; select product_type, product_name, sum(product_count) as product_sum from products group by rollup (product_type, product_name) order by product_type, product_name; select product_type, product_name, sum(product_count) as product_sum from products group by cube (product_type, product_name) order by product_type, product_name; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_group_by.yml000066400000000000000000000266741503426445100255440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 112fa70a0e52c988c9eaa79cf6d401471472ece3b4084a04d67b104b71fdd7ae file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar groupby_clause: - keyword: group - keyword: by - keyword: all - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: letter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: num end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sum_num from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_test groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: numeric_literal: '1' comma: ',' expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: letter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: num end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sum_num from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_test groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: column_reference: naked_identifier: letter comma: ',' expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: product_type - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: product_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: column_reference: naked_identifier: product_type comma: ',' expression: function: function_name: function_name_identifier: rollup function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product_type - comma: ',' - expression: column_reference: naked_identifier: product_name - end_bracket: ) end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: product_type - comma: ',' - 
column_reference: naked_identifier: product_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: product_type - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: product_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: column_reference: naked_identifier: product_type comma: ',' expression: function: function_name: function_name_identifier: cube function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product_type - comma: ',' - expression: column_reference: naked_identifier: product_name - end_bracket: ) end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: product_type - comma: ',' - column_reference: naked_identifier: product_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: product_type - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: product_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: product_type - comma: ',' - column_reference: naked_identifier: product_name end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: product_type - comma: ',' - column_reference: naked_identifier: product_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: product_type - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_count end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: product_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: product_type - comma: ',' - column_reference: naked_identifier: product_name end_bracket: ) orderby_clause: - keyword: order - keyword: 
by - column_reference: naked_identifier: product_type - comma: ',' - column_reference: naked_identifier: product_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_gt_lt.sql000066400000000000000000000000311503426445100250000ustar00rootroot00000000000000SELECT (1 > 0 AND 0 < 1) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_gt_lt.yml000066400000000000000000000017331503426445100250140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 980ee3955a2b1cc7004ad794d17d244255158323e7b2ba6a89eb09394f5ae31a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - binary_operator: AND - numeric_literal: '0' - comparison_operator: raw_comparison_operator: < - numeric_literal: '1' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql000066400000000000000000000004411503426445100317050ustar00rootroot00000000000000SELECT * FROM project-a.dataset-b.table-c JOIN dataset-c.table-d USING (a); SELECT * FROM a-1a.b.c; SELECT * FROM a-1.b.c; SELECT * FROM project23-123.dataset7-b1.table-2c JOIN dataset12-c1.table-1d USING (a); SELECT col1-col2 AS newcol1, col1 - col2 AS newcol2 FROM table-a123; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml000066400000000000000000000122021503426445100317050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f6d47e1f3af50f343ac387dce247d0715774c44253ae6daa32177c60c94778f9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: project - dash: '-' - naked_identifier: a - dot: . - naked_identifier: dataset - dash: '-' - naked_identifier: b - dot: . - naked_identifier: table - dash: '-' - naked_identifier: c join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dash: '-' - naked_identifier: c - dot: . - naked_identifier: table - dash: '-' - naked_identifier: d - keyword: USING - bracketed: start_bracket: ( naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dash: '-' - naked_identifier: 1a - dot: . - naked_identifier: b - dot: . 
- naked_identifier: c - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dash: '-' - naked_identifier: '1' - dot: . - naked_identifier: b - dot: . - naked_identifier: c - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: project23 - dash: '-' - naked_identifier: '123' - dot: . - naked_identifier: dataset7 - dash: '-' - naked_identifier: b1 - dot: . - naked_identifier: table - dash: '-' - naked_identifier: 2c join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dataset12 - dash: '-' - naked_identifier: c1 - dot: . - naked_identifier: table - dash: '-' - naked_identifier: 1d - keyword: USING - bracketed: start_bracket: ( naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - column_reference: naked_identifier: col1 - binary_operator: '-' - column_reference: naked_identifier: col2 alias_expression: alias_operator: keyword: AS naked_identifier: newcol1 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: col1 - binary_operator: '-' - column_reference: naked_identifier: col2 alias_expression: alias_operator: keyword: AS naked_identifier: newcol2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: table - dash: '-' - naked_identifier: a123 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_identifiers.sql000066400000000000000000000012771503426445100262070ustar00rootroot00000000000000-- Identifiers can start with an underscore in BigQuery -- and can contain just _ and 0-9 SELECT _01 FROM _2010_01; -- Identifiers can start with an underscore in BigQuery -- and can contain just _ and 0-9 SELECT col_a AS _ FROM table1; -- TODO: Currently we don't support this, but should. -- Table names can contain dashes in FROM and TABLE clauses, -- but we're reluctant to add this to the general naked_identifier grammar and not -- sure it's worth adding specific syntax for this unless someone requests it. -- https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical -- SELECT * FROM data-customers-287.mydatabase.mytable; -- Same as above but quoted SELECT * FROM `data-customers-287`.mydatabase.mytable; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_identifiers.yml000066400000000000000000000035461503426445100262120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 596cb87f0aeda98ce89073aee8bd5e4ad0263a83bcb6e843b8b3f8e1b2484956 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: _01 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: _2010_01 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a alias_expression: alias_operator: keyword: AS naked_identifier: _ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '`data-customers-287`' - dot: . - naked_identifier: mydatabase - dot: . - naked_identifier: mytable - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_if.sql000066400000000000000000000006471503426445100243020ustar00rootroot00000000000000SELECT client, firstHtml, vary, IF(_cdn_provider != '', 'CDN', 'Origin') AS source, COUNT(0) AS total FROM `httparchive.almanac.requests`, UNNEST(split(REGEXP_REPLACE(REGEXP_REPLACE(LOWER(resp_vary), '\"', ''), '[, ]+|\\\\0', ','), ',')) AS vary WHERE date = '2019-07-01' GROUP BY client, firstHtml, vary, source HAVING vary != '' AND vary IS NOT NULL ORDER BY client DESC, firstHtml DESC, total DESC sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_if.yml000066400000000000000000000155021503426445100243000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 242625b1dc9cb498310d499e8e835797605309c85adff35bd2d2dfaf3e91d2a9 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: client - comma: ',' - select_clause_element: column_reference: naked_identifier: firstHtml - comma: ',' - select_clause_element: column_reference: naked_identifier: vary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: IF function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _cdn_provider comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' quoted_literal: "''" - comma: ',' - expression: quoted_literal: "'CDN'" - comma: ',' - expression: quoted_literal: "'Origin'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: source - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`httparchive.almanac.requests`' - comma: ',' - from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: split function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: REGEXP_REPLACE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: REGEXP_REPLACE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: resp_vary end_bracket: ) - comma: ',' - expression: quoted_literal: "'\\\"'" - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) - comma: ',' - expression: quoted_literal: "'[, ]+|\\\\\\\\0'" - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: vary where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2019-07-01'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: client - comma: ',' - column_reference: naked_identifier: firstHtml - comma: ',' - column_reference: naked_identifier: vary - comma: ',' - column_reference: naked_identifier: source having_clause: keyword: HAVING expression: - column_reference: naked_identifier: vary - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - quoted_literal: "''" - binary_operator: AND - column_reference: naked_identifier: vary - keyword: IS - keyword: NOT - null_literal: 'NULL' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: client - keyword: DESC - comma: ',' - column_reference: naked_identifier: firstHtml - keyword: DESC - comma: ',' - column_reference: naked_identifier: total - keyword: DESC sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_interval_expression.sql000066400000000000000000000000631503426445100277770ustar00rootroot00000000000000SELECT DATE_ADD(CURRENT_DATE(), INTERVAL -1+2 DAY) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_interval_expression.yml000066400000000000000000000026341503426445100300070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f4496712c506844a60daae9f9da82165bde98db25d2893e5909e0e54f53db999 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '2' date_part: DAY - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_less_than_greater_than.sql000066400000000000000000000000201503426445100303700ustar00rootroot00000000000000SELECT 1<2, 2>1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_less_than_greater_than.yml000066400000000000000000000015611503426445100304050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95b2a7833108ccca1a76481d7173df106d449b6fda839d0f4483d19bfb3efa23 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '2' - comma: ',' - select_clause_element: expression: - numeric_literal: '2' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_lt_gt.sql000066400000000000000000000000311503426445100250000ustar00rootroot00000000000000SELECT (0 < 1 AND 1 > 0) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_lt_gt.yml000066400000000000000000000017331503426445100250140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bbf4a1322c6b9005ab54e9218f75baae50477bbe66cea4d4efff99e52523b15e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '0' - comparison_operator: raw_comparison_operator: < - numeric_literal: '1' - binary_operator: AND - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.sql000066400000000000000000000012571503426445100311600ustar00rootroot00000000000000-- Created this test case in response to issue #989. As of April 25, 2021 and PR #998, -- this query PARSES without error, but the word ARRAY is parsing as a column name, and -- the angle brackets < and > are being incorrectly parsed as comparison operators. -- This is being tracked in a separate issue, 999, not #989, since it's less severe -- (incorrect parse vs parse failure). 
SELECT [], [false], ARRAY<BOOLEAN>[false], ['a'] AS strcol1, ARRAY<string>['b'] AS strcol2, [1.0] AS numcol1, ARRAY<NUMERIC>[1.4] AS numcol2, [STRUCT("Rudisha" AS name, [23.4, 26.3, 26.4, 26.1] AS splits)] AS struct1, col1.obj1[safe_offset(1)].a AS struct_safe_offset sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml000066400000000000000000000120411503426445100311530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1ecaf1f891f1aa2d4a8fc222034bc116a601d0b70b09290aea7d1dd8f8be30e3 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' boolean_literal: 'false' end_square_bracket: ']' - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: BOOLEAN end_angle_bracket: '>' array_literal: start_square_bracket: '[' boolean_literal: 'false' end_square_bracket: ']' - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' quoted_literal: "'a'" end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: strcol1 - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' array_literal: start_square_bracket: '[' quoted_literal: "'b'" end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: strcol2 - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' numeric_literal: '1.0' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: numcol1 - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: NUMERIC end_angle_bracket: '>' array_literal: start_square_bracket: '[' numeric_literal: '1.4' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: numcol2 - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: '"Rudisha"' - alias_expression: alias_operator: keyword: AS naked_identifier: name - comma: ',' - array_literal: - start_square_bracket: '[' - numeric_literal: '23.4' - comma: ',' - numeric_literal: '26.3' - comma: ',' - numeric_literal: '26.4' - comma: ',' - numeric_literal: '26.1' - end_square_bracket: ']' - alias_expression: alias_operator: keyword: AS naked_identifier: splits - end_bracket: ) end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: struct1 - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: col1 - dot: . - naked_identifier: obj1 semi_structured_expression: array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: safe_offset function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' dot: .
naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: struct_safe_offset sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_ml_predict_with_select.sql000066400000000000000000000003071503426445100304110ustar00rootroot00000000000000SELECT * FROM ML.PREDICT( MODEL `project.dataset.model`, ( SELECT user_id FROM `project.dataset.stats` ) ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_ml_predict_with_select.yml000066400000000000000000000031411503426445100304120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e13d492f83f781ed68921a0c372915028ba8a71cc32b5564714b9e1b9f03ad00 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: ml_table_expression: keyword: ML dot: . naked_identifier: PREDICT bracketed: start_bracket: ( keyword: MODEL object_reference: quoted_identifier: '`project.dataset.model`' comma: ',' bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.stats`' end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_ml_weights.sql000066400000000000000000000001161503426445100260350ustar00rootroot00000000000000SELECT * FROM ML.WEIGHTS(MODEL `project.dataset.model`) ORDER BY 2, 1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_ml_weights.yml000066400000000000000000000021361503426445100260430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 745e8980f1829e521701aed0b6d4ef96d4546f7d6a1d4d5b4b4b1dd2ccc54075 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: ml_table_expression: keyword: ML dot: . naked_identifier: WEIGHTS bracketed: start_bracket: ( keyword: MODEL object_reference: quoted_identifier: '`project.dataset.model`' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '2' - comma: ',' - numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_multi_except.sql000066400000000000000000000001431503426445100263750ustar00rootroot00000000000000select d.*, r.* except(date_key) from my_table as d inner join my_other_table as r using(date_key) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_multi_except.yml000066400000000000000000000035221503426445100264030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ae1c1a43af1f653738b7938e70f261d774e8738d0d682a9f7a86442a1c12c8d3 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: r dot: . star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: date_key end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: alias_operator: keyword: as naked_identifier: d join_clause: - keyword: inner - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: my_other_table alias_expression: alias_operator: keyword: as naked_identifier: r - keyword: using - bracketed: start_bracket: ( naked_identifier: date_key end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_natural_join.sql000066400000000000000000000003551503426445100263650ustar00rootroot00000000000000SELECT * FROM table1 natural -- this should parse as an alias as BigQuery does not have NATURAL joins JOIN table2; SELECT * FROM table1 natural -- this should parse as an alias as BigQuery does not have NATURAL joins INNER JOIN table2; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_natural_join.yml000066400000000000000000000034151503426445100263670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1258f6496f9e52162346f19a12037e8f8bb2365aaae290ed0359ae0b88e27904 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_pivot.sql000066400000000000000000000021241503426445100250350ustar00rootroot00000000000000SELECT * FROM (SELECT * FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3', 'Q4')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3', 'Q4')); SELECT * FROM (SELECT * FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales), COUNT(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR REPLACE(LOWER(`media_type`), " ", "_") IN ( "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv" )); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR `media_type` IN ( "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv" )); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR '2' || '1' IN ( "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv" )); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_pivot.yml000066400000000000000000000420341503426445100250430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 22ec3d6c8f24a54034e764e8c46527ce9eb0afdeb8c59e71e3c666cb72197436 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - comma: ',' - quoted_literal: "'Q4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - comma: ',' - quoted_literal: "'Q4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: function: function_name: function_name_identifier: REPLACE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`media_type`' end_bracket: ) - comma: ',' - expression: quoted_literal: '" "' - comma: ',' - expression: quoted_literal: '"_"' - end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' 
- comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: quoted_identifier: '`media_type`' - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' - comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: expression: - quoted_literal: "'2'" - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'1'" - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' - comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_quoting.sql000066400000000000000000000002201503426445100253550ustar00rootroot00000000000000SELECT user_id, "some string" as list_id FROM `database.schema.benchmark_user_map` WHERE list_id IS NULL OR user_id IS NULL sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_quoting.yml000066400000000000000000000025061503426445100253700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c105a9705de44ea32ab41e529a62292f619a50a591bc2878bb87dccebc55bbba file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: user_id - comma: ',' - select_clause_element: quoted_literal: '"some string"' alias_expression: alias_operator: keyword: as naked_identifier: list_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`database.schema.benchmark_user_map`' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: list_id - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: user_id - keyword: IS - null_literal: 'NULL' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_replace.sql000066400000000000000000000003531503426445100253110ustar00rootroot00000000000000SELECT * REPLACE (CAST(1 AS BOOLEAN) AS foo) FROM (SELECT 1 AS foo); -- Single replace select * replace ('thing' as foo) from some_table; -- Multi replace select * REPLACE (quantity/2 AS quantity, 'thing' as foo) from some_table sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_replace.yml000066400000000000000000000066741503426445100253270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 620c4cac229f619ebbd0e3eef5ec8bdaeb3dedc8ccd205c602da546b67df422b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: REPLACE bracketed: start_bracket: ( function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: AS data_type: data_type_identifier: BOOLEAN end_bracket: ) keyword: AS naked_identifier: foo end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: foo end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: start_bracket: ( quoted_literal: "'thing'" keyword: as naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: REPLACE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: quantity binary_operator: / numeric_literal: '2' - keyword: AS - naked_identifier: quantity - comma: ',' - quoted_literal: "'thing'" - keyword: as - naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table 
sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_rows_between.sql000066400000000000000000000004611503426445100264010ustar00rootroot00000000000000SELECT is_sensitive, breach_date, total_number_of_affected_accounts, SUM(total_number_of_affected_accounts) OVER ( PARTITION BY is_sensitive ORDER BY is_sensitive, breach_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS cumulative_number_of_affected_accounts FROM table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_rows_between.yml000066400000000000000000000050521503426445100264040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5e853bbfc91393b9ded94671a1495c7eaf9cdc7096d5890719f11de0d06986a file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: is_sensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: breach_date - comma: ',' - select_clause_element: column_reference: naked_identifier: total_number_of_affected_accounts - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_number_of_affected_accounts end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: is_sensitive orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: is_sensitive - comma: ',' - column_reference: naked_identifier: breach_date frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: cumulative_number_of_affected_accounts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_safe_function.sql000066400000000000000000000002301503426445100265130ustar00rootroot00000000000000SELECT TRUE AS col1, SAFE.SUBSTR('foo', 0, -2) AS col2, SAFE.DATEADD(DAY, -2, CURRENT_DATE), SAFE.MY_FUNCTION(column1) FROM table1; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_safe_function.yml000066400000000000000000000052071503426445100265260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fc26094109f498162ebd6326dce20aca48378060b1ad46e1c9d0a719ed828f76 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'TRUE' alias_expression: alias_operator: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . 
function_name_identifier: SUBSTR function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'foo'" - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: DAY - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - comma: ',' - expression: bare_function: CURRENT_DATE - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . function_name_identifier: MY_FUNCTION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: column1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_set_operators.sql000066400000000000000000000013761503426445100265750ustar00rootroot00000000000000-- EXCEPT DISTINCT SELECT c FROM number1 EXCEPT DISTINCT SELECT c FROM number2; -- INTERSECT DISTINCT (SELECT c FROM number1) INTERSECT DISTINCT (SELECT c FROM number2); -- UNION DISTINCT (SELECT c FROM number1) UNION DISTINCT (SELECT c FROM number2); -- UNION ALL SELECT c FROM number1 UNION ALL (SELECT c FROM number2); -- nesting of UNIONs (SELECT c FROM number1 UNION ALL SELECT c FROM number2) UNION ALL (SELECT c FROM number3 UNION ALL SELECT c FROM number4); -- UNION ALL BY NAME (SELECT c FROM number1) UNION ALL BY NAME (SELECT c FROM number2); -- LEFT UNION ALL (SELECT c FROM number1) LEFT UNION ALL BY NAME (SELECT c, d FROM number2); -- UNION ALL BY NAME ON COLUMNS (SELECT c, d FROM number1) UNION ALL BY NAME ON (c) (SELECT c, e FROM number2); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_set_operators.yml000066400000000000000000000245501503426445100265760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 10ca0b57ad89e2e8aeb49b6de6af705d4383f2c9fa86dc3383b0f1e3ac63b9a1 file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: - keyword: EXCEPT - keyword: DISTINCT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: UNION - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: number3 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number4 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: LEFT - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: c end_bracket: ) - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_struct.sql000066400000000000000000000022261503426445100252230ustar00rootroot00000000000000-- Example of "select as struct *" syntax. select some_table.foo_id, array( select as struct * from another_table where another_table.foo_id = some_table.foo_id ) from another_table; -- Example of "select as struct <>" syntax select as struct '1' as bb, 2 as aa; select distinct as struct '1' as bb, 2 as aa; -- Example of explicitly building a struct in a select clause. 
select struct( bar.bar_id as id, bar.bar_name as bar ) as bar from foo left join bar on bar.foo_id = foo.foo_id; -- Array of structs SELECT col_1, col_2 FROM UNNEST(ARRAY<STRUCT<col_1 STRING, col_2 STRING>>[ ('hello','world'), ('hi', 'there') ]); SELECT STRUCT<int64>(5), STRUCT<date>("2011-05-05"), STRUCT<x int64, y string>(1, t.str_col), STRUCT<int64>(int_col); -- This is to test typeless struct fields are not mistakenly considered as -- data types, see https://github.com/sqlfluff/sqlfluff/issues/3277 SELECT STRUCT( some_field, some_other_field ) AS col FROM table; -- Empty STRUCT within TO_JSON SELECT TO_JSON(STRUCT()) AS col FROM table; SELECT (1*1, 2) IN (STRUCT(1 AS a, 2 AS b)); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_struct.yml000066400000000000000000000324731503426445100252320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b43493b887c46ef510709ffe1787e9110d6e44282e5ff67328802daec77a44cd file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: some_table - dot: . - naked_identifier: foo_id - comma: ',' - select_clause_element: expression: array_expression: function_name: function_name_identifier: array function_contents: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_modifier: - keyword: as - keyword: struct select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table where_clause: keyword: where expression: - column_reference: - naked_identifier: another_table - dot: . - naked_identifier: foo_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: some_table - dot: . - naked_identifier: foo_id end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: as - keyword: struct - select_clause_element: quoted_literal: "'1'" alias_expression: alias_operator: keyword: as naked_identifier: bb - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as naked_identifier: aa - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: distinct - keyword: as - keyword: struct - select_clause_element: quoted_literal: "'1'" alias_expression: alias_operator: keyword: as naked_identifier: bb - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as naked_identifier: aa - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - column_reference: - naked_identifier: bar - dot: . - naked_identifier: bar_id - alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - column_reference: - naked_identifier: bar - dot: .
- naked_identifier: bar_name - alias_expression: alias_operator: keyword: as naked_identifier: bar - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: bar from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo join_clause: - keyword: left - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: bar - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: bar - dot: . - naked_identifier: foo_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: foo - dot: . - naked_identifier: foo_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: col_1 - data_type: data_type_identifier: STRING - comma: ',' - parameter: col_2 - data_type: data_type_identifier: STRING - end_angle_bracket: '>' end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - expression: bracketed: - start_bracket: ( - quoted_literal: "'hello'" - comma: ',' - quoted_literal: "'world'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'hi'" - comma: ',' - quoted_literal: "'there'" - end_bracket: ) - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: int64 end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: date end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( quoted_literal: '"2011-05-05"' end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: int64 - comma: ',' - parameter: y - data_type: data_type_identifier: string - end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' column_reference: - naked_identifier: t - dot: . 
- naked_identifier: str_col end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: int64 end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( column_reference: naked_identifier: int_col end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: naked_identifier: some_field - comma: ',' - column_reference: naked_identifier: some_other_field - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TO_JSON function_contents: bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: '1' comma: ',' numeric_literal: '2' end_bracket: ) - keyword: IN - bracketed: start_bracket: ( typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - numeric_literal: '2' - alias_expression: alias_operator: keyword: AS naked_identifier: b - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.sql000066400000000000000000000001371503426445100324120ustar00rootroot00000000000000SELECT STRUCT(STRUCT(1 AS b) AS a) AS foo; SELECT ARRAY_AGG(STRUCT(a AS a, b AS b)) FROM foo; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml000066400000000000000000000056131503426445100324200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e84872ff4aa1a611500bde39d08b67d1432653458091f90ae9238e29cbb0c02b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: b end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - alias_expression: alias_operator: keyword: AS naked_identifier: b - end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_everything.sql000066400000000000000000000002161503426445100301330ustar00rootroot00000000000000SELECT `another-gcp-project.functions.timestamp_parsing`(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_everything.yml000066400000000000000000000026231503426445100301410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c506eea8845866f521c54d81cd7f970cdadd74556cfce2d185c089f2ab1c6b2c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: '`another-gcp-project.functions.timestamp_parsing`' function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: orderdate end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: orderdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: alias_operator: keyword: AS naked_identifier: log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_nothing.sql000066400000000000000000000002031503426445100274110ustar00rootroot00000000000000SELECT gcpproject.functions.timestamp_parsing(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_nothing.yml000066400000000000000000000027601503426445100274250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36308a26b455870bd23f73219902ee377b03b3571af6d1ff79bbd1a8562e8dac file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: - naked_identifier: gcpproject - dot: . - naked_identifier: functions - dot: . - function_name_identifier: timestamp_parsing function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: orderdate end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: orderdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: alias_operator: keyword: AS naked_identifier: log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_project_and_datasetfunctionname.sql000066400000000000000000000002231503426445100343510ustar00rootroot00000000000000SELECT `another-gcp-project`.`functions.timestamp_parsing` (log_tbl.first_move) AS first_move FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_project_and_datasetfunctionname.yml000066400000000000000000000027171503426445100343650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ea9bc4e13a88e52cff4adcf4041844dfbbcb566ef6a2140886a628411d8bc6fc file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: quoted_identifier: '`another-gcp-project`' dot: . function_name_identifier: '`functions.timestamp_parsing`' function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: first_move end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: first_move from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: alias_operator: keyword: AS naked_identifier: log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_project_name.sql000066400000000000000000000002201503426445100304100ustar00rootroot00000000000000SELECT `another-gcp-project`.functions.timestamp_parsing(log_tbl.first_move) AS first_move FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_udf_quote_project_name.yml000066400000000000000000000030001503426445100304110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 73d429e66efdc7a786350ad6aa95eff0e3353d5e58871331543e499ca081ada1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: - quoted_identifier: '`another-gcp-project`' - dot: . - naked_identifier: functions - dot: . 
- function_name_identifier: timestamp_parsing function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: first_move end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: first_move from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: alias_operator: keyword: AS naked_identifier: log_tbl sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_unpivot.sql000066400000000000000000000016371503426445100254100ustar00rootroot00000000000000WITH Produce AS ( SELECT 'Kale' as product, 51 as Q1, 23 as Q2, 45 as Q3, 3 as Q4 UNION ALL SELECT 'Apple', 77, 0, 25, 2) SELECT * FROM Produce; SELECT * FROM Produce UNPIVOT(sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT(sales FOR quarter IN (Q1 AS 1, Q2 AS 2, Q3 AS 3, Q4 AS 4)); SELECT * FROM Produce UNPIVOT INCLUDE NULLS (sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT EXCLUDE NULLS (sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT( (first_half_sales, second_half_sales) FOR semesters IN ((Q1, Q2) AS 'semester_1', (Q3, Q4) AS 'semester_2')); SELECT a AS 'barry' FROM model UNPIVOT( (A, B) FOR year IN ((C, D) AS "year_2011", (E, F) AS "year_2012")); SELECT * FROM foo UNPIVOT( (bar2, bar3, bar4) FOR year IN ((foo1, foo2, foo3) AS 1, (foo4, foo5, foo6) AS 2, (foo7, foo8, foo9) AS 3)); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_unpivot.yml000066400000000000000000000325111503426445100254050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 559e504475c8e449b236a27b31b9616c9f4e2624803ebeffa788c54db79d78b6 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Produce keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Kale'" alias_expression: alias_operator: keyword: as naked_identifier: product - comma: ',' - select_clause_element: numeric_literal: '51' alias_expression: alias_operator: keyword: as naked_identifier: Q1 - comma: ',' - select_clause_element: numeric_literal: '23' alias_expression: alias_operator: keyword: as naked_identifier: Q2 - comma: ',' - select_clause_element: numeric_literal: '45' alias_expression: alias_operator: keyword: as naked_identifier: Q3 - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: as naked_identifier: Q4 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Apple'" - comma: ',' - select_clause_element: numeric_literal: '77' - comma: ',' - select_clause_element: numeric_literal: '0' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - alias_expression: alias_operator: keyword: AS numeric_literal: '1' - comma: ',' - naked_identifier: Q2 - alias_expression: alias_operator: keyword: AS numeric_literal: '2' - comma: ',' - naked_identifier: Q3 - alias_expression: alias_operator: keyword: AS numeric_literal: '3' - comma: ',' - naked_identifier: Q4 - alias_expression: alias_operator: keyword: AS numeric_literal: '4' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - 
naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: first_half_sales - comma: ',' - naked_identifier: second_half_sales - end_bracket: ) - keyword: FOR - naked_identifier: semesters - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - end_bracket: ) - alias_expression: alias_operator: keyword: AS quoted_literal: "'semester_1'" - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - alias_expression: alias_operator: keyword: AS quoted_literal: "'semester_2'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS quoted_identifier: "'barry'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: model from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: A - comma: ',' - naked_identifier: B - end_bracket: ) - keyword: FOR - naked_identifier: year - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: C - comma: ',' - naked_identifier: D - end_bracket: ) - alias_expression: alias_operator: keyword: AS quoted_literal: '"year_2011"' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: E - comma: ',' - naked_identifier: F - end_bracket: ) - alias_expression: alias_operator: keyword: AS quoted_literal: '"year_2012"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: bar2 - comma: ',' - naked_identifier: bar3 - comma: ',' - naked_identifier: bar4 - end_bracket: ) - keyword: FOR - naked_identifier: 
year - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: foo1 - comma: ',' - naked_identifier: foo2 - comma: ',' - naked_identifier: foo3 - end_bracket: ) - alias_expression: alias_operator: keyword: AS numeric_literal: '1' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: foo4 - comma: ',' - naked_identifier: foo5 - comma: ',' - naked_identifier: foo6 - end_bracket: ) - alias_expression: alias_operator: keyword: AS numeric_literal: '2' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: foo7 - comma: ',' - naked_identifier: foo8 - comma: ',' - naked_identifier: foo9 - end_bracket: ) - alias_expression: alias_operator: keyword: AS numeric_literal: '3' - end_bracket: ) - end_bracket: ) - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_array_element_less_than.sql
SELECT *
FROM `project.dataset.table_1`
WHERE effect_size_list[ORDINAL(1)] < effect_size_list[ORDINAL(1+1)]

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_array_element_less_than.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c2ca5862162f30fc6e8999a5b968455b12c79981fa0c434a1d587a730e9a64ee
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                quoted_identifier: '`project.dataset.table_1`'
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: effect_size_list
        - array_accessor:
            start_square_bracket: '['
            expression:
              function:
                function_name:
                  function_name_identifier: ORDINAL
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      numeric_literal: '1'
                    end_bracket: )
            end_square_bracket: ']'
        - comparison_operator:
            raw_comparison_operator: <
        - column_reference:
            naked_identifier: effect_size_list
        - array_accessor:
            start_square_bracket: '['
            expression:
              function:
                function_name:
                  function_name_identifier: ORDINAL
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                    - numeric_literal: '1'
                    - binary_operator: +
                    - numeric_literal: '1'
                    end_bracket: )
            end_square_bracket: ']'

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_greater_than.sql
SELECT *
FROM `project.dataset.actions`
WHERE c > 1

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_greater_than.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 94b86c29080873051a5a3cb46f90cd71d335ec8f90dbf54e16a91e25483ab46e
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                quoted_identifier: '`project.dataset.actions`'
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: c
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '1'

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_less_than.sql
SELECT * FROM table_a
WHERE
    -- Tests that '<' is parsed correctly. (Since some dialects use angle
    -- brackets, e.g. ARRAY, it's possible for a "<" in isolation to
    -- be parsed as an open angle bracket without a matching close bracket.
    a < b

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_where_less_than.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: eba4d1754fab4c17ea2b0024c83b5adef079d6f15a550c565fe44b0c58d864f5
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_a
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: a
        - comparison_operator:
            raw_comparison_operator: <
        - column_reference:
            naked_identifier: b

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_cast.sql
SELECT CAST(1 AS FLOAT64);
SELECT SAFE_CAST(1.0 AS INT64);
SELECT CAST(ARRAY["element1"] AS ARRAY<STRING>);
SELECT CAST(b'\x48\x65\x6c\x6c\x6f' AS STRING FORMAT 'ASCII') AS bytes_to_string;
SELECT CAST(
    TIMESTAMP '2008-12-25 00:00:00+00:00'
    AS STRING FORMAT 'YYYY-MM-DD HH24:MI:SS TZH:TZM') AS date_time_to_string;

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_cast.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
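# A hedged note, not emitted by the generator: the 64-hex-character "_hash"
# value below is shaped like a SHA-256 digest, but exactly what the test
# harness feeds into the hash is internal to it, so the snippet here only
# gestures at the idea using Python's standard hashlib:
#
#     import hashlib
#     digest = hashlib.sha256(open(path, "rb").read()).hexdigest()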
_hash: f7c679f60d66377d2aef9572f7bdcb3ba79ec99ebb57229a2b5a8130edb1e012 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: AS data_type: data_type_identifier: FLOAT64 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SAFE_CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1.0' keyword: AS data_type: data_type_identifier: INT64 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' quoted_literal: '"element1"' end_square_bracket: ']' keyword: AS data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: STRING end_angle_bracket: '>' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "b'\\x48\\x65\\x6c\\x6c\\x6f'" - keyword: AS - data_type: data_type_identifier: STRING - expression: data_type: data_type_identifier: FORMAT quoted_literal: "'ASCII'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: bytes_to_string - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: - start_bracket: ( - expression: keyword: TIMESTAMP date_constructor_literal: "'2008-12-25 00:00:00+00:00'" - keyword: AS - data_type: data_type_identifier: STRING - expression: data_type: data_type_identifier: FORMAT quoted_literal: "'YYYY-MM-DD HH24:MI:SS TZH:TZM'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_time_to_string - statement_terminator: ; select_with_date_literal_coercion_and_two_part_string_interval.sql000066400000000000000000000001611503426445100372140ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/bigquerySELECT GENERATE_DATE_ARRAY( DATE '2010-01-01', DATE '2010-01-31', INTERVAL '7' DAY ) AS my_array select_with_date_literal_coercion_and_two_part_string_interval.yml000066400000000000000000000025211503426445100372200ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/bigquery# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2a29f36f6916ab4b93642827fe4175ac33bd2a11ca860b9b84d3c8e2a2e7a1d4 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: GENERATE_DATE_ARRAY function_contents: bracketed: - start_bracket: ( - expression: keyword: DATE date_constructor_literal: "'2010-01-01'" - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2010-01-31'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'7'" date_part: DAY - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_array sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset.sql000066400000000000000000000007001503426445100262130ustar00rootroot00000000000000-- This has a table expression and also an offset value. -- It also includes a nested SELECT SELECT SUM(CASE WHEN value != previous_value THEN 1.0 ELSE 0.0 END) FROM ( SELECT value, CASE WHEN ix != 0 THEN LAG(value) OVER (ORDER BY ix ASC) ELSE value END AS previous_value FROM UNNEST(sequence_validation_and_business_rules.sequence_validation_and_business_rules) AS value WITH OFFSET AS ix ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset.yml000066400000000000000000000127311503426445100262240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 084cc8b009816498bd4a12c50e374adfc25c48658b8268b14fe577b0bcfbe939 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: value - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: naked_identifier: previous_value - keyword: THEN - expression: numeric_literal: '1.0' - else_clause: keyword: ELSE expression: numeric_literal: '0.0' - keyword: END end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: value - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ix comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - expression: function: function_name: function_name_identifier: LAG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ix - keyword: ASC end_bracket: ) - else_clause: keyword: ELSE expression: column_reference: naked_identifier: value - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: previous_value from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: sequence_validation_and_business_rules - dot: . - naked_identifier: sequence_validation_and_business_rules end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: value - keyword: WITH - keyword: OFFSET - alias_expression: alias_operator: keyword: AS naked_identifier: ix end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset_2.sql000066400000000000000000000000751503426445100264410ustar00rootroot00000000000000SELECT ARRAY(SELECT a FROM foo WITH OFFSET WHERE OFFSET > 1) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset_2.yml000066400000000000000000000032771503426445100264520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5eb2620f8f3174b4320d0d35ecd12efaf0aac98e92e6f400244dc48bf1c37f04 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: array_expression: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: foo - keyword: WITH - keyword: OFFSET where_clause: keyword: WHERE expression: column_reference: naked_identifier: OFFSET comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset_3.sql000066400000000000000000000000711503426445100264360ustar00rootroot00000000000000SELECT i, offset FROM UNNEST([1, 2, 3]) AS i WITH OFFSET sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_offset_3.yml000066400000000000000000000031011503426445100264350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2bbac882b177fcb1cb0c3b2e52acb2090a97f93dc9a64821e3b96001cfdd1127
file:
  statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: i
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: offset
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
          - table_expression:
              function:
                function_name:
                  function_name_identifier: UNNEST
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - comma: ','
                      - numeric_literal: '3'
                      - end_square_bracket: ']'
                    end_bracket: )
          - alias_expression:
              alias_operator:
                keyword: AS
              naked_identifier: i
          - keyword: WITH
          - keyword: OFFSET

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_qualify.sql
SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3;

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
ORDER BY item;

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
LIMIT 5;

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
ORDER BY item
LIMIT 5;

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
LIMIT 5;

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
WINDOW item_window AS (
    PARTITION BY category
    ORDER BY purchases
    ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING);

SELECT
    item,
    RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank
FROM Produce
WHERE Produce.category = 'vegetable'
QUALIFY rank <= 3
ORDER BY item
WINDOW item_window AS (
    PARTITION BY category
    ORDER BY purchases
    ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING);

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_qualify.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
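# A hedged pointer, not emitted by the generator: the tree recorded below is
# broadly what the documented CLI prints for the paired fixture, e.g.
#
#     sqlfluff parse test/fixtures/dialects/bigquery/select_with_qualify.sql --dialect bigquery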
_hash: f602ab96165c4b0c779f9c957ab8cec683a696b01e5552829e52e79d59c507ec file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_union_and_qualify.sql000066400000000000000000000004741503426445100304410ustar00rootroot00000000000000SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 UNION ALL SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_union_and_qualify.yml000066400000000000000000000110431503426445100304350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fc504ed5338e04504d85d297cc78f92d545af232dfdf6675c6a277c6f48f14a9 file: statement: set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_window.sql000066400000000000000000000015671503426445100262500ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (item_window) AS most_popular FROM Produce WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING); SELECT item, purchases, category, LAST_VALUE(item) OVER (d) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c); SELECT item, purchases, category, LAST_VALUE(item) OVER (c ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS b; select * , max(x) over (window_z) as max_x_over_z from raw_data_1 window window_z as (partition by z) union all select * , max(x) over (window_z) as max_x_over_z from raw_data_2 window window_z as (partition by z); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/select_with_window.yml000066400000000000000000000267251503426445100262550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: de1a468926f82e004aaea210ba6a7da8ee74ec746e0452308faa14aa501893c3 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: item_window end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: d end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: b frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: d keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - 
select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: c frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: - naked_identifier: c - keyword: AS - naked_identifier: b - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: naked_identifier: window_z end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: max_x_over_z from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_data_1 named_window: keyword: window named_window_expression: naked_identifier: window_z keyword: as bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: z end_bracket: ) - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: naked_identifier: window_z end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: max_x_over_z from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_data_2 named_window: keyword: window named_window_expression: naked_identifier: window_z keyword: as bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: z end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/set_variable_multiple.sql000066400000000000000000000001171503426445100267100ustar00rootroot00000000000000set (var2, var3, var5) = ("y", (select 
"x"), DATE_TRUNC("2000-01-01", month)); sqlfluff-3.4.2/test/fixtures/dialects/bigquery/set_variable_multiple.yml000066400000000000000000000026221503426445100267150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1bc729d9627f26053435a0ea12f2d9c7a19f5c228f84ba31cc07c1fc735b05d3 file: statement: set_segment: - keyword: set - bracketed: - start_bracket: ( - naked_identifier: var2 - comma: ',' - naked_identifier: var3 - comma: ',' - naked_identifier: var5 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: '"y"' - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"x"' end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: DATE_TRUNC function_contents: bracketed: start_bracket: ( expression: quoted_literal: '"2000-01-01"' comma: ',' date_part: month end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/set_variable_single.sql000066400000000000000000000001051503426445100263330ustar00rootroot00000000000000set var1 = 5; set var1 = ['one', 'two']; set @@query_label = 'text'; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/set_variable_single.yml000066400000000000000000000022201503426445100263350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6931e0cba974f75429a2ed7b21408313479f2214246c312dd94faaad59c2bfb7 file: - statement: set_segment: keyword: set naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: set_segment: keyword: set naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; - statement: set_segment: keyword: set system_variable: double_at_sign_literal: '@@query_label' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'text'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/bigquery/string_literals.sql000066400000000000000000000011341503426445100255420ustar00rootroot00000000000000-- Examples from https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals SELECT "", '', "abc", "it's", 'it\'s', 'Title: "Boy"', "test \"escaped\"", 'test \'escaped\'', "test \\\"escaped", "test \"escaped\\\"", r"", r'', r"abc+", R"abc+", r'abc+', R'abc+', r'f\(abc, (.*),def\)', r"f\(abc, (.*),def\)", b'abc', B"abc", rb"abc*", rB"abc*", Rb'abc*', br'abc+', RB"abc+", r''' as foo '' bar ''', B""" triple ''' quoted '' "" string are """ as hard FROM dummy sqlfluff-3.4.2/test/fixtures/dialects/bigquery/string_literals.yml000066400000000000000000000062141503426445100255500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f3461902286ba9716107d127147f861fa43ec5d3ed535cfdc5eda2652ff6429c
file:
  statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          quoted_literal: '""'
      - comma: ','
      - select_clause_element:
          quoted_literal: "''"
      - comma: ','
      - select_clause_element:
          quoted_literal: '"abc"'
      - comma: ','
      - select_clause_element:
          quoted_literal: "\"it's\""
      - comma: ','
      - select_clause_element:
          quoted_literal: "'it\\'s'"
      - comma: ','
      - select_clause_element:
          quoted_literal: "'Title: \"Boy\"'"
      - comma: ','
      - select_clause_element:
          quoted_literal: '"test \"escaped\""'
      - comma: ','
      - select_clause_element:
          quoted_literal: "'test \\'escaped\\''"
      - comma: ','
      - select_clause_element:
          quoted_literal: '"test \\\"escaped"'
      - comma: ','
      - select_clause_element:
          quoted_literal: '"test \"escaped\\\""'
      - comma: ','
      - select_clause_element:
          quoted_literal: r""
      - comma: ','
      - select_clause_element:
          quoted_literal: "r''"
      - comma: ','
      - select_clause_element:
          quoted_literal: r"abc+"
      - comma: ','
      - select_clause_element:
          quoted_literal: R"abc+"
      - comma: ','
      - select_clause_element:
          quoted_literal: "r'abc+'"
      - comma: ','
      - select_clause_element:
          quoted_literal: "R'abc+'"
      - comma: ','
      - select_clause_element:
          quoted_literal: "r'f\\(abc, (.*),def\\)'"
      - comma: ','
      - select_clause_element:
          quoted_literal: r"f\(abc, (.*),def\)"
      - comma: ','
      - select_clause_element:
          quoted_literal: "b'abc'"
      - comma: ','
      - select_clause_element:
          quoted_literal: B"abc"
      - comma: ','
      - select_clause_element:
          quoted_literal: rb"abc*"
      - comma: ','
      - select_clause_element:
          quoted_literal: rB"abc*"
      - comma: ','
      - select_clause_element:
          quoted_literal: "Rb'abc*'"
      - comma: ','
      - select_clause_element:
          quoted_literal: "br'abc+'"
      - comma: ','
      - select_clause_element:
          quoted_literal: RB"abc+"
      - comma: ','
      - select_clause_element:
          quoted_literal: "r''' as foo '' bar '''"
      - comma: ','
      - select_clause_element:
          quoted_literal: "B\"\"\" triple ''' quoted '' \"\" string are \"\"\""
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: hard
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dummy

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/tablesample.sql
SELECT * FROM dataset.my_table TABLESAMPLE SYSTEM (10 PERCENT);

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/tablesample.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4852fe883d0832f0f3878fcdbf9b303b3fdee4e1a60307c75f3b4c58ab4da621
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: dataset
              - dot: .
              - naked_identifier: my_table
            sample_expression:
            - keyword: TABLESAMPLE
            - keyword: SYSTEM
            - bracketed:
                start_bracket: (
                numeric_literal: '10'
                keyword: PERCENT
                end_bracket: )
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/typeless_array.sql
SELECT ARRAY(SELECT c FROM number1 UNION ALL SELECT c FROM number2);

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/typeless_array.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b8ef4ac012c633809377a4f716c9b75ed3678fdc82505ccec3b237d9dd7c0f7a
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
            array_expression:
              function_name:
                function_name_identifier: ARRAY
              function_contents:
                bracketed:
                  start_bracket: (
                  set_expression:
                  - select_statement:
                      select_clause:
                        keyword: SELECT
                        select_clause_element:
                          column_reference:
                            naked_identifier: c
                      from_clause:
                        keyword: FROM
                        from_expression:
                          from_expression_element:
                            table_expression:
                              table_reference:
                                naked_identifier: number1
                  - set_operator:
                    - keyword: UNION
                    - keyword: ALL
                  - select_statement:
                      select_clause:
                        keyword: SELECT
                        select_clause_element:
                          column_reference:
                            naked_identifier: c
                      from_clause:
                        keyword: FROM
                        from_expression:
                          from_expression_element:
                            table_expression:
                              table_reference:
                                naked_identifier: number2
                  end_bracket: )
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/typeless_struct.sql
SELECT
    IF(
        TRUE,
        STRUCT('hello' AS greeting, 'world' AS subject),
        STRUCT('hi' AS greeting, 'there' AS subject)
    ) AS salute
FROM (SELECT 1);

SELECT
    CASE WHEN a.xxx != b.xxx THEN STRUCT(a.xxx AS M, b.xxx AS N) END AS xxx
FROM A
JOIN B ON B.id = A.id;

sqlfluff-3.4.2/test/fixtures/dialects/bigquery/typeless_struct.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b81132d68960d783d1d5841888c9fb8cdec62df7ec4ec77dc67a9d1cf718c2cc file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: IF function_contents: bracketed: - start_bracket: ( - expression: boolean_literal: 'TRUE' - comma: ',' - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'hello'" - alias_expression: alias_operator: keyword: AS naked_identifier: greeting - comma: ',' - quoted_literal: "'world'" - alias_expression: alias_operator: keyword: AS naked_identifier: subject - end_bracket: ) - comma: ',' - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'hi'" - alias_expression: alias_operator: keyword: AS naked_identifier: greeting - comma: ',' - quoted_literal: "'there'" - alias_expression: alias_operator: keyword: AS naked_identifier: subject - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salute from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: xxx - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: xxx - keyword: THEN - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: - naked_identifier: a - dot: . - naked_identifier: xxx - alias_expression: alias_operator: keyword: AS naked_identifier: M - comma: ',' - column_reference: - naked_identifier: b - dot: . - naked_identifier: xxx - alias_expression: alias_operator: keyword: AS naked_identifier: N - end_bracket: ) - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: xxx from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: B join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: B - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: A - dot: . 
- naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/000077500000000000000000000000001503426445100221175ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/.sqlfluff000066400000000000000000000000401503426445100237340ustar00rootroot00000000000000[sqlfluff] dialect = clickhouse sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/alter_table.sql000066400000000000000000000116371503426445100251260ustar00rootroot00000000000000-- DROP COLUMN examples ALTER TABLE x DROP COLUMN y; ALTER TABLE x DROP COLUMN IF EXISTS y; ALTER TABLE x ON CLUSTER '{cluster}' DROP COLUMN y; ALTER TABLE x ON CLUSTER '{cluster}' DROP COLUMN IF EXISTS y; ALTER TABLE visits DROP COLUMN browser; -- ADD COLUMN examples ALTER TABLE x ADD COLUMN y Int32; ALTER TABLE x ADD COLUMN y Int32 DEFAULT 1; ALTER TABLE x ADD COLUMN IF NOT EXISTS y Int32; ALTER TABLE x ADD COLUMN IF NOT EXISTS y Int32 DEFAULT 1; ALTER TABLE alter_test ADD COLUMN Added1 UInt32 FIRST; ALTER TABLE alter_test ADD COLUMN Added2 UInt32 AFTER NestedColumn; ALTER TABLE alter_test ADD COLUMN Added3 UInt32 AFTER ToDrop; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN y Int32; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN y Int32 DEFAULT 1; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN IF NOT EXISTS y Int32; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN IF NOT EXISTS y Int32 DEFAULT 1; -- ADD COLUMN with CODEC and AFTER/FIRST ALTER TABLE x ADD COLUMN z Int32 CODEC(ZSTD); ALTER TABLE x ADD COLUMN z Int32 AFTER y; ALTER TABLE x ADD COLUMN z Int32 FIRST; ALTER TABLE x ADD COLUMN z Int32 DEFAULT 0 CODEC(ZSTD) AFTER y; -- RENAME COLUMN examples ALTER TABLE x RENAME COLUMN old_name TO new_name; ALTER TABLE x RENAME COLUMN IF EXISTS old_name TO new_name; ALTER TABLE x ON CLUSTER '{cluster}' RENAME COLUMN old_name TO new_name; ALTER TABLE x ON CLUSTER '{cluster}' RENAME COLUMN IF EXISTS old_name TO new_name; ALTER TABLE visits RENAME COLUMN webBrowser TO browser; -- COMMENT COLUMN examples ALTER TABLE x COMMENT COLUMN col_name 'This is a comment'; ALTER TABLE x COMMENT COLUMN IF EXISTS col_name 'Column description'; ALTER TABLE x ON CLUSTER '{cluster}' COMMENT COLUMN col_name 'Cluster-wide comment'; ALTER TABLE x ON CLUSTER '{cluster}' COMMENT COLUMN IF EXISTS col_name 'Cluster-wide description'; ALTER TABLE visits COMMENT COLUMN browser 'This column shows the browser used for accessing the site.'; -- COMMENT TABLE examples ALTER TABLE x COMMENT 'This is a table comment'; ALTER TABLE x ON CLUSTER '{cluster}' COMMENT 'This is a table comment'; ALTER TABLE x MODIFY COMMENT ''; -- MODIFY COLUMN examples ALTER TABLE x MODIFY COLUMN col_name Int64; ALTER TABLE x MODIFY COLUMN IF EXISTS col_name Int64; ALTER TABLE x MODIFY COLUMN col_name Int64 DEFAULT 0; ALTER TABLE x MODIFY COLUMN col_name Int64 CODEC(ZSTD); ALTER TABLE x MODIFY COLUMN col_name Int64 AFTER other_col; ALTER TABLE x MODIFY COLUMN col_name Int64 FIRST; ALTER TABLE x ON CLUSTER '{cluster}' MODIFY COLUMN col_name Int64 DEFAULT 0 CODEC(ZSTD) AFTER other_col; ALTER TABLE visits MODIFY COLUMN browser Array(String); ALTER TABLE users MODIFY COLUMN c2 String FIRST; ALTER TABLE users MODIFY COLUMN c2 TYPE String AFTER c1; -- MATERIALIZE COLUMN examples ALTER TABLE x MATERIALIZE COLUMN col_name; ALTER TABLE x MATERIALIZE COLUMN col_name IN PARTITION partition_name; ALTER TABLE x MATERIALIZE COLUMN col_name IN PARTITION ID 'partition_id'; ALTER TABLE x ON CLUSTER '{cluster}' MATERIALIZE COLUMN col_name; ALTER TABLE x ON CLUSTER '{cluster}' 
MATERIALIZE COLUMN col_name IN PARTITION partition_name; ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x); ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(round(100/x)); ALTER TABLE tmp MATERIALIZE COLUMN s; -- ALIAS examples ALTER TABLE x ADD ALIAS alias_name FOR col_name; ALTER TABLE x ADD ALIAS IF NOT EXISTS alias_name FOR col_name; ALTER TABLE x ON CLUSTER '{cluster}' MODIFY COLUMN y ALIAS z/10; ALTER TABLE x ON CLUSTER '{cluster}' ADD ALIAS alias_name FOR col_name; ALTER TABLE x ON CLUSTER '{cluster}' MODIFY COLUMN y REMOVE ALIAS; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN IF NOT EXISTS y Float32 ALIAS z*100; -- TTL examples ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; ALTER TABLE table_with_ttl REMOVE TTL; ALTER TABLE table_with_ttl ON CLUSTER '{cluster}' REMOVE TTL; ALTER TABLE table_name ON CLUSTER '{cluster}' MODIFY TTL event_time + INTERVAL 3 MONTH; -- ALIAS examples ALTER TABLE x ADD COLUMN y Int32 ALIAS z + 10; ALTER TABLE x ON CLUSTER '{cluster}' ADD COLUMN IF NOT EXISTS y Float32 ALIAS z * 100; ALTER TABLE x MODIFY COLUMN y ALIAS z/10; ALTER TABLE x ON CLUSTER '{cluster}' MODIFY COLUMN y REMOVE ALIAS; -- Basic ALTER TABLE MODIFY QUERY examples ALTER TABLE mv MODIFY QUERY SELECT * FROM source_table; ALTER TABLE mv MODIFY QUERY SELECT id, name, value FROM source_table WHERE value > 0; -- With ON CLUSTER clause ALTER TABLE mv ON CLUSTER cluster1 MODIFY QUERY SELECT * FROM source_table; ALTER TABLE mv ON CLUSTER '{cluster}' MODIFY QUERY SELECT id, name, value FROM source_table WHERE value > 0; -- With complex SELECT queries ALTER TABLE mv MODIFY QUERY SELECT id, name, sum(value) AS total_value FROM source_table GROUP BY id, name HAVING total_value > 100 ORDER BY total_value DESC LIMIT 10; ALTER TABLE mv MODIFY QUERY SELECT t1.id, t1.name, t2.value FROM table1 AS t1 JOIN table2 AS t2 ON t1.id = t2.id WHERE t1.active = 1; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/alter_table.yml000066400000000000000000001001361503426445100251210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
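# For example, to refresh only this dialect's fixtures you might run
# (a sketch: whether the script accepts a dialect name to limit regeneration
# is an assumption here; check the script's options):
#   python test/generate_parse_fixture_yml.py clickhouse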
_hash: 218a737e4d9075599ddf01a2260e7aad078e4d6472e4276ad71297c4cd741e20 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: DROP - keyword: COLUMN - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: visits - keyword: DROP - keyword: COLUMN - naked_identifier: browser - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: y - data_type: data_type_identifier: Int32 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: y - data_type: data_type_identifier: Int32 - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Int32 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Int32 - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: alter_test - keyword: ADD - keyword: COLUMN - naked_identifier: Added1 - data_type: data_type_identifier: UInt32 - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: alter_test - keyword: ADD - keyword: COLUMN - naked_identifier: Added2 - data_type: data_type_identifier: UInt32 - keyword: AFTER - naked_identifier: NestedColumn - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: alter_test - keyword: ADD - keyword: COLUMN - naked_identifier: Added3 - data_type: data_type_identifier: UInt32 - keyword: AFTER - naked_identifier: ToDrop - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - naked_identifier: y - data_type: data_type_identifier: 
Int32 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - naked_identifier: y - data_type: data_type_identifier: Int32 - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Int32 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Int32 - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: z - data_type: data_type_identifier: Int32 - keyword: CODEC - bracketed: start_bracket: ( naked_identifier: ZSTD end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: z - data_type: data_type_identifier: Int32 - keyword: AFTER - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: z - data_type: data_type_identifier: Int32 - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: z - data_type: data_type_identifier: Int32 - keyword: DEFAULT - expression: numeric_literal: '0' - keyword: CODEC - bracketed: start_bracket: ( naked_identifier: ZSTD end_bracket: ) - keyword: AFTER - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: RENAME - keyword: COLUMN - naked_identifier: old_name - keyword: TO - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: RENAME - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: old_name - keyword: TO - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: RENAME - keyword: COLUMN - naked_identifier: old_name - keyword: TO - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: RENAME - keyword: COLUMN - keyword: IF - 
keyword: EXISTS - naked_identifier: old_name - keyword: TO - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: visits - keyword: RENAME - keyword: COLUMN - naked_identifier: webBrowser - keyword: TO - naked_identifier: browser - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: COMMENT - keyword: COLUMN - naked_identifier: col_name - quoted_literal: "'This is a comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: COMMENT - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: col_name - quoted_literal: "'Column description'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: COMMENT - keyword: COLUMN - naked_identifier: col_name - quoted_literal: "'Cluster-wide comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: COMMENT - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: col_name - quoted_literal: "'Cluster-wide description'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: visits - keyword: COMMENT - keyword: COLUMN - naked_identifier: browser - quoted_literal: "'This column shows the browser used for accessing the site.'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: COMMENT - quoted_literal: "'This is a table comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: COMMENT - quoted_literal: "'This is a table comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COMMENT - quoted_literal: "''" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: data_type_identifier: Int64 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: col_name - data_type: data_type_identifier: Int64 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: data_type_identifier: Int64 - keyword: DEFAULT - expression: numeric_literal: '0' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: 
data_type_identifier: Int64 - keyword: CODEC - bracketed: start_bracket: ( naked_identifier: ZSTD end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: data_type_identifier: Int64 - keyword: AFTER - naked_identifier: other_col - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: data_type_identifier: Int64 - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: COLUMN - naked_identifier: col_name - data_type: data_type_identifier: Int64 - keyword: DEFAULT - expression: numeric_literal: '0' - keyword: CODEC - bracketed: start_bracket: ( naked_identifier: ZSTD end_bracket: ) - keyword: AFTER - naked_identifier: other_col - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: visits - keyword: MODIFY - keyword: COLUMN - naked_identifier: browser - data_type: data_type_identifier: Array bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: MODIFY - keyword: COLUMN - naked_identifier: c2 - data_type: data_type_identifier: String - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: MODIFY - keyword: COLUMN - naked_identifier: c2 - keyword: TYPE - data_type: data_type_identifier: String - keyword: AFTER - naked_identifier: c1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: col_name - keyword: IN - keyword: PARTITION - naked_identifier: partition_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: col_name - keyword: IN - keyword: PARTITION - keyword: ID - quoted_literal: "'partition_id'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: col_name - keyword: IN - keyword: PARTITION - naked_identifier: partition_name - statement_terminator: ; - statement: alter_table_statement: - 
keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tmp - keyword: ADD - keyword: COLUMN - naked_identifier: s - data_type: data_type_identifier: String - keyword: MATERIALIZED - expression: function: function_name: function_name_identifier: toString function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tmp - keyword: MODIFY - keyword: COLUMN - naked_identifier: s - data_type: data_type_identifier: String - keyword: MATERIALIZED - expression: function: function_name: function_name_identifier: toString function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: round function_contents: bracketed: start_bracket: ( expression: numeric_literal: '100' binary_operator: / column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tmp - keyword: MATERIALIZE - keyword: COLUMN - naked_identifier: s - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: ALIAS - naked_identifier: alias_name - keyword: FOR - naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: ALIAS - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: alias_name - keyword: FOR - naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: COLUMN - naked_identifier: y - keyword: ALIAS - expression: column_reference: naked_identifier: z binary_operator: / numeric_literal: '10' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: ALIAS - naked_identifier: alias_name - keyword: FOR - naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: COLUMN - naked_identifier: y - keyword: REMOVE - keyword: ALIAS - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Float32 - keyword: ALIAS - expression: column_reference: naked_identifier: z binary_operator: '*' numeric_literal: '100' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_with_ttl - keyword: MODIFY - keyword: COLUMN - naked_identifier: column_ttl - keyword: REMOVE - keyword: TTL - statement_terminator: ; - statement: 
alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_with_ttl - keyword: REMOVE - keyword: TTL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_with_ttl - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: REMOVE - keyword: TTL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: TTL - expression: column_reference: naked_identifier: event_time binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '3' date_part: MONTH - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ADD - keyword: COLUMN - naked_identifier: y - data_type: data_type_identifier: Int32 - keyword: ALIAS - expression: column_reference: naked_identifier: z binary_operator: + numeric_literal: '10' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: y - data_type: data_type_identifier: Float32 - keyword: ALIAS - expression: column_reference: naked_identifier: z binary_operator: '*' numeric_literal: '100' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: MODIFY - keyword: COLUMN - naked_identifier: y - keyword: ALIAS - expression: column_reference: naked_identifier: z binary_operator: / numeric_literal: '10' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: COLUMN - naked_identifier: y - keyword: REMOVE - keyword: ALIAS - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mv - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mv - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - 
table_reference: naked_identifier: mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster1 - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mv - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name having_clause: keyword: HAVING expression: column_reference: naked_identifier: total_value comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: total_value - keyword: DESC limit_clause: keyword: LIMIT limit_clause_component: numeric_literal: '10' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mv - keyword: MODIFY - keyword: QUERY - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: alias_operator: keyword: AS naked_identifier: t2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id where_clause: keyword: WHERE expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: active comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/back_quoted_identifier.sql000066400000000000000000000001711503426445100273220ustar00rootroot00000000000000SELECT * FROM `foo`.`bar`; SELECT bar AS `baz` FROM foo; SELECT * FROM foo.bar `baz`; SELECT * FROM foo.bar AS `baz`; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/back_quoted_identifier.yml000066400000000000000000000050261503426445100273300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11786835d116c03996f92e69629f19d7be318c8959676e61a5c1a913f6a63062 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: bar alias_expression: alias_operator: keyword: AS quoted_identifier: '`baz`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar alias_expression: quoted_identifier: '`baz`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - dot: . 
- naked_identifier: bar alias_expression: alias_operator: keyword: AS quoted_identifier: '`baz`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/complex_table_definition.sql000066400000000000000000000057271503426445100277010ustar00rootroot00000000000000 CREATE TABLE db_name.table_name ( -- Basic types `id` UInt64, `timestamp` DateTime64(3, 'UTC') CODEC(Delta(8), LZ4), `value_raw` Float32, -- LowCardinality type `category` LowCardinality(String), -- Enum type `status` Enum8('ACTIVE' = 1, 'INACTIVE' = 2, 'PENDING' = 3), -- Nullable type `description` Nullable(String), -- Array type `tags` Array(String), -- ALIAS column `value_calculated` Float32 ALIAS value_raw / (3600 / 30), `flag_active` Int8 ALIAS if(status = 'ACTIVE', 1, 0), `value_with_dict` Float32 ALIAS value_raw * dictGetOrDefault('dictionary.lookup', 'key', (category, 'CATEGORY'), toDateTime(timestamp), 0.), -- MATERIALIZED column `description_is_null` UInt8 MATERIALIZED description IS NULL, -- Tuple types `coordinates` Tuple(Float64, Float64), `named_point` Tuple(x Float64, y Float64, z Float64), -- Map type `properties` Map(String, String), -- JSON type `json_data` JSON, -- Nested type `nested_data` Nested( key String, value Float64, timestamp DateTime ) ) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 8192; -- ALTER TABLE examples with various column types and options ALTER TABLE db_name.table_name ADD COLUMN `new_column` Float32 CODEC(Delta, LZ4); ALTER TABLE db_name.table_name ADD COLUMN `new_alias_column` Float32 ALIAS value_raw * 2; ALTER TABLE db_name.table_name ADD COLUMN `new_materialized_column` Float32 MATERIALIZED value_raw * 3; ALTER TABLE db_name.table_name ADD COLUMN `new_default_column` Float32 DEFAULT 100; ALTER TABLE db_name.table_name ADD COLUMN `new_enum_column` Enum8('VALUE1' = 1, 'VALUE2' = 2, 'VALUE3' = 3); ALTER TABLE db_name.table_name ADD COLUMN `new_lowcard_column` LowCardinality(String) DEFAULT 'DEFAULT_VALUE'; ALTER TABLE db_name.table_name ADD COLUMN `new_datetime_column` DateTime64(3, 'UTC') CODEC(Delta(8), LZ4); ALTER TABLE db_name.table_name ADD COLUMN `new_nullable_column` Nullable(Float32); ALTER TABLE db_name.table_name ADD COLUMN `new_json_column` JSON; -- Modify column examples ALTER TABLE db_name.table_name MODIFY COLUMN `value_raw` Float64 CODEC(Delta, LZ4); ALTER TABLE db_name.table_name MODIFY COLUMN `value_calculated` Float64 ALIAS value_raw / (3600 / 30); ALTER TABLE db_name.table_name MODIFY COLUMN `flag_active` Int8 MATERIALIZED if(status = 'ACTIVE', 1, 0); ALTER TABLE db_name.table_name MODIFY COLUMN `category` LowCardinality(String) DEFAULT 'UNKNOWN_CATEGORY'; -- Remove alias example ALTER TABLE db_name.table_name MODIFY COLUMN `value_with_dict` REMOVE ALIAS; -- Drop column example ALTER TABLE db_name.table_name DROP COLUMN `new_column`; -- Rename column example ALTER TABLE db_name.table_name RENAME COLUMN `new_column` TO `new_column_renamed`; -- Add alias from dictionary ALTER TABLE db_name.table_name ADD COLUMN `complex_alias` Float32 ALIAS value_raw * dictGetOrDefault('dictionary.lookup', 'price', (category, 'RESOURCE_TYPE'), toDateTime(timestamp), 0.); sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/complex_table_definition.yml000066400000000000000000000477271503426445100277110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d66dde416de3038b16ea8ee4cae8cf1048d54ab69cf64535d3fce588a22c9170 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`id`' data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: quoted_identifier: '`timestamp`' data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'UTC'" end_bracket: ) keyword: CODEC bracketed: start_bracket: ( function: function_name: function_name_identifier: Delta function_contents: bracketed: start_bracket: ( expression: numeric_literal: '8' end_bracket: ) comma: ',' naked_identifier: LZ4 end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`value_raw`' data_type: data_type_identifier: Float32 - comma: ',' - column_definition: quoted_identifier: '`category`' data_type: data_type_identifier: LowCardinality bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`status`' data_type: data_type_identifier: Enum8 bracketed: - start_bracket: ( - quoted_literal: "'ACTIVE'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - quoted_literal: "'INACTIVE'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - quoted_literal: "'PENDING'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`description`' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`tags`' data_type: data_type_identifier: Array bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`value_calculated`' data_type: data_type_identifier: Float32 keyword: ALIAS expression: column_reference: naked_identifier: value_raw binary_operator: / bracketed: start_bracket: ( expression: - numeric_literal: '3600' - binary_operator: / - numeric_literal: '30' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`flag_active`' data_type: data_type_identifier: Int8 keyword: ALIAS expression: function: function_name: function_name_identifier: if function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ACTIVE'" - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`value_with_dict`' data_type: data_type_identifier: Float32 keyword: ALIAS expression: column_reference: naked_identifier: value_raw binary_operator: '*' function: function_name: function_name_identifier: dictGetOrDefault function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'dictionary.lookup'" - comma: ',' - expression: quoted_literal: "'key'" - comma: ',' - expression: bracketed: start_bracket: ( column_reference: naked_identifier: category comma: ',' quoted_literal: "'CATEGORY'" end_bracket: ) - comma: ',' - expression: function: function_name: 
function_name_identifier: toDateTime function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp end_bracket: ) - comma: ',' - expression: numeric_literal: '0.' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`description_is_null`' data_type: data_type_identifier: UInt8 keyword: MATERIALIZED expression: column_reference: naked_identifier: description keyword: IS null_literal: 'NULL' - comma: ',' - column_definition: quoted_identifier: '`coordinates`' data_type: data_type_identifier: Tuple bracketed: - start_bracket: ( - data_type: data_type_identifier: Float64 - comma: ',' - data_type: data_type_identifier: Float64 - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`named_point`' data_type: data_type_identifier: Tuple bracketed: - start_bracket: ( - naked_identifier: x - data_type: data_type_identifier: Float64 - comma: ',' - naked_identifier: y - data_type: data_type_identifier: Float64 - comma: ',' - naked_identifier: z - data_type: data_type_identifier: Float64 - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`properties`' data_type: data_type_identifier: Map bracketed: - start_bracket: ( - data_type: data_type_identifier: String - comma: ',' - data_type: data_type_identifier: String - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`json_data`' data_type: data_type_identifier: JSON - comma: ',' - column_definition: quoted_identifier: '`nested_data`' data_type: data_type_identifier: Nested bracketed: - start_bracket: ( - naked_identifier: key - data_type: data_type_identifier: String - comma: ',' - naked_identifier: value - data_type: data_type_identifier: Float64 - comma: ',' - naked_identifier: timestamp - data_type: data_type_identifier: DateTime - end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id settings_clause: keyword: SETTINGS naked_identifier: index_granularity comparison_operator: raw_comparison_operator: '=' numeric_literal: '8192' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_column`' - data_type: data_type_identifier: Float32 - keyword: CODEC - bracketed: - start_bracket: ( - naked_identifier: Delta - comma: ',' - naked_identifier: LZ4 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_alias_column`' - data_type: data_type_identifier: Float32 - keyword: ALIAS - expression: column_reference: naked_identifier: value_raw binary_operator: '*' numeric_literal: '2' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . 
- naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_materialized_column`' - data_type: data_type_identifier: Float32 - keyword: MATERIALIZED - expression: column_reference: naked_identifier: value_raw binary_operator: '*' numeric_literal: '3' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_default_column`' - data_type: data_type_identifier: Float32 - keyword: DEFAULT - expression: numeric_literal: '100' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_enum_column`' - data_type: data_type_identifier: Enum8 bracketed: - start_bracket: ( - quoted_literal: "'VALUE1'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - quoted_literal: "'VALUE2'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - quoted_literal: "'VALUE3'" - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_lowcard_column`' - data_type: data_type_identifier: LowCardinality bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - keyword: DEFAULT - expression: quoted_literal: "'DEFAULT_VALUE'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_datetime_column`' - data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'UTC'" end_bracket: ) - keyword: CODEC - bracketed: start_bracket: ( function: function_name: function_name_identifier: Delta function_contents: bracketed: start_bracket: ( expression: numeric_literal: '8' end_bracket: ) comma: ',' naked_identifier: LZ4 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_nullable_column`' - data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Float32 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`new_json_column`' - data_type: data_type_identifier: JSON - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . 
- naked_identifier: table_name - keyword: MODIFY - keyword: COLUMN - quoted_identifier: '`value_raw`' - data_type: data_type_identifier: Float64 - keyword: CODEC - bracketed: - start_bracket: ( - naked_identifier: Delta - comma: ',' - naked_identifier: LZ4 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: MODIFY - keyword: COLUMN - quoted_identifier: '`value_calculated`' - data_type: data_type_identifier: Float64 - keyword: ALIAS - expression: column_reference: naked_identifier: value_raw binary_operator: / bracketed: start_bracket: ( expression: - numeric_literal: '3600' - binary_operator: / - numeric_literal: '30' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: MODIFY - keyword: COLUMN - quoted_identifier: '`flag_active`' - data_type: data_type_identifier: Int8 - keyword: MATERIALIZED - expression: function: function_name: function_name_identifier: if function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ACTIVE'" - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: MODIFY - keyword: COLUMN - quoted_identifier: '`category`' - data_type: data_type_identifier: LowCardinality bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - keyword: DEFAULT - expression: quoted_literal: "'UNKNOWN_CATEGORY'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: MODIFY - keyword: COLUMN - quoted_identifier: '`value_with_dict`' - keyword: REMOVE - keyword: ALIAS - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: DROP - keyword: COLUMN - quoted_identifier: '`new_column`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - keyword: RENAME - keyword: COLUMN - quoted_identifier: '`new_column`' - keyword: TO - quoted_identifier: '`new_column_renamed`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: db_name - dot: . 
- naked_identifier: table_name - keyword: ADD - keyword: COLUMN - quoted_identifier: '`complex_alias`' - data_type: data_type_identifier: Float32 - keyword: ALIAS - expression: column_reference: naked_identifier: value_raw binary_operator: '*' function: function_name: function_name_identifier: dictGetOrDefault function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'dictionary.lookup'" - comma: ',' - expression: quoted_literal: "'price'" - comma: ',' - expression: bracketed: start_bracket: ( column_reference: naked_identifier: category comma: ',' quoted_literal: "'RESOURCE_TYPE'" end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: toDateTime function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp end_bracket: ) - comma: ',' - expression: numeric_literal: '0.' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_database.sql000066400000000000000000000211261503426445100257310ustar00rootroot00000000000000CREATE DATABASE db_name; CREATE DATABASE _123; CREATE DATABASE db_name COMMENT 'SingleQuoted'; CREATE DATABASE db_name COMMENT "DoubleQuoted"; CREATE DATABASE db_name COMMENT 'SingleQuoted three words'; CREATE DATABASE db_name COMMENT "DoubleQuoted three words"; CREATE DATABASE db_name COMMENT 'Weird characters: !@#$%^&*()_+{}|:"<>?'; CREATE DATABASE db_name ON CLUSTER cluster; CREATE DATABASE db_name ON CLUSTER _123; CREATE DATABASE db_name ON CLUSTER "cluster"; CREATE DATABASE db_name ON CLUSTER "underscore_cluster"; CREATE DATABASE db_name ON CLUSTER 'cluster'; CREATE DATABASE db_name ON CLUSTER 'underscore_cluster'; CREATE DATABASE db_name ENGINE = Lazy() COMMENT 'Comment'; CREATE DATABASE db_comment ENGINE = Lazy() COMMENT 'The temporary database'; SELECT name, comment FROM system.databases WHERE name = 'db_comment'; -- https://clickhouse.com/docs/en/engines/database-engines/atomic CREATE DATABASE test; CREATE DATABASE test ENGINE = Atomic; -- https://clickhouse.com/docs/en/engines/database-engines/lazy CREATE DATABASE testlazy; CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); -- https://clickhouse.com/docs/en/engines/database-engines/replicated CREATE DATABASE testdb; CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name'); CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') SETTINGS key1 = value1; CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') SETTINGS key1 = 1, key2 = 2; CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1'); CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica'); CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}'); CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2'); -- https://clickhouse.com/docs/en/engines/database-engines/postgresql CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'schema_name'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'schema_name', 1); -- https://clickhouse.com/docs/en/engines/database-engines/mysql CREATE DATABASE IF NOT EXISTS mysql_db 
ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ON CLUSTER cluster ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', test, 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000; CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100; -- https://clickhouse.com/docs/en/engines/database-engines/sqlite CREATE DATABASE sqlite_db ENGINE = SQLite('sqlite.db'); -- https://clickhouse.com/docs/en/engines/database-engines/materialized-postgresql CREATE DATABASE postgres_db ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'); CREATE DATABASE postgres_database ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_tables_list = 'schema1.table1,schema2.table2,schema1.table3', materialized_postgresql_tables_list_with_schema = 1; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_schema_list = 'schema1,schema2,schema3'; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_tables_list = 'table1,table2,table3'; CREATE DATABASE demodb ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_replication_slot = 'clickhouse_sync', materialized_postgresql_snapshot = '0000000A-0000023F-3', materialized_postgresql_tables_list = 'table1,table2,table3'; CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; -- https://clickhouse.com/docs/en/engines/database-engines/materialized-mysql CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE; CREATE DATABASE IF NOT EXISTS db_name ENGINE = 
MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS allows_query_when_mysql_lost=true, max_wait_time_when_mysql_unavailable=10000; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_database.yml000066400000000000000000001643231503426445100257420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c7b644f583f544519fa5672020a95c4c7c420154354e07e2a0d4203835fd11c0 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: _123 - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'SingleQuoted'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: '"DoubleQuoted"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'SingleQuoted three words'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: '"DoubleQuoted three words"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'Weird characters: !@#$%^&*()_+{}|:\"<>?'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: _123 - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: '"cluster"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: '"underscore_cluster"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'cluster'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'underscore_cluster'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Lazy function_contents: bracketed: start_bracket: ( end_bracket: ) - keyword: COMMENT - quoted_identifier: "'Comment'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_comment - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: 
Lazy function_contents: bracketed: start_bracket: ( end_bracket: ) - keyword: COMMENT - quoted_identifier: "'The temporary database'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: comment from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: databases where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'db_comment'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Atomic - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testlazy - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testlazy - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Lazy function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: expiration_time_in_seconds end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: key1 - comparison_operator: raw_comparison_operator: '=' - naked_identifier: value1 - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: key1 - 
comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - naked_identifier: key2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'shard1'" - comma: ',' - expression: quoted_literal: "'replica1'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'shard1'" - comma: ',' - expression: quoted_literal: "'other_replica'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'other_shard'" - comma: ',' - expression: quoted_literal: "'{replica}'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'other_shard'" - comma: ',' - expression: quoted_literal: "'r2'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL function_contents: bracketed: 
- start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - comma: ',' - expression: quoted_literal: "'schema_name'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - comma: ',' - expression: quoted_literal: "'schema_name'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: column_reference: naked_identifier: test - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' 
engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: read_write_timeout comparison_operator: raw_comparison_operator: '=' numeric_literal: '10000' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: read_write_timeout - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - comma: ',' - naked_identifier: connect_timeout - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: sqlite_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: SQLite function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sqlite.db'" end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: postgres_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: postgres_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_postgresql_schema comparison_operator: raw_comparison_operator: 
'=' quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: materialized_postgresql_tables_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'schema1.table1,schema2.table2,schema1.table3'" - comma: ',' - naked_identifier: materialized_postgresql_tables_list_with_schema - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_postgresql_schema_list comparison_operator: raw_comparison_operator: '=' quoted_literal: "'schema1,schema2,schema3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_postgresql_tables_list comparison_operator: raw_comparison_operator: '=' quoted_literal: "'table1,table2,table3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: demodb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: materialized_postgresql_replication_slot - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'clickhouse_sync'" - comma: ',' - naked_identifier: materialized_postgresql_snapshot - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0000000A-0000023F-3'" - comma: ',' - naked_identifier: 
materialized_postgresql_tables_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'table1,table2,table3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_postgresql_schema comparison_operator: raw_comparison_operator: '=' quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: 
materialized_postgresql_schema comparison_operator: raw_comparison_operator: '=' quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_postgresql_schema comparison_operator: raw_comparison_operator: '=' quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: 
"'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: 
quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: 
raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - 
statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: materialized_mysql_database_engine comparison_operator: raw_comparison_operator: '=' quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: allows_query_when_mysql_lost - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'true' - comma: ',' - naked_identifier: max_wait_time_when_mysql_unavailable - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_materialized_view.sql000066400000000000000000000125431503426445100276740ustar00rootroot00000000000000
CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv TO db.table
AS SELECT column1, column2 FROM db.table_kafka;

CREATE MATERIALIZED VIEW table_mv TO table
AS SELECT column1, column2 FROM table_kafka;

CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv ON CLUSTER mycluster TO db.table
AS SELECT column1, column2 FROM db.table_kafka;

CREATE MATERIALIZED VIEW table_mv TO table ENGINE = MergeTree()
AS SELECT column1, column2 FROM table_kafka;

CREATE MATERIALIZED VIEW table_mv ENGINE = MergeTree()
AS SELECT column1, column2 FROM table_kafka;

CREATE MATERIALIZED VIEW table_mv ENGINE = MergeTree() POPULATE
AS SELECT column1, column2 FROM table_kafka;

CREATE MATERIALIZED VIEW db.mv_table ENGINE MergeTree ORDER BY ()
AS SELECT * FROM db.table;

-- Basic materialized view
CREATE MATERIALIZED VIEW my_view
ENGINE = MergeTree()
ORDER BY id
AS SELECT id, name FROM source_table;

-- Materialized view with IF NOT EXISTS
CREATE MATERIALIZED VIEW IF NOT EXISTS my_view_2
ENGINE = MergeTree()
ORDER BY id
AS SELECT id, value FROM source_table;

-- Materialized view with ON CLUSTER (explicit cluster name)
CREATE MATERIALIZED VIEW my_view_3 ON CLUSTER my_cluster
ENGINE = MergeTree()
ORDER BY id
AS SELECT id, timestamp FROM source_table;

-- Materialized view with ON CLUSTER (using cluster macro)
CREATE MATERIALIZED VIEW my_view_3_macro ON CLUSTER '{cluster}'
ENGINE = MergeTree()
ORDER BY id
AS SELECT id, timestamp FROM source_table;

-- Materialized view with TO clause
CREATE MATERIALIZED VIEW my_view_4 TO target_table
AS SELECT id, category FROM source_table;

-- Materialized view with IF NOT EXISTS, ON CLUSTER, and TO clause
CREATE MATERIALIZED VIEW IF NOT EXISTS cdc_lay.table_mv ON CLUSTER default TO stg_lay.table
AS SELECT * FROM source_table;

-- Materialized view with IF NOT EXISTS, ON CLUSTER macro, and TO clause
CREATE MATERIALIZED VIEW IF NOT EXISTS cdc_lay.table_mv_macro ON CLUSTER '{cluster}' TO stg_lay.table
AS SELECT * FROM source_table;

-- Materialized view with IF NOT EXISTS, ON CLUSTER macro, and TO clause
CREATE MATERIALIZED VIEW IF NOT EXISTS cdc_lay.table_mv_macro ON CLUSTER default TO stg_lay.table
AS SELECT * FROM source_table;

-- Materialized view with POPULATE
CREATE MATERIALIZED VIEW my_view_5
ENGINE = MergeTree()
ORDER BY id
POPULATE
AS SELECT id, status FROM source_table;

-- Materialized view with complex engine settings
CREATE MATERIALIZED VIEW my_view_6
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/my_view_6', '{replica}')
PARTITION BY toYYYYMM(timestamp)
ORDER BY (id, timestamp)
TTL timestamp + INTERVAL 1 MONTH
SETTINGS index_granularity = 8192
AS SELECT id, timestamp, value FROM source_table;

-- Materialized view with TO and database.table syntax
CREATE MATERIALIZED VIEW my_view_7 ON CLUSTER '{cluster}' TO db.target_table
AS SELECT * FROM source_table;

-- Materialized view with complex SELECT query
CREATE MATERIALIZED VIEW my_view_8
ENGINE = SummingMergeTree()
ORDER BY (category, metric)
AS SELECT
    category,
    metric,
    sum(value) AS total_value,
    count() AS count,
    avg(value) AS avg_value
FROM source_table
GROUP BY category, metric;

-- Materialized view with IF NOT EXISTS, ON CLUSTER, TO clause with column list
CREATE MATERIALIZED VIEW IF NOT EXISTS cdc_lay.table_mv2 ON CLUSTER default TO stg_lay.table2 (id, name, value)
AS SELECT id, name, value FROM source_table;

-- Materialized view from kafka table with column list
CREATE MATERIALIZED VIEW IF NOT EXISTS db.consumer_kafka ON CLUSTER '{cluster}' TO db.local
AS SELECT *, _timestamp_ms AS processedAt FROM db.kafka;

-- Materialized view with ARRAY JOIN
CREATE MATERIALIZED VIEW IF NOT EXISTS db.nested_data_mv ON CLUSTER '{cluster}' TO db.nested_data_local
AS SELECT
    identifier,
    _timestamp_ms AS processedAt,
    metrics.measuredAt AS measuredAt,
    metrics.value AS value,
    metrics.name AS name
FROM db.kafka
ARRAY JOIN metrics;

-- Materialized view with subquery in FROM clause and GROUP BY
CREATE MATERIALIZED VIEW IF NOT EXISTS db.aggeregating_mv ON CLUSTER '{cluster}' TO db.aggeregating_local
AS SELECT
    identifier,
    _ingestedAt AS ingestedAt,
    objectList
FROM (
    SELECT
        toStartOfDay(ingestedAt) AS _ingestedAt,
        identifier,
        groupUniqArray(objectIdentfier) AS objectList
    FROM db.raw_table
    GROUP BY identifier, _ingestedAt
);

-- Materialized view with subquery in FROM clause and GROUP BY
CREATE MATERIALIZED VIEW IF NOT EXISTS db.aggeregating_mv2 ON CLUSTER '{cluster}' TO db.aggeregating_local2
AS SELECT
    identifier,
    _ingestedAt AS ingestedAt,
    valueCount,
    cumulativeLagSeconds
FROM (
    SELECT
        identifier,
        toStartOfMinute(ingestedAt) AS _ingestedAt,
        count() AS valueCount,
        sum((toUnixTimestamp64Milli(ingestedAt) - toUnixTimestamp64Milli(measuredAt)) / 1000) AS cumulativeLagSeconds
    FROM db.raw_table
    GROUP BY identifier, _ingestedAt
);

-- Materialized view with TO clause and complex SELECT
CREATE MATERIALIZED VIEW IF NOT EXISTS db.kafka_errors ON CLUSTER '{cluster}' TO db.kafka_errors_local
AS SELECT
    _topic AS topic,
    _partition AS kafka_partition,
    _offset AS offset,
    ifNull(_timestamp_ms, now()) AS processedAt,
    _raw_message AS raw_message,
    _error AS error
FROM db.kafka
WHERE length(_error) > 0;
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_materialized_view.yml000066400000000000000000001237121503426445100276770ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 13787818f29237f6320c00fa239f0cf7be43cbfbe558b40b5c96fc6e3fd77fc9 file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_mv - keyword: TO - table_reference: - naked_identifier: db - dot: . - naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - keyword: TO - table_reference: naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: mycluster - keyword: TO - table_reference: - naked_identifier: db - dot: .
- naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - keyword: TO - table_reference: naked_identifier: table - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) - keyword: POPULATE - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: mv_table - engine: keyword: ENGINE table_engine_function: function_name: function_name_identifier: MergeTree merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . 
- naked_identifier: table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_view_2 - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_3 - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: my_cluster - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: timestamp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_3_macro - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: timestamp from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_4 - keyword: TO - table_reference: naked_identifier: target_table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: category from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: cdc_lay - dot: . - naked_identifier: table_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: default - keyword: TO - table_reference: - naked_identifier: stg_lay - dot: . - naked_identifier: table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: cdc_lay - dot: . - naked_identifier: table_mv_macro - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: stg_lay - dot: . - naked_identifier: table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: cdc_lay - dot: . - naked_identifier: table_mv_macro - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: default - keyword: TO - table_reference: - naked_identifier: stg_lay - dot: . 
- naked_identifier: table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_5 - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: POPULATE - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: status from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_6 - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: ReplicatedReplacingMergeTree function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'/clickhouse/tables/{shard}/my_view_6'" - comma: ',' - expression: quoted_literal: "'{replica}'" - end_bracket: ) - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp end_bracket: ) - merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: timestamp - end_bracket: ) - table_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: timestamp binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - settings_clause: keyword: SETTINGS naked_identifier: index_granularity comparison_operator: raw_comparison_operator: '=' numeric_literal: '8192' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: timestamp - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_7 - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: target_table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view_8 - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: SummingMergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: metric - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: column_reference: naked_identifier: metric - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: count - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: avg_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: metric - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: cdc_lay - dot: . - naked_identifier: table_mv2 - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: default - keyword: TO - table_reference: - naked_identifier: stg_lay - dot: . - naked_identifier: table2 - bracketed: - start_bracket: ( - naked_identifier: id - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: value - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: consumer_kafka - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . - naked_identifier: local - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: _timestamp_ms alias_expression: alias_operator: keyword: AS naked_identifier: processedAt from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: nested_data_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . - naked_identifier: nested_data_local - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: identifier - comma: ',' - select_clause_element: column_reference: naked_identifier: _timestamp_ms alias_expression: alias_operator: keyword: AS naked_identifier: processedAt - comma: ',' - select_clause_element: column_reference: - naked_identifier: metrics - dot: . - naked_identifier: measuredAt alias_expression: alias_operator: keyword: AS naked_identifier: measuredAt - comma: ',' - select_clause_element: column_reference: - naked_identifier: metrics - dot: . - naked_identifier: value alias_expression: alias_operator: keyword: AS naked_identifier: value - comma: ',' - select_clause_element: column_reference: - naked_identifier: metrics - dot: . - naked_identifier: name alias_expression: alias_operator: keyword: AS naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: kafka array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: metrics - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: aggeregating_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: aggeregating_local - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: identifier - comma: ',' - select_clause_element: column_reference: naked_identifier: _ingestedAt alias_expression: alias_operator: keyword: AS naked_identifier: ingestedAt - comma: ',' - select_clause_element: column_reference: naked_identifier: objectList from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: toStartOfDay function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ingestedAt end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: _ingestedAt - comma: ',' - select_clause_element: column_reference: naked_identifier: identifier - comma: ',' - select_clause_element: function: function_name: function_name_identifier: groupUniqArray function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: objectIdentfier end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: objectList from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: raw_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: identifier - comma: ',' - column_reference: naked_identifier: _ingestedAt end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: aggeregating_mv2 - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: aggeregating_local2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: identifier - comma: ',' - select_clause_element: column_reference: naked_identifier: _ingestedAt alias_expression: alias_operator: keyword: AS naked_identifier: ingestedAt - comma: ',' - select_clause_element: column_reference: naked_identifier: valueCount - comma: ',' - select_clause_element: column_reference: naked_identifier: cumulativeLagSeconds from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: identifier - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toStartOfMinute function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ingestedAt end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: _ingestedAt - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: valueCount - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - function: function_name: function_name_identifier: toUnixTimestamp64Milli function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ingestedAt end_bracket: ) - binary_operator: '-' - function: function_name: function_name_identifier: toUnixTimestamp64Milli function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: measuredAt end_bracket: ) end_bracket: ) binary_operator: / numeric_literal: '1000' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: cumulativeLagSeconds from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: raw_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: identifier - comma: ',' - column_reference: naked_identifier: _ingestedAt end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: kafka_errors - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - keyword: TO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: kafka_errors_local - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: _topic alias_expression: alias_operator: keyword: AS naked_identifier: topic - comma: ',' - select_clause_element: column_reference: naked_identifier: _partition alias_expression: alias_operator: keyword: AS naked_identifier: kafka_partition - comma: ',' - select_clause_element: column_reference: naked_identifier: _offset alias_expression: alias_operator: keyword: AS naked_identifier: offset - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ifNull function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _timestamp_ms - comma: ',' - expression: function: function_name: function_name_identifier: now function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: processedAt - comma: ',' - select_clause_element: column_reference: naked_identifier: _raw_message alias_expression: alias_operator: keyword: AS naked_identifier: raw_message - comma: ',' - select_clause_element: column_reference: naked_identifier: _error alias_expression: alias_operator: keyword: AS naked_identifier: error from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: kafka where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: _error end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_table.sql000066400000000000000000000041601503426445100252530ustar00rootroot00000000000000create table example1 ( a String, b String ) engine = MergeTree() order by (a, b); CREATE TABLE table_name ( u64 UInt64, i32 Int32, s String ) ENGINE = MergeTree() ORDER BY (CounterID, EventDate) PARTITION BY toYYYYMM(EventDate) SETTINGS index_granularity=8192; CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree(date, (UserId, EventType), 8192); CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); CREATE TABLE _2 as _1 ENGINE=Merge(currentDatabase(), '^WatchLog'); CREATE TABLE hits_all AS hits ENGINE = Distributed(logs, default, hits) SETTINGS fsync_after_insert=0, fsync_directories=0; CREATE TABLE IF NOT EXISTS db.table_name AS table_function(); CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1; CREATE TABLE codec_example ( timestamp DateTime CODEC(DoubleDelta), slow_values Float32 CODEC(Gorilla) ) ENGINE = MergeTree(); CREATE TABLE mytable ( x String Codec(Delta, LZ4, AES_128_GCM_SIV) ) ENGINE = MergeTree ORDER BY x; CREATE OR REPLACE TABLE base.t1 (n UInt64, s String) ENGINE = MergeTree ORDER BY n; CREATE OR REPLACE TABLE base.t1 (n UInt64, s Nullable(String)) ENGINE = MergeTree ORDER BY n; CREATE TABLE t1 (x String) ENGINE = Memory COMMENT 'The temporary table'; CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits); CREATE TABLE table_name ( name1 String, 
CONSTRAINT constraint_name_1 CHECK (name1 = 'test') ) ENGINE = engine; CREATE TABLE example_table ( d DateTime, a Int TTL d + INTERVAL 1 MONTH, b Int TTL d + INTERVAL 1 MONTH, c String ) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY d; CREATE TABLE example_table ( d DateTime, a Int ) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY d TTL d + INTERVAL 1 MONTH DELETE, d + INTERVAL 1 WEEK TO VOLUME 'aaa', d + INTERVAL 2 WEEK TO DISK 'bbb'; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_table.yml000066400000000000000000000542131503426445100252610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fd6849e0a10506d050bb85d718abe342abd04471b4361f262f35c0919836f19b file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: example1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: String - end_bracket: ) - engine: keyword: engine comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) merge_tree_order_by_clause: - keyword: order - keyword: by - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: u64 data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: i32 data_type: data_type_identifier: Int32 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) - merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: CounterID - comma: ',' - column_reference: naked_identifier: EventDate - end_bracket: ) - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: EventDate end_bracket: ) - settings_clause: keyword: SETTINGS naked_identifier: index_granularity comparison_operator: raw_comparison_operator: '=' numeric_literal: '8192' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog_old - bracketed: - start_bracket: ( - column_definition: naked_identifier: date data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: UserId data_type: data_type_identifier: Int64 - comma: ',' - column_definition: naked_identifier: EventType data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: Cnt data_type: 
data_type_identifier: UInt64 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: date - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: UserId - comma: ',' - column_reference: naked_identifier: EventType - end_bracket: ) - comma: ',' - expression: numeric_literal: '8192' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog_new - bracketed: - start_bracket: ( - column_definition: naked_identifier: date data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: UserId data_type: data_type_identifier: Int64 - comma: ',' - column_definition: naked_identifier: EventType data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: Cnt data_type: data_type_identifier: UInt64 - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: date - merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: UserId - comma: ',' - column_reference: naked_identifier: EventType - end_bracket: ) - settings_clause: keyword: SETTINGS naked_identifier: index_granularity comparison_operator: raw_comparison_operator: '=' numeric_literal: '8192' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog - keyword: as - table_reference: naked_identifier: WatchLog_old - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Merge function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: currentDatabase function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: quoted_literal: "'^WatchLog'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: _2 - keyword: as - table_reference: naked_identifier: _1 - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Merge function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: currentDatabase function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: quoted_literal: "'^WatchLog'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: hits_all - keyword: AS - table_reference: naked_identifier: hits - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Distributed function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: logs - comma: ',' - expression: column_reference: naked_identifier: default - comma: ',' - expression: column_reference: naked_identifier: hits - 
end_bracket: ) settings_clause: - keyword: SETTINGS - naked_identifier: fsync_after_insert - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - comma: ',' - naked_identifier: fsync_directories - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_name - keyword: AS - function: function_name: function_name_identifier: table_function function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Memory - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: codec_example - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp data_type: data_type_identifier: DateTime keyword: CODEC bracketed: start_bracket: ( naked_identifier: DoubleDelta end_bracket: ) - comma: ',' - column_definition: naked_identifier: slow_values data_type: data_type_identifier: Float32 keyword: CODEC bracketed: start_bracket: ( naked_identifier: Gorilla end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String keyword: Codec bracketed: - start_bracket: ( - naked_identifier: Delta - comma: ',' - naked_identifier: LZ4 - comma: ',' - naked_identifier: AES_128_GCM_SIV - end_bracket: ) end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: x - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: base - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: n data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: String - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: n - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: base - dot: . 
- naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: n data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: String end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: n - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Memory - keyword: COMMENT - quoted_identifier: "'The temporary table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: all_hits - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - bracketed: - start_bracket: ( - column_definition: naked_identifier: p data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: i data_type: data_type_identifier: Int32 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Distributed function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: cluster - comma: ',' - expression: column_reference: naked_identifier: default - comma: ',' - expression: column_reference: naked_identifier: hits - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: name1 data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: CONSTRAINT data_type: data_type_identifier: constraint_name_1 column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: name1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test'" end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: engine - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: d data_type: data_type_identifier: DateTime - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: Int column_constraint_segment: column_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: Int column_constraint_segment: column_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL 
numeric_literal: '1' date_part: MONTH - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: d end_bracket: ) - merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: d data_type: data_type_identifier: DateTime - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: Int - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: d end_bracket: ) - merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d - table_ttl_segment: - keyword: TTL - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - keyword: DELETE - comma: ',' - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: WEEK - keyword: TO - keyword: VOLUME - quoted_literal: "'aaa'" - comma: ',' - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '2' date_part: WEEK - keyword: TO - keyword: DISK - quoted_literal: "'bbb'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_temporary_table.sql000066400000000000000000000003321503426445100273520ustar00rootroot00000000000000CREATE TEMPORARY TABLE xt0 AS SELECT * FROM x; CREATE TEMPORARY TABLE IF NOT EXISTS t2 ( ty String, t2 String, c_date_time DateTime32 ) ENGINE = MergeTree ORDER BY (ty,t2) TTL c_date_time + INTERVAL 1 DAY sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_temporary_table.yml000066400000000000000000000047251503426445100273660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
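# The SQL source above (create_temporary_table.sql) exercises two shapes of
# CREATE TEMPORARY TABLE: a bare `AS SELECT` form, and an `IF NOT EXISTS`
# form carrying an ENGINE assignment, a MergeTree ORDER BY and a TTL clause.
# Both parse to `create_table_statement` nodes with a TEMPORARY keyword in
# the tree below.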
_hash: 51d346a915d0f87f6d66889eb8169b3d44ac5f87f62588a851beb4fca8e6c52b file: - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: xt0 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: ty data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: t2 data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: c_date_time data_type: data_type_identifier: DateTime32 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: ty - comma: ',' - column_reference: naked_identifier: t2 - end_bracket: ) - table_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: c_date_time binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_view.sql000066400000000000000000000012561503426445100251410ustar00rootroot00000000000000CREATE VIEW db.view_mv AS SELECT column1, column2 FROM db.table_kafka; CREATE VIEW db.view_mv ON CLUSTER mycluster AS SELECT column1, column2 FROM db.table_kafka; CREATE OR REPLACE VIEW db.view_mv AS SELECT column1, column2 FROM db.table_kafka; CREATE OR REPLACE VIEW db.view_mv ON CLUSTER mycluster AS SELECT column1, column2 FROM db.table_kafka; CREATE VIEW IF NOT EXISTS db.view_mv AS SELECT column1, column2 FROM db.table_kafka; CREATE VIEW IF NOT EXISTS db.view_mv AS SELECT column1, column2 FROM db.table_kafka; CREATE VIEW IF NOT EXISTS db.view_mv ON CLUSTER mycluster AS SELECT column1, column2 FROM db.table_kafka; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/create_view.yml000066400000000000000000000150141503426445100251400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: af0957a107ce336e42dd0c44c631861a71c55040f0e2cd10b257ae93439e9248 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_mv - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: view_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: mycluster - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_mv - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: mycluster - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_mv - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_mv - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: view_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: mycluster - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/cte.sql000066400000000000000000000016211503426445100234130ustar00rootroot00000000000000with ( select 1 as p ) as test_param select toString(1) as Test_string, toDateTime64('2022-05-25', 3) as Test_dateTime64, ifNull(null, 'TestNull') as testIf, JSONExtractString('{"abc": "hello"}', 'abc') as testJSON, test_param as param; WITH '2019-08-01 15:23:00' as ts_upper_bound SELECT * FROM hits WHERE EventDate = toDate(ts_upper_bound) AND EventTime <= ts_upper_bound; WITH sum(bytes) as s SELECT formatReadableSize(s), table FROM system.parts GROUP BY table ORDER BY s; /* this example would return TOP 10 of most huge tables */ WITH ( SELECT sum(bytes) FROM system.parts WHERE active ) AS total_disk_usage SELECT (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, table FROM system.parts GROUP BY table ORDER BY table_disk_usage DESC LIMIT 10; WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM test1; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/cte.yml000066400000000000000000000262601503426445100234230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
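# The SQL source above (cte.sql) covers ClickHouse's scalar CTE form
# (`WITH <expression> AS <identifier>`) alongside the standard subquery form
# (`WITH <identifier> AS (<subquery>)`); both surface as
# `common_table_expression` nodes in the tree below. As a quick manual check
# of any fixture in this directory (a sketch, assuming SQLFluff is installed
# locally), the same tree can be printed with:
#   sqlfluff parse --dialect clickhouse \
#     test/fixtures/dialects/clickhouse/cte.sql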
_hash: 8cda8c8d9a9295ed44d46b4987ad2bda56486b5b87126e078d231002a269102f file: - statement: with_compound_statement: keyword: with common_table_expression: expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: p end_bracket: ) keyword: as naked_identifier: test_param select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: toString function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: Test_string - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toDateTime64 function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'2022-05-25'" - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: Test_dateTime64 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ifNull function_contents: bracketed: - start_bracket: ( - expression: null_literal: 'null' - comma: ',' - expression: quoted_literal: "'TestNull'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: testIf - comma: ',' - select_clause_element: function: function_name: function_name_identifier: JSONExtractString function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"abc\": \"hello\"}'" - comma: ',' - expression: quoted_literal: "'abc'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: testJSON - comma: ',' - select_clause_element: column_reference: naked_identifier: test_param alias_expression: alias_operator: keyword: as naked_identifier: param - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: quoted_literal: "'2019-08-01 15:23:00'" keyword: as naked_identifier: ts_upper_bound select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: hits where_clause: keyword: WHERE expression: - column_reference: naked_identifier: EventDate - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ts_upper_bound end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: EventTime - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: ts_upper_bound - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) keyword: as naked_identifier: s select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: formatReadableSize function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: s end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: table from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: s - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts where_clause: keyword: WHERE expression: column_reference: naked_identifier: active end_bracket: ) keyword: AS naked_identifier: total_disk_usage select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) binary_operator: / column_reference: naked_identifier: total_disk_usage end_bracket: ) binary_operator: '*' numeric_literal: '100' alias_expression: alias_operator: keyword: AS naked_identifier: table_disk_usage - comma: ',' - select_clause_element: column_reference: naked_identifier: table from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: table_disk_usage - keyword: DESC limit_clause: keyword: LIMIT limit_clause_component: numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: test1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: i binary_operator: + numeric_literal: '1' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: j binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/cte_columns.sql000066400000000000000000000000761503426445100251560ustar00rootroot00000000000000WITH t(col1, col2) AS (SELECT 1, 2 FROM foo) SELECT * from t; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/cte_columns.yml000066400000000000000000000033561503426445100251640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b53fa22bf40f811851284c89c29204c8a30460e8248bc1b5c45d29b252db203d file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/datetime64_precision.sql000066400000000000000000000004001503426445100266530ustar00rootroot00000000000000SELECT '2024-01-01'::DateTime64 as datetime; SELECT '2024-01-01'::DateTime64() as datetime; SELECT '2024-01-01'::DateTime64(3) as datetime; SELECT '2024-01-01'::DateTime64(3, 'Europe/Paris') as datetime; SELECT '2024-01-01'::DateTime64(6) as datetime; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/datetime64_precision.yml000066400000000000000000000063401503426445100266660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
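# The SQL source above (datetime64_precision.sql) casts a string literal to
# DateTime64 with no argument list, with an empty argument list, with a
# precision only, and with a precision plus a timezone string; the trees
# below show the argument list as an optional `bracketed` node on the
# data type.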
_hash: a42ecfd08e1b93fedd3df21b303d0a69adcc70d9bacd927da070cd7e847b3e5c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2024-01-01'" casting_operator: '::' data_type: data_type_identifier: DateTime64 alias_expression: alias_operator: keyword: as naked_identifier: datetime - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2024-01-01'" casting_operator: '::' data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: datetime - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2024-01-01'" casting_operator: '::' data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: datetime - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2024-01-01'" casting_operator: '::' data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'Europe/Paris'" end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: datetime - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2024-01-01'" casting_operator: '::' data_type: data_type_identifier: DateTime64 bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: datetime - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/decimal_support.sql000066400000000000000000000005431503426445100260340ustar00rootroot00000000000000SELECT '12'::Nullable(Decimal(12,0)) as num_value; SELECT '12'::Nullable(Decimal32(1)) as num_value; SELECT '12'::Nullable(Decimal64(12)) as num_value; SELECT '12'::Nullable(Decimal128(12)) as num_value; SELECT '12'::Nullable(Decimal256(12)) as num_value; SELECT '12'::Decimal(12,0) as num_value; SELECT '12'::Nullable(Numeric(12, 0)) as num_value; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/decimal_support.yml000066400000000000000000000141741503426445100260430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
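# The SQL source above (decimal_support.sql) casts to Decimal(P, S), to the
# fixed-width Decimal32/64/128/256 types, and to the Numeric alias, each
# optionally wrapped in Nullable(...); precision and scale appear below as
# `bracketed_arguments` for Decimal/Numeric and as a plain `bracketed` node
# for the fixed-width variants.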
_hash: 6deccb43d3cf83c9062b235c0f01b5a4310ea49149b78c99c459b1aac74c1c02 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Decimal bracketed_arguments: bracketed: - start_bracket: ( - data_type: numeric_literal: '12' - comma: ',' - data_type: numeric_literal: '0' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Decimal32 bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Decimal64 bracketed: start_bracket: ( numeric_literal: '12' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Decimal128 bracketed: start_bracket: ( numeric_literal: '12' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Decimal256 bracketed: start_bracket: ( numeric_literal: '12' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Decimal bracketed_arguments: bracketed: - start_bracket: ( - data_type: numeric_literal: '12' - comma: ',' - data_type: numeric_literal: '0' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'12'" casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Numeric bracketed_arguments: bracketed: - start_bracket: ( - data_type: numeric_literal: '12' - comma: ',' - data_type: numeric_literal: '0' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: num_value - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/dollar_quoted_literal.sql000066400000000000000000000001271503426445100272120ustar00rootroot00000000000000SELECT * FROM foo WHERE col1 = $$bar$$; SELECT * FROM foo WHERE col1 = $baz$bar$baz$; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/dollar_quoted_literal.yml000066400000000000000000000032621503426445100272170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db62483fc2007d81cce45cf5cb073a25261d92065231c8344f0fbafff69db6ff file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: $$bar$$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: $baz$bar$baz$ - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/drop_statement.sql000066400000000000000000000025721503426445100256760ustar00rootroot00000000000000-- DROP DATABASE DROP DATABASE db; DROP DATABASE IF EXISTS db; DROP DATABASE db ON CLUSTER cluster; DROP DATABASE db SYNC; -- DROP TABLE DROP TABLE db_name; DROP TABLE db.name; DROP TABLE IF EXISTS db_name; DROP TABLE db_name ON CLUSTER cluster; DROP TABLE db_name SYNC; DROP TEMPORARY TABLE db_name; -- DROP DICTIONARY DROP DICTIONARY dict_name; DROP DICTIONARY IF EXISTS dict_name; DROP DICTIONARY dict_name SYNC; -- DROP USER DROP USER user_name; DROP USER IF EXISTS user_name; DROP USER user_name ON CLUSTER cluster_name; -- DROP ROLE DROP ROLE role_name; DROP ROLE IF EXISTS role_name; DROP ROLE role_name ON CLUSTER cluster_name; -- DROP ROW POLICY -- DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] -- DROP QUOTA DROP QUOTA quota_name; DROP QUOTA IF EXISTS quota_name; DROP QUOTA quota_name ON CLUSTER cluster_name; -- DROP SETTINGS PROFILE DROP setting_name PROFILE profile_name; DROP setting_name PROFILE IF EXISTS profile_name; DROP setting_name PROFILE profile_name ON CLUSTER cluster_name; DROP setting_name1,setting_name2 PROFILE profile_name; -- DROP VIEW DROP VIEW view_name; DROP VIEW db.view_name; DROP VIEW IF EXISTS view_name; DROP VIEW view_name ON CLUSTER cluster; DROP VIEW view_name SYNC; -- DROP FUNCTION DROP FUNCTION function_name; DROP FUNCTION IF EXISTS function_name; DROP FUNCTION function_name on CLUSTER cluster; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/drop_statement.yml000066400000000000000000000161201503426445100256720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2009929ac9fd7f306cb4749f88dbdd183dba6fbe1ae1e98b347863f6a4a34301 file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SYNC - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: db_name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: db_name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: db_name - keyword: SYNC - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: db_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - naked_identifier: dict_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - keyword: IF - keyword: EXISTS - naked_identifier: dict_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - naked_identifier: dict_name - keyword: SYNC - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - naked_identifier: user_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - naked_identifier: user_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - naked_identifier: user_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - naked_identifier: role_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - naked_identifier: role_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - naked_identifier: role_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - naked_identifier: quota_name - 
statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - keyword: IF - keyword: EXISTS - naked_identifier: quota_name - statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - naked_identifier: quota_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - naked_identifier: profile_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - keyword: IF - keyword: EXISTS - naked_identifier: profile_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - naked_identifier: profile_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name1 - comma: ',' - naked_identifier: setting_name2 - keyword: PROFILE - naked_identifier: profile_name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: view_name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: view_name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: view_name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: view_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: view_name - keyword: SYNC - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - naked_identifier: function_name - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - naked_identifier: function_name - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - naked_identifier: function_name - on_cluster_clause: - keyword: 'on' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/final.sql000066400000000000000000000001621503426445100237300ustar00rootroot00000000000000select a from my_table final where a > 0; SELECT sum(bytes) FROM system.parts as table_alias final WHERE active; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/final.yml000066400000000000000000000040121503426445100237300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
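Because hand-editing these YML files invalidates the `_hash` guard, the working loop is: edit the .sql fixture, rerun the documented generator, then review the diff. Below is one illustrative way to script that loop; the generator invocation is the command documented above, while the git diff step is an assumption about how you might review the result:

import subprocess

# Regenerate all parse fixtures after editing a .sql file (documented command).
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)

# Review what changed; any hand-edited .yml files would be overwritten here.
# (Using git to inspect the diff is an assumption, not part of the generator.)
subprocess.run(["git", "diff", "--stat", "test/fixtures/dialects/"], check=True)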
_hash: d3dad31c73bc7f690ddd2095a96d5ae600d1880d76dd31662682ad2611f93c14 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table keyword: final where_clause: keyword: where expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts alias_expression: alias_operator: keyword: as naked_identifier: table_alias keyword: final where_clause: keyword: WHERE expression: column_reference: naked_identifier: active - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/format.sql000066400000000000000000000003421503426445100241270ustar00rootroot00000000000000SELECT test FROM toto FORMAT CSV; SELECT 1 FORMAT CSV; SELECT 1 as test FORMAT CSV; SELECT test FROM dual where test = '1' FORMAT CSV; SELECT test FROM dual where test = '1' FORMAT CSV SETTINGS format_csv_delimiter = ','; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/format.yml000066400000000000000000000057461503426445100241460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
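Beyond regenerating fixtures, a quick smoke test that a dialect accepts a statement (such as the FORMAT ... SETTINGS form above) can go through SQLFluff's simple lint API. A hedged sketch, assuming `sqlfluff.lint` returns a list of violation records and that a statement the dialect cannot parse would typically surface as a parsing violation in that list:

import sqlfluff

sql = "SELECT test FROM dual FORMAT CSV SETTINGS format_csv_delimiter = ',';\n"
# Assumption: violations come back as a list of records; an empty list
# suggests the clickhouse dialect parsed the statement cleanly.
violations = sqlfluff.lint(sql, dialect="clickhouse")
print(violations or "no violations reported")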
_hash: 83bf6a5cb8c39ac3d5b8205be5497c373428cfbd439102c96cbb97196d931807 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto format_clause: - keyword: FORMAT - keyword: CSV - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' format_clause: - keyword: FORMAT - keyword: CSV - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: test format_clause: - keyword: FORMAT - keyword: CSV - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: column_reference: naked_identifier: test comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" format_clause: - keyword: FORMAT - keyword: CSV - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: column_reference: naked_identifier: test comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" format_clause: - keyword: FORMAT - keyword: CSV - settings_clause: keyword: SETTINGS naked_identifier: format_csv_delimiter comparison_operator: raw_comparison_operator: '=' quoted_literal: "','" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/identifier.sql000066400000000000000000000002031503426445100247550ustar00rootroot00000000000000SELECT * FROM _1.Table; SELECT * FROM _1._2; SELECT bar AS _1 FROM foo; SELECT * FROM foo.bar _1; SELECT * FROM foo.bar AS _1; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/identifier.yml000066400000000000000000000057371503426445100250000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c1c72774e91ec7601e3c5a5048914612722000c172032d7d15e5da84dc1c4ddf file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _1 - dot: . - naked_identifier: Table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _1 - dot: . 
- naked_identifier: _2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: bar alias_expression: alias_operator: keyword: AS naked_identifier: _1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar alias_expression: naked_identifier: _1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar alias_expression: alias_operator: keyword: AS naked_identifier: _1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_implicit_array_tuple.sql000066400000000000000000000003161503426445100275470ustar00rootroot00000000000000select * from tbl where int_col in (1, 2); select * from tbl where int_col in [1, 2]; select * from tbl where int_col global in (1, 2); select * from tbl where int_col not in [toUUID('a'), toUUID('b')]; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_implicit_array_tuple.yml000066400000000000000000000077171503426445100275650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
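Once loaded, fixture records are plain nested dicts and lists, so a check like "this WHERE clause parsed `in [1, 2]` as an array_literal rather than a tuple" reduces to a small recursive walk. In the self-contained sketch below, `contains_key` is a hypothetical helper (not part of SQLFluff) and the record fragment is hand-written to mirror the in_implicit_array_tuple.yml fixture:

def contains_key(node, key):
    # Recursively search a nested dict/list parse record for a node type.
    if isinstance(node, dict):
        return key in node or any(contains_key(v, key) for v in node.values())
    if isinstance(node, list):
        return any(contains_key(item, key) for item in node)
    return False

# Hand-written fragment mirroring the in_implicit_array_tuple.yml fixture.
where_clause = {
    "keyword": "where",
    "expression": {
        "column_reference": {"naked_identifier": "int_col"},
        "keyword": "in",
        "array_literal": [
            {"start_square_bracket": "["},
            {"numeric_literal": "1"},
            {"comma": ","},
            {"numeric_literal": "2"},
            {"end_square_bracket": "]"},
        ],
    },
}

assert contains_key(where_clause, "array_literal")
assert not contains_key(where_clause, "tuple")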
_hash: d0b924d17e563d666e87b25e0462e81727ce2386610ce7855f9e6b074db09846 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: int_col keyword: in tuple: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: int_col keyword: in array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: - column_reference: naked_identifier: int_col - keyword: global - keyword: in - tuple: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: - column_reference: naked_identifier: int_col - keyword: not - keyword: in - array_literal: - start_square_bracket: '[' - function: function_name: function_name_identifier: toUUID function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: toUUID function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) - end_square_bracket: ']' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_operator.sql000066400000000000000000000012311503426445100251560ustar00rootroot00000000000000-- Standard IN SELECT uniq(col1) FROM table1 WHERE col1 IN (SELECT col1 FROM table1 WHERE col2 = 34); SELECT uniq(col1) FROM table1 WHERE col1 NOT IN (SELECT col1 FROM table1 WHERE col2 = 34); -- GLOBAL IN SELECT uniq(col1) FROM table1 WHERE col1 GLOBAL IN (SELECT col1 FROM table1 WHERE col2 = 34); SELECT uniq(col1) FROM table1 WHERE col1 GLOBAL NOT IN (SELECT col1 FROM table1 WHERE col2 = 34); -- IN FUNCTION SELECT uniq(col1) FROM table1 WHERE col1 IN tuple(1, 2); SELECT uniq(col1) FROM table1 WHERE col1 NOT IN tuple(1, 2); SELECT uniq(col1) FROM table1 WHERE col1 GLOBAL IN tuple(1, 2); SELECT uniq(col1) FROM table1 WHERE col1 GLOBAL NOT IN tuple(1, 2); sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_operator.yml000066400000000000000000000302161503426445100251650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 15c6a5e3466927fe29c0ab9f7f85b50f459ded3952818f223e1b3e09df5577a0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 keyword: IN tuple: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '34' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: NOT - keyword: IN - tuple: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '34' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: GLOBAL - keyword: IN - tuple: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '34' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: GLOBAL - keyword: NOT - keyword: IN - tuple: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '34' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 keyword: IN function: function_name: function_name_identifier: tuple function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: NOT - keyword: IN - function: function_name: function_name_identifier: tuple function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: GLOBAL - keyword: IN - function: function_name: function_name_identifier: tuple function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: uniq function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: GLOBAL - keyword: NOT - keyword: IN - function: function_name: function_name_identifier: tuple function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - 
expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_table_cte.sql000066400000000000000000000005071503426445100252520ustar00rootroot00000000000000with (select (1, 2, 3)) as in_arr_cte select * from tbl where int_col in in_arr_cte; with (1, 2, 3) as int_arr select * from tbl where int_col in int_arr; with [1, 2, 3] as int_arr select * from tbl where int_col in int_arr; with [1, 2, 3] as int_arr select *, if(int_col in int_arr, 1, 0) as in_array_flag from tbl; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/in_table_cte.yml000066400000000000000000000127111503426445100252540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb63b3e432098dfe10d85dc91ce890edfa3523802fd361ac1905f9cf140d8f1d file: - statement: with_compound_statement: keyword: with common_table_expression: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: select function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) end_bracket: ) keyword: as naked_identifier: in_arr_cte select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: int_col keyword: in naked_identifier: in_arr_cte - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) keyword: as naked_identifier: int_arr select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: int_col keyword: in naked_identifier: int_arr - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' keyword: as naked_identifier: int_arr select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: int_col keyword: in naked_identifier: int_arr - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' keyword: as 
naked_identifier: int_arr select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: if function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: int_col keyword: in naked_identifier: int_arr - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: in_array_flag from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/interval.sql000066400000000000000000000013671503426445100244730ustar00rootroot00000000000000SELECT INTERVAL 1 DAY; SELECT INTERVAL '1' DAY; SELECT INTERVAL '1 DAY'; SELECT INTERVAL '1 days' + interval '3 hours' + interval 2 minutes; SELECT date_add(today(), INTERVAL -30 DAY); SELECT subDate(toDate('2008-01-02'), INTERVAL 31 DAY); SELECT addDate(today(), INTERVAL -30 day); SELECT date_sub(toDate('2018-01-01'), INTERVAL 3 YEAR); SELECT date_add(toDate('2018-01-01'), INTERVAL 3 YYYY); SELECT date_add(today(), interval 7 * 4 days); SELECT addDate(today(), INTERVAL col1 DAY) FROM tbl1 ; SELECT subDate(today(), INTERVAL col1 + col2 DAY) FROM tbl1 ; SELECT formatDateTime(db1.tbl1.col1 + INTERVAL db1.tbl1.col2 + db2_tbl2.col2 DAY, '%F %T') FROM db1.tbl1 left join db2.tbl2 as db2_tbl2 on db1.tbl1.id = db2_tbl2.id ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/interval.yml000066400000000000000000000302311503426445100244650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
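The `_hash` guard means tooling should read these files rather than edit them. A small sketch of loading a fixture with PyYAML, dropping the guard hash, and counting parsed statements; the path assumes you are running from the repository root:

import yaml

path = "test/fixtures/dialects/clickhouse/interval.yml"
with open(path, encoding="utf-8") as f:
    record = yaml.safe_load(f)

# The guard hash is metadata, not part of the parse tree.
record.pop("_hash", None)

# Multi-statement fixtures store a list of nodes under "file".
statements = [node for node in record["file"] if "statement" in node]
print(f"{path}: {len(statements)} parsed statements")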
_hash: 50c8e74502339ad5101792b9a134d1b1da996a85cee63d20816b929532125ed7 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1 DAY'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: keyword: INTERVAL quoted_literal: "'1 days'" - binary_operator: + - interval_expression: keyword: interval quoted_literal: "'3 hours'" - binary_operator: + - interval_expression: keyword: interval numeric_literal: '2' date_part: minutes - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: date_add function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: today function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: subDate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2008-01-02'" end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL numeric_literal: '31' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: addDate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: today function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: day - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: date_sub function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2018-01-01'" end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL numeric_literal: '3' date_part: YEAR - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: date_add function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2018-01-01'" end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL numeric_literal: '3' date_part: 
YYYY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: date_add function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: today function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: interval expression: - numeric_literal: '7' - binary_operator: '*' - numeric_literal: '4' date_part: days - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: addDate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: today function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: column_reference: naked_identifier: col1 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: subDate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: today function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: formatDateTime function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: db1 - dot: . - naked_identifier: tbl1 - dot: . - naked_identifier: col1 binary_operator: + interval_expression: keyword: INTERVAL expression: - column_reference: - naked_identifier: db1 - dot: . - naked_identifier: tbl1 - dot: . - naked_identifier: col2 - binary_operator: + - column_reference: - naked_identifier: db2_tbl2 - dot: . - naked_identifier: col2 date_part: DAY - comma: ',' - expression: quoted_literal: "'%F %T'" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db1 - dot: . - naked_identifier: tbl1 join_clause: - keyword: left - keyword: join - from_expression_element: table_expression: table_reference: - naked_identifier: db2 - dot: . - naked_identifier: tbl2 alias_expression: alias_operator: keyword: as naked_identifier: db2_tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: db1 - dot: . - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: db2_tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/into_outfile.sql000066400000000000000000000003651503426445100253440ustar00rootroot00000000000000SELECT 1 INTO OUTFILE '/tmp/test'; SELECT 1 as test INTO OUTFILE '/tmp/test' FORMAT TabSeparated; SELECT test FROM dual where test = '1' INTO OUTFILE '/tmp/test' FORMAT TabSeparated; SELECT test FROM dual INTO OUTFILE '/tmp/test' FORMAT CSV; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/into_outfile.yml000066400000000000000000000050011503426445100253360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 45a4958836a5cffe1ca01956cf45830ab661b3a01c060a2d89b6c7be6054badf file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' into_outfile_clause: - keyword: INTO - keyword: OUTFILE - quoted_literal: "'/tmp/test'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: test into_outfile_clause: - keyword: INTO - keyword: OUTFILE - quoted_literal: "'/tmp/test'" - format_clause: - keyword: FORMAT - keyword: TabSeparated - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: column_reference: naked_identifier: test comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" into_outfile_clause: - keyword: INTO - keyword: OUTFILE - quoted_literal: "'/tmp/test'" - format_clause: - keyword: FORMAT - keyword: TabSeparated - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual into_outfile_clause: - keyword: INTO - keyword: OUTFILE - quoted_literal: "'/tmp/test'" - format_clause: - keyword: FORMAT - keyword: CSV - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/join.sql000066400000000000000000000155171503426445100236100ustar00rootroot00000000000000-- no type join SELECT * FROM test1 ALL JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 ANY JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL ALL JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL ANY JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL JOIN test2 ON test2.ty1=test1.ty1; -- INNER join SELECT * FROM test1 INNER JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL INNER JOIN test2 ON test2.ty1=test1.ty1; -- INNER join ... SELECT * FROM test1 INNER ALL JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 INNER ANY JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL INNER ALL JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL INNER ANY JOIN test2 ON test2.ty1=test1.ty1; -- ... 
INNER join SELECT * FROM test1 ALL INNER JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 ANY INNER JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL ALL INNER JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL ANY INNER JOIN test2 ON test2.ty1=test1.ty1; -- LEFT JOIN SELECT * FROM test1 LEFT JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL LEFT JOIN test2 ON test2.ty1=test1.ty1; -- LEFT join ... SELECT tbl1.id FROM tbl1 LEFT ANTI join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ANY JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ALL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ASOF JOIN test2 USING ty1,ty2; SELECT tbl1.id FROM tbl1 GLOBAL LEFT ANTI join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 GLOBAL LEFT SEMI JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ANY JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ALL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ASOF JOIN test2 USING ty1,ty2; -- ... LEFT join select tbl1.id from tbl1 ANTI LEFT join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 SEMI LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ANY LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ALL LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ASOF LEFT JOIN test2 USING (ty1,ty2); select tbl1.id from tbl1 GLOBAL ANTI LEFT join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 GLOBAL SEMI LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL ANY LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL ALL LEFT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL ASOF LEFT JOIN test2 USING (ty1,ty2); -- LEFT join test case OUTER SELECT * FROM test1 as t1 LEFT OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2; SELECT tbl1.id FROM tbl1 LEFT ANTI OUTER join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 LEFT SEMI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ANY OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ALL OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ASOF OUTER JOIN test2 USING ty1,ty2; SELECT tbl1.id FROM tbl1 GLOBAL LEFT ANTI OUTER join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 GLOBAL LEFT SEMI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ANY OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ALL OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL LEFT ASOF OUTER JOIN test2 USING ty1,ty2; -- RIGHT JOIN SELECT * FROM test1 RIGHT JOIN test2 ON test2.ty1=test1.ty1; SELECT * FROM test1 GLOBAL RIGHT JOIN test2 ON test2.ty1=test1.ty1; -- RIGHT join ... SELECT tbl1.id FROM tbl1 RIGHT ANTI join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 RIGHT SEMI JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ANY JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ALL JOIN test2 USING ty1,ty2; SELECT tbl1.id FROM tbl1 GLOBAL RIGHT ANTI join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 GLOBAL RIGHT SEMI JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT ANY JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT ALL JOIN test2 USING ty1,ty2; -- ... 
RIGHT join select tbl1.id from tbl1 ANTI RIGHT join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 SEMI RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ANY RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ALL RIGHT JOIN test2 USING ty1,ty2; select tbl1.id from tbl1 GLOBAL ANTI RIGHT join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 GLOBAL SEMI RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL ANY RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL ALL RIGHT JOIN test2 USING ty1,ty2; -- RIGHT join test case OUTER SELECT * FROM test1 as t1 RIGHT OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ANTI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT SEMI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ANY OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ALL OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT ANTI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT SEMI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT ANY OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL RIGHT ALL OUTER JOIN test2 USING ty1,ty2; -- ASOF join select tbl1.id from tbl1 ASOF JOIN tbl2 on tbl1.id = tbl2.id; select tbl1.id from tbl1 GLOBAL ASOF JOIN tbl2 on tbl1.id = tbl2.id; -- CROSS join SELECT * FROM test1 CROSS JOIN test2; SELECT * FROM test1 GLOBAL CROSS JOIN test2; -- FULL join SELECT * FROM test1 as t1 FULL ALL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 FULL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 FULL ALL OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL FULL ALL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL FULL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 GLOBAL FULL ALL OUTER JOIN test2 USING ty1,ty2; -- PASTE join SELECT * FROM table1 t1 PASTE JOIN table2 t2; SELECT * FROM table1 t1 GLOBAL PASTE JOIN table2 t2; -- ARRAY join SELECT col FROM (SELECT arr FROM test1) AS t2 ARRAY JOIN arr AS col; SELECT col FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr AS col; SELECT * FROM (SELECT [1, 2] AS arr) AS t1 ARRAY JOIN arr; SELECT * FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr; SELECT * FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr, arr2; SELECT x, y FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr AS x, arr2 AS y; SELECT *,ch,cg FROM (SELECT 1) ARRAY JOIN ['1','2'] as cg, splitByChar(',','1,2') as ch; SELECT * FROM (SELECT [1,2] x) AS t1 ARRAY JOIN t1.*; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/join.yml000066400000000000000000003154331503426445100236120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
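join.sql above enumerates GLOBAL, strictness, and direction combinations by hand. When extending that coverage, generating the candidate matrix can be less error-prone than typing it out. The sketch below is illustrative only and does not claim that every emitted combination is valid ClickHouse:

from itertools import product

globals_ = ["", "GLOBAL "]
strictness = ["", "ANY ", "ALL ", "SEMI ", "ANTI ", "ASOF "]
direction = ["", "LEFT ", "RIGHT "]

for g, s, d in product(globals_, strictness, direction):
    # Illustrative output only; prune combinations the dialect rejects.
    print(f"SELECT * FROM test1 {g}{s}{d}JOIN test2 ON test2.ty1 = test1.ty1;")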
_hash: 82a0d04e0efa48e7c7ab723e92edbf36134c8df2a274c2481deed302c8a8a1f4 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: test2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . 
- naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: naked_identifier: GLOBAL join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: test2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . 
- naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: INNER - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: INNER - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ALL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ANY - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: ALL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . 
- naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: ANY - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: LEFT - keyword: ANTI - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ANTI - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: ANTI - keyword: LEFT - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: SEMI - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: ANY - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: ALL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: ASOF - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - bracketed: - start_bracket: ( - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: ANTI - keyword: LEFT - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: SEMI - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: ANY - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: ALL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: ASOF - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - bracketed: - start_bracket: ( - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ASOF - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: 
table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: LEFT - keyword: ANTI - keyword: OUTER - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: SEMI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ANY - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: LEFT - keyword: ASOF - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: 
alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ASOF - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ANTI - keyword: OUTER - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: SEMI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ANY - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: LEFT - keyword: ASOF - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: RIGHT - keyword: ANTI - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ANTI - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: ANTI - keyword: RIGHT - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: SEMI - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: ANY - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: ALL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: ANTI - keyword: RIGHT - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: SEMI - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: ANY - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: ALL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: ANTI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: SEMI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - 
naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: ANY - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: RIGHT - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ANTI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: SEMI - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ANY - keyword: OUTER - keyword: JOIN 
- from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: RIGHT - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: GLOBAL - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: GLOBAL - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: FULL - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: FULL - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: FULL - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: 
alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 alias_expression: alias_operator: keyword: as naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: FULL - keyword: ALL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - keyword: USING - naked_identifier: ty1 - comma: ',' - naked_identifier: ty2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: t1 join_clause: - keyword: PASTE - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: naked_identifier: t2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: t1 join_clause: - keyword: GLOBAL - keyword: PASTE - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: naked_identifier: t2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: arr from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t2 array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: arr alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: LEFT - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: arr alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: arr - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: LEFT - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: arr - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr - comma: ',' - select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: column_reference: naked_identifier: arr - comma: ',' - select_clause_element: column_reference: naked_identifier: arr2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr - comma: ',' - select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: arr2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: 
column_reference: naked_identifier: arr alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: arr2 alias_expression: alias_operator: keyword: AS naked_identifier: y - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: ch - comma: ',' - select_clause_element: column_reference: naked_identifier: cg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: array_literal: - start_square_bracket: '[' - quoted_literal: "'1'" - comma: ',' - quoted_literal: "'2'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: cg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: splitByChar function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "','" - comma: ',' - expression: quoted_literal: "'1,2'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: ch - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: naked_identifier: x end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 array_join_clause: - keyword: ARRAY - keyword: JOIN - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: t1 dot: . star: '*' - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/lambda_function.sql
SELECT arrayFirst(x -> x = 2, [1, 1, 2, 2]);
SELECT arrayFirst(x, y -> x != y, [1, 1, 2, 2], [1, 2, 2, 3]);
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/lambda_function.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 859c848698e25c9d220669ee133319623f0884a1498022dae05ddbf58ef63282
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: arrayFirst
            function_contents:
              bracketed:
              - start_bracket: (
              - expression:
                - column_reference:
                    naked_identifier: x
                - lambda: ->
                - column_reference:
                    naked_identifier: x
                - comparison_operator:
                    raw_comparison_operator: '='
                - numeric_literal: '2'
              - comma: ','
              - expression:
                  array_literal:
                  - start_square_bracket: '['
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '2'
                  - comma: ','
                  - numeric_literal: '2'
                  - end_square_bracket: ']'
              - end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: arrayFirst
            function_contents:
              bracketed:
              - start_bracket: (
              - expression:
                  column_reference:
                    naked_identifier: x
              - comma: ','
              - expression:
                - column_reference:
                    naked_identifier: y
                - lambda: ->
                - column_reference:
                    naked_identifier: x
                - comparison_operator:
                  - raw_comparison_operator: '!'
                  - raw_comparison_operator: '='
                - column_reference:
                    naked_identifier: y
              - comma: ','
              - expression:
                  array_literal:
                  - start_square_bracket: '['
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '2'
                  - comma: ','
                  - numeric_literal: '2'
                  - end_square_bracket: ']'
              - comma: ','
              - expression:
                  array_literal:
                  - start_square_bracket: '['
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '2'
                  - comma: ','
                  - numeric_literal: '2'
                  - comma: ','
                  - numeric_literal: '3'
                  - end_square_bracket: ']'
              - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/limit_by.sql
with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 1, 2 by id;
with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 1 by id;
with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 2 OFFSET 1 by id;
with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 2, 1 by (id, name);
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/limit_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: bfb1a1991e553e932bdb4d70cc5871ef1bec789eb542a3da37739661335c4c54 file: - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: numeric_literal: '1' - comma: ',' - limit_clause_component: numeric_literal: '2' - keyword: by - column_reference: naked_identifier: id - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: numeric_literal: '1' - keyword: by - column_reference: naked_identifier: id - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: numeric_literal: '2' - keyword: OFFSET - limit_clause_component: numeric_literal: '1' - keyword: by - column_reference: naked_identifier: id - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: 
numeric_literal: '2' - comma: ',' - limit_clause_component: numeric_literal: '1' - keyword: by - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/limit_delimited.sql000066400000000000000000000004761503426445100260050ustar00rootroot00000000000000with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 1; with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 1 OFFSET 1; with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT 1, 2; with toto as (SELECT 1 as id, 'test' as name) SELECT * FROM toto LIMIT (1), (2); sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/limit_delimited.yml000066400000000000000000000137011503426445100260020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 45dbfb269056af77b8331d6db67750b4c92d67167e337b229ec7f4aaa510d594 file: - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: keyword: LIMIT limit_clause_component: numeric_literal: '1' - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: numeric_literal: '1' - keyword: OFFSET - limit_clause_component: numeric_literal: '1' - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM 
from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: numeric_literal: '1' - comma: ',' - limit_clause_component: numeric_literal: '2' - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: quoted_literal: "'test'" alias_expression: alias_operator: keyword: as naked_identifier: name end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto limit_clause: - keyword: LIMIT - limit_clause_component: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - limit_clause_component: bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/match_support.sql000066400000000000000000000004161503426445100255310ustar00rootroot00000000000000SELECT match('test', '^[0-9]+$')::bool; WITH test as (SELECT '1' as id) SELECT case when match(id, '^[0-9]*$') then id::Nullable(Float64) end as value_as_number FROM test; with toto as (SELECT '1' as id) SELECT * FROM toto WHERE match(id, '^[0-9]+$'); sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/match_support.yml000066400000000000000000000114121503426445100255310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
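# For local inspection, a parse tree equivalent to the one serialised below
# can be printed from the command line (a sketch assuming SQLFluff is
# installed and the command is run from the repository root):
#
#   sqlfluff parse --dialect clickhouse \
#       test/fixtures/dialects/clickhouse/match_support.sql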
_hash: f1a686c6d7d41af4ec7a97ff9a1422408935c3857c3ca63adbb564bfdaf51b96 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: function: function_name: function_name_identifier: match function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'^[0-9]+$'" - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: bool - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: test keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'1'" alias_expression: alias_operator: keyword: as naked_identifier: id end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: match function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: quoted_literal: "'^[0-9]*$'" - end_bracket: ) - keyword: then - expression: cast_expression: column_reference: naked_identifier: id casting_operator: '::' data_type: data_type_identifier: Nullable bracketed: start_bracket: ( data_type: data_type_identifier: Float64 end_bracket: ) - keyword: end alias_expression: alias_operator: keyword: as naked_identifier: value_as_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: toto keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'1'" alias_expression: alias_operator: keyword: as naked_identifier: id end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: toto where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: match function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: quoted_literal: "'^[0-9]+$'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/order_by_expression.sql000066400000000000000000000005521503426445100267260ustar00rootroot00000000000000CREATE TABLE foodb.events ON CLUSTER '{cluster}' ( timestamp DateTime, mt_id UInt32, event VARCHAR, uuid UUID, value Int32 ) ENGINE = ReplicatedMergeTree('/clickhouse/{cluster}/databases/{database}/all/tables/{table}', '{replica}') ORDER BY (mt_id, toStartOfDay(timestamp), event, uuid) SETTINGS index_granularity = 8192; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/order_by_expression.yml000066400000000000000000000057711503426445100267400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9b3f3e39ec5a0917196f8f5f483661fc81529313c3f87aafbce80af2322ef625 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: foodb - dot: . - naked_identifier: events - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'{cluster}'" - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp data_type: data_type_identifier: DateTime - comma: ',' - column_definition: naked_identifier: mt_id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: event data_type: data_type_identifier: VARCHAR - comma: ',' - column_definition: naked_identifier: uuid data_type: data_type_identifier: UUID - comma: ',' - column_definition: naked_identifier: value data_type: data_type_identifier: Int32 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: ReplicatedMergeTree function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'/clickhouse/{cluster}/databases/{database}/all/tables/{table}'" - comma: ',' - expression: quoted_literal: "'{replica}'" - end_bracket: ) merge_tree_order_by_clause: - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: mt_id - comma: ',' - expression: function: function_name: function_name_identifier: toStartOfDay function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp end_bracket: ) - comma: ',' - column_reference: naked_identifier: event - comma: ',' - column_reference: naked_identifier: uuid - end_bracket: ) settings_clause: keyword: SETTINGS naked_identifier: index_granularity comparison_operator: raw_comparison_operator: '=' numeric_literal: '8192' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/order_by_with_fill.sql000066400000000000000000000014221503426445100265050ustar00rootroot00000000000000SELECT n, source FROM ( SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1 ) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; SELECT toDate((number * 10) * 86400) AS d1, toDate(number * 86400) AS d2, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ORDER BY d2 WITH FILL, d1 WITH FILL STEP 5; SELECT toDate((number * 10) * 86400) AS d1, toDate(number * 86400) AS d2, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ORDER BY d1 WITH FILL STEP 5, d2 WITH FILL; SELECT toDate((number * 10) * 86400) AS d1, toDate(number * 86400) AS d2, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ORDER BY d1 WITH FILL STEP INTERVAL 1 DAY, d2 WITH FILL; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/order_by_with_fill.yml000066400000000000000000000300521503426445100265100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 34db944830e216a9c733d3e985c275d08bc4f4e966543cf57dc34be9b3fc4215 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: n - comma: ',' - select_clause_element: column_reference: naked_identifier: source from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: toFloat32 function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '%' numeric_literal: '10' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: n - comma: ',' - select_clause_element: quoted_literal: "'original'" alias_expression: alias_operator: keyword: AS naked_identifier: source from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: numbers function_contents: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: naked_identifier: number - binary_operator: '%' - numeric_literal: '3' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: n - with_fill: - keyword: WITH - keyword: FILL - keyword: FROM - expression: numeric_literal: '0' - keyword: TO - expression: numeric_literal: '5.51' - keyword: STEP - numeric_literal: '0.5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '10' end_bracket: ) binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d2 - comma: ',' - select_clause_element: quoted_literal: "'original'" alias_expression: alias_operator: keyword: AS naked_identifier: source from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: numbers function_contents: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) where_clause: keyword: WHERE expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '%' numeric_literal: '3' end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d2 - with_fill: - keyword: WITH - keyword: FILL - comma: ',' - column_reference: naked_identifier: d1 - with_fill: - keyword: WITH - keyword: FILL - keyword: STEP - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: toDate 
function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '10' end_bracket: ) binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d2 - comma: ',' - select_clause_element: quoted_literal: "'original'" alias_expression: alias_operator: keyword: AS naked_identifier: source from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: numbers function_contents: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) where_clause: keyword: WHERE expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '%' numeric_literal: '3' end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d1 - with_fill: - keyword: WITH - keyword: FILL - keyword: STEP - numeric_literal: '5' - comma: ',' - column_reference: naked_identifier: d2 - with_fill: - keyword: WITH - keyword: FILL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '10' end_bracket: ) binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toDate function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '*' numeric_literal: '86400' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d2 - comma: ',' - select_clause_element: quoted_literal: "'original'" alias_expression: alias_operator: keyword: AS naked_identifier: source from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: numbers function_contents: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) where_clause: keyword: WHERE expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: number binary_operator: '%' numeric_literal: '3' end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d1 - with_fill: - keyword: WITH - keyword: FILL - keyword: STEP - interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - comma: ',' - column_reference: naked_identifier: d2 - with_fill: - keyword: WITH - keyword: FILL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/prewhere.sql000066400000000000000000000005221503426445100244600ustar00rootroot00000000000000SELECT * FROM table as t PREWHERE event_date = 
1; SELECT * FROM table as t PREWHERE event_date = 1 ORDER BY t1; SELECT * FROM table as t PREWHERE c1 = 1 AND c2 = 2 ORDER BY c1; SELECT * FROM table as t PREWHERE c1 = 1 AND c2 = 2 WHERE c3 = 1 ORDER BY c1; SELECT * FROM table as t PREWHERE c1 = 1 AND c2 = 2 GROUP BY c1 ORDER BY c1; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/prewhere.yml000066400000000000000000000131561503426445100244710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b1daecfc7f7e1b68f441bcaede8c6e723b285165cab40e2bd7788dc4d9cc36d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table alias_expression: alias_operator: keyword: as naked_identifier: t prewhere_clause: keyword: PREWHERE expression: column_reference: naked_identifier: event_date comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table alias_expression: alias_operator: keyword: as naked_identifier: t prewhere_clause: keyword: PREWHERE expression: column_reference: naked_identifier: event_date comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table alias_expression: alias_operator: keyword: as naked_identifier: t prewhere_clause: keyword: PREWHERE expression: - column_reference: naked_identifier: c1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table alias_expression: alias_operator: keyword: as naked_identifier: t prewhere_clause: keyword: PREWHERE expression: - column_reference: naked_identifier: c1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' where_clause: keyword: WHERE expression: column_reference: naked_identifier: c3 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: 
naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table alias_expression: alias_operator: keyword: as naked_identifier: t prewhere_clause: keyword: PREWHERE expression: - column_reference: naked_identifier: c1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: c1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/rename.sql000066400000000000000000000005701503426445100241110ustar00rootroot00000000000000RENAME TABLE my_db.my_table TO my_db.my_new_table; RENAME DATABASE my_db TO other_db; RENAME DATABASE my_db TO other_db, my_db2 TO other_pg; RENAME DICTIONARY dict_A TO dict_B; RENAME DICTIONARY dict_A TO dict_B, dict_A TO dict_B; RENAME DICTIONARY db0.dict_A TO db1.dict_B; RENAME TABLE my_db.my_table TO my_db.my_new_table, my_table2 TO my_new_table2 ON CLUSTER toto; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/rename.yml000066400000000000000000000056351503426445100241220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 32cb3df2680363456d330117023e944d7a69e5ce1abf31d133390c920da09fe6 file: - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_table - keyword: TO - table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_new_table - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: DATABASE - database_reference: naked_identifier: my_db - keyword: TO - database_reference: naked_identifier: other_db - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: DATABASE - database_reference: naked_identifier: my_db - keyword: TO - database_reference: naked_identifier: other_db - comma: ',' - database_reference: naked_identifier: my_db2 - keyword: TO - database_reference: naked_identifier: other_pg - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: DICTIONARY - object_reference: naked_identifier: dict_A - keyword: TO - object_reference: naked_identifier: dict_B - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: DICTIONARY - object_reference: naked_identifier: dict_A - keyword: TO - object_reference: naked_identifier: dict_B - comma: ',' - object_reference: naked_identifier: dict_A - keyword: TO - object_reference: naked_identifier: dict_B - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: DICTIONARY - object_reference: - naked_identifier: db0 - dot: . - naked_identifier: dict_A - keyword: TO - object_reference: - naked_identifier: db1 - dot: . 
- naked_identifier: dict_B - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_table - keyword: TO - table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_new_table - comma: ',' - table_reference: naked_identifier: my_table2 - keyword: TO - table_reference: naked_identifier: my_new_table2 - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: toto - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_distinct_on.sql000066400000000000000000000002761503426445100265210ustar00rootroot00000000000000SELECT DISTINCT ON (a,b) * FROM t1; SELECT DISTINCT ON (a,b) * FROM t1 ORDER BY b ASC; -- Distinct on clause can contain expressions SELECT DISTINCT ON (a = b, c) * FROM t1 ORDER BY b ASC; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_distinct_on.yml000066400000000000000000000062021503426445100265160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 35ce57bce74263c26149e6c293b62fc8ee6fd6f95eedd5132fd8ce0030f885e1 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: 'ON' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: 'ON' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - keyword: ASC - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: 'ON' - bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: c - end_bracket: ) select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - keyword: ASC - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_except.sql000066400000000000000000000001001503426445100254560ustar00rootroot00000000000000SELECT * EXCEPT (c1) from t1; SELECT * EXCEPT (c1, c2) from t1; 
sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_except.yml000066400000000000000000000032261503426445100254740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac4e56119ee233d9046cfd776655e3f259a469303a6d656776f6c3c03b47bb84 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: EXCEPT bracketed: start_bracket: ( naked_identifier: c1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: EXCEPT bracketed: - start_bracket: ( - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_with_settings.sql000066400000000000000000000003541503426445100270740ustar00rootroot00000000000000SELECT * FROM test1 SETTINGS allow_experimental_window_functions = 1; SELECT * FROM test1 WHERE a = '' SETTINGS allow_experimental_window_functions = 1; SELECT * FROM test1 ORDER BY 2 SETTINGS allow_experimental_window_functions = 1; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/select_with_settings.yml000066400000000000000000000050731503426445100271010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
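# These fixtures are exercised by the dialect test suite; one way to run just
# the ClickHouse cases is sketched below (the test path and the -k filter are
# assumptions about the standard pytest layout, not verified here):
#
#   python -m pytest test/dialects -k clickhouse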
_hash: fd0194bb75416c2dc6cf21f447c69bdbb703e9fd4ca6529b9243044c14a4a54c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 settings_clause: keyword: SETTINGS naked_identifier: allow_experimental_window_functions comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" settings_clause: keyword: SETTINGS naked_identifier: allow_experimental_window_functions comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '2' settings_clause: keyword: SETTINGS naked_identifier: allow_experimental_window_functions comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/system_statement.sql000066400000000000000000000104161503426445100262520ustar00rootroot00000000000000-- RELOAD DICTIONARY SELECT name, status FROM system.dictionaries; -- RELOAD MODELS SYSTEM RELOAD MODELS; SYSTEM RELOAD MODELS ON CLUSTER cluster_name; -- RELOAD MODEL SYSTEM RELOAD MODEL /model/path; SYSTEM RELOAD MODEL ON CLUSTER cluster_name /model/path; -- DROP REPLICA SYSTEM DROP REPLICA 'replica_name' FROM TABLE table; SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table; SYSTEM DROP REPLICA 'replica_name' FROM DATABASE database; SYSTEM DROP REPLICA 'replica_name'; SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk'; -- Managing Distributed Tables -- -- STOP DISTRIBUTED SENDS SYSTEM STOP DISTRIBUTED SENDS distributed_table_name; SYSTEM STOP DISTRIBUTED SENDS db.distributed_table_name; -- -- FLUSH DISTRIBUTED SYSTEM FLUSH DISTRIBUTED distributed_table_name; SYSTEM FLUSH DISTRIBUTED db.distributed_table_name; -- -- START DISTRIBUTED SENDS SYSTEM START DISTRIBUTED SENDS distributed_table_name; SYSTEM START DISTRIBUTED SENDS db.distributed_table_name; -- Managing MergeTree Tables -- -- STOP MERGES SYSTEM STOP MERGES ON VOLUME volume_name; SYSTEM STOP MERGES merge_tree_family_table_name; SYSTEM STOP MERGES db.merge_tree_family_table_name; -- -- START MERGES SYSTEM START MERGES ON VOLUME volume_name; SYSTEM START MERGES merge_tree_family_table_name; SYSTEM START MERGES db.merge_tree_family_table_name; -- -- STOP TTL MERGES SYSTEM STOP TTL MERGES; SYSTEM STOP TTL MERGES db.merge_tree_family_table_name; SYSTEM STOP TTL MERGES merge_tree_family_table_name; -- -- START TTL MERGES SYSTEM START TTL MERGES; SYSTEM START TTL MERGES merge_tree_family_table_name; SYSTEM START TTL MERGES db.merge_tree_family_table_name; -- -- STOP MOVES SYSTEM STOP MOVES; SYSTEM STOP MOVES merge_tree_family_table_name; SYSTEM 
STOP MOVES db.merge_tree_family_table_name; -- -- START MOVES SYSTEM START MOVES; SYSTEM START MOVES merge_tree_family_table_name; SYSTEM START MOVES db.merge_tree_family_table_name; -- -- SYSTEM UNFREEZE SYSTEM UNFREEZE WITH NAME backup_name; -- Managing ReplicatedMergeTree Tables -- -- STOP FETCHES SYSTEM STOP FETCHES; SYSTEM STOP FETCHES replicated_merge_tree_family_table_name; SYSTEM STOP FETCHES db.replicated_merge_tree_family_table_name; -- -- START FETCHES SYSTEM START FETCHES; SYSTEM START FETCHES replicated_merge_tree_family_table_name; SYSTEM START FETCHES db.replicated_merge_tree_family_table_name; -- -- STOP REPLICATED SENDS SYSTEM STOP REPLICATED SENDS; SYSTEM STOP REPLICATED SENDS replicated_merge_tree_family_table_name; SYSTEM STOP REPLICATED SENDS db.replicated_merge_tree_family_table_name; -- -- START REPLICATED SENDS SYSTEM START REPLICATED SENDS; SYSTEM START REPLICATED SENDS replicated_merge_tree_family_table_name; SYSTEM START REPLICATED SENDS db.replicated_merge_tree_family_table_name; -- -- STOP REPLICATION QUEUES SYSTEM STOP REPLICATION QUEUES; SYSTEM STOP REPLICATION QUEUES replicated_merge_tree_family_table_name; SYSTEM STOP REPLICATION QUEUES db.replicated_merge_tree_family_table_name; -- -- START REPLICATION QUEUES SYSTEM START REPLICATION QUEUES; SYSTEM START REPLICATION QUEUES replicated_merge_tree_family_table_name; SYSTEM START REPLICATION QUEUES db.replicated_merge_tree_family_table_name; -- -- SYNC REPLICA SYSTEM SYNC REPLICA replicated_merge_tree_family_table_name; SYSTEM SYNC REPLICA db.replicated_merge_tree_family_table_name; SYSTEM SYNC REPLICA replicated_merge_tree_family_table_name STRICT; SYSTEM SYNC REPLICA ON CLUSTER cluster_name replicated_merge_tree_family_table_name; SYSTEM SYNC REPLICA ON CLUSTER cluster_name replicated_merge_tree_family_table_name STRICT; SYSTEM SYNC REPLICA ON CLUSTER cluster_name db.replicated_merge_tree_family_table_name; SYSTEM SYNC REPLICA ON CLUSTER cluster_name db.replicated_merge_tree_family_table_name STRICT; -- -- RESTART REPLICA SYSTEM RESTART REPLICA replicated_merge_tree_family_table_name; SYSTEM RESTART REPLICA db.replicated_merge_tree_family_table_name; -- -- RESTORE REPLICA SYSTEM RESTORE REPLICA replicated_merge_tree_family_table_name; SYSTEM RESTORE REPLICA db.replicated_merge_tree_family_table_name; SYSTEM RESTORE REPLICA replicated_merge_tree_family_table_name ON CLUSTER cluster_name; SYSTEM RESTORE REPLICA db.replicated_merge_tree_family_table_name ON CLUSTER cluster_name; -- -- DROP FILESYSTEM CACHE SYSTEM DROP FILESYSTEM CACHE; -- -- SYNC FILE CACHE SYSTEM SYNC FILE CACHE; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/system_statement.yml000066400000000000000000000457731503426445100262720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e253811b3c4090c78004856bbcc4ffce77899ea084e7fcd9a659c50f2657d856 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: status from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . 
- naked_identifier: dictionaries - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_model_segment: - keyword: RELOAD - keyword: MODELS - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_model_segment: - keyword: RELOAD - keyword: MODELS - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_model_segment: - keyword: RELOAD - keyword: MODEL - path_segment: - slash: / - path_segment: model - slash: / - path_segment: path - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_model_segment: - keyword: RELOAD - keyword: MODEL - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - path_segment: - slash: / - path_segment: model - slash: / - path_segment: path - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: DROP - keyword: REPLICA - quoted_identifier: "'replica_name'" - keyword: FROM - keyword: TABLE - table_reference: naked_identifier: table - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: DROP - keyword: REPLICA - quoted_identifier: "'replica_name'" - keyword: FROM - keyword: TABLE - table_reference: - naked_identifier: database - dot: . - naked_identifier: table - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: DROP - keyword: REPLICA - quoted_identifier: "'replica_name'" - keyword: FROM - keyword: DATABASE - object_reference: naked_identifier: database - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: DROP - keyword: REPLICA - quoted_identifier: "'replica_name'" - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: DROP - keyword: REPLICA - quoted_identifier: "'replica_name'" - keyword: FROM - keyword: ZKPATH - path_segment: quoted_literal: "'/path/to/table/in/zk'" - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: STOP - keyword: DISTRIBUTED - keyword: SENDS - table_reference: naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: STOP - keyword: DISTRIBUTED - keyword: SENDS - table_reference: - naked_identifier: db - dot: . - naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: FLUSH - keyword: DISTRIBUTED - table_reference: naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: FLUSH - keyword: DISTRIBUTED - table_reference: - naked_identifier: db - dot: . - naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: START - keyword: DISTRIBUTED - keyword: SENDS - table_reference: naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_distributed_segment: - keyword: START - keyword: DISTRIBUTED - keyword: SENDS - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: distributed_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: STOP - keyword: MERGES - keyword: 'ON' - keyword: VOLUME - object_reference: naked_identifier: volume_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: STOP - keyword: MERGES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: STOP - keyword: MERGES - table_reference: - naked_identifier: db - dot: . - naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: START - keyword: MERGES - keyword: 'ON' - keyword: VOLUME - object_reference: naked_identifier: volume_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: START - keyword: MERGES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_merges_segment: - keyword: START - keyword: MERGES - table_reference: - naked_identifier: db - dot: . - naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: STOP - keyword: TTL - keyword: MERGES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: STOP - keyword: TTL - keyword: MERGES - table_reference: - naked_identifier: db - dot: . - naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: STOP - keyword: TTL - keyword: MERGES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: START - keyword: TTL - keyword: MERGES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: START - keyword: TTL - keyword: MERGES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_ttl_merges_segment: - keyword: START - keyword: TTL - keyword: MERGES - table_reference: - naked_identifier: db - dot: . - naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: STOP - keyword: MOVES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: STOP - keyword: MOVES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: STOP - keyword: MOVES - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: START - keyword: MOVES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: START - keyword: MOVES - table_reference: naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_moves_segment: - keyword: START - keyword: MOVES - table_reference: - naked_identifier: db - dot: . - naked_identifier: merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_unfreeze_segment: - keyword: UNFREEZE - keyword: WITH - keyword: NAME - object_reference: naked_identifier: backup_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: STOP - keyword: FETCHES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: STOP - keyword: FETCHES - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: STOP - keyword: FETCHES - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: START - keyword: FETCHES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: START - keyword: FETCHES - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_fetches_segment: - keyword: START - keyword: FETCHES - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: STOP - keyword: REPLICATED - keyword: SENDS - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: STOP - keyword: REPLICATED - keyword: SENDS - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: STOP - keyword: REPLICATED - keyword: SENDS - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: START - keyword: REPLICATED - keyword: SENDS - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: START - keyword: REPLICATED - keyword: SENDS - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replicated_segment: - keyword: START - keyword: REPLICATED - keyword: SENDS - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: STOP - keyword: REPLICATION - keyword: QUEUES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: STOP - keyword: REPLICATION - keyword: QUEUES - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: STOP - keyword: REPLICATION - keyword: QUEUES - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: START - keyword: REPLICATION - keyword: QUEUES - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: START - keyword: REPLICATION - keyword: QUEUES - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replication_segment: - keyword: START - keyword: REPLICATION - keyword: QUEUES - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - table_reference: naked_identifier: replicated_merge_tree_family_table_name - keyword: STRICT - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - table_reference: naked_identifier: replicated_merge_tree_family_table_name - keyword: STRICT - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: SYNC - keyword: REPLICA - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: replicated_merge_tree_family_table_name - keyword: STRICT - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTART - keyword: REPLICA - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTART - keyword: REPLICA - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTORE - keyword: REPLICA - table_reference: naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTORE - keyword: REPLICA - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTORE - keyword: REPLICA - table_reference: naked_identifier: replicated_merge_tree_family_table_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_replica_segment: - keyword: RESTORE - keyword: REPLICA - table_reference: - naked_identifier: db - dot: . - naked_identifier: replicated_merge_tree_family_table_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_filesystem_segment: - keyword: DROP - keyword: FILESYSTEM - keyword: CACHE - statement_terminator: ; - statement: system_statement: keyword: SYSTEM system_file_segment: - keyword: SYNC - keyword: FILE - keyword: CACHE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/tuple_datatype.sql000066400000000000000000000004361503426445100256670ustar00rootroot00000000000000SELECT (1, 'two', '2024-01-01')::Tuple(id Int64, name String, created_at Date32) as struct; SELECT (1, 'two', '2024-01-01')::Tuple(`id` Int64, `name` String, `created_at` Date32) as struct; SELECT (1, 'two', '2024-01-01')::`Tuple(id Int64, name String, created_at Date32)` as struct; sqlfluff-3.4.2/test/fixtures/dialects/clickhouse/tuple_datatype.yml000066400000000000000000000066761503426445100257050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 96ba28b45c02cd4ae856cbf38fdec62fa5d503e11da64e4f9cbc3042513950ea file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - quoted_literal: "'two'" - comma: ',' - quoted_literal: "'2024-01-01'" - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: Tuple bracketed: - start_bracket: ( - naked_identifier: id - data_type: data_type_identifier: Int64 - comma: ',' - naked_identifier: name - data_type: data_type_identifier: String - comma: ',' - naked_identifier: created_at - data_type: data_type_identifier: Date32 - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: struct - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - quoted_literal: "'two'" - comma: ',' - quoted_literal: "'2024-01-01'" - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: Tuple bracketed: - start_bracket: ( - quoted_identifier: '`id`' - data_type: data_type_identifier: Int64 - comma: ',' - quoted_identifier: '`name`' - data_type: data_type_identifier: String - comma: ',' - quoted_identifier: '`created_at`' - data_type: data_type_identifier: Date32 - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: struct - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - quoted_literal: "'two'" - comma: ',' - quoted_literal: "'2024-01-01'" - end_bracket: ) casting_operator: '::' data_type: quoted_identifier: '`Tuple(id Int64, name String, created_at Date32)`' alias_expression: alias_operator: keyword: as naked_identifier: struct - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/000077500000000000000000000000001503426445100220755ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/databricks/.sqlfluff000066400000000000000000000000401503426445100237120ustar00rootroot00000000000000[sqlfluff] dialect = databricks sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_catalog.sql000066400000000000000000000013551503426445100254230ustar00rootroot00000000000000-- Transfer ownership of the catalog to another user ALTER CATALOG some_cat OWNER TO `alf@melmak.et`; ALTER CATALOG some_cat OWNER TO my_group; -- SET is allowed as an optional keyword ALTER CATALOG some_cat SET OWNER TO `alf@melmak.et`; ALTER CATALOG some_cat SET OWNER TO my_group; -- Set and unset catalog tags ALTER CATALOG some_cat SET TAGS ('tag1'='value1'); ALTER CATALOG some_cat SET TAGS ('tag2'='value2', 'tag3'='value3'); ALTER CATALOG some_cat UNSET TAGS ('tag1'); ALTER CATALOG some_cat UNSET TAGS ('tag2', 'tag3'); -- Enable/Inherit/Disable Predictive Optimization ALTER CATALOG some_cat ENABLE PREDICTIVE OPTIMIZATION; ALTER CATALOG some_cat INHERIT PREDICTIVE OPTIMIZATION; ALTER CATALOG some_cat DISABLE PREDICTIVE OPTIMIZATION; sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_catalog.yml000066400000000000000000000077351503426445100254350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4bd715e4e734bf4969378de3398d7a34160dfda870decaf7d4fd8a28b0837d16 file: - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: SET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" end_bracket: ) - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: UNSET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" end_bracket: ) - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: ENABLE - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: INHERIT - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_catalog_statement: - keyword: ALTER - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - keyword: DISABLE - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_database.sql000066400000000000000000000021171503426445100255520ustar00rootroot00000000000000-- Transfer ownership of the schema to another user ALTER SCHEMA some_cat OWNER TO `alf@melmak.et`; ALTER SCHEMA some_cat OWNER TO my_group; -- SET is allowed as an optional keyword ALTER 
SCHEMA some_cat SET OWNER TO `alf@melmak.et`; ALTER SCHEMA some_cat SET OWNER TO my_group; -- DATABASE is allowed instead of SCHEMA ALTER DATABASE some_cat OWNER TO `alf@melmak.et`; ALTER DATABASE some_cat SET OWNER TO `alf@melmak.et`; ALTER DATABASE some_cat OWNER TO my_group; ALTER DATABASE some_cat SET OWNER TO my_group; -- Set and unset schema tags ALTER SCHEMA some_cat SET TAGS ('tag1'='value1'); ALTER DATABASE some_cat SET TAGS ('tag2'='value2', 'tag3'='value3'); ALTER DATABASE some_cat UNSET TAGS ('tag1'); ALTER SCHEMA some_cat UNSET TAGS ('tag2', 'tag3'); -- Enable/Inherit/Disable Predictive Optimization ALTER SCHEMA some_cat ENABLE PREDICTIVE OPTIMIZATION; ALTER DATABASE some_cat INHERIT PREDICTIVE OPTIMIZATION; ALTER SCHEMA some_cat DISABLE PREDICTIVE OPTIMIZATION; -- Add some schema properties ALTER SCHEMA some_cat SET DBPROPERTIES ('Edited-by'='John Doe', 'Edit-date'='2020-01-01');
sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_database.yml
# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 03c610e315a211d3fed46036ea30a6f132e3e88825d7e64d89bbfa3386cb8b7c file: - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: SET - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: SET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'"
end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: UNSET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: ENABLE - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: some_cat - keyword: INHERIT - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: DISABLE - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: some_cat - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'Edited-by'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John Doe'" - comma: ',' - property_name_identifier: quoted_identifier: "'Edit-date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2020-01-01'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_table.sql000066400000000000000000000063031503426445100250760ustar00rootroot00000000000000-- ALTER TABLE examples from Databricks documentation -- https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-table.html ALTER TABLE Student RENAME TO StudentInfo; ALTER TABLE default.StudentInfo PARTITION (age='10') RENAME TO PARTITION (age='15'); ALTER TABLE StudentInfo ADD columns (LastName string, DOB timestamp); ALTER TABLE StudentInfo DROP COLUMN (DOB); ALTER TABLE StudentInfo DROP COLUMNS IF EXISTS (LastName, DOB); ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18); ALTER TABLE StudentInfo DROP IF EXISTS PARTITION (age=18); ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18) PARTITION (age=20); ALTER TABLE StudentInfo RECOVER PARTITIONS; ALTER TABLE StudentInfo ALTER COLUMN name COMMENT "new comment"; ALTER TABLE StudentInfo RENAME COLUMN name TO FirstName; -- Change the file Location ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways'; -- SET SERDE/ SERDE Properties (DBR only) ALTER TABLE test_tab SET SERDE 
'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; ALTER TABLE dbx.tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee'); -- SET TABLE PROPERTIES ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('winner' = 'loser'); -- DROP TABLE PROPERTIES ALTER TABLE dbx.tab1 UNSET TBLPROPERTIES ('winner'); -- Drop the "deletion vectors" from a Delta table ALTER TABLE my_table DROP FEATURE deletionVectors; -- 24 hours later ALTER TABLE my_table DROP FEATURE deletionVectors TRUNCATE HISTORY; -- Applies three tags to the table named `test`. ALTER TABLE test SET TAGS ('tag1' = 'val1', 'tag2' = 'val2', 'tag3' = 'val3'); -- Removes three tags from the table named `test`. ALTER TABLE test UNSET TAGS ('tag1', 'tag2', 'tag3'); -- Applies three tags to table `main.schema1.test` column `col1`. ALTER TABLE main.schema1.test ALTER COLUMN col1 SET TAGS ('tag1' = 'val1', 'tag2' = 'val2', 'tag3' = 'val3'); -- Removes three tags from table `main.schema1.test` column `col1`. ALTER TABLE main.schema1.test ALTER COLUMN col1 UNSET TAGS ('tag1', 'tag2', 'tag3'); -- Enables predictive optimization for my_table ALTER TABLE my_table ENABLE PREDICTIVE OPTIMIZATION; ALTER TABLE sales SET ROW FILTER us_filter ON (); ALTER TABLE sales SET ROW FILTER us_filter ON (region); ALTER TABLE sales DROP ROW FILTER; ALTER TABLE users ALTER COLUMN ssn SET MASK ssn_mask; ALTER TABLE users ALTER COLUMN ssn SET MASK ssn_mask USING COLUMNS (ssn_value); ALTER TABLE users ALTER COLUMN ssn DROP MASK; ALTER TABLE persons ADD CONSTRAINT persons_pk PRIMARY KEY(first_name, last_name); ALTER TABLE pets ADD CONSTRAINT pets_persons_fk FOREIGN KEY(owner_first_name, owner_last_name) REFERENCES persons NOT ENFORCED RELY; ALTER TABLE pets ADD CONSTRAINT pets_name_not_cute_chk CHECK (length(name) < 20); ALTER TABLE pets DROP CONSTRAINT pets_name_not_cute_chk; ALTER TABLE persons DROP CONSTRAINT persons_pk RESTRICT; ALTER TABLE pets DROP FOREIGN KEY IF EXISTS (owner_first_name, owner_last_name); ALTER TABLE persons DROP PRIMARY KEY CASCADE; ALTER TABLE rocks DROP COLUMN rock; ALTER TABLE rocks DROP COLUMN rock, loc; ALTER TABLE rocks DROP COLUMN IF EXISTS rock, loc; ALTER TABLE rocks DROP COLUMN IF EXISTS (rock, loc); sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_table.yml000066400000000000000000000460201503426445100251000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 24c2de61a1dd493f7fbc541af4a96afa39ed1e6475d8a6b76bac314c8349c9f4 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Student - keyword: RENAME - keyword: TO - table_reference: naked_identifier: StudentInfo - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: StudentInfo - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10'" end_bracket: ) - keyword: RENAME - keyword: TO - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'15'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: columns - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: LastName data_type: primitive_type: keyword: string - comma: ',' - column_definition: column_reference: naked_identifier: DOB data_type: primitive_type: keyword: timestamp - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: DROP - keyword: COLUMN - bracketed: start_bracket: ( column_reference: naked_identifier: DOB end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: DROP - keyword: COLUMNS - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - column_reference: naked_identifier: LastName - comma: ',' - column_reference: naked_identifier: DOB - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: DROP - keyword: IF - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - keyword: PARTITION - bracketed: start_bracket: ( set_clause: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: RECOVER - keyword: PARTITIONS - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: name - keyword: COMMENT - quoted_literal: '"new comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: RENAME - keyword: COLUMN - 
column_reference: naked_identifier: name - keyword: TO - column_reference: naked_identifier: FirstName - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbx - dot: . - naked_identifier: tab1 - keyword: PARTITION - bracketed: - start_bracket: ( - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2'" - end_bracket: ) - keyword: SET - keyword: LOCATION - quoted_literal: "'/path/to/part/ways'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: test_tab - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbx - dot: . - naked_identifier: tab1 - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'k'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'v'" - comma: ',' - property_name_identifier: quoted_identifier: "'kay'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'vee'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbx - dot: . - naked_identifier: tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'loser'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbx - dot: . 
- naked_identifier: tab1 - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: FEATURE - object_reference: naked_identifier: deletionVectors - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: FEATURE - object_reference: naked_identifier: deletionVectors - keyword: TRUNCATE - keyword: HISTORY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: test - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val3'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: test - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: main - dot: . - naked_identifier: schema1 - dot: . - naked_identifier: test - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val3'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: main - dot: . - naked_identifier: schema1 - dot: . 
- naked_identifier: test - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: ENABLE - keyword: PREDICTIVE - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: sales - keyword: SET - keyword: ROW - keyword: FILTER - object_reference: naked_identifier: us_filter - keyword: 'ON' - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: sales - keyword: SET - keyword: ROW - keyword: FILTER - object_reference: naked_identifier: us_filter - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: region end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: sales - keyword: DROP - keyword: ROW - keyword: FILTER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: ssn - keyword: SET - mask_statement: keyword: MASK function_name: function_name_identifier: ssn_mask - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: ssn - keyword: SET - mask_statement: - keyword: MASK - function_name: function_name_identifier: ssn_mask - keyword: USING - keyword: COLUMNS - bracketed: start_bracket: ( column_reference: naked_identifier: ssn_value end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: ssn - keyword: DROP - keyword: MASK - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: persons - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: persons_pk - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: last_name - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: pets - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: pets_persons_fk - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: owner_first_name - comma: ',' - column_reference: naked_identifier: owner_last_name - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: persons - keyword: NOT - keyword: ENFORCED - keyword: RELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: 
naked_identifier: pets - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: pets_name_not_cute_chk - keyword: CHECK - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: name end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: pets - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: pets_name_not_cute_chk - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: persons - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: persons_pk - keyword: RESTRICT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: pets - keyword: DROP - keyword: FOREIGN - keyword: KEY - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - column_reference: naked_identifier: owner_first_name - comma: ',' - column_reference: naked_identifier: owner_last_name - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: persons - keyword: DROP - keyword: PRIMARY - keyword: KEY - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rocks - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: rock - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rocks - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: rock - comma: ',' - column_reference: naked_identifier: loc - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rocks - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: rock - comma: ',' - column_reference: naked_identifier: loc - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rocks - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - column_reference: naked_identifier: rock - comma: ',' - column_reference: naked_identifier: loc - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_view.sql000066400000000000000000000021161503426445100247570ustar00rootroot00000000000000-- ALTER TABLE examples from Databricks documentation -- https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-alter-view.html ALTER VIEW tempsc1.v1 RENAME TO tempsc1.v2; ALTER VIEW IDENTIFIER('tempsc1.v1') RENAME TO IDENTIFIER('tempsc1.v2'); ALTER VIEW tempsc1.v2 SET TBLPROPERTIES ('created.by.user' = "John", 'created.date' = '01-01-2001' ); ALTER VIEW tempsc1.v2 UNSET TBLPROPERTIES (`created`.`by`.`user`, created.date); ALTER VIEW tempsc1.v2 AS SELECT * FROM tempsc1.v1; ALTER VIEW v1 OWNER TO `alf@melmak.et`; ALTER VIEW v1 SET OWNER TO `alf@melmak.et`; ALTER VIEW v1 WITH SCHEMA BINDING; ALTER VIEW v1 WITH SCHEMA COMPENSATION; ALTER VIEW v1 WITH SCHEMA TYPE EVOLUTION; ALTER 
VIEW v1 WITH SCHEMA EVOLUTION; ALTER MATERIALIZED VIEW my_mv ADD SCHEDULE CRON '0 0 0 * * ? *' AT TIME ZONE 'America/Los_Angeles'; ALTER MATERIALIZED VIEW my_mv ALTER SCHEDULE CRON '0 0/15 * * * ? *'; ALTER MATERIALIZED VIEW my_mv DROP SCHEDULE; ALTER VIEW test SET TAGS ('tag1' = 'val1', 'tag2' = 'val2', 'tag3' = 'val3'); ALTER VIEW test UNSET TAGS ('tag1', 'tag2', 'tag3');
sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_view.yml
# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
- naked_identifier: v1 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: SET - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: WITH - keyword: SCHEMA - keyword: BINDING - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: WITH - keyword: SCHEMA - keyword: COMPENSATION - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: WITH - keyword: SCHEMA - keyword: TYPE - keyword: EVOLUTION - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: WITH - keyword: SCHEMA - keyword: EVOLUTION - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mv - keyword: ADD - keyword: SCHEDULE - keyword: CRON - quoted_literal: "'0 0 0 * * ? *'" - keyword: AT - keyword: TIME - keyword: ZONE - quoted_literal: "'America/Los_Angeles'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mv - keyword: ALTER - keyword: SCHEDULE - keyword: CRON - quoted_literal: "'0 0/15 * * * ? 
*'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mv - keyword: DROP - keyword: SCHEDULE - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: test - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'val3'" - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: test - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag1'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_volume.sql000066400000000000000000000011131503426445100253100ustar00rootroot00000000000000-- Rename a volume ALTER VOLUME some_vol RENAME TO some_new_vol; -- Transfer ownership of the volume to another user ALTER VOLUME some_vol OWNER TO `alf@melmak.et`; ALTER VOLUME some_vol OWNER TO my_group; -- SET is allowed as an optional keyword ALTER VOLUME some_vol SET OWNER TO `alf@melmak.et`; ALTER VOLUME some_vol SET OWNER TO my_group; -- Set and unset volume tags ALTER VOLUME some_vol SET TAGS ('tag1'='value1'); ALTER VOLUME some_vol SET TAGS ('tag2'='value2', 'tag3'='value3'); ALTER VOLUME some_vol UNSET TAGS ('tag1'); ALTER VOLUME some_vol UNSET TAGS ('tag2', 'tag3'); sqlfluff-3.4.2/test/fixtures/dialects/databricks/alter_volume.yml000066400000000000000000000067531503426445100253310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 49236a4b75d34513f6e1314253a9ca6cd584bed2137dbee75afbe1555e0eebab file: - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: RENAME - keyword: TO - volume_reference: naked_identifier: some_new_vol - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: SET - keyword: OWNER - keyword: TO - quoted_identifier: '`alf@melmak.et`' - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: SET - keyword: OWNER - keyword: TO - naked_identifier: my_group - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: SET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" end_bracket: ) - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: SET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: UNSET - keyword: TAGS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'tag1'" end_bracket: ) - statement_terminator: ; - statement: alter_volume_statement: - keyword: ALTER - keyword: VOLUME - volume_reference: naked_identifier: some_vol - keyword: UNSET - keyword: TAGS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'tag2'" - comma: ',' - property_name_identifier: quoted_identifier: "'tag3'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/command_terminator.sql000066400000000000000000000002611503426445100264770ustar00rootroot00000000000000-- Databricks notebook source SELECT COL1 FROM TABLE1 -- COMMAND ---------- SELECT COL2 FROM TABLE2 -- COMMAND ---------- SELECT COL3 FROM TABLE3; SELECT COL4 FROM TABLE4; sqlfluff-3.4.2/test/fixtures/dialects/databricks/command_terminator.yml000066400000000000000000000040161503426445100265030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
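Every fixture header, including the one above, states that the `_hash` field must match a hash computed by SQLFluff when the tests run. The exact scheme is not visible in this dump; the 64-hex-character values look like SHA-256 digests, so the sketch below assumes SHA-256 over the fixture body purely for illustration. The real computation lives in SQLFluff's test tooling and may cover different bytes.

import hashlib

def fixture_digest(yaml_body: str) -> str:
    # Assumed scheme: SHA-256 over the UTF-8 bytes of the fixture payload.
    # What the real hash input is (whole file? file minus header?
    # normalised?) is not shown here, so treat this as a stand-in only.
    return hashlib.sha256(yaml_body.encode("utf-8")).hexdigest()

print(fixture_digest("file:\n- statement: ...\n"))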
_hash: 29da11329feadea30d92cf0f13e7fd80c65ef79c00a12c73c8e3e0c6514d5167 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE1 - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE2 - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE3 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE4 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/comment_on.sql000066400000000000000000000010341503426445100247520ustar00rootroot00000000000000COMMENT ON CATALOG my_catalog IS 'This is my catalog'; COMMENT ON CONNECTION mysql_connection IS 'this is a mysql connection'; COMMENT ON SCHEMA my_schema IS 'This is my schema'; COMMENT ON DATABASE my_other_schema IS 'This is my other schema'; COMMENT ON TABLE my_table IS 'This is my table'; COMMENT ON TABLE my_table IS NULL; COMMENT ON SHARE my_share IS 'A good share'; COMMENT ON RECIPIENT my_recipient IS 'A good recipient'; COMMENT ON PROVIDER my_provider IS 'A good provider'; COMMENT ON VOLUME my_volume IS 'Huge volume'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/comment_on.yml000066400000000000000000000054551503426445100247670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dea3b1bd03043d9c632669f603ac0787f764a78efe1628003ee13b4440a1dfc0 file: - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CATALOG - catalog_reference: naked_identifier: my_catalog - keyword: IS - quoted_literal: "'This is my catalog'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONNECTION - object_reference: naked_identifier: mysql_connection - keyword: IS - quoted_literal: "'this is a mysql connection'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - database_reference: naked_identifier: my_schema - keyword: IS - quoted_literal: "'This is my schema'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: DATABASE - database_reference: naked_identifier: my_other_schema - keyword: IS - quoted_literal: "'This is my other schema'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: IS - quoted_literal: "'This is my table'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SHARE - object_reference: naked_identifier: my_share - keyword: IS - quoted_literal: "'A good share'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: RECIPIENT - object_reference: naked_identifier: my_recipient - keyword: IS - quoted_literal: "'A good recipient'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PROVIDER - object_reference: naked_identifier: my_provider - keyword: IS - quoted_literal: "'A good provider'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: VOLUME - volume_reference: naked_identifier: my_volume - keyword: IS - quoted_literal: "'Huge volume'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_catalog.sql000066400000000000000000000006631503426445100255600ustar00rootroot00000000000000-- Create catalog `customer_cat`. -- This throws exception if catalog with name customer_cat already exists. CREATE CATALOG customer_cat; -- Create catalog `customer_cat` only if catalog with same name doesn't exist. CREATE CATALOG IF NOT EXISTS customer_cat; -- Create catalog `customer_cat` only if catalog with same name doesn't exist, with a comment. CREATE CATALOG IF NOT EXISTS customer_cat COMMENT 'This is customer catalog'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_catalog.yml000066400000000000000000000021401503426445100255520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
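The paired .sql/.yml layout implies a round-trip check: parse the SQL file, serialise the parse tree, and compare it against the recorded YAML. A hedged sketch of that idea follows; `check_fixture` is a hypothetical helper, and the `as_record(code_only=True, show_raw=True)` call and the exact dict shape it returns are assumptions about SQLFluff internals rather than a copy of the real dialect test harness.

import yaml
from sqlfluff.core import Linter

def check_fixture(sql_path: str, yml_path: str, dialect: str) -> bool:
    # Hypothetical helper; the real assertions live in sqlfluff's dialect
    # test suite and may normalise the tree differently.
    with open(sql_path, encoding="utf-8") as f:
        parsed = Linter(dialect=dialect).parse_string(f.read())
    with open(yml_path, encoding="utf-8") as f:
        expected = yaml.safe_load(f)
    expected.pop("_hash", None)  # metadata, not part of the parse tree
    # as_record(...) is assumed here to mirror the YAML fixture layout.
    actual = parsed.tree.as_record(code_only=True, show_raw=True)
    return actual == expected

print(check_fixture("create_catalog.sql", "create_catalog.yml", "databricks"))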
_hash: 54e4939e42deb86808cd4a8d475b03ec3d20b8480db5777aba6d99db75fbaf07 file: - statement: create_catalog_statement: - keyword: CREATE - keyword: CATALOG - catalog_reference: naked_identifier: customer_cat - statement_terminator: ; - statement: create_catalog_statement: - keyword: CREATE - keyword: CATALOG - keyword: IF - keyword: NOT - keyword: EXISTS - catalog_reference: naked_identifier: customer_cat - statement_terminator: ; - statement: create_catalog_statement: - keyword: CREATE - keyword: CATALOG - keyword: IF - keyword: NOT - keyword: EXISTS - catalog_reference: naked_identifier: customer_cat - keyword: COMMENT - quoted_literal: "'This is customer catalog'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_database.sql000066400000000000000000000020411503426445100257020ustar00rootroot00000000000000-- Create database with all optional syntax CREATE DATABASE IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value"); -- Create schema with all optional syntax CREATE SCHEMA IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value" ); -- Create database `customer_db`. CREATE DATABASE customer_db; -- Create database `customer_db` only if database with same name doesn't exist. CREATE DATABASE IF NOT EXISTS customer_db; -- `Comments`,`Specific Location` and `Database properties`. CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES ("ID" = "001", "Name" = 'John'); -- Create `inventory_db` Database CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory'; -- Create schema with a managed location CREATE SCHEMA IF NOT EXISTS database_name MANAGED LOCATION "s3://root_database_bucket/" sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_database.yml000066400000000000000000000070371503426445100257160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0df327a33f3826f3a0681833b5fe2f4cd1742d5776662034b789f7a129f47025 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - keyword: COMMENT - quoted_literal: "'This is customer database'" - keyword: LOCATION - quoted_literal: "'/user'" - keyword: WITH - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"ID"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"001"' - comma: ',' - property_name_identifier: quoted_identifier: '"Name"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: inventory_db - keyword: COMMENT - quoted_literal: "'This database is used to maintain Inventory'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: MANAGED - keyword: LOCATION - quoted_literal: '"s3://root_database_bucket/"' sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_function.sql000066400000000000000000000055271503426445100257770ustar00rootroot00000000000000-- Create FUNCTION with all optional syntax CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS function_name AS "class_name" USING FILE "resource_locations"; -- Create a permanent function called `simple_udf`. CREATE FUNCTION simple_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Created a temporary function. 
CREATE TEMPORARY FUNCTION simple_temp_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Replace the implementation of `simple_udf` CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR' USING JAR '/tmp/SimpleUdfR.jar'; -- Create a permanent function `test_avg` CREATE FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; ---- Create Temporary function `test_avg` CREATE TEMPORARY FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; -- Create a temporary function with no parameter CREATE TEMPORARY FUNCTION hello() RETURNS STRING RETURN 'Hello World!'; -- Create a temporary function with no parameter. CREATE OR REPLACE TEMPORARY FUNCTION function_name() RETURNS TIMESTAMP LANGUAGE SQL RETURN SELECT MAX(time) AS time FROM my_table; -- Create a permanent function with parameters CREATE FUNCTION area(x DOUBLE, y DOUBLE) RETURNS DOUBLE RETURN x * y; -- Compose SQL functions. CREATE FUNCTION square(x DOUBLE) RETURNS DOUBLE RETURN area(x, x); -- Create a CTE function CREATE FUNCTION cte_function(x INT) RETURNS string LANGUAGE SQL RETURN WITH cte AS (SELECT x AS y) SELECT * FROM cte; -- Create a non-deterministic function CREATE FUNCTION roll_dice() RETURNS INT NOT DETERMINISTIC CONTAINS SQL COMMENT 'Roll a single 6 sided die' RETURN (rand() * 6)::INT + 1; -- Create a non-deterministic function with parameters and defaults CREATE FUNCTION roll_dice(num_dice INT DEFAULT 1 COMMENT 'number of dice to roll (Default: 1)', num_sides INT DEFAULT 6 COMMENT 'number of sides per die (Default: 6)') RETURNS INT NOT DETERMINISTIC CONTAINS SQL COMMENT 'Roll a number of n-sided dice' RETURN aggregate(sequence(1, roll_dice.num_dice, 1), 0, (acc, x) -> (rand() * roll_dice.num_sides)::int, acc -> acc + roll_dice.num_dice); -- Create Python functions CREATE FUNCTION main.default.greet(s STRING) RETURNS STRING LANGUAGE PYTHON AS $$ def greet(name): return "Hello " + name + "!" return greet(s) if s else None $$; -- Created Table Valued Function simple CREATE FUNCTION return_table() RETURNS TABLE RETURN SELECT time FROM my_table ; -- Created Table Valued Function with column spec + comment CREATE FUNCTION return_table() RETURNS TABLE (col_a string, col_b string comment "asdf") RETURN SELECT col_a, col_b FROM my_table ; -- backticked identifier create or replace function `catalog`.`schema`.`name` ( param int ) returns int return select param ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_function.yml000066400000000000000000000413551503426445100260000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
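create_function.sql above embeds a Python UDF between `$$` delimiters (`main.default.greet`). Lifted out of SQL, the body is ordinary Python; the standalone wrapper below (a hypothetical name, since the real function exists only inside Databricks) shows the per-row behaviour, including the NULL-to-None guard.

from typing import Optional

def main_default_greet(s: Optional[str]) -> Optional[str]:
    # Body copied from the $$ ... $$ block in create_function.sql.
    def greet(name: str) -> str:
        return "Hello " + name + "!"
    # SQL NULL arrives as Python None; the UDF then returns NULL.
    return greet(s) if s else None

assert main_default_greet("World") == "Hello World!"
assert main_default_greet(None) is None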
_hash: 4e9291c11141e27435b3e37ac80bfe6ac5714cc290dac9035d4859cf75fd1f96 file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name_identifier: function_name - keyword: AS - quoted_literal: '"class_name"' - keyword: USING - file_keyword: FILE - quoted_literal: '"resource_locations"' - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: simple_temp_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdfR'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdfR.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name: function_name_identifier: hello - function_parameter_list_with_comments: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: STRING - function_definition: keyword: RETURN expression: quoted_literal: "'Hello World!'" - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - function_name: function_name_identifier: function_name - function_parameter_list_with_comments: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: TIMESTAMP - function_definition: - keyword: LANGUAGE - keyword: SQL - keyword: RETURN - expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: time end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: area - function_parameter_list_with_comments: bracketed: - start_bracket: ( - parameter: x - data_type: primitive_type: keyword: DOUBLE - comma: ',' - parameter: y - data_type: primitive_type: keyword: DOUBLE - end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: DOUBLE - function_definition: 
keyword: RETURN expression: - column_reference: naked_identifier: x - binary_operator: '*' - column_reference: naked_identifier: y - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: square - function_parameter_list_with_comments: bracketed: start_bracket: ( parameter: x data_type: primitive_type: keyword: DOUBLE end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: DOUBLE - function_definition: keyword: RETURN expression: function: function_name: function_name_identifier: area function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: x - end_bracket: ) - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: cte_function - function_parameter_list_with_comments: bracketed: start_bracket: ( parameter: x data_type: primitive_type: keyword: INT end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: string - function_definition: - keyword: LANGUAGE - keyword: SQL - keyword: RETURN - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x alias_expression: alias_operator: keyword: AS naked_identifier: y end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: roll_dice - function_parameter_list_with_comments: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: INT - function_definition: - keyword: NOT - keyword: DETERMINISTIC - keyword: CONTAINS - keyword: SQL - comment_clause: keyword: COMMENT quoted_literal: "'Roll a single 6 sided die'" - keyword: RETURN - expression: cast_expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: rand function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '*' numeric_literal: '6' end_bracket: ) casting_operator: '::' data_type: primitive_type: keyword: INT binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: roll_dice - function_parameter_list_with_comments: bracketed: - start_bracket: ( - parameter: num_dice - data_type: primitive_type: keyword: INT - keyword: DEFAULT - numeric_literal: '1' - comment_clause: keyword: COMMENT quoted_literal: "'number of dice to roll (Default: 1)'" - comma: ',' - parameter: num_sides - data_type: primitive_type: keyword: INT - keyword: DEFAULT - numeric_literal: '6' - comment_clause: keyword: COMMENT quoted_literal: "'number of sides per die (Default: 6)'" - end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: INT - function_definition: - keyword: NOT - keyword: DETERMINISTIC - keyword: CONTAINS - keyword: SQL - comment_clause: keyword: COMMENT quoted_literal: "'Roll a number of n-sided dice'" - 
keyword: RETURN - expression: function: function_name: function_name_identifier: aggregate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: sequence function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: column_reference: - naked_identifier: roll_dice - dot: . - naked_identifier: num_dice - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> cast_expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: rand function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '*' column_reference: - naked_identifier: roll_dice - dot: . - naked_identifier: num_sides end_bracket: ) casting_operator: '::' data_type: primitive_type: keyword: int - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: - naked_identifier: roll_dice - dot: . - naked_identifier: num_dice - end_bracket: ) - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: - naked_identifier: main - dot: . - naked_identifier: default - dot: . - function_name_identifier: greet - function_parameter_list_with_comments: bracketed: start_bracket: ( parameter: s data_type: primitive_type: keyword: STRING end_bracket: ) - keyword: RETURNS - data_type: primitive_type: keyword: STRING - function_definition: - keyword: LANGUAGE - keyword: PYTHON - keyword: AS - udf_body: "$$\n def greet(name):\n return \"Hello \" + name + \"!\"\ \n\n return greet(s) if s else None\n $$" - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: return_table - function_parameter_list_with_comments: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - function_definition: keyword: RETURN expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_sql_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: return_table - function_parameter_list_with_comments: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_a - data_type: primitive_type: keyword: string - comma: ',' - column_reference: naked_identifier: col_b - data_type: primitive_type: keyword: string - keyword: comment - quoted_literal: '"asdf"' - end_bracket: ) - function_definition: keyword: RETURN select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: col_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_sql_function_statement: - keyword: 
create - keyword: or - keyword: replace - keyword: function - function_name: - quoted_identifier: '`catalog`' - dot: . - quoted_identifier: '`schema`' - dot: . - quoted_identifier: '`name`' - function_parameter_list_with_comments: bracketed: start_bracket: ( parameter: param data_type: primitive_type: keyword: int end_bracket: ) - keyword: returns - data_type: primitive_type: keyword: int - function_definition: keyword: return expression: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: param - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_private_materialized_view.sql000066400000000000000000000006501503426445100314000ustar00rootroot00000000000000CREATE PRIVATE MATERIALIZED VIEW dlt_private_mat_view ( a STRING COMMENT 'a', b TIMESTAMP COMMENT 'b' ) COMMENT 'DLT private materialized view' AS SELECT a, b FROM live.dlt_bronze; CREATE OR REFRESH PRIVATE MATERIALIZED VIEW dlt_refresh_private_mat_view ( a STRING COMMENT 'a', b TIMESTAMP COMMENT 'b' ) COMMENT 'DLT refreshed private materialized view' AS SELECT a, b FROM live.dlt_bronze; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_private_materialized_view.yml000066400000000000000000000061351503426445100314060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fac9e775bff9da87541d948b9e3ece47b3ba9ac94ae461398cea92904b1fac16 file: - statement: create_view_statement: - keyword: CREATE - keyword: PRIVATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: dlt_private_mat_view - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: "'a'" - comma: ',' - column_reference: naked_identifier: b - data_type: primitive_type: keyword: TIMESTAMP - keyword: COMMENT - quoted_literal: "'b'" - end_bracket: ) - keyword: COMMENT - quoted_literal: "'DLT private materialized view'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . 
- naked_identifier: dlt_bronze - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: PRIVATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: dlt_refresh_private_mat_view - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: "'a'" - comma: ',' - column_reference: naked_identifier: b - data_type: primitive_type: keyword: TIMESTAMP - keyword: COMMENT - quoted_literal: "'b'" - end_bracket: ) - keyword: COMMENT - quoted_literal: "'DLT refreshed private materialized view'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: dlt_bronze - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_table.sql000066400000000000000000000052021503426445100252270ustar00rootroot00000000000000CREATE TABLE tablename ( id_column INT, othercolumn STRING, generated_always_as_expression DATE GENERATED ALWAYS AS (CAST(birth_date AS DATE)), generated_by_default BIGINT GENERATED BY DEFAULT AS IDENTITY, generated_always BIGINT GENERATED ALWAYS AS IDENTITY, generated_column_start_with BIGINT GENERATED ALWAYS AS IDENTITY (START WITH 10), generated_column_increment_by BIGINT GENERATED ALWAYS AS IDENTITY (INCREMENT BY 5), generated_column_start_with_increment_by BIGINT GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 5) ) USING DELTA LOCATION "s3://someplace" CLUSTER BY (id_column); OPTIMIZE tablename; OPTIMIZE tablename WHERE date >= current_timestamp() - INTERVAL 1 day ZORDER BY (eventType, eventTime); -- Creates a Delta table CREATE TABLE student (id INT, name STRING, age INT); -- Use data from another table CREATE TABLE student_copy AS SELECT * FROM student; -- Creates a CSV table from an external directory CREATE TABLE student USING CSV LOCATION '/path/to/csv_files'; -- Specify table comment and properties CREATE TABLE student (id INT, name STRING, age INT) COMMENT 'this is a comment' TBLPROPERTIES ('foo'='bar'); -- Specify table comment and properties with different clauses order CREATE TABLE student (id INT, name STRING, age INT) TBLPROPERTIES ('foo'='bar') COMMENT 'this is a comment'; -- Create partitioned table CREATE TABLE student (id INT, name STRING, age INT) PARTITIONED BY (age); -- Create a table with a generated column CREATE TABLE rectangles(a INT, b INT, area INT GENERATED ALWAYS AS (a * b)); -- Create a table with a primary key CREATE TABLE rectangles(a INT, b INT PRIMARY KEY); -- Create a table with a not null primary key CREATE TABLE rectangles(a INT NOT NULL, b INT NOT NULL PRIMARY KEY); -- Create a table with a foreign key relation CREATE OR REPLACE TABLE TABLE1 ( DATE_VALUE DATE NOT NULL CONSTRAINT DATE_CONSTRAINT FOREIGN KEY REFERENCES TABLE2 ); -- Create a table with a column with default value CREATE TABLE student (id INT, name STRING DEFAULT 'bobby tables', age INT); -- Create a table with non nullable column with default value CREATE TABLE student (id INT, name STRING NOT NULL DEFAULT 'bobby tables', age INT); -- Create a table with a default timestamp CREATE TABLE clock ( which_time TIMESTAMP DEFAULT current_timestamp() ); -- Create a table with mixing default 
values and constraints CREATE TABLE clock ( which_time TIMESTAMP CONSTRAINT clock_pk PRIMARY KEY DEFAULT current_timestamp() NOT NULL ); -- Creates a table using identifier CREATE TABLE IDENTIFIER('student') (id INT, name STRING, age INT); sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_table.yml000066400000000000000000000427621503426445100252430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 023b7e56d6c584e480a1b7b4e985e1a35a3860fb637339dec44406571580518f file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tablename - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id_column data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: othercolumn data_type: primitive_type: keyword: STRING - comma: ',' - generated_column_definition: - naked_identifier: generated_always_as_expression - data_type: primitive_type: keyword: DATE - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: birth_date keyword: AS data_type: primitive_type: keyword: DATE end_bracket: ) end_bracket: ) - comma: ',' - generated_column_definition: - naked_identifier: generated_by_default - data_type: primitive_type: keyword: BIGINT - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - comma: ',' - generated_column_definition: - naked_identifier: generated_always - data_type: primitive_type: keyword: BIGINT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - comma: ',' - generated_column_definition: - naked_identifier: generated_column_start_with - data_type: primitive_type: keyword: BIGINT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - keyword: START - keyword: WITH - numeric_literal: '10' - end_bracket: ) - comma: ',' - generated_column_definition: - naked_identifier: generated_column_increment_by - data_type: primitive_type: keyword: BIGINT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - keyword: INCREMENT - keyword: BY - numeric_literal: '5' - end_bracket: ) - comma: ',' - generated_column_definition: - naked_identifier: generated_column_start_with_increment_by - data_type: primitive_type: keyword: BIGINT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - keyword: START - keyword: WITH - numeric_literal: '10' - keyword: INCREMENT - keyword: BY - numeric_literal: '5' - end_bracket: ) - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: LOCATION - quoted_literal: '"s3://someplace"' - keyword: CLUSTER - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id_column end_bracket: ) - statement_terminator: ; - statement: optimize_table_statement: keyword: OPTIMIZE table_reference: naked_identifier: tablename - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE -
table_reference: naked_identifier: tablename - keyword: WHERE - expression: column_reference: naked_identifier: date comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '-' interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '1' date_part: day - keyword: ZORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: eventType - comma: ',' - column_reference: naked_identifier: eventTime - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_copy - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: LOCATION - quoted_literal: "'/path/to/csv_files'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - 
bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: rectangles - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: b data_type: primitive_type: keyword: INT - comma: ',' - generated_column_definition: - naked_identifier: area - data_type: primitive_type: keyword: INT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - binary_operator: '*' - column_reference: naked_identifier: b end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: rectangles - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: b data_type: primitive_type: keyword: INT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: rectangles - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: primitive_type: keyword: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: b data_type: primitive_type: keyword: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: PRIMARY - keyword: KEY - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: TABLE1 - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: DATE_VALUE data_type: primitive_type: keyword: DATE column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: CONSTRAINT - object_reference: naked_identifier: DATE_CONSTRAINT - keyword: FOREIGN - keyword: KEY - keyword: REFERENCES - table_reference: naked_identifier: TABLE2 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING keyword: DEFAULT quoted_literal: "'bobby tables'" - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - 
start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING column_constraint_segment: - keyword: NOT - keyword: 'NULL' keyword: DEFAULT quoted_literal: "'bobby tables'" - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clock - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: which_time data_type: primitive_type: keyword: TIMESTAMP keyword: DEFAULT function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clock - bracketed: start_bracket: ( column_definition: - column_reference: naked_identifier: which_time - data_type: primitive_type: keyword: TIMESTAMP - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: clock_pk - keyword: PRIMARY - keyword: KEY - keyword: DEFAULT - function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: quoted_literal: "'student'" end_bracket: ) - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_volume.sql000066400000000000000000000020761503426445100254550ustar00rootroot00000000000000-- Create volume `customer_vol`. -- This throws exception if volume with name customer_vol already exists. CREATE VOLUME customer_vol; -- Create volume `customer_vol` only if volume with same name doesn't exist. CREATE VOLUME IF NOT EXISTS customer_vol; -- Create volume `customer_vol` only if volume with same name doesn't exist, -- with a comment. CREATE VOLUME IF NOT EXISTS customer_vol COMMENT 'This is customer volume'; -- Create external volume `customer_vol_external` -- This throws exception if volume with name customer_vol_external -- already exists. CREATE EXTERNAL VOLUME customer_vol_external LOCATION 's3://s3-path/'; -- Create external volume `customer_vol_external` -- only if volume with same name doesn't exist, with a location. CREATE EXTERNAL VOLUME IF NOT EXISTS customer_vol_external LOCATION 's3://s3-path/'; -- Create external volume `customer_vol_external` -- only if volume with same name doesn't exist, with a location and a comment. 
CREATE EXTERNAL VOLUME IF NOT EXISTS customer_vol_external LOCATION 's3://s3-path/' COMMENT 'This is customer volume'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/create_volume.yml000066400000000000000000000041051503426445100254520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 81ad3d2836c82168454967f21a7311e967ac0c08945f7c1df42a3fc1797c69c7 file: - statement: create_volume_statement: - keyword: CREATE - keyword: VOLUME - volume_reference: naked_identifier: customer_vol - statement_terminator: ; - statement: create_volume_statement: - keyword: CREATE - keyword: VOLUME - keyword: IF - keyword: NOT - keyword: EXISTS - volume_reference: naked_identifier: customer_vol - statement_terminator: ; - statement: create_volume_statement: - keyword: CREATE - keyword: VOLUME - keyword: IF - keyword: NOT - keyword: EXISTS - volume_reference: naked_identifier: customer_vol - keyword: COMMENT - quoted_literal: "'This is customer volume'" - statement_terminator: ; - statement: create_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - volume_reference: naked_identifier: customer_vol_external - keyword: LOCATION - quoted_literal: "'s3://s3-path/'" - statement_terminator: ; - statement: create_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - keyword: IF - keyword: NOT - keyword: EXISTS - volume_reference: naked_identifier: customer_vol_external - keyword: LOCATION - quoted_literal: "'s3://s3-path/'" - statement_terminator: ; - statement: create_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - keyword: IF - keyword: NOT - keyword: EXISTS - volume_reference: naked_identifier: customer_vol_external - keyword: LOCATION - quoted_literal: "'s3://s3-path/'" - keyword: COMMENT - quoted_literal: "'This is customer volume'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/date_functions.sql000066400000000000000000000013331503426445100256230ustar00rootroot00000000000000SELECT my_table.a, other_table.b FROM my_table LEFT JOIN other_table ON DATEDIFF(SECOND, my_table.timestamp_a, other_table.timestamp_b) > 1; SELECT DATE_ADD(MICROSECOND, 5, start_dt) AS date_add_micro, DATE_DIFF(MILLISECOND, start_dt, end_dt) AS datediff_milli, DATEADD(MINUTE, 5, start_dt) AS dateadd_min, DATEDIFF(HOUR, start_dt, end_dt) AS datediff_hr, TIMEDIFF(DAY, start_dt, end_dt) AS timediff_day, TIMESTAMPADD(DAYOFYEAR, 5, start_dt) AS ts_add_day_of_yr, TIMESTAMPDIFF(WEEK, start_dt, end_dt) AS ts_diff_week, DATE_ADD(MONTH, 5, start_dt) AS date_add_month, DATE_ADD(QUARTER, 5, start_dt) AS date_add_quarter, DATE_ADD(YEAR, 5, start_dt) AS date_add_year FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/databricks/date_functions.yml000066400000000000000000000221431503426445100256270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 120308fb701a79a33f52d92cc4c06f4f80c63dd71a46ef50b848868a97e789c0 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: other_table - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: other_table - join_on_condition: keyword: 'ON' expression: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: SECOND - comma: ',' - expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: timestamp_a - comma: ',' - expression: column_reference: - naked_identifier: other_table - dot: . - naked_identifier: timestamp_b - end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: MICROSECOND - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_micro - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_DIFF function_contents: bracketed: - start_bracket: ( - date_part: MILLISECOND - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: datediff_milli - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: MINUTE - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dateadd_min - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: HOUR - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: datediff_hr - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIMEDIFF function_contents: bracketed: - start_bracket: ( - date_part: DAY - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: timediff_day - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIMESTAMPADD function_contents: bracketed: - start_bracket: ( - date_part: DAYOFYEAR - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS 
naked_identifier: ts_add_day_of_yr - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIMESTAMPDIFF function_contents: bracketed: - start_bracket: ( - date_part: WEEK - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ts_diff_week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: MONTH - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_month - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: QUARTER - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_quarter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: YEAR - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_year from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/declare_or_replace_variable.sql000066400000000000000000000002361503426445100302560ustar00rootroot00000000000000DECLARE var; DECLARE OR REPLACE var; DECLARE OR REPLACE VARIABLE var; DECLARE var INT DEFAULT 5; DECLARE var INT = 5; DECLARE var = 5; DECLARE var DEFAULT 5; sqlfluff-3.4.2/test/fixtures/dialects/databricks/declare_or_replace_variable.yml000066400000000000000000000036011503426445100302570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6a909a5d34c442da8af91bc06cc01ebc5cb8eee405f364a38d3f0ccb1831337a file: - statement: declare_or_replace_variable_statement: keyword: DECLARE naked_identifier: var - statement_terminator: ; - statement: declare_or_replace_variable_statement: - keyword: DECLARE - keyword: OR - keyword: REPLACE - naked_identifier: var - statement_terminator: ; - statement: declare_or_replace_variable_statement: - keyword: DECLARE - keyword: OR - keyword: REPLACE - keyword: VARIABLE - naked_identifier: var - statement_terminator: ; - statement: declare_or_replace_variable_statement: - keyword: DECLARE - naked_identifier: var - data_type: primitive_type: keyword: INT - keyword: DEFAULT - expression: numeric_literal: '5' - statement_terminator: ; - statement: declare_or_replace_variable_statement: keyword: DECLARE naked_identifier: var data_type: primitive_type: keyword: INT comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '5' - statement_terminator: ; - statement: declare_or_replace_variable_statement: keyword: DECLARE naked_identifier: var comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '5' - statement_terminator: ; - statement: declare_or_replace_variable_statement: - keyword: DECLARE - naked_identifier: var - keyword: DEFAULT - expression: numeric_literal: '5' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/describe_volume.sql000066400000000000000000000000661503426445100257670ustar00rootroot00000000000000-- Describe the volume DESCRIBE VOLUME VACCINE_VOLUME; sqlfluff-3.4.2/test/fixtures/dialects/databricks/describe_volume.yml000066400000000000000000000010701503426445100257650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ea60f6943f16c260123e678dc8ec29433a13cd657484991939d8d56fe42d5e4 file: statement: describe_statement: - keyword: DESCRIBE - keyword: VOLUME - volume_reference: naked_identifier: VACCINE_VOLUME statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/drop_catalog.sql000066400000000000000000000002511503426445100252520ustar00rootroot00000000000000-- Drop the catalog and its schemas DROP CATALOG vaccine CASCADE; -- Drop the catalog using IF EXISTS and only if it is empty. DROP CATALOG IF EXISTS vaccine RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/databricks/drop_catalog.yml000066400000000000000000000014631503426445100252610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 7ea0dd1142a5697ad4fedde41ded6e228ee8f187261cbe617e7e9f231fe5a7e6 file: - statement: drop_catalog_statement: - keyword: DROP - keyword: CATALOG - catalog_reference: naked_identifier: vaccine - keyword: CASCADE - statement_terminator: ; - statement: drop_catalog_statement: - keyword: DROP - keyword: CATALOG - keyword: IF - keyword: EXISTS - catalog_reference: naked_identifier: vaccine - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/drop_volume.sql000066400000000000000000000001721503426445100251510ustar00rootroot00000000000000-- Drop the volume DROP VOLUME vaccine_volume; -- Drop the volume using IF EXISTS. DROP VOLUME IF EXISTS vaccine_volume; sqlfluff-3.4.2/test/fixtures/dialects/databricks/drop_volume.yml000066400000000000000000000014141503426445100251530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dfe9fe8a21ef3a2b447f823a1b8a437a16e71cea56fd657d1e6d9b1deddd76c5 file: - statement: drop_volume_statement: - keyword: DROP - keyword: VOLUME - volume_reference: naked_identifier: vaccine_volume - statement_terminator: ; - statement: drop_volume_statement: - keyword: DROP - keyword: VOLUME - keyword: IF - keyword: EXISTS - volume_reference: naked_identifier: vaccine_volume - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/magic_line.sql000066400000000000000000000004771503426445100247150ustar00rootroot00000000000000-- Databricks notebook source -- MAGIC %md -- MAGIC # Dummy Notebook -- COMMAND ---------- -- DBTITLE 1,Select Data SELECT x FROM y -- COMMAND ---------- -- MAGIC %python -- MAGIC foo = 'bar' -- MAGIC print(foo) -- COMMAND ---------- SELECT a FROM b; -- COMMAND ---------- -- MAGIC %sh -- MAGIC echo heloworld sqlfluff-3.4.2/test/fixtures/dialects/databricks/magic_line.yml000066400000000000000000000033441503426445100247130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1e0c9a346f45049496cef1858cf742bc89de13356522c94bcd888193456506b3 file: - statement: magic_cell_segment: magic_start: "-- MAGIC %md\n" magic_line: '-- MAGIC # Dummy Notebook' - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: y - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: magic_cell_segment: - magic_start: "-- MAGIC %python\n" - magic_line: "-- MAGIC foo = 'bar'" - magic_line: -- MAGIC print(foo) - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b - statement_terminator: ; - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: magic_cell_segment: magic_start: "-- MAGIC %sh\n" magic_line: -- MAGIC echo heloworld sqlfluff-3.4.2/test/fixtures/dialects/databricks/magic_single_line.sql000066400000000000000000000002361503426445100262470ustar00rootroot00000000000000-- Databricks notebook source -- MAGIC %md -- MAGIC # Dummy Notebook -- COMMAND ---------- -- MAGIC %run ./Notebook -- COMMAND ---------- SELECT a FROM b sqlfluff-3.4.2/test/fixtures/dialects/databricks/magic_single_line.yml000066400000000000000000000020651503426445100262530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7bd24732f4f263dec401f41cafbd4abf033c7c5bb082b2bbb171414bb1bf420a file: - statement: magic_cell_segment: magic_start: "-- MAGIC %md\n" magic_line: '-- MAGIC # Dummy Notebook' - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: magic_cell_segment: magic_single_line: -- MAGIC %run ./Notebook - statement_terminator: "\n\n-- COMMAND ----------\n" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b sqlfluff-3.4.2/test/fixtures/dialects/databricks/named_argument.sql000066400000000000000000000004001503426445100255760ustar00rootroot00000000000000--https://docs.databricks.com/en/sql/language-manual/sql-ref-function-invocation.html#named-parameter-invocation select my_function(arg1 => 3, arg2 => 4) from dual; select my_function(3, arg2 => 4) from dual; select my_function(arg1 => 3, 4) from dual; sqlfluff-3.4.2/test/fixtures/dialects/databricks/named_argument.yml000066400000000000000000000056601503426445100256150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a5fdbbf6eaa08251ecd5c0f8ccd12e22cf310f3710e32772b2131433c037dfa5 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: - start_bracket: ( - named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' - comma: ',' - named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' comma: ',' named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' comma: ',' expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/pivot.sql000066400000000000000000000026141503426445100237620ustar00rootroot00000000000000-- Examples from https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-qry-select-pivot.html -- A very basic PIVOT -- Given a table with sales by quarter, return a table that returns sales across quarters per year. SELECT year, region, q1, q2, q3, q4 FROM sales PIVOT (sum(sales) AS sales FOR quarter IN (1 AS q1, 2 AS q2, 3 AS q3, 4 AS q4)); -- Also PIVOT on region SELECT year, q1_east, q1_west, q2_east, q2_west, q3_east, q3_west, q4_east, q4_west FROM sales PIVOT (sum(sales) AS sales FOR (quarter, region) IN ((1, 'east') AS q1_east, (1, 'west') AS q1_west, (2, 'east') AS q2_east, (2, 'west') AS q2_west, (3, 'east') AS q3_east, (3, 'west') AS q3_west, (4, 'east') AS q4_east, (4, 'west') AS q4_west)); -- To aggregate across regions the column must be removed from the input. SELECT year, q1, q2, q3, q4 FROM (SELECT year, quarter, sales FROM sales) AS s PIVOT (sum(sales) AS sales FOR quarter IN (1 AS q1, 2 AS q2, 3 AS q3, 4 AS q4)); -- A PIVOT with multiple aggregations SELECT year, q1_total, q1_avg, q2_total, q2_avg, q3_total, q3_avg, q4_total, q4_avg FROM (SELECT year, quarter, sales FROM sales) AS s PIVOT (sum(sales) AS total, avg(sales) AS avg FOR quarter IN (1 AS q1, 2 AS q2, 3 AS q3, 4 AS q4)); -- A PIVOT with anonymous columns SELECT year, region, q1, q2, q3, q4 FROM sales PIVOT (sum(sales) FOR quarter IN (1 AS q1, 2 AS q2, 3 AS q3, 4 AS q4)); sqlfluff-3.4.2/test/fixtures/dialects/databricks/pivot.yml000066400000000000000000000460531503426445100237710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 694df2655ef6260233589bb02fa84c79e59fa3fa0b935b71dd1e9e49c482957f file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: q1 - comma: ',' - select_clause_element: column_reference: naked_identifier: q2 - comma: ',' - select_clause_element: column_reference: naked_identifier: q3 - comma: ',' - select_clause_element: column_reference: naked_identifier: q4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - expression: numeric_literal: '2' - alias_expression: alias_operator: keyword: AS naked_identifier: q2 - comma: ',' - expression: numeric_literal: '3' - alias_expression: alias_operator: keyword: AS naked_identifier: q3 - comma: ',' - expression: numeric_literal: '4' - alias_expression: alias_operator: keyword: AS naked_identifier: q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: q1_east - comma: ',' - select_clause_element: column_reference: naked_identifier: q1_west - comma: ',' - select_clause_element: column_reference: naked_identifier: q2_east - comma: ',' - select_clause_element: column_reference: naked_identifier: q2_west - comma: ',' - select_clause_element: column_reference: naked_identifier: q3_east - comma: ',' - select_clause_element: column_reference: naked_identifier: q3_west - comma: ',' - select_clause_element: column_reference: naked_identifier: q4_east - comma: ',' - select_clause_element: column_reference: naked_identifier: q4_west from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: sales - keyword: FOR - bracketed: - start_bracket: ( - naked_identifier: quarter - comma: ',' - naked_identifier: region - end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'east'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q1_east - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'west'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS 
naked_identifier: q1_west - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'east'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q2_east - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'west'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q2_west - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'east'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q3_east - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'west'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q3_west - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: quoted_literal: "'east'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q4_east - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: quoted_literal: "'west'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q4_west - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: q1 - comma: ',' - select_clause_element: column_reference: naked_identifier: q2 - comma: ',' - select_clause_element: column_reference: naked_identifier: q3 - comma: ',' - select_clause_element: column_reference: naked_identifier: q4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter - comma: ',' - select_clause_element: column_reference: naked_identifier: sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: s pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - expression: numeric_literal: '2' - alias_expression: alias_operator: keyword: AS naked_identifier: q2 - comma: ',' - expression: numeric_literal: '3' - alias_expression: alias_operator: keyword: AS naked_identifier: q3 - comma: ',' - expression: numeric_literal: '4' - alias_expression: alias_operator: keyword: AS naked_identifier: q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: 
column_reference: naked_identifier: q1_total - comma: ',' - select_clause_element: column_reference: naked_identifier: q1_avg - comma: ',' - select_clause_element: column_reference: naked_identifier: q2_total - comma: ',' - select_clause_element: column_reference: naked_identifier: q2_avg - comma: ',' - select_clause_element: column_reference: naked_identifier: q3_total - comma: ',' - select_clause_element: column_reference: naked_identifier: q3_avg - comma: ',' - select_clause_element: column_reference: naked_identifier: q4_total - comma: ',' - select_clause_element: column_reference: naked_identifier: q4_avg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter - comma: ',' - select_clause_element: column_reference: naked_identifier: sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: s pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total - comma: ',' - function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: avg - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - expression: numeric_literal: '2' - alias_expression: alias_operator: keyword: AS naked_identifier: q2 - comma: ',' - expression: numeric_literal: '3' - alias_expression: alias_operator: keyword: AS naked_identifier: q3 - comma: ',' - expression: numeric_literal: '4' - alias_expression: alias_operator: keyword: AS naked_identifier: q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: q1 - comma: ',' - select_clause_element: column_reference: naked_identifier: q2 - comma: ',' - select_clause_element: column_reference: naked_identifier: q3 - comma: ',' - select_clause_element: column_reference: naked_identifier: q4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - expression: numeric_literal: '2' - alias_expression: alias_operator: keyword: AS 
naked_identifier: q2 - comma: ',' - expression: numeric_literal: '3' - alias_expression: alias_operator: keyword: AS naked_identifier: q3 - comma: ',' - expression: numeric_literal: '4' - alias_expression: alias_operator: keyword: AS naked_identifier: q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select.sql000066400000000000000000000002071503426445100240740ustar00rootroot00000000000000select * from shopify_cz.order ; SELECT * FROM IDENTIFIER('table_name') ; SELECT * FROM IDENTIFIER('schema_name' || '.table_name') ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select.yml000066400000000000000000000043251503426445100241030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3fd3588bcd6512e79a25b1f94526827b0f94bc8ca91f212860b16c21c4636cda file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: shopify_cz - dot: . - naked_identifier: order - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: quoted_literal: "'table_name'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: - quoted_literal: "'schema_name'" - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.table_name'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_from_lateral_view.sql000066400000000000000000000041331503426445100276570ustar00rootroot00000000000000SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) tbl_name AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age; SELECT c_age, COUNT(*) AS record_count FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age GROUP BY c_age; SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) c_age; SELECT person.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, 
age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) AS name, age, state; SELECT t1.column1, CAST(GET_JSON_OBJECT(things, '$.percentage') AS DECIMAL(16, 8) ) AS ptc FROM table1 AS t1 LEFT JOIN table2 AS t2 ON c.column1 = p.column1 AND t2.type = 'SOMETHING' LATERAL VIEW OUTER EXPLODE(t2.column2) AS things; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_from_lateral_view.yml000066400000000000000000000615601503426445100276700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83a9fd631220dfe52510d299d83b0d2c921aeb8ec49e8518ebf44b82832e69eb file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - naked_identifier: AS - naked_identifier: d_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: 
function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - naked_identifier: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - naked_identifier: AS - naked_identifier: d_age groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: 
column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: person - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . 
- naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . 
- naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GET_JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: things - comma: ',' - expression: quoted_literal: "'$.percentage'" - end_bracket: ) keyword: AS data_type: primitive_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '16' - comma: ',' - numeric_literal: '8' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ptc from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: alias_operator: keyword: AS naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: column1 - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SOMETHING'" lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: column2 end_bracket: ) - naked_identifier: AS - naked_identifier: things - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_from_read_file.sql000066400000000000000000000025171503426445100271170ustar00rootroot00000000000000-- Taken from examples here: https://docs.databricks.com/aws/en/sql/language-manual/functions/read_files -- Reads the files available in the given path. Auto-detects the format and schema of the data. SELECT * FROM read_files('abfss://container@storageAccount.dfs.core.windows.net/base/path'); SELECT * FROM read_files( 's3://bucket/path', format => 'csv', schema => 'id int, ts timestamp, event string'); -- Infers the schema of CSV files with headers. Because the schema is not provided, -- the CSV files are assumed to have headers. SELECT * FROM read_files( 's3://bucket/path', format => 'csv'); -- Reads files that have a csv suffix. 
SELECT * FROM read_files('s3://bucket/path/*.csv'); -- Reads a single JSON file SELECT * FROM read_files( 'abfss://container@storageAccount.dfs.core.windows.net/path/single.json'); -- Reads JSON files and overrides the data type of the column `id` to integer. SELECT * FROM read_files( 's3://bucket/path', format => 'json', schemaHints => 'id int'); -- Reads files that have been uploaded or modified yesterday. SELECT * FROM read_files( 'gs://my-bucket/avroData', modifiedAfter => date_sub(current_date(), 1), modifiedBefore => current_date()); -- Reads a streaming table SELECT * FROM STREAM read_files('gs://my-bucket/avroData', includeExistingFiles => false); sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_from_read_file.yml000066400000000000000000000216351503426445100271230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 174100b63d83fe671aa54d8162a534325ffad2cbbbcc12cbf31e20122e995016 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'abfss://container@storageAccount.dfs.core.windows.net/base/path'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'s3://bucket/path'" - comma: ',' - named_argument: naked_identifier: format right_arrow: => expression: quoted_literal: "'csv'" - comma: ',' - named_argument: naked_identifier: schema right_arrow: => expression: quoted_literal: "'id int, ts timestamp, event string'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'s3://bucket/path'" comma: ',' named_argument: naked_identifier: format right_arrow: => expression: quoted_literal: "'csv'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'s3://bucket/path/*.csv'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: 
function_name: function_name_identifier: read_files function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'abfss://container@storageAccount.dfs.core.windows.net/path/single.json'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'s3://bucket/path'" - comma: ',' - named_argument: naked_identifier: format right_arrow: => expression: quoted_literal: "'json'" - comma: ',' - named_argument: naked_identifier: schemaHints right_arrow: => expression: quoted_literal: "'id int'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'gs://my-bucket/avroData'" - comma: ',' - named_argument: naked_identifier: modifiedAfter right_arrow: => expression: function: function_name: function_name_identifier: date_sub function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - named_argument: naked_identifier: modifiedBefore right_arrow: => expression: function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: keyword: STREAM table_expression: function: function_name: function_name_identifier: read_files function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'gs://my-bucket/avroData'" comma: ',' named_argument: naked_identifier: includeExistingFiles right_arrow: => expression: boolean_literal: 'false' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_group_by.sql000066400000000000000000000060241503426445100260050ustar00rootroot00000000000000-- Sum of quantity per dealership. Group by `id`. SELECT id, sum(quantity) AS sum_quantity FROM dealer GROUP BY id ORDER BY id; -- Use column position in GROUP by clause. SELECT id, sum(quantity) AS sum_quantity FROM dealer GROUP BY 1 ORDER BY 1; -- Multiple aggregations. -- 1. Sum of quantity per dealership. -- 2. Max quantity per dealership. SELECT id, sum(quantity) AS sum_quantity, max(quantity) AS max_quantity FROM dealer GROUP BY id ORDER BY id; -- Count the number of distinct dealer cities per car_model. SELECT car_model, count(DISTINCT city) AS count_distinct_city FROM dealer GROUP BY car_model; -- Sum of only 'Honda Civic' and 'Honda CRV' quantities per dealership. 
SELECT id, sum(quantity) FILTER ( WHERE car_model IN ('Honda Civic', 'Honda CRV') ) AS `sum(quantity)` FROM dealer GROUP BY id ORDER BY id; -- Aggregations using multiple sets of grouping columns in a single statement. -- Following performs aggregations based on four sets of grouping columns. -- 1. city, car_model -- 2. city -- 3. car_model -- 4. Empty grouping set. Returns quantities for all city and car models. SELECT city, car_model, sum(quantity) AS sum_quantity FROM dealer GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ()) ORDER BY city; SELECT city, car_model, sum(quantity) AS sum_quantity FROM dealer GROUP BY city, car_model GROUPING SETS ((city, car_model), (city), (car_model), ()) ORDER BY city; SELECT city, car_model, sum(quantity) AS sum_quantity FROM dealer GROUP BY city, car_model, GROUPING SETS ((city, car_model), (city), (car_model), ()) ORDER BY city; -- Group by processing with `ROLLUP` clause. -- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), ()) SELECT city, car_model, sum(quantity) AS sum_quantity FROM dealer GROUP BY city, car_model WITH ROLLUP ORDER BY city, car_model; -- Group by processing with `CUBE` clause. -- Equivalent GROUP BY: -- GROUPING SETS ((city, car_model), (city), (car_model), ()) SELECT city, car_model, sum(quantity) AS sum_quantity FROM dealer GROUP BY city, car_model WITH CUBE ORDER BY city, car_model; -- Select the first row in column age -- Implicit GROUP BY SELECT first(age) FROM person; -- Implicit GROUP BY SELECT first(age IGNORE NULLS) AS first_age, last(id) AS last_id, sum(id) AS sum_id FROM person; -- CUBE within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY cube(name, age); -- CUBE within GROUP BY clause with single clause on newline SELECT name, count(*) AS record_count FROM people GROUP BY cube( name ); -- CUBE within GROUP BY clause with multiple clauses on newline SELECT name, age, count(*) AS record_count FROM people GROUP BY cube( name, age ); -- ROLLUP within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY rollup(name, age); -- GROUP BY ALL SELECT name, age, count(*) AS record_count FROM people GROUP BY ALL; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_group_by.yml000066400000000000000000000631741503426445100260200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
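# For a quick manual check of a single fixture, the parse tree can also be
# printed directly from the CLI. This is an illustrative invocation (the
# generator script above remains the canonical workflow); it should print a
# tree equivalent to the records recorded below:
# `sqlfluff parse test/fixtures/dialects/databricks/select_group_by.sql --dialect databricks`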
_hash: 190c1ef57480e9d5090ba1f22a86f90bc2b578398d70d4ff73845635196e682d file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: max_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: city end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: count_distinct_city from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: 
column_reference: naked_identifier: quantity end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: car_model keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'Honda Civic'" - comma: ',' - quoted_literal: "'Honda CRV'" - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '`sum(quantity)`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: 
) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - comma: ',' - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: ROLLUP orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: CUBE orderby_clause: - keyword: ORDER - keyword: 
BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: first function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: age - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: first_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: last_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: column_reference: naked_identifier: name end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: 
function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_window.sql000066400000000000000000000002171503426445100254640ustar00rootroot00000000000000select lag(test) over (ORDER BY test) from schema.test_table; select lag(test) over (PARTITION BY test ORDER BY test) from schema.test_table; sqlfluff-3.4.2/test/fixtures/dialects/databricks/select_window.yml000066400000000000000000000053621503426445100254740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
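# The `_hash` field below is a 64-character hex digest of the generated file
# content, consistent with SHA-256. The exact hashing scheme is owned by the
# test suite, so treat this Python sketch as illustrative only; `yaml_text`
# is a hypothetical stand-in for the generated file body:
# `import hashlib; print(hashlib.sha256(yaml_text.encode("utf8")).hexdigest())`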
_hash: 560194282205d8b76a3142b02b950541522a1cc0e275f75556346405ddd0076b file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: test end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: test end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: test end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: test orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: test end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: test_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/set_time_zone.sql000066400000000000000000000002561503426445100254650ustar00rootroot00000000000000SET TIME ZONE LOCAL; SET TIME ZONE 'America/Los_Angeles'; SET TIME ZONE '+08:00'; SET TIME ZONE INTERVAL 1 HOUR 30 MINUTES; SET TIME ZONE INTERVAL '08:30:00' HOUR TO SECOND; sqlfluff-3.4.2/test/fixtures/dialects/databricks/set_time_zone.yml000066400000000000000000000030141503426445100254620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
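# A single statement can also be sanity-checked without editing any fixture
# by piping it to the CLI on stdin (`-` tells sqlfluff to read from stdin):
# `echo "SET TIME ZONE LOCAL;" | sqlfluff parse --dialect databricks -`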
_hash: 08c7d43e7a4b042dd59c903967e9582479d8888514a054e04049a5111ff76602 file: - statement: set_timezone_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: LOCAL - statement_terminator: ; - statement: set_timezone_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'America/Los_Angeles'" - statement_terminator: ; - statement: set_timezone_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'+08:00'" - statement_terminator: ; - statement: set_timezone_statement: - keyword: SET - keyword: TIME - keyword: ZONE - interval_expression: - keyword: INTERVAL - interval_literal: numeric_literal: '1' date_part: HOUR - interval_literal: numeric_literal: '30' date_part: MINUTES - statement_terminator: ; - statement: set_timezone_statement: - keyword: SET - keyword: TIME - keyword: ZONE - interval_expression: keyword: INTERVAL interval_literal: - signed_quoted_literal: "'08:30:00'" - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/set_variable.sql000066400000000000000000000007631503426445100252640ustar00rootroot00000000000000-- simple assignment SET VAR var1 = 5; -- A complex expression assignment SET VARIABLE var1 = (SELECT max(c1) FROM VALUES(1), (2) AS t(c1)); -- resetting the variable to DEFAULT (set in declare) SET VAR var1 = DEFAULT; -- A multi-variable assignment SET VAR (var1, var2, var3) = (VALUES(100,'x123',DEFAULT)); -- escaped function name SET VARIABLE `foo` = select 'bar'; -- function call set var tz = current_timezone(); -- set multiple vars in one statement set var x1 = 12, x2 = 'helloworld'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/set_variable.yml000066400000000000000000000120651503426445100252640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
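# The same check is available from Python through the simple API; a minimal
# sketch, assuming the documented top-level `sqlfluff.parse` entry point,
# which raises an error if the statement fails to parse:
# `import sqlfluff; sqlfluff.parse("SET VAR var1 = 5;", dialect="databricks")`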
_hash: 1f3a04fd4fa5dbf16f7e570010eb2a13cd4f1278f2eb8c34f2cc9738abf68a2e file: - statement: set_variable_statement: - keyword: SET - keyword: VAR - expression: column_reference: naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VARIABLE - expression: column_reference: naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VAR - expression: - column_reference: naked_identifier: var1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: DEFAULT - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VAR - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: var1 - comma: ',' - column_reference: naked_identifier: var2 - comma: ',' - column_reference: naked_identifier: var3 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '100' - comma: ',' - expression: quoted_literal: "'x123'" - comma: ',' - expression: column_reference: naked_identifier: DEFAULT - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VARIABLE - expression: column_reference: quoted_identifier: '`foo`' comparison_operator: raw_comparison_operator: '=' select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'bar'" - statement_terminator: ; - statement: set_variable_statement: - keyword: set - keyword: var - expression: column_reference: naked_identifier: tz comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: current_timezone function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: set_variable_statement: - keyword: set - keyword: var - expression: column_reference: naked_identifier: x1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '12' - comma: ',' - expression: column_reference: naked_identifier: x2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'helloworld'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_databases.sql000066400000000000000000000011501503426445100256020ustar00rootroot00000000000000-- Lists all databases SHOW DATABASES; -- List all databases from userdb catalog SHOW DATABASES FROM userdb; -- List all databases in userdb catalog SHOW DATABASES IN userdb; -- List all databases from default catalog matching the pattern `sam*` SHOW DATABASES FROM default LIKE 'sam*'; -- List all 
databases from default catalog matching the pattern `sam*` -- without LIKE keyword SHOW DATABASES FROM default 'sam*'; -- List all databases matching the pattern `sam*|suj` without LIKE keyword SHOW DATABASES 'sam*|suj'; -- Lists all databases. Keywords SCHEMAS and DATABASES are interchangeable. SHOW SCHEMAS; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_databases.yml000066400000000000000000000030211503426445100256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 800a4c7e8f8417bc60e5836e14f747518c342ea1a6009c8a3cc54eabbc960aba file: - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: IN - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: FROM - database_reference: naked_identifier: default - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - quoted_literal: "'sam*|suj'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_functions.sql000066400000000000000000000032141503426445100256660ustar00rootroot00000000000000-- Unfiltered list of all functions. SHOW FUNCTIONS; -- List a system function `trim` by searching both user defined and system -- defined functions. SHOW FUNCTIONS trim; SHOW ALL FUNCTIONS trim; -- List a system function `concat` by searching system defined functions. SHOW SYSTEM FUNCTIONS concat; -- List a user function `concat_user` by searching user defined functions. SHOW USER FUNCTIONS concat_user; -- List a qualified function `max` from database `salesdb`. SHOW SYSTEM FUNCTIONS salesdb.max; -- List all functions starting with `t` SHOW FUNCTIONS LIKE 't*'; -- List all functions starting with `t` without LIKE keyword SHOW FUNCTIONS 't*'; -- List all user functions starting with `t` SHOW USER FUNCTIONS LIKE 't*'; -- List all user functions starting with `t` without LIKE keyword SHOW USER FUNCTIONS 't*'; -- List all functions starting with `yea` or `windo` SHOW FUNCTIONS LIKE 'yea*|windo*'; -- Use normal regex pattern to list function names that have 4 characters -- with `t` as the starting character.
SHOW FUNCTIONS LIKE 't[a-z][a-z][a-z]'; -- List all functions from default schema SHOW FUNCTIONS FROM default; -- List all user functions from default schema SHOW USER FUNCTIONS FROM default; -- List all functions from default schema starting with `t` SHOW FUNCTIONS FROM default LIKE 't*'; -- List all functions from default schema starting with `t` without LIKE keyword SHOW FUNCTIONS FROM default 't*'; -- List all user functions from default schema starting with `t` SHOW USER FUNCTIONS FROM default LIKE 't*'; -- List all user functions from default schema starting with `t` without LIKE keyword SHOW USER FUNCTIONS FROM default 't*'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_functions.yml000066400000000000000000000072071503426445100256760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4f2683ec5b6ee42315dfc9980bca4d2c063ef508083cc4c038ec8ed4a2facd89 file: - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - function_name: function_name_identifier: trim - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: ALL - keyword: FUNCTIONS - function_name: function_name_identifier: trim - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SYSTEM - keyword: FUNCTIONS - function_name: function_name_identifier: concat - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - function_name: function_name_identifier: concat_user - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SYSTEM - keyword: FUNCTIONS - function_name: naked_identifier: salesdb dot: . 
function_name_identifier: max - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'yea*|windo*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t[a-z][a-z][a-z]'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - keyword: FROM - database_reference: naked_identifier: default - quoted_literal: "'t*'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_tables.sql000066400000000000000000000010001503426445100251170ustar00rootroot00000000000000-- List all tables in default database SHOW TABLES; -- List all tables from userdb database SHOW TABLES FROM userdb; -- List all tables in userdb database SHOW TABLES IN userdb; -- List all tables from default database matching the pattern `sam*` SHOW TABLES FROM default LIKE 'sam*'; -- List all tables from default database matching the pattern `sam*` -- without LIKE keyword SHOW TABLES FROM default 'sam*'; -- List all tables matching the pattern `sam*|suj` without LIKE keyword SHOW TABLES 'sam*|suj'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_tables.yml000066400000000000000000000026311503426445100251340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fa51da687e2e5dc6cac50b8f120c491f5a462abb4edc33ce76db4bcfe532bab1 file: - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: IN - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - database_reference: naked_identifier: default - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - quoted_literal: "'sam*|suj'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_views.sql000066400000000000000000000012441503426445100250140ustar00rootroot00000000000000-- List all views in default database SHOW VIEWS; -- List all views from userdb database SHOW VIEWS FROM userdb; -- List all views in global temp view database SHOW VIEWS IN global_temp; -- List all views from default database matching the pattern `sam*` SHOW VIEWS FROM default LIKE 'sam*'; -- List all views from the current database -- matching the pattern `sam|suj|temp*` SHOW VIEWS LIKE 'sam|suj|temp*'; -- List all views from default database matching the pattern `sam*` -- without LIKE keyword SHOW VIEWS FROM default 'sam*'; -- List all views from the current database -- matching the pattern `sam|suj|temp*` without LIKE keyword SHOW VIEWS 'sam|suj|temp*'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_views.yml000066400000000000000000000030751503426445100250220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 290273e288e97541ed705c0e3e9b7fc765fac0122d866bb9b30421783fd99c1f file: - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: IN - database_reference: naked_identifier: global_temp - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: LIKE - quoted_literal: "'sam|suj|temp*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: default - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - quoted_literal: "'sam|suj|temp*'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_volumes.sql000066400000000000000000000004311503426445100253460ustar00rootroot00000000000000SHOW VOLUMES; SHOW VOLUMES IN sampledb; SHOW VOLUMES FROM sampledb; SHOW VOLUMES LIKE 'regex*'; SHOW VOLUMES 'regex*'; SHOW VOLUMES IN sampledb LIKE 'regex*'; SHOW VOLUMES IN sampledb 'regex*'; SHOW VOLUMES FROM sampledb LIKE 'regex*'; SHOW VOLUMES FROM sampledb 'regex*'; sqlfluff-3.4.2/test/fixtures/dialects/databricks/show_volumes.yml000066400000000000000000000040021503426445100253460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1b612a552e97cd9625f34225db37954c2a8300ea7af792bcceaa5e379e280e6b file: - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: IN - database_reference: naked_identifier: sampledb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: FROM - database_reference: naked_identifier: sampledb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: LIKE - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: IN - database_reference: naked_identifier: sampledb - keyword: LIKE - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: IN - database_reference: naked_identifier: sampledb - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: FROM - database_reference: naked_identifier: sampledb - keyword: LIKE - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VOLUMES - keyword: FROM - database_reference: naked_identifier: sampledb - quoted_literal: "'regex*'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/unpivot.sql000066400000000000000000000007201503426445100243210ustar00rootroot00000000000000SELECT * FROM sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`, q2 AS `Apr-Jun`, q3 AS `Jul-Sep`, sales.q4 AS `Oct-Dec`)); SELECT * FROM oncall UNPIVOT ((name, email, phone) FOR precedence IN ((name1, email1, phone1) AS primary, (name2, email2, phone2) AS secondary)); sqlfluff-3.4.2/test/fixtures/dialects/databricks/unpivot.yml000066400000000000000000000110221503426445100243200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 419ab218aadf6da05de18b4a9e706f421f5ce1313140ac20ed4a2426c57cdb33 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales unpivot_clause: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: start_bracket: ( unpivot_single_column: - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jan-Mar`' - comma: ',' - column_reference: naked_identifier: q2 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Apr-Jun`' - comma: ',' - column_reference: naked_identifier: q3 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jul-Sep`' - comma: ',' - column_reference: - naked_identifier: sales - dot: . 
- naked_identifier: q4 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Oct-Dec`' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: oncall unpivot_clause: keyword: UNPIVOT bracketed: start_bracket: ( unpivot_multi_column: - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: email - comma: ',' - naked_identifier: phone - end_bracket: ) - keyword: FOR - naked_identifier: precedence - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: name1 - comma: ',' - column_reference: naked_identifier: email1 - comma: ',' - column_reference: naked_identifier: phone1 - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: primary - comma: ',' - bracketed: - start_bracket: ( - column_reference: naked_identifier: name2 - comma: ',' - column_reference: naked_identifier: email2 - comma: ',' - column_reference: naked_identifier: phone2 - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: secondary - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/use_catalog.sql000066400000000000000000000003021503426445100250770ustar00rootroot00000000000000USE CATALOG catalog_name; -- Use the 'hive_metastore'. USE CATALOG hive_metastore; USE CATALOG 'hive_metastore'; -- Use the 'some_catalog'. USE CATALOG `some_catalog`; USE CATALOG some_cat; sqlfluff-3.4.2/test/fixtures/dialects/databricks/use_catalog.yml000066400000000000000000000023601503426445100251070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 282b60971e1137caf141e41a55f81c74985ed6ecf12ad8ad3c4d2e304fe7472d file: - statement: use_catalog_statement: - keyword: USE - keyword: CATALOG - catalog_reference: naked_identifier: catalog_name - statement_terminator: ; - statement: use_catalog_statement: - keyword: USE - keyword: CATALOG - catalog_reference: naked_identifier: hive_metastore - statement_terminator: ; - statement: use_catalog_statement: - keyword: USE - keyword: CATALOG - catalog_reference: quoted_identifier: "'hive_metastore'" - statement_terminator: ; - statement: use_catalog_statement: - keyword: USE - keyword: CATALOG - catalog_reference: quoted_identifier: '`some_catalog`' - statement_terminator: ; - statement: use_catalog_statement: - keyword: USE - keyword: CATALOG - catalog_reference: naked_identifier: some_cat - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/databricks/use_database.sql000066400000000000000000000004721503426445100252410ustar00rootroot00000000000000USE database_name; -- Use the 'userdb' USE userdb; -- Use the 'userdb1' USE userdb1; -- Keywords SCHEMA and DATABASE are interchangeable.
USE DATABASE database_name; USE SCHEMA database_name; USE IDENTIFIER('database_name'); USE DATABASE IDENTIFIER('database_name'); USE SCHEMA IDENTIFIER('database_name'); sqlfluff-3.4.2/test/fixtures/dialects/databricks/use_database.yml000066400000000000000000000041771503426445100252510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4394b95f2d290f61d1d3879f63fd43e04ba9a5e394063525bdf9c8990275975c file: - statement: use_statement: keyword: USE database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb1 - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: DATABASE - database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: SCHEMA - database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: quoted_literal: "'database_name'" end_bracket: ) - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: DATABASE - database_reference: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: quoted_literal: "'database_name'" end_bracket: ) - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: SCHEMA - database_reference: identifier_clause_segment: keyword: IDENTIFIER bracketed: start_bracket: ( expression: quoted_literal: "'database_name'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/000077500000000000000000000000001503426445100204355ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/db2/.sqlfluff000066400000000000000000000000311503426445100222520ustar00rootroot00000000000000[sqlfluff] dialect = db2 sqlfluff-3.4.2/test/fixtures/dialects/db2/alter_table.sql000066400000000000000000000002731503426445100234360ustar00rootroot00000000000000ALTER TABLE x DROP COLUMN y; ALTER TABLE x DROP y; ALTER TABLE x DROP COLUMN y CASCADE; ALTER TABLE x DROP y CASCADE; ALTER TABLE x DROP COLUMN y RESTRICT; ALTER TABLE x DROP y RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/db2/alter_table.yml000066400000000000000000000033721503426445100234430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 46d1387ec78ed51b3e8a3463c6a4c5966c5a221943d7077ab1b747a2155f71af file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - parameter: DROP - naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - naked_identifier: y - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - naked_identifier: y - keyword: RESTRICT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - naked_identifier: y - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/call_stored_proc.sql000066400000000000000000000014071503426445100244760ustar00rootroot00000000000000/* Examples from https://www.ibm.com/docs/en/db2/11.5?topic=statements-call */ -- Call with positional parameters CALL FOO(I1, I2); -- Call with named argument CALL Y.UPDATE_ORDER(TEST => 5); -- Call with positional and named argument CALL UPDATE_ORDER(5000, NEW_STATUS => 'Shipped'); -- Call with positional and multiple named arguments CALL UPDATE_ORDER( 5002, IN_CUSTID => 1001, NEW_STATUS => 'Received', NEW_COMMENTS => 'Customer satisfied with the order.' ); /* Examples from https://www.ibm.com/docs/en/db2/11.5?topic=commands-runstats-using-admin-cmd */ CALL SYSPROC.ADMIN_CMD( 'RUNSTATS ON TABLE employee ON KEY COLUMNS and INDEXES ALL' ); /* Test for no parameters. */ CALL DO_THE_THING(); /* Test for no parentheses. */ CALL DO_THE_THING; sqlfluff-3.4.2/test/fixtures/dialects/db2/call_stored_proc.yml000066400000000000000000000071241503426445100245020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 90a8a38282d756af742bc45a6315b63df620460fd3c85ccdf642376daa6a96fc file: - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: FOO function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: I1 - comma: ',' - expression: column_reference: naked_identifier: I2 - end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: naked_identifier: Y dot: .
function_name_identifier: UPDATE_ORDER function_contents: bracketed: start_bracket: ( named_argument: naked_identifier: TEST right_arrow: => expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: UPDATE_ORDER function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5000' comma: ',' named_argument: naked_identifier: NEW_STATUS right_arrow: => expression: quoted_literal: "'Shipped'" end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: UPDATE_ORDER function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5002' - comma: ',' - named_argument: naked_identifier: IN_CUSTID right_arrow: => expression: numeric_literal: '1001' - comma: ',' - named_argument: naked_identifier: NEW_STATUS right_arrow: => expression: quoted_literal: "'Received'" - comma: ',' - named_argument: naked_identifier: NEW_COMMENTS right_arrow: => expression: quoted_literal: "'Customer satisfied with the order.'" - end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: naked_identifier: SYSPROC dot: . function_name_identifier: ADMIN_CMD function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'RUNSTATS ON TABLE employee ON KEY COLUMNS and INDEXES\ \ ALL'" end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: DO_THE_THING function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function_name: function_name_identifier: DO_THE_THING - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/case.sql000066400000000000000000000001461503426445100220720ustar00rootroot00000000000000SELECT CASE WHEN ROLL = 1 THEN DAG WHEN ROLL > 1 THEN DAG_MOD - 1 DAYS END FROM MY_TABLE; sqlfluff-3.4.2/test/fixtures/dialects/db2/case.yml000066400000000000000000000034221503426445100220740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e3d61bb2d36284751454c0b350bb2ff492fec31530e7c3672c64f1caac175325 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG_MOD binary_operator: '-' numeric_literal: '1' keyword: DAYS - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/create_index.sql000066400000000000000000000034221503426445100236110ustar00rootroot00000000000000CREATE UNIQUE INDEX SESSION.FOO_IDX ON SESSION.FOO(column1) COMPRESS YES ALLOW REVERSE SCANS; CREATE UNIQUE INDEX UNIQUE_NAME ON PROJECT (PROJNAME); CREATE INDEX JOB_BY_DPT ON EMPLOYEE (WORKDEPT, JOB); CREATE INDEX JOB_BY_DPT_NME ON EMPLOYEE (WORKDEPT, JOB DESC, NAME RANDOM); CREATE UNIQUE INDEX JOB_BY_DEPT ON EMPLOYEE (WORKDEPT, JOB) SPECIFICATION ONLY; CREATE INDEX SPATIAL_INDEX ON CUSTOMER (LOCATION) EXTEND USING (GRID_EXTENSION (x'000100100010001000400010')); CREATE INDEX IDX1 ON TAB1 (COL1) COLLECT STATISTICS; CREATE INDEX IDX2 ON TAB1 (COL2) COLLECT DETAILED STATISTICS; CREATE INDEX IDX3 ON TAB1 (COL3) COLLECT SAMPLED DETAILED STATISTICS; CREATE UNIQUE INDEX A_IDX ON MYNUMBERDATA (A) IN IDX_TBSP; CREATE UNIQUE INDEX TIME_IDX ON MYNUMBERDATA (A, BUSINESS_TIME WITHOUT OVERLAPS); CREATE INDEX B_IDX ON MYNUMBERDATA (B) NOT PARTITIONED IN IDX_TBSP; CREATE INDEX EMPINDEX ON COMPANYINFO (COMPANYDOCS) GENERATE KEY USING XMLPATTERN '/company/emp/@id' AS SQL DOUBLE; CREATE INDEX EMPINDEX ON COMPANYINFO (COMPANYDOCS) GENERATE KEY USING XMLPATTERN '/child::company/child::emp/attribute::id' AS SQL DOUBLE; CREATE UNIQUE INDEX MYDOCSIDX ON MYDOCS (DOC) GENERATE KEY USING XMLPATTERN '/book/title' AS SQL VARCHAR(100); CREATE INDEX MYDOCSIDX ON MYDOCS (DOC) GENERATE KEY USING XMLPATTERN 'declare namespace b="http://www.example.com/book/"; declare namespace c="http://acme.org/chapters"; /b:book/c:chapter/@number' AS SQL DOUBLE; CREATE UNIQUE INDEX IDXPROJEST ON PROJECT (PROJNO) INCLUDE (PRSTAFF); CREATE UNIQUE INDEX IDXUSERID ON CUSTOMER (USER_ID) EXCLUDE NULL KEYS; CREATE INDEX EMP_UPPERNAME ON EMPLOYEE (UPPER(NAME), ID); CREATE INDEX EMP_PCT ON EMPLOYEE (ID) PCTFREE 10 LEVEL2 PCTFREE 20 PAGE SPLIT SYMMETRIC; CREATE INDEX EMP_CLUSTER ON EMPLOYEE (ID) CLUSTER; sqlfluff-3.4.2/test/fixtures/dialects/db2/create_index.yml000066400000000000000000000274661503426445100236310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 69d8bf392480a9a51a81b274c0109f1ad758f91f6f684e79f3677138b56c5ac4 file: - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: - naked_identifier: SESSION - dot: . 
- naked_identifier: FOO_IDX - keyword: 'ON' - table_reference: - naked_identifier: SESSION - dot: . - naked_identifier: FOO - bracketed: start_bracket: ( index_column_definition: naked_identifier: column1 end_bracket: ) - keyword: COMPRESS - keyword: 'YES' - keyword: ALLOW - keyword: REVERSE - keyword: SCANS - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: UNIQUE_NAME - keyword: 'ON' - table_reference: naked_identifier: PROJECT - bracketed: start_bracket: ( index_column_definition: naked_identifier: PROJNAME end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: JOB_BY_DPT - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: WORKDEPT - comma: ',' - index_column_definition: naked_identifier: JOB - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: JOB_BY_DPT_NME - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: WORKDEPT - comma: ',' - index_column_definition: naked_identifier: JOB keyword: DESC - comma: ',' - index_column_definition: naked_identifier: NAME keyword: RANDOM - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: JOB_BY_DEPT - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: WORKDEPT - comma: ',' - index_column_definition: naked_identifier: JOB - end_bracket: ) - keyword: SPECIFICATION - keyword: ONLY - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: SPATIAL_INDEX - keyword: 'ON' - table_reference: naked_identifier: CUSTOMER - bracketed: start_bracket: ( index_column_definition: naked_identifier: LOCATION end_bracket: ) - keyword: EXTEND - keyword: USING - bracketed: start_bracket: ( index_reference: naked_identifier: GRID_EXTENSION bracketed: start_bracket: ( expression: data_type: data_type_identifier: x quoted_literal: "'000100100010001000400010'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: IDX1 - keyword: 'ON' - table_reference: naked_identifier: TAB1 - bracketed: start_bracket: ( index_column_definition: naked_identifier: COL1 end_bracket: ) - keyword: COLLECT - keyword: STATISTICS - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: IDX2 - keyword: 'ON' - table_reference: naked_identifier: TAB1 - bracketed: start_bracket: ( index_column_definition: naked_identifier: COL2 end_bracket: ) - keyword: COLLECT - keyword: DETAILED - keyword: STATISTICS - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: IDX3 - keyword: 'ON' - table_reference: naked_identifier: TAB1 - bracketed: start_bracket: ( index_column_definition: naked_identifier: COL3 end_bracket: ) - keyword: COLLECT - keyword: SAMPLED - keyword: DETAILED - keyword: STATISTICS - 
statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: A_IDX - keyword: 'ON' - table_reference: naked_identifier: MYNUMBERDATA - bracketed: start_bracket: ( index_column_definition: naked_identifier: A end_bracket: ) - keyword: IN - tablespace_reference: naked_identifier: IDX_TBSP - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: TIME_IDX - keyword: 'ON' - table_reference: naked_identifier: MYNUMBERDATA - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: A - comma: ',' - keyword: BUSINESS_TIME - keyword: WITHOUT - keyword: OVERLAPS - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: B_IDX - keyword: 'ON' - table_reference: naked_identifier: MYNUMBERDATA - bracketed: start_bracket: ( index_column_definition: naked_identifier: B end_bracket: ) - keyword: NOT - keyword: PARTITIONED - keyword: IN - tablespace_reference: naked_identifier: IDX_TBSP - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: EMPINDEX - keyword: 'ON' - table_reference: naked_identifier: COMPANYINFO - bracketed: start_bracket: ( index_column_definition: naked_identifier: COMPANYDOCS end_bracket: ) - keyword: GENERATE - keyword: KEY - keyword: USING - keyword: XMLPATTERN - quoted_literal: "'/company/emp/@id'" - keyword: AS - keyword: SQL - data_type: data_type_identifier: DOUBLE - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: EMPINDEX - keyword: 'ON' - table_reference: naked_identifier: COMPANYINFO - bracketed: start_bracket: ( index_column_definition: naked_identifier: COMPANYDOCS end_bracket: ) - keyword: GENERATE - keyword: KEY - keyword: USING - keyword: XMLPATTERN - quoted_literal: "'/child::company/child::emp/attribute::id'" - keyword: AS - keyword: SQL - data_type: data_type_identifier: DOUBLE - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: MYDOCSIDX - keyword: 'ON' - table_reference: naked_identifier: MYDOCS - bracketed: start_bracket: ( index_column_definition: naked_identifier: DOC end_bracket: ) - keyword: GENERATE - keyword: KEY - keyword: USING - keyword: XMLPATTERN - quoted_literal: "'/book/title'" - keyword: AS - keyword: SQL - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: MYDOCSIDX - keyword: 'ON' - table_reference: naked_identifier: MYDOCS - bracketed: start_bracket: ( index_column_definition: naked_identifier: DOC end_bracket: ) - keyword: GENERATE - keyword: KEY - keyword: USING - keyword: XMLPATTERN - quoted_literal: "'declare namespace b=\"http://www.example.com/book/\";\n \ \ declare namespace c=\"http://acme.org/chapters\";\n /b:book/c:chapter/@number'" - keyword: AS - keyword: SQL - data_type: data_type_identifier: DOUBLE - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: IDXPROJEST - keyword: 'ON' - 
table_reference: naked_identifier: PROJECT - bracketed: start_bracket: ( index_column_definition: naked_identifier: PROJNO end_bracket: ) - keyword: INCLUDE - bracketed: start_bracket: ( naked_identifier: PRSTAFF end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: IDXUSERID - keyword: 'ON' - table_reference: naked_identifier: CUSTOMER - bracketed: start_bracket: ( index_column_definition: naked_identifier: USER_ID end_bracket: ) - keyword: EXCLUDE - keyword: 'NULL' - keyword: KEYS - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: EMP_UPPERNAME - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: - start_bracket: ( - index_column_definition: expression: function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: NAME end_bracket: ) - comma: ',' - index_column_definition: naked_identifier: ID - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: EMP_PCT - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: start_bracket: ( index_column_definition: naked_identifier: ID end_bracket: ) - keyword: PCTFREE - numeric_literal: '10' - keyword: LEVEL2 - keyword: PCTFREE - numeric_literal: '20' - keyword: PAGE - keyword: SPLIT - keyword: SYMMETRIC - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: EMP_CLUSTER - keyword: 'ON' - table_reference: naked_identifier: EMPLOYEE - bracketed: start_bracket: ( index_column_definition: naked_identifier: ID end_bracket: ) - keyword: CLUSTER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.sql000066400000000000000000000002221503426445100312070ustar00rootroot00000000000000-- Valid field names with # pound/hash sign CREATE TABLE test ( my_field_1# decimal(2,0), #my_field_1 decimal(2,0), # decimal(2,0) ); sqlfluff-3.4.2/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml000066400000000000000000000032621503426445100312200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 13105ac0dbc90718b62d6e22bace4ccb56603dbc9629cffda3add9014a2307a1 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_definition: naked_identifier: my_field_1# data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: '#my_field_1' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: '#' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/day_unit.sql000066400000000000000000000001451503426445100227720ustar00rootroot00000000000000SELECT CASE WHEN ROLL = 1 THEN DAG WHEN ROLL > 1 THEN DAG_MOD - 1 DAY END FROM MY_TABLE; sqlfluff-3.4.2/test/fixtures/dialects/db2/day_unit.yml000066400000000000000000000034211503426445100227740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac1d01c1f619dd06023730cbe95c7e1485af36e400fb5488abe29aaf84f65736 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG_MOD binary_operator: '-' numeric_literal: '1' keyword: DAY - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/declare_global_temporary_table.sql000066400000000000000000000063301503426445100273500ustar00rootroot00000000000000DECLARE GLOBAL TEMPORARY TABLE SESSION.TEMP_EMP ( EMPNO CHAR(6) NOT NULL, SALARY DECIMAL(9, 2), BONUS DECIMAL(9, 2), COMM DECIMAL(9, 2) ) ON COMMIT PRESERVE ROWS; DECLARE GLOBAL TEMPORARY TABLE TEMPTAB1 LIKE USER1.EMPTAB INCLUDING IDENTITY ON COMMIT PRESERVE ROWS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ); DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) ORGANIZE BY ROW; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) ORGANIZE BY COLUMN; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) ON COMMIT DELETE ROWS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) ON COMMIT PRESERVE ROWS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) LOGGED; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) NOT LOGGED ON ROLLBACK DELETE 
ROWS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) NOT LOGGED ON ROLLBACK PRESERVE ROWS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) WITH REPLACE; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) IN USERTEMPSPACE1; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) DISTRIBUTE BY RANDOM; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) DISTRIBUTE ON RANDOM; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE ( EMPNO CHAR(6) NOT NULL ) DISTRIBUTE BY HASH (EMP_NO); DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH DATA; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA INCLUDING DEFAULTS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA INCLUDING COLUMN DEFAULTS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA EXCLUDING DEFAULTS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA EXCLUDING COLUMN DEFAULTS; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA EXCLUDING IDENTITY; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA INCLUDING IDENTITY; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA EXCLUDING IDENTITY COLUMN ATTRIBUTES; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH NO DATA INCLUDING IDENTITY COLUMN ATTRIBUTES; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH DATA DISTRIBUTE ON HASH (COL1) IN USERTEMPSPACE1 ORGANIZE BY ROW ON COMMIT PRESERVE ROWS NOT LOGGED WITH REPLACE; DECLARE GLOBAL TEMPORARY TABLE MY_DGTT_TABLE AS ( SELECT COL1 FROM MY_SCHEMA.MY_TABLE ) WITH DATA IN USERTEMPSPACE1 DISTRIBUTE ON HASH (COL1) ORGANIZE BY COLUMN ON COMMIT PRESERVE ROWS NOT LOGGED; sqlfluff-3.4.2/test/fixtures/dialects/db2/declare_global_temporary_table.yml000066400000000000000000000602041503426445100273520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4c49735cc4fa31ea57cc9347534d097600ff73cca7803ff5de34178d4798bf59 file: - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: - naked_identifier: SESSION - dot: . 
- naked_identifier: TEMP_EMP - bracketed: - start_bracket: ( - column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: SALARY data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: BONUS data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: COMM data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: TEMPTAB1 - keyword: LIKE - table_reference: - naked_identifier: USER1 - dot: . - naked_identifier: EMPTAB - copy_options: - keyword: INCLUDING - keyword: IDENTITY - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: ORGANIZE - keyword: BY - keyword: ROW - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: ORGANIZE - keyword: BY - keyword: COLUMN - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - 
keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: LOGGED - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: NOT - keyword: LOGGED - keyword: 'ON' - keyword: ROLLBACK - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: NOT - keyword: LOGGED - keyword: 'ON' - keyword: ROLLBACK - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: WITH - keyword: REPLACE - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: IN - tablespace_reference: naked_identifier: USERTEMPSPACE1 - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - distribution_clause: - keyword: DISTRIBUTE - keyword: BY 
- keyword: RANDOM - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - distribution_clause: - keyword: DISTRIBUTE - keyword: 'ON' - keyword: RANDOM - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EMPNO data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - distribution_clause: - keyword: DISTRIBUTE - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: EMP_NO end_bracket: ) - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: INCLUDING - keyword: DEFAULTS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: INCLUDING - keyword: COLUMN - keyword: DEFAULTS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: EXCLUDING - keyword: DEFAULTS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: EXCLUDING - keyword: COLUMN - keyword: DEFAULTS - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: EXCLUDING - keyword: IDENTITY - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: INCLUDING - keyword: IDENTITY - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: EXCLUDING - keyword: IDENTITY - keyword: COLUMN - keyword: ATTRIBUTES - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - copy_options: - keyword: INCLUDING - keyword: IDENTITY - keyword: COLUMN - keyword: ATTRIBUTES - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - distribution_clause: - keyword: DISTRIBUTE - keyword: 'ON' - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: COL1 end_bracket: ) - keyword: IN - tablespace_reference: naked_identifier: USERTEMPSPACE1 - keyword: ORGANIZE - keyword: BY - keyword: ROW - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: NOT - keyword: LOGGED - keyword: WITH - keyword: REPLACE - statement_terminator: ; - statement: declare_temp_table: - keyword: DECLARE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: MY_DGTT_TABLE - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: COL1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - keyword: IN - tablespace_reference: naked_identifier: USERTEMPSPACE1 - distribution_clause: - keyword: DISTRIBUTE - keyword: 'ON' - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: COL1 end_bracket: ) - keyword: ORGANIZE - keyword: BY - keyword: COLUMN - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: NOT - keyword: LOGGED - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/function_within_group.sql000066400000000000000000000001521503426445100255770ustar00rootroot00000000000000SELECT LISTAGG(A_COLUMN_NAME, 'X') WITHIN GROUP(ORDER BY A_COLUMN_NAME) AS MY_COLUMN FROM A_TABLE sqlfluff-3.4.2/test/fixtures/dialects/db2/function_within_group.yml000066400000000000000000000031241503426445100256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 02726def5633cf902bc961d544630416a722cc55733ed6731946a46aafbfeacb file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: A_COLUMN_NAME - comma: ',' - expression: quoted_literal: "'X'" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: A_COLUMN_NAME end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: MY_COLUMN from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A_TABLE sqlfluff-3.4.2/test/fixtures/dialects/db2/join_types.sql000066400000000000000000000027421503426445100233460ustar00rootroot00000000000000-- inner join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee INNER JOIN department ON employee.deptno = department.deptno; -- left join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT OUTER JOIN department ON employee.deptno = department.deptno; -- right join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT OUTER JOIN department ON employee.deptno = department.deptno; -- full join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL OUTER JOIN department ON employee.deptno = department.deptno; -- cross join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee CROSS JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee, department; sqlfluff-3.4.2/test/fixtures/dialects/db2/join_types.yml000066400000000000000000000343251503426445100233520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 90e19f9063ac31d3d2dba6ebfbd6c2ddaceb381a3fc409787f58bb4f83ad59a2 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/labeled_duration.sql000066400000000000000000000007641503426445100244620ustar00rootroot00000000000000SELECT * FROM TABX WHERE MY_DT BETWEEN CURRENT_DATE - (DAYS(CURRENT_DATE) - 1) DAYS - 180 DAYS AND CURRENT_DATE; SELECT CURRENT_DATE + COL1 YEAR FROM TABY; VALUES NOW + 1 MICROSECOND, NOW + 1 MICROSECONDS, NOW - 1 SECOND, NOW - 1 SECONDS, NOW - 1 MINUTE, NOW - 1 MINUTES, NOW - 1 HOUR, NOW + 1 HOURS, NOW - 1 DAY, CURRENT_DATE - 1 DAYS, NOW - 1 MONTH, NOW - 1 MONTHS, NOW() - 1 YEAR, NOW - 1 YEARS, NOW - 1 DAY - 2 HOURS, (NOW - 1 DAY) - 2 HOURS, NOW - (2 * 3) DAY - 2 HOURS, NOW + .5 DAY; sqlfluff-3.4.2/test/fixtures/dialects/db2/labeled_duration.yml000066400000000000000000000140451503426445100244610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ec371c6cc1a0a85b9415dbb1b126d30a83961107deeb4e9a9997594200a7a4a7 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABX where_clause: keyword: WHERE expression: - column_reference: naked_identifier: MY_DT - keyword: BETWEEN - bare_function: keyword: CURRENT_DATE - binary_operator: '-' - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DAYS function_contents: bracketed: start_bracket: ( expression: bare_function: keyword: CURRENT_DATE end_bracket: ) binary_operator: '-' numeric_literal: '1' end_bracket: ) - keyword: DAYS - binary_operator: '-' - numeric_literal: '180' - keyword: DAYS - keyword: AND - bare_function: keyword: CURRENT_DATE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: keyword: CURRENT_DATE binary_operator: + column_reference: naked_identifier: COL1 keyword: YEAR from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABY - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: column_reference: naked_identifier: NOW binary_operator: + numeric_literal: '1' keyword: MICROSECOND - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: + numeric_literal: '1' keyword: MICROSECONDS - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: SECOND - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: SECONDS - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: MINUTE - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: MINUTES - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: HOUR - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: + numeric_literal: '1' keyword: HOURS - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: DAY - comma: ',' - expression: bare_function: keyword: CURRENT_DATE binary_operator: '-' numeric_literal: '1' keyword: DAYS - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: MONTH - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: MONTHS - comma: ',' - expression: function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '-' numeric_literal: '1' keyword: YEAR - comma: ',' - expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: YEARS - comma: ',' - expression: - column_reference: naked_identifier: NOW - binary_operator: '-' - numeric_literal: '1' - keyword: DAY - binary_operator: '-' - numeric_literal: '2' - keyword: HOURS - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: NOW binary_operator: '-' numeric_literal: '1' keyword: DAY end_bracket: ) binary_operator: '-' numeric_literal: 
'2'
        keyword: HOURS
    - comma: ','
    - expression:
      - column_reference:
          naked_identifier: NOW
      - binary_operator: '-'
      - bracketed:
          start_bracket: (
          expression:
          - numeric_literal: '2'
          - binary_operator: '*'
          - numeric_literal: '3'
          end_bracket: )
      - keyword: DAY
      - binary_operator: '-'
      - numeric_literal: '2'
      - keyword: HOURS
    - comma: ','
    - expression:
        column_reference:
          naked_identifier: NOW
        binary_operator: +
        numeric_literal: '.5'
        keyword: DAY
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/lateral.sql

-- comma cross join with a lateral
SELECT X.NUM, D.MY_COL
FROM MY_SCHEMA.MY_TABLE AS D, LATERAL (VALUES 0, 1) AS X (NUM);

SELECT X.NUM
FROM LATERAL (values (0), (1)) AS X (NUM);

sqlfluff-3.4.2/test/fixtures/dialects/db2/lateral.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d8b140fb9cc1c853aeda1a516b109fbeb56bdb9491aeea517dcd243e3fb87225
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: X
          - dot: .
          - naked_identifier: NUM
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: D
          - dot: .
          - naked_identifier: MY_COL
      from_clause:
      - keyword: FROM
      - from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: MY_SCHEMA
              - dot: .
              - naked_identifier: MY_TABLE
            alias_expression:
              alias_operator:
                keyword: AS
              naked_identifier: D
      - comma: ','
      - from_expression:
          from_expression_element:
            keyword: LATERAL
            bracketed:
              start_bracket: (
              table_expression:
                values_clause:
                - keyword: VALUES
                - expression:
                    numeric_literal: '0'
                - comma: ','
                - expression:
                    numeric_literal: '1'
              end_bracket: )
            alias_expression:
              alias_operator:
                keyword: AS
              naked_identifier: X
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: NUM
                end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: X
          - dot: .
          - naked_identifier: NUM
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            keyword: LATERAL
            bracketed:
              start_bracket: (
              table_expression:
                values_clause:
                - keyword: values
                - bracketed:
                    start_bracket: (
                    expression:
                      numeric_literal: '0'
                    end_bracket: )
                - comma: ','
                - bracketed:
                    start_bracket: (
                    expression:
                      numeric_literal: '1'
                    end_bracket: )
              end_bracket: )
            alias_expression:
              alias_operator:
                keyword: AS
              naked_identifier: X
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: NUM
                end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/offset_fetch_limit.sql

-- Offset
select column_name from table_name offset 2 row;
select column_name from table_name offset 2 rows;
select column_name from table_name offset 1 + 2 row;
select column_name from table_name offset 1 + 2 rows;
select column_name from table_name offset (1 + 2) row;
select column_name from table_name offset (1 + 2) rows;

-- Fetch
select column_name from table_name fetch first row only;
select column_name from table_name fetch first rows only;
select column_name from table_name fetch first 2 row only;
select column_name from table_name fetch first 2 rows only;
select column_name from table_name fetch first 1 + 2 row only;
select column_name from table_name fetch first 1 + 2 rows only;
select column_name from table_name fetch first (1 + 2) row only;
select column_name from table_name fetch first (1 + 2) rows only;

-- Offset and Fetch
select column_name from table_name offset 1 row fetch first 2 row only;
select column_name from table_name offset 1 row fetch first 2 rows only;
select column_name from table_name offset 1 + 2 row fetch first 1 + 2 row only;
select column_name from table_name offset 1 + 2 row fetch first 1 + 2 rows only;

-- Limit alternative syntax
select column_name from table_name limit 1;
select column_name from table_name limit 2 offset 1;
select column_name from table_name limit 1, 2;

sqlfluff-3.4.2/test/fixtures/dialects/db2/offset_fetch_limit.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 935dc8eb530784fd063ef92dd5a2231ef2ff737c84d9721772fc0f5783ef2d28 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - numeric_literal: '2' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - numeric_literal: '2' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - numeric_literal: '1' - keyword: row fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name offset_clause: - keyword: offset - numeric_literal: '1' - keyword: row fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: 
            naked_identifier: column_name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_name
      offset_clause:
      - keyword: offset
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
      - keyword: row
      fetch_clause:
      - keyword: fetch
      - keyword: first
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
      - keyword: row
      - keyword: only
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
            naked_identifier: column_name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_name
      offset_clause:
      - keyword: offset
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
      - keyword: row
      fetch_clause:
      - keyword: fetch
      - keyword: first
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
      - keyword: rows
      - keyword: only
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
            naked_identifier: column_name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_name
      limit_clause:
        keyword: limit
        numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
            naked_identifier: column_name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_name
      limit_clause:
      - keyword: limit
      - numeric_literal: '2'
      - keyword: offset
      - numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
            naked_identifier: column_name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table_name
      limit_clause:
      - keyword: limit
      - numeric_literal: '1'
      - comma: ','
      - numeric_literal: '2'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/over.sql

SELECT RANK() OVER (PARTITION BY ABCD ORDER BY EFGH DESC) AS A_RANK
FROM A_TABLE;

sqlfluff-3.4.2/test/fixtures/dialects/db2/over.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d870ac1042a7ad1656b98cf9791e1a42a753cb7380857a85bd7bd2dcf651b037
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: RANK
            function_contents:
              bracketed:
                start_bracket: (
                end_bracket: )
            over_clause:
              keyword: OVER
              bracketed:
                start_bracket: (
                window_specification:
                  partitionby_clause:
                  - keyword: PARTITION
                  - keyword: BY
                  - expression:
                      column_reference:
                        naked_identifier: ABCD
                  orderby_clause:
                  - keyword: ORDER
                  - keyword: BY
                  - column_reference:
                      naked_identifier: EFGH
                  - keyword: DESC
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: A_RANK
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: A_TABLE
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/select.sql

-- escaped double quotes
SELECT """t".*
FROM MYSCHEMA.MYTABLE """t"
;

sqlfluff-3.4.2/test/fixtures/dialects/db2/select.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: aedcd7d402c17253903e926ae0ffcab203629d52c5324b7706a81c63f400e350
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              quoted_identifier: '"""t"'
              dot: .
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: MYSCHEMA
              - dot: .
              - naked_identifier: MYTABLE
            alias_expression:
              quoted_identifier: '"""t"'
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/select_ordered_nested_sets.sql

(
    SELECT * FROM tbl1
    EXCEPT
    SELECT * FROM tbl2
)
UNION ALL
(
    SELECT * FROM tbl2
    EXCEPT
    SELECT * FROM tbl1
    ORDER BY column_1
)
ORDER BY column_2;

sqlfluff-3.4.2/test/fixtures/dialects/db2/select_ordered_nested_sets.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2110b2cab8e4479d4f37f01644f6f085c055cf3c5eabdf9a29a0f040a409148e
file:
  statement:
    set_expression:
    - bracketed:
        start_bracket: (
        set_expression:
        - select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: tbl1
        - set_operator:
            keyword: EXCEPT
        - select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: tbl2
        end_bracket: )
    - set_operator:
      - keyword: UNION
      - keyword: ALL
    - bracketed:
        start_bracket: (
        set_expression:
        - select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: tbl2
        - set_operator:
            keyword: EXCEPT
        - select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: tbl1
        - orderby_clause:
          - keyword: ORDER
          - keyword: BY
          - column_reference:
              naked_identifier: column_1
        end_bracket: )
    - orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: column_2
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/db2/special_registers.sql

VALUES (
    CURRENT_DATE,
    CURRENT_PATH,
    CURRENT_SCHEMA,
    CURRENT_SERVER,
    CURRENT_TIME,
    CURRENT_TIMESTAMP,
    CURRENT_TIMEZONE,
    CURRENT_USER,
    CURRENT CLIENT_ACCTNG,
    CURRENT CLIENT_APPLNAME,
    CURRENT CLIENT_USERID,
    CURRENT CLIENT_WRKSTNNAME,
    CURRENT DATE,
    CURRENT DBPARTITIONNUM,
    CURRENT DECFLOAT ROUNDING MODE,
    CURRENT DEFAULT TRANSFORM GROUP,
    CURRENT DEGREE,
    CURRENT EXPLAIN MODE,
    CURRENT EXPLAIN SNAPSHOT,
    CURRENT FEDERATED ASYNCHRONY,
    CURRENT IMPLICIT XMLPARSE OPTION,
    CURRENT ISOLATION,
    CURRENT LOCALE LC_MESSAGES,
    CURRENT LOCALE LC_TIME,
    CURRENT LOCK TIMEOUT,
    CURRENT MAINTAINED TABLE TYPES FOR OPTIMIZATION,
    CURRENT MDC ROLLOUT MODE,
    CURRENT MEMBER,
    CURRENT OPTIMIZATION PROFILE,
    CURRENT PACKAGE PATH,
    CURRENT PATH,
    CURRENT QUERY OPTIMIZATION,
    CURRENT REFRESH AGE,
    CURRENT SCHEMA,
    CURRENT SERVER,
    CURRENT SQL_CCFLAGS,
    CURRENT TEMPORAL BUSINESS_TIME,
    CURRENT TEMPORAL SYSTEM_TIME,
    CURRENT TIME,
    CURRENT TIMESTAMP,
    CURRENT TIMEZONE,
    CURRENT USER,
    SESSION_USER,
    SYSTEM_USER,
    USER
);

sqlfluff-3.4.2/test/fixtures/dialects/db2/special_registers.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dcbd9bfcc9e05a033877b3f76a67825a9ab37555142c74489bd1ce02db587e1c file: statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: bare_function: keyword: CURRENT_DATE - comma: ',' - expression: bare_function: keyword: CURRENT_PATH - comma: ',' - expression: bare_function: keyword: CURRENT_SCHEMA - comma: ',' - expression: bare_function: keyword: CURRENT_SERVER - comma: ',' - expression: bare_function: keyword: CURRENT_TIME - comma: ',' - expression: bare_function: keyword: CURRENT_TIMESTAMP - comma: ',' - expression: bare_function: keyword: CURRENT_TIMEZONE - comma: ',' - expression: bare_function: keyword: CURRENT_USER - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: CLIENT_ACCTNG - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: CLIENT_APPLNAME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: CLIENT_USERID - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: CLIENT_WRKSTNNAME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: DATE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: DBPARTITIONNUM - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: DECFLOAT - keyword: ROUNDING - keyword: MODE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: DEFAULT - keyword: TRANSFORM - keyword: GROUP - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: DEGREE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: EXPLAIN - keyword: MODE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: EXPLAIN - keyword: SNAPSHOT - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: FEDERATED - keyword: ASYNCHRONY - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: IMPLICIT - keyword: XMLPARSE - keyword: OPTION - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: ISOLATION - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: LOCALE - keyword: LC_MESSAGES - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: LOCALE - keyword: LC_TIME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: LOCK - keyword: TIMEOUT - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: MAINTAINED - keyword: TABLE - keyword: TYPES - keyword: FOR - keyword: OPTIMIZATION - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: MDC - keyword: ROLLOUT - keyword: MODE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: MEMBER - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: OPTIMIZATION - keyword: PROFILE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: PACKAGE - keyword: PATH - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: PATH - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: QUERY - keyword: OPTIMIZATION - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: REFRESH - keyword: AGE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: SCHEMA - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: SERVER - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: SQL_CCFLAGS - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: TEMPORAL - keyword: BUSINESS_TIME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: TEMPORAL - keyword: 
SYSTEM_TIME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: TIME - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: TIMESTAMP - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: TIMEZONE - comma: ',' - expression: bare_function: - keyword: CURRENT - keyword: USER - comma: ',' - expression: bare_function: keyword: SESSION_USER - comma: ',' - expression: bare_function: keyword: SYSTEM_USER - comma: ',' - expression: bare_function: keyword: USER - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/values.sql000066400000000000000000000016011503426445100224530ustar00rootroot00000000000000-- 3 rows of 1 column VALUES (1), (2), (3); -- 3 rows of 1 column VALUES 1, 2, 3; -- 1 row of 3 columns VALUES (1, 2, 3); -- 3 rows of 2 columns VALUES (1, 21), (2, 22), (3, 23); -- nested bracketed values VALUES ('A', ('S')), ('C', 'X'); -- values with sets VALUES 1, 2 EXCEPT VALUES 2; -- post order by VALUES 1, 2, 3 ORDER BY 1 OFFSET 1 ROWS FETCH FIRST 1 ROWS ONLY; -- values use within a CTE WITH CTE1 (C) AS ( VALUES 'A', 'B' ) SELECT * FROM CTE1; -- values use within a lateral join SELECT X.NUM, D.MY_COL FROM MY_SCHEMA.MY_TABLE AS D CROSS JOIN LATERAL(VALUES 0, 1) AS X (NUM); -- values within an insert statement INSERT INTO MY_TAB_DFLT VALUES (1, 2, 3), (1, NULL, DEFAULT); INSERT INTO MY_TAB_DFLT VALUES DEFAULT; INSERT INTO MY_TAB_DFLT VALUES (DEFAULT); INSERT INTO MY_TAB_DFLT VALUES DEFAULT, NULL, 1; INSERT INTO MY_TAB_DFLT VALUES (DEFAULT), (NULL), (1); sqlfluff-3.4.2/test/fixtures/dialects/db2/values.yml000066400000000000000000000206331503426445100224630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1a76fde04cb055655dfa2cf24821318256e2cbae69e842391eede74562ef49b3 file: - statement: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '21' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '22' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '23' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'A'" - comma: ',' - expression: bracketed: start_bracket: ( expression: quoted_literal: "'S'" end_bracket: ) - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'C'" - comma: ',' - expression: quoted_literal: "'X'" - end_bracket: ) - statement_terminator: ; - statement: set_expression: - values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - set_operator: keyword: EXCEPT - values_clause: keyword: VALUES expression: numeric_literal: '2' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - limit_clause: offset_clause: - keyword: OFFSET - numeric_literal: '1' - keyword: ROWS fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '1' - keyword: ROWS - keyword: ONLY - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: CTE1 cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: C end_bracket: ) keyword: AS bracketed: start_bracket: ( values_clause: - keyword: VALUES - expression: quoted_literal: "'A'" - comma: ',' - expression: quoted_literal: "'B'" end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: CTE1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: X - dot: . - naked_identifier: NUM - comma: ',' - select_clause_element: column_reference: - naked_identifier: D - dot: . 
- naked_identifier: MY_COL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE alias_expression: alias_operator: keyword: AS naked_identifier: D join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: keyword: LATERAL bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: '1' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: X bracketed: start_bracket: ( identifier_list: naked_identifier: NUM end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: MY_TAB_DFLT - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: null_literal: 'NULL' - comma: ',' - keyword: DEFAULT - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: MY_TAB_DFLT - values_clause: - keyword: VALUES - keyword: DEFAULT - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: MY_TAB_DFLT - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: MY_TAB_DFLT - values_clause: - keyword: VALUES - keyword: DEFAULT - comma: ',' - expression: null_literal: 'NULL' - comma: ',' - expression: numeric_literal: '1' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: MY_TAB_DFLT - values_clause: - keyword: VALUES - bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: null_literal: 'NULL' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/db2/where_like.sql000066400000000000000000000000561503426445100232750ustar00rootroot00000000000000SELECT col1 FROM test WHERE col1 LIKE '%sql'; sqlfluff-3.4.2/test/fixtures/dialects/db2/where_like.yml000066400000000000000000000017241503426445100233020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ec2f48e7e2a6454492da78df84751314161145b97c5da26aa7d54b094ff21c79
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: col1
          keyword: LIKE
          quoted_literal: "'%sql'"
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/doris/

sqlfluff-3.4.2/test/fixtures/dialects/doris/.sqlfluff

[sqlfluff]
dialect = doris

sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_basic.sql

CREATE TABLE hive_catalog.hive_db.hive_table
ENGINE=hive
AS SELECT * FROM source_table;

sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_basic.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 07d02d1fee78b4e68e814bf576670765bb58c4dc7e18e13240aa18ec0bb0ea87
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: hive_catalog
      - dot: .
      - naked_identifier: hive_db
      - dot: .
      - naked_identifier: hive_table
    - keyword: ENGINE
    - comparison_operator:
        raw_comparison_operator: '='
    - engine_type: hive
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: source_table
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_external.sql

CREATE EXTERNAL TABLE hive_catalog.hive_db.external_hive_table
ENGINE=hive
PROPERTIES (
    'file_format' = 'orc',
    'external_location' = 'hdfs://namenode:9000/user/hive/warehouse/external_table'
)
AS SELECT * FROM source_table;

sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_external.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
- naked_identifier: external_hive_table - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'orc'" - comma: ',' - quoted_literal: "'external_location'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs://namenode:9000/user/hive/warehouse/external_table'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_with_comment.sql000066400000000000000000000003111503426445100320460ustar00rootroot00000000000000CREATE TABLE hive_catalog.hive_db.hive_table_with_comment ENGINE=hive COMMENT 'This is a Hive table created as select.' PROPERTIES ( 'file_format' = 'parquet' ) AS SELECT id, name FROM source_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_with_comment.yml000066400000000000000000000032111503426445100320520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 27dc8142ebb0b095f76bc8c2dc2fa866ef5b8de6892f8e47f5ae4ecfbe07c67d file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . - naked_identifier: hive_table_with_comment - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - comment_clause: keyword: COMMENT quoted_literal: "'This is a Hive table created as select.'" - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_with_properties.sql000066400000000000000000000002701503426445100326040ustar00rootroot00000000000000CREATE TABLE hive_catalog.hive_db.hive_table ENGINE=hive PROPERTIES ( 'file_format' = 'parquet', 'hive.metastore.uris' = 'thrift://127.0.0.1:9083' ) AS SELECT * FROM source_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_as_select_with_properties.yml000066400000000000000000000031501503426445100326060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ece51c23a5f30451b84b757688f1fbb128f7440af0d8fcb71411ff0936df3576 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . - naked_identifier: hive_table - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - comma: ',' - quoted_literal: "'hive.metastore.uris'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'thrift://127.0.0.1:9083'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_basic.sql000066400000000000000000000002461503426445100264170ustar00rootroot00000000000000CREATE TABLE hive_catalog.hive_db.hive_table ( id INT, name STRING, age INT, email STRING ) ENGINE=hive PROPERTIES ( 'file_format' = 'parquet' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_basic.yml000066400000000000000000000031161503426445100264200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 79ad88ba9d11208eb5086ad270fc5255f981881a36aec5df8593e5be4829f40b file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . - naked_identifier: hive_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: email data_type: data_type_identifier: STRING - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_external.sql000066400000000000000000000002411503426445100271530ustar00rootroot00000000000000CREATE EXTERNAL TABLE hive_catalog.hive_db.external_table ( id INT, name STRING, data STRING ) ENGINE=hive PROPERTIES ( 'file_format' = 'orc' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_external.yml000066400000000000000000000027341503426445100271660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e163df12fe25719f525f2f963e7806220d25781810fb274eee0a480448e8cdc0 file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . - naked_identifier: external_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: data data_type: data_type_identifier: STRING - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'orc'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_comment.sql000066400000000000000000000005541503426445100300350ustar00rootroot00000000000000CREATE TABLE hive_catalog.hive_db.commented_table ( id INT COMMENT 'Primary key', name STRING COMMENT 'User name', age INT COMMENT 'User age', email STRING COMMENT 'User email address' ) ENGINE=hive COMMENT 'This is a test table for Hive catalog' PROPERTIES ( 'file_format' = 'parquet', 'hive.metastore.uris' = 'thrift://127.0.0.1:9083' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_comment.yml000066400000000000000000000046711503426445100300430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 949973771d138a0cbaa9dfe05a241913ee853ff5f0019dde71fef12e06fa1ec8 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . 
- naked_identifier: commented_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'Primary key'" - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'User name'" - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'User age'" - comma: ',' - column_definition: naked_identifier: email data_type: data_type_identifier: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'User email address'" - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - comment_clause: keyword: COMMENT quoted_literal: "'This is a test table for Hive catalog'" - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - comma: ',' - quoted_literal: "'hive.metastore.uris'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'thrift://127.0.0.1:9083'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_complex_properties.sql000066400000000000000000000007611503426445100323160ustar00rootroot00000000000000CREATE TABLE hive_catalog.hive_db.complex_table ( user_id BIGINT, username STRING, age INT, score DECIMAL(10, 2), create_time DATETIME, is_active BOOLEAN ) ENGINE=hive PROPERTIES ( 'file_format' = 'orc', 'hive.metastore.uris' = 'thrift://127.0.0.1:9083', 'fs.defaultFS' = 'hdfs://namenode:9000', 'hadoop.username' = 'hive', 'hive.metastore.kerberos.principal' = 'hive/_HOST@EXAMPLE.COM', 'hive.metastore.kerberos.keytab' = '/path/to/hive.keytab' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_complex_properties.yml000066400000000000000000000057321503426445100323230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 21785ff74c1363729275db712e8a80aa5b4725a63e49c9895f92a45d954bdb54 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . 
- naked_identifier: complex_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: BIGINT - comma: ',' - column_definition: naked_identifier: username data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: score data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: create_time keyword: DATETIME - comma: ',' - column_definition: naked_identifier: is_active data_type: data_type_identifier: BOOLEAN - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'orc'" - comma: ',' - quoted_literal: "'hive.metastore.uris'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'thrift://127.0.0.1:9083'" - comma: ',' - quoted_literal: "'fs.defaultFS'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs://namenode:9000'" - comma: ',' - quoted_literal: "'hadoop.username'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive'" - comma: ',' - quoted_literal: "'hive.metastore.kerberos.principal'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive/_HOST@EXAMPLE.COM'" - comma: ',' - quoted_literal: "'hive.metastore.kerberos.keytab'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/path/to/hive.keytab'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_partition.sql000066400000000000000000000004511503426445100304000ustar00rootroot00000000000000CREATE TABLE partition_table ( `col1` BOOLEAN COMMENT 'col1', `col2` INT COMMENT 'col2', `col3` BIGINT COMMENT 'col3', `col4` DECIMAL(2,1) COMMENT 'col4', `pt1` VARCHAR COMMENT 'pt1' ) ENGINE=hive PARTITION BY LIST (pt1) () PROPERTIES ( 'file_format'='orc', 'compression'='zlib' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_partition.yml000066400000000000000000000057401503426445100304100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6e042e7541a89efeb0b39ff921966793e2de819416f1605ed328812f8ba6aef3 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: partition_table - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`col1`' data_type: data_type_identifier: BOOLEAN column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col1'" - comma: ',' - column_definition: quoted_identifier: '`col2`' data_type: data_type_identifier: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col2'" - comma: ',' - column_definition: quoted_identifier: '`col3`' data_type: data_type_identifier: BIGINT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col3'" - comma: ',' - column_definition: quoted_identifier: '`col4`' data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '1' - end_bracket: ) column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col4'" - comma: ',' - column_definition: quoted_identifier: '`pt1`' data_type: data_type_identifier: VARCHAR column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'pt1'" - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - partition_segment: - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: pt1 end_bracket: ) - bracketed: start_bracket: ( end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'orc'" - comma: ',' - quoted_literal: "'compression'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'zlib'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_partition2.sql000066400000000000000000000005151503426445100304630ustar00rootroot00000000000000CREATE TABLE partition_table ( `col1` BOOLEAN COMMENT 'col1', `col2` INT COMMENT 'col2', `col3` BIGINT COMMENT 'col3', `col4` DECIMAL(2,1) COMMENT 'col4', `pt1` VARCHAR COMMENT 'pt1', `pt2` VARCHAR COMMENT 'pt2' ) ENGINE=hive PARTITION BY LIST (pt1, pt2) () PROPERTIES ( 'file_format'='orc', 'compression'='zlib' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_hive_table_with_partition2.yml000066400000000000000000000065131503426445100304710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1106ec8ce5fc88010c691c857419d44a014598efc607d1130515488202646e35 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: partition_table - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`col1`' data_type: data_type_identifier: BOOLEAN column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col1'" - comma: ',' - column_definition: quoted_identifier: '`col2`' data_type: data_type_identifier: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col2'" - comma: ',' - column_definition: quoted_identifier: '`col3`' data_type: data_type_identifier: BIGINT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col3'" - comma: ',' - column_definition: quoted_identifier: '`col4`' data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '1' - end_bracket: ) column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col4'" - comma: ',' - column_definition: quoted_identifier: '`pt1`' data_type: data_type_identifier: VARCHAR column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'pt1'" - comma: ',' - column_definition: quoted_identifier: '`pt2`' data_type: data_type_identifier: VARCHAR column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'pt2'" - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: hive - partition_segment: - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: - start_bracket: ( - column_reference: naked_identifier: pt1 - comma: ',' - column_reference: naked_identifier: pt2 - end_bracket: ) - bracketed: start_bracket: ( end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'file_format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'orc'" - comma: ',' - quoted_literal: "'compression'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'zlib'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_advanced_aggregate.sql000066400000000000000000000011741503426445100300770ustar00rootroot00000000000000CREATE TABLE advanced_aggregate_test ( order_id BIGINT, customer_id INT, product_id INT, quantity INT SUM, unit_price DECIMAL(10, 2) MAX, total_amount DECIMAL(12, 2) SUM, order_status STRING REPLACE, order_date DATE, delivery_address STRING, payment_method STRING REPLACE, discount_rate DECIMAL(3, 2) MIN, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP ) AGGREGATE KEY (order_id, customer_id, product_id) DISTRIBUTED BY HASH (order_id) PROPERTIES ( 'replication_num' = '1', 'storage_medium' = 'SSD' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_advanced_aggregate.yml000066400000000000000000000105751503426445100301060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fb654a26ce80d5fa3e8afd0e30be3766909556bddbd5273c65f948c094062c54 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: advanced_aggregate_test - bracketed: - start_bracket: ( - column_definition: naked_identifier: order_id data_type: data_type_identifier: BIGINT - comma: ',' - column_definition: naked_identifier: customer_id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: product_id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: quantity data_type: data_type_identifier: INT keyword: SUM - comma: ',' - column_definition: naked_identifier: unit_price data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) keyword: MAX - comma: ',' - column_definition: naked_identifier: total_amount data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '12' - comma: ',' - numeric_literal: '2' - end_bracket: ) keyword: SUM - comma: ',' - column_definition: naked_identifier: order_status data_type: data_type_identifier: STRING keyword: REPLACE - comma: ',' - column_definition: naked_identifier: order_date data_type: data_type_identifier: DATE - comma: ',' - column_definition: naked_identifier: delivery_address data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: payment_method data_type: data_type_identifier: STRING keyword: REPLACE - comma: ',' - column_definition: naked_identifier: discount_rate data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '2' - end_bracket: ) keyword: MIN - comma: ',' - column_definition: - naked_identifier: created_at - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: updated_at - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_id - comma: ',' - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: product_id - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: order_id end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - comma: ',' - quoted_literal: "'storage_medium'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SSD'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_aggregate_functions.sql000066400000000000000000000005151503426445100303400ustar00rootroot00000000000000CREATE TABLE aggregate_functions_test ( id INT, value1 INT MAX, value2 INT MIN, value3 STRING REPLACE, value4 DECIMAL(10, 2) SUM, value5 BITMAP BITMAP_UNION, value6 HLL HLL_UNION, value7 QUANTILE QUANTILE_UNION ) AGGREGATE KEY (id) DISTRIBUTED BY HASH (id) PROPERTIES ( 'replication_num' = '1' ); 
sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_aggregate_functions.yml000066400000000000000000000053601503426445100303450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 853234424d60d72c6a3c6b24afab2fceadaedff96f6cf3c97e28d1962afcce13 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: aggregate_functions_test - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: value1 data_type: data_type_identifier: INT keyword: MAX - comma: ',' - column_definition: naked_identifier: value2 data_type: data_type_identifier: INT keyword: MIN - comma: ',' - column_definition: naked_identifier: value3 data_type: data_type_identifier: STRING keyword: REPLACE - comma: ',' - column_definition: naked_identifier: value4 data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) keyword: SUM - comma: ',' - column_definition: naked_identifier: value5 data_type: data_type_identifier: BITMAP keyword: BITMAP_UNION - comma: ',' - column_definition: naked_identifier: value6 data_type: data_type_identifier: HLL keyword: HLL_UNION - comma: ',' - column_definition: naked_identifier: value7 data_type: data_type_identifier: QUANTILE keyword: QUANTILE_UNION - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_aggregate_key.sql000066400000000000000000000001771503426445100271240ustar00rootroot00000000000000CREATE TABLE t2 ( c1 INT, c2 INT MAX ) AGGREGATE KEY(c1) DISTRIBUTED BY HASH(c1) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_aggregate_key.yml000066400000000000000000000027671503426445100271350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b446ad30f00f859d09f70dc85f2ebbad8a28b826a790664b75867cd596b8c757 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT keyword: MAX - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_basic.sql000066400000000000000000000000461503426445100274240ustar00rootroot00000000000000CREATE TABLE t10 AS SELECT * FROM t1; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_basic.yml000066400000000000000000000017341503426445100274330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb1ba646f47615c224ef544aa5d23b46379c83b1d697f073c1998bb0a686c6d3 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t10 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_complex.sql000066400000000000000000000002651503426445100300150ustar00rootroot00000000000000CREATE TABLE t10 PROPERTIES ( 'replication_num' = '1', 'storage_medium' = 'SSD' ) AS SELECT id, name, COUNT(*) as count FROM t1 WHERE status = 'active' GROUP BY id, name; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_complex.yml000066400000000000000000000045131503426445100300170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 36089cc1ee20bc0a9d01830812e310d908d35c33853ea2d5a0e0a4dd8faed8d3 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t10 - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - comma: ',' - quoted_literal: "'storage_medium'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SSD'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'active'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_if_not_exists.sql000066400000000000000000000001351503426445100312170ustar00rootroot00000000000000CREATE TABLE IF NOT EXISTS t10 PROPERTIES ( 'replication_num' = '1' ) AS SELECT * FROM t1; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_if_not_exists.yml000066400000000000000000000024001503426445100312160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da202fec1f339a84f01f6ce505e3cfab23c54ec5d83d86085c494dc68b967c6d file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t10 - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_with_properties.sql000066400000000000000000000001171503426445100315710ustar00rootroot00000000000000CREATE TABLE t10 PROPERTIES ( 'replication_num' = '1' ) AS SELECT * FROM t1; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_as_select_with_properties.yml000066400000000000000000000023051503426445100315740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 990c1d5d1c72dcb1b3f534f3c098680891af38d7434c7b622ef175b73812bf76 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t10 - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_auto_partition.sql000066400000000000000000000002711503426445100273620ustar00rootroot00000000000000CREATE TABLE t7 ( c1 INT, c2 DATETIME NOT NULL ) DUPLICATE KEY(c1) AUTO PARTITION BY RANGE(date_trunc(c2, 'day')) () DISTRIBUTED BY RANDOM PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_auto_partition.yml000066400000000000000000000040271503426445100273670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df90674aea181ae1dd9980e7ffd9650a9e61e879b8fb5ccb88c84cc243a7c1b6 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t7 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: - naked_identifier: c2 - keyword: DATETIME - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: DUPLICATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - partition_segment: - keyword: AUTO - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( function: function_name: function_name_identifier: date_trunc function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c2 - comma: ',' - expression: quoted_literal: "'day'" - end_bracket: ) end_bracket: ) - bracketed: start_bracket: ( end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: RANDOM - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_bitmap_hll_test.sql000066400000000000000000000005321503426445100274730ustar00rootroot00000000000000CREATE TABLE bitmap_hll_test ( user_id INT, page_views BITMAP BITMAP_UNION, unique_visitors HLL HLL_UNION, session_duration QUANTILE QUANTILE_UNION, page_id INT, visit_time DATETIME, user_agent STRING REPLACE ) AGGREGATE KEY (user_id, page_id) DISTRIBUTED BY HASH (user_id) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_bitmap_hll_test.yml000066400000000000000000000046231503426445100275020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 568ab89c808d2cbcca685052897ab280f162437d910ce0460062207884607de3 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: bitmap_hll_test - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: page_views data_type: data_type_identifier: BITMAP keyword: BITMAP_UNION - comma: ',' - column_definition: naked_identifier: unique_visitors data_type: data_type_identifier: HLL keyword: HLL_UNION - comma: ',' - column_definition: naked_identifier: session_duration data_type: data_type_identifier: QUANTILE keyword: QUANTILE_UNION - comma: ',' - column_definition: naked_identifier: page_id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: visit_time keyword: DATETIME - comma: ',' - column_definition: naked_identifier: user_agent data_type: data_type_identifier: STRING keyword: REPLACE - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: page_id - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_duplicate_key.sql000066400000000000000000000001761503426445100271470ustar00rootroot00000000000000CREATE TABLE t1 ( c1 INT, c2 STRING ) DUPLICATE KEY(c1) DISTRIBUTED BY HASH(c1) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_duplicate_key.yml000066400000000000000000000027431503426445100271530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c8538a30a937b6b1220751561fccd25bc19fdfd9330a584868493b18f56651f4 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: STRING - end_bracket: ) - keyword: DUPLICATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_generated_column.sql000066400000000000000000000002301503426445100276270ustar00rootroot00000000000000CREATE TABLE t4 ( c1 INT, c2 INT GENERATED ALWAYS AS (c1 + 1) ) DUPLICATE KEY(c1) DISTRIBUTED BY HASH(c1) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_generated_column.yml000066400000000000000000000035261503426445100276440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cefc16196f63480f8dbda9bb58e3742cf749460d3f3c8b626d8622ce3da9b759 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t4 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 binary_operator: + numeric_literal: '1' end_bracket: ) - end_bracket: ) - keyword: DUPLICATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_basic.sql000066400000000000000000000000541503426445100264050ustar00rootroot00000000000000CREATE TABLE new_table LIKE existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_basic.yml000066400000000000000000000012071503426445100264100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 49b660c60f7c4924f7d08562465b7e4c1ae78f9921e89146026af73ad9f63062 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: new_table - keyword: LIKE - table_reference: naked_identifier: existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_database_qualified.sql000066400000000000000000000000641503426445100311140ustar00rootroot00000000000000CREATE TABLE db1.new_table LIKE db2.existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_database_qualified.yml000066400000000000000000000013411503426445100311150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2536278c458c2d58738fa5bf20e483457a888c30c814a4184a358d567d33818 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db1 - dot: . - naked_identifier: new_table - keyword: LIKE - table_reference: - naked_identifier: db2 - dot: . - naked_identifier: existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_external.sql000066400000000000000000000001071503426445100271450ustar00rootroot00000000000000CREATE EXTERNAL TABLE external_new_table LIKE external_existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_external.yml000066400000000000000000000012611503426445100271510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 23cbea8eb85c417cf2b79bd279873177337edd6b1a3cc830f61237731feb3d19 file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: external_new_table - keyword: LIKE - table_reference: naked_identifier: external_existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_if_not_exists.sql000066400000000000000000000000721503426445100302010ustar00rootroot00000000000000CREATE TABLE IF NOT EXISTS new_table LIKE existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_if_not_exists.yml000066400000000000000000000013021503426445100302000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c3549bda549fb74438dd0921f21bcd8ec0dc8825f6b2fd00dd98465aaa3a977c file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: new_table - keyword: LIKE - table_reference: naked_identifier: existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_temporary.sql000066400000000000000000000001001503426445100273360ustar00rootroot00000000000000CREATE TEMPORARY TABLE temp_new_table LIKE temp_existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_temporary.yml000066400000000000000000000012521503426445100273510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 05fee72d4fc1955e410594908565b8aa90e09c39e1c5453d6a18d95cb9bf2927 file: statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: temp_new_table - keyword: LIKE - table_reference: naked_identifier: temp_existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_temporary_external.sql000066400000000000000000000001331503426445100312460ustar00rootroot00000000000000CREATE TEMPORARY EXTERNAL TABLE temp_external_new_table LIKE temp_external_existing_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_like_temporary_external.yml000066400000000000000000000013241503426445100312530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d8a7a9c7aeffeddf3b48715fd1e598fd0f9ec69e7723773b93d38f4585fd481f file: statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: temp_external_new_table - keyword: LIKE - table_reference: naked_identifier: temp_external_existing_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_list_partition.sql000066400000000000000000000003261503426445100273660ustar00rootroot00000000000000CREATE TABLE t9 ( c1 INT, c2 DATE NOT NULL ) DUPLICATE KEY(c1) PARTITION BY LIST(c2) ( PARTITION p1 VALUES IN (('2020-01-01'),('2020-01-02')) ) DISTRIBUTED BY RANDOM PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_list_partition.yml000066400000000000000000000044271503426445100273760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: eafe76ab23aebced056a220f072dd232b7d212cc2b9730fd7bb69ce0a43d717f file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t9 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: DUPLICATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - partition_segment: - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: c2 end_bracket: ) - bracketed: start_bracket: ( list_partition_definition: - keyword: PARTITION - object_reference: naked_identifier: p1 - keyword: VALUES - keyword: IN - bracketed: - start_bracket: ( - bracketed: start_bracket: ( quoted_literal: "'2020-01-01'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'2020-01-02'" end_bracket: ) - end_bracket: ) end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: RANDOM - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_mixed_aggregate.sql000066400000000000000000000005101503426445100274310ustar00rootroot00000000000000CREATE TABLE mixed_aggregate_test ( user_id INT, username STRING, age INT, score DECIMAL(5, 2) MAX, last_login DATETIME, login_count INT SUM, user_tags STRING REPLACE, is_active BOOLEAN ) AGGREGATE KEY (user_id, username) DISTRIBUTED BY HASH (user_id) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_mixed_aggregate.yml000066400000000000000000000053031503426445100274400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ff36d3df4c293bc5e3605101a2dd2611b99a09b5bfe2b7fdca659115f9092a2a file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: mixed_aggregate_test - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: username data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: score data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) keyword: MAX - comma: ',' - column_definition: naked_identifier: last_login keyword: DATETIME - comma: ',' - column_definition: naked_identifier: login_count data_type: data_type_identifier: INT keyword: SUM - comma: ',' - column_definition: naked_identifier: user_tags data_type: data_type_identifier: STRING keyword: REPLACE - comma: ',' - column_definition: naked_identifier: is_active data_type: data_type_identifier: BOOLEAN - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: username - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_range_partition.sql000066400000000000000000000003311503426445100275030ustar00rootroot00000000000000CREATE TABLE t8 ( c1 INT, c2 DATETIME NOT NULL ) DUPLICATE KEY(c1) PARTITION BY RANGE(c2) ( FROM ('2020-01-01') TO ('2020-01-10') INTERVAL 1 DAY ) DISTRIBUTED BY RANDOM PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_range_partition.yml000066400000000000000000000041101503426445100275040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0da06ad10c8235daf84446d1672b93696dd4e8180223a0f017ffcbced7a92820 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t8 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: - naked_identifier: c2 - keyword: DATETIME - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: DUPLICATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - partition_segment: - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: c2 end_bracket: ) - bracketed: start_bracket: ( range_partition_interval: - keyword: FROM - bracketed: start_bracket: ( quoted_literal: "'2020-01-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( quoted_literal: "'2020-01-10'" end_bracket: ) - keyword: INTERVAL - numeric_literal: '1' - keyword: DAY end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: RANDOM - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_simple_aggregate.sql000066400000000000000000000003011503426445100276120ustar00rootroot00000000000000CREATE TABLE simple_aggregate_test ( id INT, name STRING, count INT SUM, value INT MAX ) AGGREGATE KEY (id) DISTRIBUTED BY HASH (id) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_simple_aggregate.yml000066400000000000000000000034741503426445100276320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ccce122a6ed0c64b3625c6ec839d32b4d5d719b1059e46f13ec5c07fc955973f file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: simple_aggregate_test - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: count data_type: data_type_identifier: INT keyword: SUM - comma: ',' - column_definition: naked_identifier: value data_type: data_type_identifier: INT keyword: MAX - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_unique_key.sql000066400000000000000000000001701503426445100264750ustar00rootroot00000000000000CREATE TABLE t3 ( c1 INT, c2 INT ) UNIQUE KEY(c1) DISTRIBUTED BY HASH(c1) PROPERTIES ( 'replication_num' = '1' ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_unique_key.yml000066400000000000000000000027351503426445100265100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a9422ecb05f13e9429be0cfbae0e2e314ab07c5abe54d04202f2c09d354aed6 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t3 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT - end_bracket: ) - keyword: UNIQUE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'replication_num'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_with_index.sql000066400000000000000000000004571503426445100264710ustar00rootroot00000000000000CREATE TABLE example_db.table_hash ( k1 TINYINT, k2 DECIMAL(10, 2) DEFAULT "10.5", v1 CHAR(10) REPLACE, v2 INT SUM, INDEX k1_idx (k1) USING INVERTED COMMENT 'my first index' ) AGGREGATE KEY(k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 32 PROPERTIES ( "bloom_filter_columns" = "k2" ); sqlfluff-3.4.2/test/fixtures/dialects/doris/create_table_with_index.yml000066400000000000000000000055501503426445100264720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f13409b5262df5eaa738300c9bc5a129ce4581f43bfc7edae122fb1ef64573fb file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: example_db - dot: . - naked_identifier: table_hash - bracketed: - start_bracket: ( - column_definition: naked_identifier: k1 data_type: data_type_identifier: TINYINT - comma: ',' - column_definition: naked_identifier: k2 data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: '"10.5"' - comma: ',' - column_definition: naked_identifier: v1 data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) keyword: REPLACE - comma: ',' - column_definition: naked_identifier: v2 data_type: data_type_identifier: INT keyword: SUM - comma: ',' - index_definition: - keyword: INDEX - index_reference: naked_identifier: k1_idx - bracketed: start_bracket: ( column_reference: naked_identifier: k1 end_bracket: ) - keyword: USING - keyword: INVERTED - keyword: COMMENT - quoted_literal: "'my first index'" - end_bracket: ) - keyword: AGGREGATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: k1 - comma: ',' - column_reference: naked_identifier: k2 - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: k1 end_bracket: ) - keyword: BUCKETS - numeric_literal: '32' - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"bloom_filter_columns"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"k2"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_basic.sql000066400000000000000000000000251503426445100251000ustar00rootroot00000000000000DROP TABLE my_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_basic.yml000066400000000000000000000010561503426445100251070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ba79bbc260a7182409a6535d8dcb6ab6fb384cdfc0ebecbb3edae507f1f0ce3 file: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_database_qualified.sql000066400000000000000000000000451503426445100276100ustar00rootroot00000000000000DROP TABLE database_name.table_name; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_database_qualified.yml000066400000000000000000000011471503426445100276160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1a02f6073df3f704b4f168eceb16698c6cd40cd9f128dd45160c256d16fa24ae file: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: database_name - dot: . - naked_identifier: table_name statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_force.sql000066400000000000000000000000601503426445100251140ustar00rootroot00000000000000DROP TABLE IF EXISTS example_db.my_table FORCE; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_force.yml000066400000000000000000000012371503426445100251250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4808130773698a3a224dd11f7195b809b9f13ab1b1df2f2fa4c734ffdaaf7e5 file: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: example_db - dot: . - naked_identifier: my_table - keyword: FORCE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_if_exists.sql000066400000000000000000000000521503426445100260140ustar00rootroot00000000000000DROP TABLE IF EXISTS example_db.my_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/drop_table_if_exists.yml000066400000000000000000000012121503426445100260150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 07a2f6a0dc3fadd627cec2ad9869a861868aee54d83b05b75774c3d16c2128fc file: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: example_db - dot: . - naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_basic.sql000066400000000000000000000000401503426445100242660ustar00rootroot00000000000000INSERT INTO test VALUES (1, 2); sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_basic.yml000066400000000000000000000013561503426445100243030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cbd6300bf5e9096de9c6592f37e9268bd8848d6baf5a34439a1ebe14aa01f321 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_complex.sql000066400000000000000000000001321503426445100246560ustar00rootroot00000000000000INSERT INTO test PARTITION(p1, p2) WITH LABEL label1 (c1, c2) SELECT id, name FROM test2; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_complex.yml000066400000000000000000000027301503426445100246660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b78a238890b5aced321cc8b8a5277ca72e8b21afe4aacf66d7ec71f79bfbc8ac file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - keyword: PARTITION - bracketed: - start_bracket: ( - naked_identifier: p1 - comma: ',' - naked_identifier: p2 - end_bracket: ) - keyword: WITH - keyword: LABEL - naked_identifier: label1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_hive_table.sql000066400000000000000000000001101503426445100253050ustar00rootroot00000000000000INSERT INTO hive_catalog.hive_db.hive_table SELECT * FROM source_table; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_hive_table.yml000066400000000000000000000020731503426445100253210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 587a222a19a23fb651f2d97ba871c29692a69ee821b1dddaf35a1895fdc2c031 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: hive_catalog - dot: . - naked_identifier: hive_db - dot: . 
- naked_identifier: hive_table - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_multiple_values.sql000066400000000000000000000000541503426445100264240ustar00rootroot00000000000000INSERT INTO test VALUES (1, 2), (3, 2 + 2); sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_multiple_values.yml000066400000000000000000000017631503426445100264360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 651f0d7691567f5ceb9350c93051802d99a34bc7c84e8aa58b6414abd21815ec file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '3' comma: ',' expression: - numeric_literal: '2' - binary_operator: + - numeric_literal: '2' end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_select.sql000066400000000000000000000000461503426445100244720ustar00rootroot00000000000000INSERT INTO test SELECT * FROM test2; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_select.yml000066400000000000000000000017071503426445100245010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8853368bde18656c5df94165982d70da02ec461c95ad755879416ccb4e64493 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_columns.sql000066400000000000000000000000511503426445100257220ustar00rootroot00000000000000INSERT INTO test (c1, c2) VALUES (1, 2); sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_columns.yml000066400000000000000000000016641503426445100257370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ba2de8e800aaaee508cb25de56bb0fdd9972d0376d98379a54a00d25ce680346 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_default.sql000066400000000000000000000000571503426445100256740ustar00rootroot00000000000000INSERT INTO test (c1, c2) VALUES (1, DEFAULT); sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_default.yml000066400000000000000000000016601503426445100256770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5ab1d10c53ba1664300c57eee966fa19853e640b8943c823c9d911a1b5868884 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '1' comma: ',' keyword: DEFAULT end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_label.sql000066400000000000000000000000701503426445100253220ustar00rootroot00000000000000INSERT INTO test WITH LABEL label1 SELECT * FROM test2; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_label.yml000066400000000000000000000020171503426445100253270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f3c88ad529032400d43dc90dee5536c540642188306bcc72f2662bb3d8fbe187 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - keyword: WITH - keyword: LABEL - naked_identifier: label1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_partition.sql000066400000000000000000000000701503426445100262540ustar00rootroot00000000000000INSERT INTO test PARTITION(p1, p2) SELECT * FROM test2; sqlfluff-3.4.2/test/fixtures/dialects/doris/insert_with_partition.yml000066400000000000000000000021561503426445100262650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0bb7bd9aba9441bdb58344e9e20cd87c146d15ecbc948b035230973b9180757b file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - keyword: PARTITION - bracketed: - start_bracket: ( - naked_identifier: p1 - comma: ',' - naked_identifier: p2 - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/000077500000000000000000000000001503426445100212225ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/duckdb/.sqlfluff000066400000000000000000000000341503426445100230420ustar00rootroot00000000000000[sqlfluff] dialect = duckdb sqlfluff-3.4.2/test/fixtures/dialects/duckdb/anti_semi_join.sql000066400000000000000000000005701503426445100247340ustar00rootroot00000000000000SELECT cars.name, cars.manufacturer FROM cars SEMI JOIN region ON cars.region = region.id; SELECT cars.name, cars.manufacturer FROM cars ANTI JOIN safety_data ON cars.safety_report_id = safety_data.report_id; SELECT cars.name, cars.manufacturer FROM cars SEMI JOIN region USING (region_id); SELECT cars.name, cars.manufacturer FROM cars ANTI JOIN region USING (region_id); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/anti_semi_join.yml000066400000000000000000000116471503426445100247450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1c574210a093244da340d0d2579eb64c78c83f678b1c2e34a9d1bc63889680b6 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cars join_clause: - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: region - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cars - dot: . - naked_identifier: region - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: region - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cars join_clause: - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: safety_data - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cars - dot: . 
- naked_identifier: safety_report_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: safety_data - dot: . - naked_identifier: report_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cars join_clause: - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: region - keyword: USING - bracketed: start_bracket: ( naked_identifier: region_id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: cars - dot: . - naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cars join_clause: - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: region - keyword: USING - bracketed: start_bracket: ( naked_identifier: region_id end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/asof_join.sql000066400000000000000000000021371503426445100237150ustar00rootroot00000000000000SELECT t.*, p.price FROM trades AS t ASOF JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; SELECT * FROM trades AS t ASOF LEFT JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; SELECT * FROM trades AS t ASOF RIGHT JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; SELECT * FROM trades AS t ASOF FULL OUTER JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; SELECT * FROM trades AS t ASOF ANTI JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; SELECT * FROM trades AS t ASOF SEMI JOIN prices AS p ON t.symbol = p.symbol AND t.when >= p.when; -- ASOF joins can also specify join conditions on matching column names with -- the USING syntax, but the last attribute in the list must be the inequality, -- which will be greater than or equal to (>=): SELECT * FROM trades ASOF JOIN prices USING (symbol, "when"); -- Returns symbol, trades.when, price (but NOT prices.when) SELECT t.symbol, t.when AS trade_when, p.when AS price_when, price FROM trades AS t ASOF LEFT JOIN prices AS p USING (symbol, "when"); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/asof_join.yml000066400000000000000000000337441503426445100237270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 46f150e52902c99b78e0193d55da8fad77bb1e9cc59567434a6a64ab51b32c9d file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: t dot: . star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . 
- naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . 
naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: symbol - binary_operator: AND - column_reference: naked_identifier: t dot: . naked_identifier_all: when - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: p dot: . 
naked_identifier_all: when - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices - keyword: USING - bracketed: start_bracket: ( naked_identifier: symbol comma: ',' quoted_identifier: '"when"' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: symbol - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . naked_identifier_all: when alias_expression: alias_operator: keyword: AS naked_identifier: trade_when - comma: ',' - select_clause_element: column_reference: naked_identifier: p dot: . naked_identifier_all: when alias_expression: alias_operator: keyword: AS naked_identifier: price_when - comma: ',' - select_clause_element: column_reference: naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: alias_operator: keyword: AS naked_identifier: t join_clause: - keyword: ASOF - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: prices alias_expression: alias_operator: keyword: AS naked_identifier: p - keyword: USING - bracketed: start_bracket: ( naked_identifier: symbol comma: ',' quoted_identifier: '"when"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_macro.sql000066400000000000000000000021631503426445100243710ustar00rootroot00000000000000CREATE MACRO add(a, b) AS a + b; CREATE MACRO ifelse(a, b, c) AS CASE WHEN a THEN b ELSE c END; CREATE MACRO one() AS (SELECT 1); CREATE MACRO plus_one(a) AS ( WITH cte AS (SELECT 1 AS a) SELECT cte.a + cte.a FROM cte ); CREATE FUNCTION main.my_avg(x) AS sum(x) / count(x); CREATE MACRO add_default(a, b := 5) AS a + b; CREATE MACRO arr_append(l, e) AS list_concat(l, list_value(e)); CREATE MACRO static_table() AS TABLE SELECT 'Hello' AS column1, 'World' AS column2; CREATE MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2; CREATE OR REPLACE TEMP MACRO dynamic_table(col1_value, col2_value) AS TABLE SELECT col1_value AS column1, col2_value AS column2 UNION ALL SELECT 'Hello' AS col1_value, 456 AS col2_value; CREATE MACRO get_users(i) AS TABLE SELECT * FROM users WHERE uid IN (SELECT unnest(i)); CREATE OR REPLACE MACRO list_builder( col1, col2 ) AS CASE WHEN col1 AND col2 THEN ['x', 'y'] WHEN col1 THEN ['x'] WHEN col2 THEN ['y'] ELSE [] END; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_macro.yml000066400000000000000000000341141503426445100243740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 51ad73837eb1f5708e115d03bae6647b31699d4a0dbb160e204a60e2e65935da file: - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: a - comma: ',' - data_type: data_type_identifier: b - end_bracket: ) - keyword: AS - expression: - column_reference: naked_identifier: a - binary_operator: + - column_reference: naked_identifier: b - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: ifelse - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: a - comma: ',' - data_type: data_type_identifier: b - comma: ',' - data_type: data_type_identifier: c - end_bracket: ) - keyword: AS - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: a - keyword: THEN - expression: column_reference: naked_identifier: b - else_clause: keyword: ELSE expression: column_reference: naked_identifier: c - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: one - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: plus_one - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: a end_bracket: ) - keyword: AS - expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: - naked_identifier: cte - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: cte - dot: . - naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: naked_identifier: main dot: . 
function_name_identifier: my_avg - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: x end_bracket: ) - keyword: AS - expression: - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - binary_operator: / - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: add_default - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: a - comma: ',' - data_type: data_type_identifier: b - assignment_operator: := - expression: numeric_literal: '5' - end_bracket: ) - keyword: AS - expression: - column_reference: naked_identifier: a - binary_operator: + - column_reference: naked_identifier: b - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: arr_append - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: l - comma: ',' - data_type: data_type_identifier: e - end_bracket: ) - keyword: AS - expression: function: function_name: function_name_identifier: list_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: l - comma: ',' - expression: function: function_name: function_name_identifier: list_value function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: e end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: static_table - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - keyword: TABLE - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Hello'" alias_expression: alias_operator: keyword: AS naked_identifier: column1 - comma: ',' - select_clause_element: quoted_literal: "'World'" alias_expression: alias_operator: keyword: AS naked_identifier: column2 - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: dynamic_table - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: col1_value - comma: ',' - data_type: data_type_identifier: col2_value - end_bracket: ) - keyword: AS - keyword: TABLE - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1_value alias_expression: alias_operator: keyword: AS naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2_value alias_expression: alias_operator: keyword: AS naked_identifier: column2 - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMP - keyword: MACRO - function_name: function_name_identifier: dynamic_table - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: col1_value - comma: ',' - data_type: data_type_identifier: col2_value - end_bracket: ) - keyword: AS - keyword: TABLE - set_expression: - select_statement: select_clause: - keyword: SELECT - 
select_clause_element: column_reference: naked_identifier: col1_value alias_expression: alias_operator: keyword: AS naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2_value alias_expression: alias_operator: keyword: AS naked_identifier: column2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Hello'" alias_expression: alias_operator: keyword: AS naked_identifier: col1_value - comma: ',' - select_clause_element: numeric_literal: '456' alias_expression: alias_operator: keyword: AS naked_identifier: col2_value - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: MACRO - function_name: function_name_identifier: get_users - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: i end_bracket: ) - keyword: AS - keyword: TABLE - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: users where_clause: keyword: WHERE expression: column_reference: naked_identifier: uid keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MACRO - function_name: function_name_identifier: list_builder - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: col1 - comma: ',' - data_type: data_type_identifier: col2 - end_bracket: ) - keyword: AS - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: col1 - binary_operator: AND - column_reference: naked_identifier: col2 - keyword: THEN - expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'y'" - end_square_bracket: ']' - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: col1 - keyword: THEN - expression: array_literal: start_square_bracket: '[' quoted_literal: "'x'" end_square_bracket: ']' - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: col2 - keyword: THEN - expression: array_literal: start_square_bracket: '[' quoted_literal: "'y'" end_square_bracket: ']' - else_clause: keyword: ELSE expression: array_literal: start_square_bracket: '[' end_square_bracket: ']' - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_table.sql000066400000000000000000000055551503426445100243670ustar00rootroot00000000000000/* Examples from https://duckdb.org/docs/sql/statements/create_table */ -- create a table with two integer columns (i and j) CREATE TABLE t1 (i INTEGER, j INTEGER); -- create a table with a primary key CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR); -- create a table with a composite primary key CREATE TABLE t1 (id INTEGER, j VARCHAR, PRIMARY KEY (id, j)); -- create a table with various different types and constraints CREATE TABLE t1 ( i INTEGER NOT NULL, decimalnr DOUBLE CHECK (decimalnr < 10), date DATE UNIQUE, time TIMESTAMP ); -- 
create a table from the result of a query CREATE TABLE t1 AS SELECT 42 AS i, 84 AS j; -- create a table from a CSV file using AUTO-DETECT (i.e., automatically detecting column names and types) CREATE TABLE t1 AS SELECT * FROM read_csv_auto('path/file.csv'); -- we can use the FROM-first syntax to omit 'SELECT *' CREATE TABLE t1 AS FROM read_csv_auto('path/file.csv'); -- create a temporary table from a CSV file (automatically detecting column names and types) CREATE TEMP TABLE t1 AS SELECT * FROM read_csv('path/file.csv'); -- create a table with two integer columns (i and j) even if t1 already exists CREATE OR REPLACE TABLE t1 (i INTEGER, j INTEGER); -- create a table with two integer columns (i and j) only if t1 does not exist yet. CREATE TABLE IF NOT EXISTS t1 (i INTEGER, j INTEGER); CREATE TABLE t1 ( id INTEGER PRIMARY KEY, percentage INTEGER CHECK (0 <= percentage AND percentage <= 100) ); CREATE TABLE t2 (id INTEGER PRIMARY KEY, x INTEGER, y INTEGER CHECK (x < y)); CREATE TABLE t3 ( id INTEGER PRIMARY KEY, x INTEGER, y INTEGER, CONSTRAINT x_smaller_than_y CHECK (x < y) ); CREATE TABLE t1 (id INTEGER PRIMARY KEY, j VARCHAR); CREATE TABLE t2 ( id INTEGER PRIMARY KEY, t1_id INTEGER, FOREIGN KEY (t1_id) REFERENCES t1 (id) ); CREATE TABLE t3 (id INTEGER, j VARCHAR, PRIMARY KEY (id, j)); CREATE TABLE t4 ( id INTEGER PRIMARY KEY, t3_id INTEGER, t3_j VARCHAR, FOREIGN KEY (t3_id, t3_j) REFERENCES t3 (id, j) ); CREATE TABLE t5 (id INTEGER UNIQUE, j VARCHAR); CREATE TABLE t6 ( id INTEGER PRIMARY KEY, t5_id INTEGER, FOREIGN KEY (t5_id) REFERENCES t5 (id) ); -- The simplest syntax for a generated column. -- The type is derived from the expression, and the variant defaults to VIRTUAL CREATE TABLE t1 (x FLOAT, two_x AS (2 * x)); -- Fully specifying the same generated column for completeness CREATE TABLE t1 (x FLOAT, two_x FLOAT GENERATED ALWAYS AS (2 * x) VIRTUAL); CREATE TABLE t_values AS VALUES (1); CREATE TABLE t_dflt_int (id INTEGER DEFAULT 0); CREATE OR REPLACE TABLE t_dflt_dt (dt DATE DEFAULT CURRENT_DATE); CREATE TABLE t ( s STRUCT( val STRING ) ); CREATE TABLE t ( s STRUCT( val STRING, s1 STRUCT( ival INT, jval INT ) ) ); CREATE TABLE map (tags MAP(VARCHAR, VARCHAR)); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_table.yml000066400000000000000000000513211503426445100243610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 98635797932ff880a2b9e764657fcf5384d5102ccc0aac1632c97055c71d4a6b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: INTEGER - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: VARCHAR - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: VARCHAR - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: j - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - data_type: keyword: INTEGER - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: decimalnr - data_type: data_type_identifier: DOUBLE - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: decimalnr comparison_operator: raw_comparison_operator: < numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: date - data_type: datetime_type_identifier: keyword: DATE - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: time - data_type: datetime_type_identifier: keyword: TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '42' alias_expression: alias_operator: keyword: AS naked_identifier: i - comma: ',' - select_clause_element: numeric_literal: '84' alias_expression: alias_operator: keyword: AS naked_identifier: j - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_csv_auto function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'path/file.csv'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: 
read_csv_auto function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'path/file.csv'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: read_csv function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'path/file.csv'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: INTEGER - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: INTEGER - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: percentage - data_type: keyword: INTEGER - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - numeric_literal: '0' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: percentage - binary_operator: AND - column_reference: naked_identifier: percentage - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '100' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: x - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: y - data_type: keyword: INTEGER - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: y end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t3 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: x - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: y - data_type: keyword: INTEGER - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: 
naked_identifier: x_smaller_than_y - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: y end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: VARCHAR - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: t1_id - data_type: keyword: INTEGER - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: t1_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t3 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: VARCHAR - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: j - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t4 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: t3_id - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: t3_j - data_type: keyword: VARCHAR - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: t3_id - comma: ',' - column_reference: naked_identifier: t3_j - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t3 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: j - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t5 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: VARCHAR - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t6 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - 
column_reference: naked_identifier: t5_id - data_type: keyword: INTEGER - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: t5_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t5 - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: x - data_type: keyword: FLOAT - comma: ',' - column_reference: naked_identifier: two_x - keyword: AS - bracketed: start_bracket: ( expression: numeric_literal: '2' binary_operator: '*' column_reference: naked_identifier: x end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: x - data_type: keyword: FLOAT - comma: ',' - column_reference: naked_identifier: two_x - data_type: keyword: FLOAT - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: numeric_literal: '2' binary_operator: '*' column_reference: naked_identifier: x end_bracket: ) - keyword: VIRTUAL - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_values - keyword: AS - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_dflt_int - bracketed: start_bracket: ( column_reference: naked_identifier: id data_type: keyword: INTEGER column_constraint_segment: keyword: DEFAULT numeric_literal: '0' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t_dflt_dt - bracketed: start_bracket: ( column_reference: naked_identifier: dt data_type: datetime_type_identifier: keyword: DATE column_constraint_segment: keyword: DEFAULT expression: bare_function: CURRENT_DATE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: s data_type: struct_type: keyword: STRUCT struct_type_schema: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: STRING end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: s data_type: struct_type: keyword: STRUCT struct_type_schema: bracketed: - start_bracket: ( - parameter: val - data_type: data_type_identifier: STRING - comma: ',' - parameter: s1 - data_type: struct_type: keyword: STRUCT struct_type_schema: bracketed: - start_bracket: ( - parameter: ival - data_type: keyword: INT - comma: ',' - parameter: jval - data_type: keyword: INT - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: map - bracketed: 
start_bracket: ( column_reference: naked_identifier: tags data_type: map_type: keyword: MAP map_type_schema: bracketed: - start_bracket: ( - data_type: keyword: VARCHAR - comma: ',' - data_type: keyword: VARCHAR - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_type.sql000066400000000000000000000005151503426445100242500ustar00rootroot00000000000000CREATE TYPE mood AS ENUM ('happy', 'sad', 'curious'); CREATE TYPE many_things AS STRUCT(k integer, l varchar); CREATE TYPE one_thing AS UNION (number integer, string varchar); CREATE TYPE x_index AS integer; CREATE TYPE myschema.mytype AS int; CREATE TYPE "myschema".mytype2 AS int; CREATE TYPE myschema.mytype3 AS myschema.mytype2; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_type.yml000066400000000000000000000055651503426445100242640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eafef005a33efa153aad7ff59d412b3810dc381bfffcf22dd08ca5ca9ddc6cd8 file: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: data_type_identifier: mood - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - quoted_literal: "'happy'" - comma: ',' - quoted_literal: "'sad'" - comma: ',' - quoted_literal: "'curious'" - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: data_type_identifier: many_things - keyword: AS - data_type: struct_type: keyword: STRUCT struct_type_schema: bracketed: - start_bracket: ( - parameter: k - data_type: keyword: integer - comma: ',' - parameter: l - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: data_type_identifier: one_thing - keyword: AS - keyword: UNION - struct_type_schema: bracketed: - start_bracket: ( - parameter: number - data_type: keyword: integer - comma: ',' - parameter: string - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: data_type_identifier: x_index - keyword: AS - data_type: keyword: integer - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: naked_identifier: myschema dot: . data_type_identifier: mytype - keyword: AS - data_type: keyword: int - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: quoted_identifier: '"myschema"' dot: . data_type_identifier: mytype2 - keyword: AS - data_type: keyword: int - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - data_type: naked_identifier: myschema dot: . data_type_identifier: mytype3 - keyword: AS - data_type: naked_identifier: myschema dot: . 
data_type_identifier: mytype2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_view.sql000066400000000000000000000002261503426445100242400ustar00rootroot00000000000000CREATE VIEW v1 AS SELECT * FROM tbl; CREATE OR REPLACE VIEW v1 AS SELECT 42; CREATE VIEW v1(a) AS SELECT 42; create view if not exists v as select 1; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/create_view.yml000066400000000000000000000041571503426445100242510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 824d002c72d69c120e1613071d3df9ffeab071884c74997c34adea1cb0af3e00 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: v1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '42' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '42' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - keyword: if - keyword: not - keyword: exists - table_reference: naked_identifier: v - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/double_equal.sql000066400000000000000000000003101503426445100243760ustar00rootroot00000000000000SELECT COALESCE( MAX(CASE WHEN col1 == 'A' THEN cola END), MAX(CASE WHEN col1 == 'B' THEN colb END), MAX(CASE WHEN col1 = 'C' THEN colb END) ) AS result FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/double_equal.yml000066400000000000000000000101071503426445100244050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: eafa45aa6c32ce4ac9b073c9aa553e28a8770036d0dafde5e8d34e2a553c3a94 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: col1 comparison_operator: == quoted_literal: "'A'" - keyword: THEN - expression: column_reference: naked_identifier: cola - keyword: END end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: col1 comparison_operator: == quoted_literal: "'B'" - keyword: THEN - expression: column_reference: naked_identifier: colb - keyword: END end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'C'" - keyword: THEN - expression: column_reference: naked_identifier: colb - keyword: END end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: result from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/drop_macro.sql000066400000000000000000000003331503426445100240670ustar00rootroot00000000000000DROP MACRO mcr; DROP MACRO TABLE mt; DROP FUNCTION mcr; DROP FUNCTION TABLE mt; DROP MACRO IF EXISTS mymacro; DROP MACRO myschema.mymacro; DROP MACRO "myschema".mymacro; DROP MACRO mcr RESTRICT; DROP MACRO mcr CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/drop_macro.yml000066400000000000000000000041541503426445100240760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c6889a81165f66015c75f661e519302019e515f84da023f2b978fb98c6dacf5 file: - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - function_name: function_name_identifier: mcr - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - keyword: TABLE - function_name: function_name_identifier: mt - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: mcr - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: TABLE - function_name: function_name_identifier: mt - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: mymacro - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - function_name: naked_identifier: myschema dot: . 
function_name_identifier: mymacro - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - function_name: quoted_identifier: '"myschema"' dot: . function_name_identifier: mymacro - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - function_name: function_name_identifier: mcr - keyword: RESTRICT - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: MACRO - function_name: function_name_identifier: mcr - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/extract_temporal.sql000066400000000000000000000005521503426445100253220ustar00rootroot00000000000000/* Checks for functions that use `FROM` */ SELECT extract('year' FROM DATE '1992-09-20'); SELECT extract('year' FROM '1992-09-20'::DATE); VALUES (extract('year' FROM DATE '1992-09-20')); SELECT extract('hour' FROM TIMESTAMP '1992-09-20 20:38:48'); SELECT extract('hour' FROM TIMESTAMPTZ '1992-09-20 20:38:48'); SELECT extract('hour' FROM TIME '14:21:13'); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/extract_temporal.yml000066400000000000000000000105131503426445100253220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ace9a534be5724cc9bbd017e062b8c783919ad6afdbbf7a8294d15cd43f84a99 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'year'" - keyword: FROM - expression: datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'1992-09-20'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'year'" - keyword: FROM - expression: cast_expression: quoted_literal: "'1992-09-20'" casting_operator: '::' data_type: datetime_type_identifier: keyword: DATE - end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'year'" - keyword: FROM - expression: datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'1992-09-20'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'hour'" - keyword: FROM - expression: datetime_literal: datetime_type_identifier: keyword: TIMESTAMP quoted_literal: "'1992-09-20 20:38:48'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'hour'" - keyword: FROM - expression: datetime_literal: 
datetime_type_identifier: keyword: TIMESTAMPTZ quoted_literal: "'1992-09-20 20:38:48'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'hour'" - keyword: FROM - expression: datetime_literal: datetime_type_identifier: keyword: TIME quoted_literal: "'14:21:13'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/from_first.sql000066400000000000000000000006171503426445100241210ustar00rootroot00000000000000/* Examples from https://duckdb.org/docs/sql/query_syntax/from */
-- select all columns from the table called "table_name" using the FROM-first syntax
FROM table_name SELECT *;
-- select all columns using the FROM-first syntax and omitting the SELECT clause
FROM table_name;
-- use the FROM-first syntax with WHERE clause and aggregation
FROM range(100) AS t (i) SELECT sum(t.i) WHERE t.i % 2 = 0;
sqlfluff-3.4.2/test/fixtures/dialects/duckdb/from_first.yml000066400000000000000000000051731503426445100241250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 190430f3a5afa718fb93ad59797c5b221d8a3cfddd73145333d3a0d21800083f file: - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: i end_bracket: ) select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: i end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: .
- naked_identifier: i - binary_operator: '%' - numeric_literal: '2' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/group_order_by_all.sql000066400000000000000000000010341503426445100256120ustar00rootroot00000000000000SELECT systems, planets, cities, cantinas, SUM(scum + villainy) as total_scum_and_villainy FROM star_wars_locations GROUP BY ALL
;
SELECT * EXCLUDE (cantinas, booths, scum, villainy), SUM(scum + villainy) as total_scum_and_villainy FROM star_wars_locations GROUP BY ALL
;
SELECT age, sum(civility) as total_civility FROM star_wars_universe GROUP BY ALL ORDER BY ALL
;
SELECT x_wing, proton_torpedoes, --targeting_computer
FROM luke_whats_wrong GROUP BY x_wing, proton_torpedoes,
;
sqlfluff-3.4.2/test/fixtures/dialects/duckdb/group_order_by_all.yml000066400000000000000000000125021503426445100256160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d423200c1532e7effc6c9636645550735fdfd582a41a616a0973c015d5823f2e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: systems - comma: ',' - select_clause_element: column_reference: naked_identifier: planets - comma: ',' - select_clause_element: column_reference: naked_identifier: cities - comma: ',' - select_clause_element: column_reference: naked_identifier: cantinas - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: scum - binary_operator: + - column_reference: naked_identifier: villainy end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: total_scum_and_villainy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_locations groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: cantinas - comma: ',' - column_reference: naked_identifier: booths - comma: ',' - column_reference: naked_identifier: scum - comma: ',' - column_reference: naked_identifier: villainy - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: scum - binary_operator: + - column_reference: naked_identifier: villainy end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: total_scum_and_villainy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_locations groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name:
function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: civility end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: total_civility from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_universe groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL orderby_clause: - keyword: ORDER - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x_wing - comma: ',' - select_clause_element: column_reference: naked_identifier: proton_torpedoes - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: luke_whats_wrong groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: x_wing - comma: ',' - column_reference: naked_identifier: proton_torpedoes - comma: ',' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/insert.sql000066400000000000000000000015331503426445100232510ustar00rootroot00000000000000INSERT INTO tbl VALUES (1), (2), (3); INSERT INTO tbl SELECT col1, col2, col3 FROM other_tbl; INSERT OR REPLACE INTO tbl (i) VALUES (1); INSERT OR IGNORE INTO tbl (i) VALUES (1); INSERT INTO tbl BY POSITION VALUES (5, 42); INSERT INTO tbl BY NAME (SELECT 22 AS b); INSERT INTO tbl VALUES (1, 84) ON CONFLICT DO NOTHING; INSERT INTO tbl VALUES (1, 52), (1, 62) ON CONFLICT DO UPDATE SET j = EXCLUDED.j; INSERT INTO tbl BY NAME ( SELECT 1 AS i, 336 AS j ) ON CONFLICT DO UPDATE SET j = EXCLUDED.j; INSERT INTO t1 VALUES (1), (2), (3) RETURNING *; CREATE TABLE t1 (i INTEGER); INSERT INTO t1 VALUES (1), (2), (3); CREATE TABLE t2 (i INTEGER); INSERT INTO t2 SELECT i FROM t1 WHERE i IN (1, 3) ON CONFLICT DO UPDATE SET j = EXCLUDED.j RETURNING *; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/insert.yml000066400000000000000000000227401503426445100232560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1b2ed34ff8f71346b47ad9d51a0b7565972c6dd558dea3ad13b7c3b40873af07 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_tbl - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: REPLACE - keyword: INTO - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: IGNORE - keyword: INTO - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - keyword: BY - keyword: POSITION - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '42' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - keyword: BY - keyword: NAME - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '22' alias_expression: alias_operator: keyword: AS naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '84' - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '52' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '62' - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: j - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: EXCLUDED - dot: . 
- naked_identifier: j - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl - keyword: BY - keyword: NAME - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: i - comma: ',' - select_clause_element: numeric_literal: '336' alias_expression: alias_operator: keyword: AS naked_identifier: j end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: j - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: EXCLUDED - dot: . - naked_identifier: j - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - keyword: RETURNING - star: '*' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_reference: naked_identifier: i data_type: keyword: INTEGER end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( column_reference: naked_identifier: i data_type: keyword: INTEGER end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t2 - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: i from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: i keyword: IN bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: j - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: EXCLUDED - dot: . 
- naked_identifier: j - keyword: RETURNING - star: '*' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/json_operators.sql000066400000000000000000000006361503426445100250170ustar00rootroot00000000000000-- https://duckdb.org/docs/extensions/json#json-extraction-functions
-- Get JSON array element (indexed from zero, negative integers count from the end)
SELECT '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json->2;
-- Get JSON object field by key
SELECT '{"a": {"b":"foo"}}'::json->'a';
-- Get JSON array element as text
SELECT '[1,2,3]'::json->>2;
-- Get JSON object field as text
SELECT '{"a":1,"b":2}'::json->>'b';
sqlfluff-3.4.2/test/fixtures/dialects/duckdb/json_operators.yml000066400000000000000000000037371503426445100250220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 45b917bc77b364642e00e43cb21f3bcfe12e8ab5818b48241d16133dce780bf0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[{\"a\":\"foo\"},{\"b\":\"bar\"},{\"c\":\"baz\"}]'" casting_operator: '::' data_type: keyword: json binary_operator: -> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\": {\"b\":\"foo\"}}'" casting_operator: '::' data_type: keyword: json binary_operator: -> quoted_literal: "'a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[1,2,3]'" casting_operator: '::' data_type: keyword: json binary_operator: ->> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":1,\"b\":2}'" casting_operator: '::' data_type: keyword: json binary_operator: ->> quoted_literal: "'b'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/list_comprehension.sql000066400000000000000000000002731503426445100256510ustar00rootroot00000000000000SELECT [lower(x) FOR x IN strings] FROM (VALUES (['Hello', '', 'World'])) t(strings); SELECT [upper(x) FOR x IN strings IF len(x) > 0] FROM (VALUES (['Hello', '', 'World'])) t(strings); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/list_comprehension.yml000066400000000000000000000110421503426445100256470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: f11de74d2f2f8cb2bc991851be5e886df97b3eb0910085182f08fdf300a0eee5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: list_comprehension: - start_square_bracket: '[' - expression: function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - keyword: FOR - parameter: x - keyword: IN - expression: column_reference: naked_identifier: strings - end_square_bracket: ']' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'Hello'" - comma: ',' - quoted_literal: "''" - comma: ',' - quoted_literal: "'World'" - end_square_bracket: ']' end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: strings end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: list_comprehension: - start_square_bracket: '[' - expression: function: function_name: function_name_identifier: upper function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - keyword: FOR - parameter: x - keyword: IN - expression: column_reference: naked_identifier: strings - keyword: IF - expression: function: function_name: function_name_identifier: len function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - end_square_bracket: ']' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'Hello'" - comma: ',' - quoted_literal: "''" - comma: ',' - quoted_literal: "'World'" - end_square_bracket: ']' end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: strings end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/list_struct.sql000066400000000000000000000007601503426445100243250ustar00rootroot00000000000000SELECT ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list, {name: 'Star Destroyer', common_misconceptions: 'Can''t in fact destroy a star'} as star_destroyer_facts ; SELECT starfighter_list[2:2] as dont_forget_the_b_wing FROM (SELECT ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list); SELECT 'I love you! I know'[:-3] as nearly_soloed; SELECT planet.name, planet."Amount of sand" FROM (SELECT {name: 'Tatooine', 'Amount of sand': 'High'} as planet) ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/list_struct.yml000066400000000000000000000122321503426445100243240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9089c4ee01870665c30fac293ae6f6f4a1ba8fc257ad08bc955f367391e5275e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: - start_square_bracket: '[' - quoted_literal: "'A-Wing'" - comma: ',' - quoted_literal: "'B-Wing'" - comma: ',' - quoted_literal: "'X-Wing'" - comma: ',' - quoted_literal: "'Y-Wing'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: starfighter_list - comma: ',' - select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: naked_identifier: name colon: ':' quoted_literal: "'Star Destroyer'" - comma: ',' - object_literal_element: naked_identifier: common_misconceptions colon: ':' quoted_literal: "'Can''t in fact destroy a star'" - end_curly_bracket: '}' alias_expression: alias_operator: keyword: as naked_identifier: star_destroyer_facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: starfighter_list array_accessor: - start_square_bracket: '[' - numeric_literal: '2' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: dont_forget_the_b_wing from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - quoted_literal: "'A-Wing'" - comma: ',' - quoted_literal: "'B-Wing'" - comma: ',' - quoted_literal: "'X-Wing'" - comma: ',' - quoted_literal: "'Y-Wing'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: starfighter_list end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'I love you! I know'" array_accessor: start_square_bracket: '[' slice: ':' numeric_literal: sign_indicator: '-' numeric_literal: '3' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: nearly_soloed - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: planet - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: planet dot: . 
quoted_identifier: '"Amount of sand"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: naked_identifier: name colon: ':' quoted_literal: "'Tatooine'" - comma: ',' - object_literal_element: - quoted_literal: "'Amount of sand'" - colon: ':' - quoted_literal: "'High'" - end_curly_bracket: '}' alias_expression: alias_operator: keyword: as naked_identifier: planet end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/pivot.sql000066400000000000000000000033651503426445100231130ustar00rootroot00000000000000-- Simplified PIVOT -- Examples from https://duckdb.org/docs/sql/statements/pivot PIVOT Cities ON Year USING sum(Population); PIVOT Cities ON Year USING first(Population); PIVOT Cities ON Year USING sum(Population) GROUP BY Country; PIVOT Cities ON Year IN (2000, 2010) USING sum(Population) GROUP BY Country; PIVOT Cities ON Country, Name USING sum(Population); PIVOT Cities ON Country || '_' || Name USING sum(Population); PIVOT Cities ON Year USING sum(Population) AS total, max(Population) AS max GROUP BY Country; PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name; PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name ORDER BY Name; PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name LIMIT 1; PIVOT Cities ON Year USING sum(Population) GROUP BY Country, Name ORDER BY Name LIMIT 1; -- Without ON PIVOT Cities USING sum(Population) GROUP BY Country, Name; -- Only ON PIVOT Cities ON Year; -- Only USING PIVOT Cities USING sum(Population); -- Only GROUP BY PIVOT Cities GROUP BY Country, Name; -- In a CTE WITH pivot_alias AS ( PIVOT Cities ON Year USING sum(Population) GROUP BY Country ) SELECT * FROM pivot_alias; -- In a subquery SELECT * FROM ( PIVOT Cities ON Year USING sum(Population) GROUP BY Country ) pivot_alias; -- Multiple pivots with a join FROM (PIVOT Cities ON Year USING sum(Population) GROUP BY Country) year_pivot JOIN (PIVOT Cities ON Name USING sum(Population) GROUP BY Country) name_pivot USING (Country); -- Standard PIVOT FROM Cities PIVOT ( sum(Population) FOR Year IN (2000, 2010, 2020) GROUP BY Country ); FROM Cities PIVOT ( sum(Population) AS total, count(Population) AS count FOR Year IN (2000, 2010) Country in ('NL', 'US') ); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/pivot.yml000066400000000000000000000462441503426445100231200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 636462068822a538f191632cf1f95bf3d085ed54e9750eae03e4496515e0ea0b file: - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - expression: column_reference: naked_identifier: Year keyword: IN bracketed: - start_bracket: ( - numeric_literal: '2000' - comma: ',' - numeric_literal: '2010' - end_bracket: ) - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - expression: - column_reference: naked_identifier: Country - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'_'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: Name - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total - comma: ',' - function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: 
column_reference: naked_identifier: Population end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: max - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Name - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Name - limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - statement_terminator: ; - statement: simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: USING - function: function_name: 
function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - statement_terminator: ; - statement: simplified_pivot: keyword: PIVOT table_expression: table_reference: naked_identifier: Cities groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - comma: ',' - column_reference: naked_identifier: Name - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: pivot_alias keyword: AS bracketed: start_bracket: ( simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pivot_alias - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country end_bracket: ) alias_expression: naked_identifier: pivot_alias - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Year - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country end_bracket: ) alias_expression: naked_identifier: year_pivot join_clause: - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( simplified_pivot: - keyword: PIVOT - table_expression: table_reference: naked_identifier: Cities - keyword: 'ON' - column_reference: naked_identifier: Name - keyword: USING - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country end_bracket: ) alias_expression: naked_identifier: name_pivot - keyword: USING - bracketed: start_bracket: ( naked_identifier: Country end_bracket: ) - statement_terminator: ; - statement: select_statement: 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Cities from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - keyword: FOR - naked_identifier: Year - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '2000' - comma: ',' - numeric_literal: '2010' - comma: ',' - numeric_literal: '2020' - end_bracket: ) - groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Country - end_bracket: ) - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Cities from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total - comma: ',' - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Population end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: count - keyword: FOR - naked_identifier: Year - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '2000' - comma: ',' - numeric_literal: '2010' - end_bracket: ) - naked_identifier: Country - keyword: in - bracketed: - start_bracket: ( - quoted_literal: "'NL'" - comma: ',' - quoted_literal: "'US'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/positional_join.sql000066400000000000000000000001451503426445100251430ustar00rootroot00000000000000-- treat two data frames as a single table
SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2;
sqlfluff-3.4.2/test/fixtures/dialects/duckdb/positional_join.yml000066400000000000000000000024161503426445100251500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 627d2700c6b7ee556b3ea7b2ab29f4634cc939f80fac84e6f45f5239ac117968 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: df1 dot: . star: '*' - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: df2 dot: .
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: df1 join_clause: - keyword: POSITIONAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: df2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/qualify.sql000066400000000000000000000027031503426445100234170ustar00rootroot00000000000000/* Examples from https://duckdb.org/docs/sql/query_syntax/qualify */
-- Filter based on a WINDOW function defined in the QUALIFY clause
SELECT schema_name, function_name,
-- In this example the function_rank column in the select clause is for reference
row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions QUALIFY row_number() OVER (PARTITION BY schema_name ORDER BY function_name) < 3;
-- Filter based on a WINDOW function defined in the SELECT clause
SELECT schema_name, function_name, row_number() OVER (PARTITION BY schema_name ORDER BY function_name) AS function_rank FROM duckdb_functions() QUALIFY function_rank < 3;
-- Filter based on a WINDOW function defined in the QUALIFY clause, but using the WINDOW clause
SELECT schema_name, function_name,
-- In this example the function_rank column in the select clause is for reference
row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY row_number() OVER my_window < 3;
-- Filter based on a WINDOW function defined in the SELECT clause, but using the WINDOW clause
SELECT schema_name, function_name, row_number() OVER my_window AS function_rank FROM duckdb_functions() WINDOW my_window AS (PARTITION BY schema_name ORDER BY function_name) QUALIFY function_rank < 3;
sqlfluff-3.4.2/test/fixtures/dialects/duckdb/qualify.yml000066400000000000000000000220711503426445100234210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 02c2fc06b95c5405173050dfb1282d1e5b57b034c520594f052fe1ff4faffbeb file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: schema_name - comma: ',' - select_clause_element: column_reference: naked_identifier: function_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: schema_name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: function_name end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: function_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: duckdb_functions qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: schema_name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: function_name end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: schema_name - comma: ',' - select_clause_element: column_reference: naked_identifier: function_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: schema_name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: function_name end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: function_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: duckdb_functions function_contents: bracketed: start_bracket: ( end_bracket: ) qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: function_rank comparison_operator: raw_comparison_operator: < numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: schema_name - comma: ',' - select_clause_element: column_reference: naked_identifier: function_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: my_window alias_expression: alias_operator: keyword: AS naked_identifier: function_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: duckdb_functions function_contents: bracketed: start_bracket: ( end_bracket: ) named_window: keyword: WINDOW named_window_expression: naked_identifier: 
my_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: schema_name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: function_name end_bracket: ) qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: my_window comparison_operator: raw_comparison_operator: < numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: schema_name - comma: ',' - select_clause_element: column_reference: naked_identifier: function_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: my_window alias_expression: alias_operator: keyword: AS naked_identifier: function_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: duckdb_functions function_contents: bracketed: start_bracket: ( end_bracket: ) named_window: keyword: WINDOW named_window_expression: naked_identifier: my_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: schema_name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: function_name end_bracket: ) qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: function_rank comparison_operator: raw_comparison_operator: < numeric_literal: '3' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select.sql000066400000000000000000000002161503426445100232210ustar00rootroot00000000000000select 10 // 5; SELECT * FROM capitals UNION BY NAME SELECT * FROM weather; SELECT * FROM capitals UNION ALL BY NAME SELECT * FROM weather; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select.yml000066400000000000000000000051131503426445100232240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b336d1dcea0dab4bda87eab355124f897a8ebf8cc5181cd161ed477da253a0af file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - numeric_literal: '10' - binary_operator: // - numeric_literal: '5' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: capitals - set_operator: - keyword: UNION - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: weather - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: capitals - set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: weather - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_columns.sql000066400000000000000000000010471503426445100247640ustar00rootroot00000000000000-- columns expression with lambda SELECT COLUMNS(c -> c LIKE '%num%') FROM addresses; -- columns expression with regular expression SELECT COLUMNS('number\d+') FROM addresses; -- function call on columns expression SELECT min(COLUMNS(*)) FROM addresses; SELECT min(COLUMNS(*)), count(COLUMNS(*)) FROM numbers; -- columns with wildcard replace and exclude SELECT min(COLUMNS(* REPLACE (number + id AS number))), count(COLUMNS(* EXCLUDE (number))) FROM numbers; -- columns expression with an expression SELECT COLUMNS(*) + COLUMNS(*) FROM numbers; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_columns.yml000066400000000000000000000177251503426445100250000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9d83b7d7f4186751065b5c90809bdfcdf7c33cdd1a5ad539a65b443d0bd07284 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( lambda_function: parameter: c lambda_arrow: -> expression: column_reference: naked_identifier: c keyword: LIKE quoted_literal: "'%num%'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: addresses - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( quoted_literal: "'number\\d+'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: addresses - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: addresses - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: numbers - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' wildcard_replace: keyword: REPLACE bracketed: start_bracket: ( expression: - column_reference: naked_identifier: number - binary_operator: + - column_reference: naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: number end_bracket: ) end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: start_bracket: ( column_reference: naked_identifier: number end_bracket: ) end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: numbers - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' end_bracket: ) - binary_operator: + - function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: numbers - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_exclude.sql000066400000000000000000000003211503426445100247270ustar00rootroot00000000000000SELECT * EXCLUDE (jar_jar_binks, midichlorians) FROM star_wars; SELECT sw.* EXCLUDE (jar_jar_binks, midichlorians), ff.* EXCLUDE cancellation FROM star_wars sw, firefly ff ; SELECT * FROM star_wars; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_exclude.yml000066400000000000000000000060071503426445100247400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db18a9572997b454551a89d98852606904c3cd7dff639cd0a8acbd4537822923 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: jar_jar_binks - comma: ',' - column_reference: naked_identifier: midichlorians - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: sw dot: . star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: jar_jar_binks - comma: ',' - column_reference: naked_identifier: midichlorians - end_bracket: ) - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: ff dot: . 
star: '*' wildcard_exclude: keyword: EXCLUDE column_reference: naked_identifier: cancellation from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars alias_expression: naked_identifier: sw - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: firefly alias_expression: naked_identifier: ff - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_exclude_replace.sql000066400000000000000000000002171503426445100264260ustar00rootroot00000000000000SELECT * EXCLUDE y REPLACE (3 AS x) FROM tabx; -- The x column is replaced, but z is not SELECT * EXCLUDE y REPLACE 3 AS x, 6 as z FROM tabx; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_exclude_replace.yml000066400000000000000000000043121503426445100264300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6af9f443583036f12fbf1244587228403f43241b8dd1671115dff45eabefa34d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE column_reference: naked_identifier: y wildcard_replace: keyword: REPLACE bracketed: start_bracket: ( numeric_literal: '3' alias_expression: alias_operator: keyword: AS naked_identifier: x end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tabx - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE column_reference: naked_identifier: y wildcard_replace: keyword: REPLACE numeric_literal: '3' alias_expression: alias_operator: keyword: AS naked_identifier: x - comma: ',' - select_clause_element: numeric_literal: '6' alias_expression: alias_operator: keyword: as naked_identifier: z from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tabx - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_lambda.sql000066400000000000000000000004611503426445100245230ustar00rootroot00000000000000-- lambda functions SELECT list_transform([4, 5, 6], x -> x + 1); SELECT list_filter([4, 5, 6], x -> x > 4); -- nested lambda functions SELECT list_transform( list_filter([0, 1, 2, 3, 4, 5], x -> x % 2 = 0), y -> y * y ); -- lambda with index SELECT list_filter([1, 3, 1, 5], (x, i) -> x > i); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_lambda.yml000066400000000000000000000136331503426445100245320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 40bd061e80fc0993927b3feed5918d7947159a463d8b37b98dc87061e0315351 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: list_transform function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_square_bracket: ']' comma: ',' lambda_function: parameter: x lambda_arrow: -> expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: list_filter function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_square_bracket: ']' comma: ',' lambda_function: parameter: x lambda_arrow: -> expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '4' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: list_transform function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: list_filter function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' comma: ',' lambda_function: parameter: x lambda_arrow: -> expression: - column_reference: naked_identifier: x - binary_operator: '%' - numeric_literal: '2' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' end_bracket: ) comma: ',' lambda_function: parameter: y lambda_arrow: -> expression: - column_reference: naked_identifier: y - binary_operator: '*' - column_reference: naked_identifier: y end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: list_filter function_contents: bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' comma: ',' lambda_function: bracketed: - start_bracket: ( - parameter: x - comma: ',' - parameter: i - end_bracket: ) lambda_arrow: -> expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_quoted.sql000066400000000000000000000001311503426445100245760ustar00rootroot00000000000000SELECT count(*) FROM 'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet'; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_quoted.yml000066400000000000000000000020141503426445100246020ustar00rootroot00000000000000# YML test files are auto-generated from SQL 
files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 06ecdc6896e0b9196ebea81833f342b83403f2411120e130593c365d5d20574e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: "'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_replace.sql000066400000000000000000000001641503426445100247160ustar00rootroot00000000000000SELECT * REPLACE (movie_count+3 as movie_count, show_count*1000 as show_count) FROM star_wars_owned_by_disney ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/select_replace.yml000066400000000000000000000032411503426445100247170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2d2d913ebaf7aa1442c2116439e6c3ce2ceb92903df44d3e46ae45156b96484 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_replace: keyword: REPLACE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: movie_count binary_operator: + numeric_literal: '3' - alias_expression: alias_operator: keyword: as naked_identifier: movie_count - comma: ',' - expression: column_reference: naked_identifier: show_count binary_operator: '*' numeric_literal: '1000' - alias_expression: alias_operator: keyword: as naked_identifier: show_count - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_owned_by_disney statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/star_expression.sql000066400000000000000000000010561503426445100251750ustar00rootroot00000000000000-- * RENAME SELECT * RENAME (a AS b) FROM tbl; -- Pattern Matching SELECT * LIKE 'col%' FROM tbl; SELECT * ILIKE 'col%' FROM tbl; SELECT * NOT LIKE 'col%' FROM tbl; SELECT * GLOB 'col*' FROM tbl; SELECT * SIMILAR TO 'col.' FROM tbl; -- Pattern Matching with symbols SELECT * ~~ 'col%' FROM tbl; SELECT * ~~* 'col%' FROM tbl; SELECT * !~~ 'col%' FROM tbl; SELECT * ~~~ 'col*' FROM tbl; SELECT COLUMNS(['id', 'num']) FROM numbers; SELECT COALESCE(*COLUMNS([upper(x) for x in ['a', 'b', 'c']])) AS result FROM (SELECT NULL AS a, 42 AS b, true AS c); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/star_expression.yml000066400000000000000000000235011503426445100251760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ed8eabffde9fccfa1fd253981fd9b3025024f3939bbbd0cd4cfa7db842e4b6d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_rename: keyword: RENAME bracketed: start_bracket: ( column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: b end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: keyword: LIKE quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: keyword: ILIKE quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: - keyword: NOT - keyword: LIKE - quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: keyword: GLOB quoted_literal: "'col*'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: - keyword: SIMILAR - keyword: TO - quoted_literal: "'col.'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: like_operator: ~~ quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: like_operator: ~~* quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' wildcard_pattern_matching: like_operator: '!~~' quoted_literal: "'col%'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: 
wildcard_identifier: star: '*' wildcard_pattern_matching: glob_operator: ~~~ quoted_literal: "'col*'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( array_literal: - start_square_bracket: '[' - quoted_literal: "'id'" - comma: ',' - quoted_literal: "'num'" - end_square_bracket: ']' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: numbers - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: start_bracket: ( expression: function: function_name: unpacking_operator: '*' keyword: COLUMNS function_contents: bracketed: start_bracket: ( expression: list_comprehension: - start_square_bracket: '[' - expression: function: function_name: function_name_identifier: upper function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - keyword: for - parameter: x - keyword: in - expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' - end_square_bracket: ']' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: result from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: null_literal: 'NULL' alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: numeric_literal: '42' alias_expression: alias_operator: keyword: AS naked_identifier: b - comma: ',' - select_clause_element: boolean_literal: 'true' alias_expression: alias_operator: keyword: AS naked_identifier: c end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/structs.sql000066400000000000000000000001031503426445100234440ustar00rootroot00000000000000SELECT a::STRUCT(y INTEGER) AS b FROM (SELECT {'x': 42} AS a); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/structs.yml000066400000000000000000000037021503426445100234560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dd51a5c24f787c35216072bc3fa03be92be30588bb2c23d01e33bdfb2d56934d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: a casting_operator: '::' data_type: struct_type: keyword: STRUCT struct_type_schema: bracketed: start_bracket: ( parameter: y data_type: keyword: INTEGER end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: object_literal: start_curly_bracket: '{' object_literal_element: quoted_literal: "'x'" colon: ':' numeric_literal: '42' end_curly_bracket: '}' alias_expression: alias_operator: keyword: AS naked_identifier: a end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/unpivot.sql000066400000000000000000000015541503426445100234540ustar00rootroot00000000000000-- Simplified UNPIVOT UNPIVOT monthly_sales ON jan, feb, mar, apr, may, jun INTO NAME month VALUE sales; UNPIVOT monthly_sales ON COLUMNS (* EXCLUDE (empid, dept)) INTO NAME month VALUE sales; UNPIVOT monthly_sales ON (jan, feb, mar) AS q1, (apr, may, jun) AS q2 INTO NAME quarter VALUE month_1_sales, month_2_sales, month_3_sales; WITH unpivot_alias AS ( UNPIVOT monthly_sales ON COLUMNS (* EXCLUDE (empid, dept)) INTO NAME month VALUE sales ) SELECT * FROM unpivot_alias; -- Standard UNPIVOT FROM monthly_sales UNPIVOT ( sales FOR month IN (jan, feb, mar, apr, may, jun) ); FROM monthly_sales UNPIVOT ( sales FOR month IN (COLUMNS (* EXCLUDE (empid, dept))) ); FROM monthly_sales UNPIVOT ( (month_1_sales, month_2_sales, month_3_sales) FOR quarter IN ( (jan, feb, mar) AS q1, (apr, may, jun) AS q2 ) ); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/unpivot.yml000066400000000000000000000214701503426445100234550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4fd86cf86d5fb835b936a1b395a37aa7f8dc1eb247d1791af3b8ff65613b54db file: - statement: simplified_unpivot: - keyword: UNPIVOT - table_expression: table_reference: naked_identifier: monthly_sales - keyword: 'ON' - column_reference: naked_identifier: jan - comma: ',' - column_reference: naked_identifier: feb - comma: ',' - column_reference: naked_identifier: mar - comma: ',' - column_reference: naked_identifier: apr - comma: ',' - column_reference: naked_identifier: may - comma: ',' - column_reference: naked_identifier: jun - keyword: INTO - keyword: NAME - naked_identifier: month - keyword: VALUE - naked_identifier: sales - statement_terminator: ; - statement: simplified_unpivot: - keyword: UNPIVOT - table_expression: table_reference: naked_identifier: monthly_sales - keyword: 'ON' - function_name: keyword: COLUMNS - function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: empid - comma: ',' - column_reference: naked_identifier: dept - end_bracket: ) end_bracket: ) - keyword: INTO - keyword: NAME - naked_identifier: month - keyword: VALUE - naked_identifier: sales - statement_terminator: ; - statement: simplified_unpivot: - keyword: UNPIVOT - table_expression: table_reference: naked_identifier: monthly_sales - keyword: 'ON' - bracketed: - start_bracket: ( - column_reference: naked_identifier: jan - comma: ',' - column_reference: naked_identifier: feb - comma: ',' - column_reference: naked_identifier: mar - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - bracketed: - start_bracket: ( - column_reference: naked_identifier: apr - comma: ',' - column_reference: naked_identifier: may - comma: ',' - column_reference: naked_identifier: jun - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q2 - keyword: INTO - keyword: NAME - naked_identifier: quarter - keyword: VALUE - naked_identifier: month_1_sales - comma: ',' - naked_identifier: month_2_sales - comma: ',' - naked_identifier: month_3_sales - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: unpivot_alias keyword: AS bracketed: start_bracket: ( simplified_unpivot: - keyword: UNPIVOT - table_expression: table_reference: naked_identifier: monthly_sales - keyword: 'ON' - function_name: keyword: COLUMNS - function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: empid - comma: ',' - column_reference: naked_identifier: dept - end_bracket: ) end_bracket: ) - keyword: INTO - keyword: NAME - naked_identifier: month - keyword: VALUE - naked_identifier: sales end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: unpivot_alias - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: monthly_sales from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: month - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: 
jan - comma: ',' - naked_identifier: feb - comma: ',' - naked_identifier: mar - comma: ',' - naked_identifier: apr - comma: ',' - naked_identifier: may - comma: ',' - naked_identifier: jun - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: monthly_sales from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: month - keyword: IN - bracketed: start_bracket: ( function_name: keyword: COLUMNS function_contents: bracketed: start_bracket: ( wildcard_expression: wildcard_identifier: star: '*' wildcard_exclude: keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: empid - comma: ',' - column_reference: naked_identifier: dept - end_bracket: ) end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: monthly_sales from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: month_1_sales - comma: ',' - naked_identifier: month_2_sales - comma: ',' - naked_identifier: month_3_sales - end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: jan - comma: ',' - naked_identifier: feb - comma: ',' - naked_identifier: mar - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: apr - comma: ',' - naked_identifier: may - comma: ',' - naked_identifier: jun - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: q2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/duckdb/walrus_operator_function.sql000066400000000000000000000002131503426445100270740ustar00rootroot00000000000000create view v as select t.id, struct_pack( val := t.val ) as s from t; select struct_insert({ 'a': 1 }, b := 2); sqlfluff-3.4.2/test/fixtures/dialects/duckdb/walrus_operator_function.yml000066400000000000000000000050651503426445100271100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e8a41c81370b7ca1715644745c19acae63deecf19fddcfa9d62b181df7f57081 file: - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: v - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: struct_pack function_contents: bracketed: start_bracket: ( named_argument: naked_identifier: val assignment_operator: := expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: val end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: s from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: struct_insert function_contents: bracketed: start_bracket: ( expression: object_literal: start_curly_bracket: '{' object_literal_element: quoted_literal: "'a'" colon: ':' numeric_literal: '1' end_curly_bracket: '}' comma: ',' named_argument: naked_identifier: b assignment_operator: := expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/000077500000000000000000000000001503426445100212615ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/exasol/.sqlfluff000066400000000000000000000000341503426445100231010ustar00rootroot00000000000000[sqlfluff] dialect = exasol sqlfluff-3.4.2/test/fixtures/dialects/exasol/Add_Days.sql000066400000000000000000000001441503426445100234510ustar00rootroot00000000000000SELECT ADD_DAYS(DATE '2000-02-28', 1) AD1, ADD_DAYS(TIMESTAMP '2001-02-28 12:00:00', 1) AD2; sqlfluff-3.4.2/test/fixtures/dialects/exasol/Add_Days.yml000066400000000000000000000030511503426445100234530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be2da0a9d3075d88de3fcd56c77f485b750ab43f32e2f7ea59c8ad6b891e5db4 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ADD_DAYS function_contents: bracketed: - start_bracket: ( - expression: keyword: DATE date_constructor_literal: "'2000-02-28'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) alias_expression: naked_identifier: AD1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ADD_DAYS function_contents: bracketed: - start_bracket: ( - expression: keyword: TIMESTAMP date_constructor_literal: "'2001-02-28 12:00:00'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) alias_expression: naked_identifier: AD2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/access_statement.sql000066400000000000000000000022641503426445100253330ustar00rootroot00000000000000-- System privileges GRANT CREATE SCHEMA TO role1; GRANT SELECT ANY TABLE TO user1 WITH ADMIN OPTION; -- Object privileges GRANT INSERT ON my_schema.my_table TO user1, role2; GRANT SELECT ON VIEW my_schema.my_view TO user1; -- Access on my_view for all users GRANT SELECT ON my_schema.my_view TO PUBLIC; -- Roles GRANT role1 TO user1, user2 WITH ADMIN OPTION; GRANT role2 TO role1; -- Impersonation GRANT IMPERSONATION ON user2 TO user1; GRANT IMPERSONATION ON "user2" TO user1; GRANT IMPERSONATION ON user2 TO "user1"; -- Connection GRANT CONNECTION my_connection TO user1; GRANT CONNECTION my_connection TO "ADMIN"; -- Access to connection details for certain script GRANT ACCESS ON CONNECTION my_connection FOR SCRIPT script1 TO user1; GRANT ACCESS ON CONNECTION "my_connection" FOR SCRIPT "script1" TO "user1"; REVOKE CREATE SCHEMA FROM role1,user3; -- Object privileges REVOKE 
SELECT, INSERT ON my_schema.my_table FROM user1, role2; REVOKE ALL PRIVILEGES ON VIEW my_schema.my_view FROM PUBLIC; -- Role REVOKE role1 FROM user1, user2; -- Impersonation REVOKE IMPERSONATION ON user2 FROM user1; -- Connections REVOKE CONNECTION my_connection FROM user1; REVOKE CONNECTION my_connection FROM "ADMIN"; sqlfluff-3.4.2/test/fixtures/dialects/exasol/access_statement.yml000066400000000000000000000155441503426445100253420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2bb545f4adeb85060bff12a0f95565113146d63e1b8dcd7f456fe621a241f985 file: - statement: access_statement: keyword: GRANT grant_revoke_system_privileges: system_privilege: - keyword: CREATE - keyword: SCHEMA keyword: TO naked_identifier: role1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_system_privileges: - system_privilege: - keyword: SELECT - keyword: ANY - keyword: TABLE - keyword: TO - naked_identifier: user1 - keyword: WITH - keyword: ADMIN - keyword: OPTION - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: INSERT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: TO - naked_identifier: user1 - comma: ',' - naked_identifier: role2 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_view - keyword: TO - naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_roles: - role_reference: naked_identifier: role1 - keyword: TO - role_reference: naked_identifier: user1 - comma: ',' - role_reference: naked_identifier: user2 - keyword: WITH - keyword: ADMIN - keyword: OPTION - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_roles: - role_reference: naked_identifier: role2 - keyword: TO - role_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - quoted_identifier: '"user2"' - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: TO - quoted_identifier: '"user1"' - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: TO - quoted_identifier: '"ADMIN"' - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection_restricted: - keyword: ACCESS - keyword: 'ON' - keyword: CONNECTION - naked_identifier: my_connection - keyword: FOR - keyword: SCRIPT - naked_identifier: script1 - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection_restricted: - keyword: ACCESS - keyword: 'ON' - keyword: CONNECTION - quoted_identifier: '"my_connection"' - keyword: FOR - keyword: SCRIPT - quoted_identifier: '"script1"' - keyword: TO - quoted_identifier: '"user1"' - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_system_privileges: - system_privilege: - keyword: CREATE - keyword: SCHEMA - keyword: FROM - naked_identifier: role1 - comma: ',' - naked_identifier: user3 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_object_privileges: - object_privilege: keyword: SELECT - comma: ',' - object_privilege: keyword: INSERT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: FROM - naked_identifier: user1 - comma: ',' - naked_identifier: role2 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_object_privileges: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_view - keyword: FROM - naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_roles: - role_reference: naked_identifier: role1 - keyword: FROM - role_reference: naked_identifier: user1 - comma: ',' - role_reference: naked_identifier: user2 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: FROM - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: FROM - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: FROM - quoted_identifier: '"ADMIN"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_connection.sql000066400000000000000000000000741503426445100253310ustar00rootroot00000000000000ALTER CONNECTION exa_connection TO '192.168.6.11..14:8564'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_connection.yml000066400000000000000000000011761503426445100253370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ade783fe9584cfecc1b51d72ef08f456870308182c8642439dac308ff67d7bef file: statement: alter_connection: - keyword: ALTER - keyword: CONNECTION - naked_identifier: exa_connection - keyword: TO - connection_definition: quoted_literal: "'192.168.6.11..14:8564'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_consumer_group.sql000066400000000000000000000003641503426445100262430ustar00rootroot00000000000000ALTER CONSUMER GROUP TEST_TEAM SET PRECEDENCE = '800', CPU_WEIGHT = '150', GROUP_TEMP_DB_RAM_LIMIT = '10G', SESSION_TEMP_DB_RAM_LIMIT = '5G', QUERY_TIMEOUT = 60, IDLE_TIMEOUT = 3600; ALTER CONSUMER GROUP "ADMIN" SET PRECEDENCE = '800'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_consumer_group.yml000066400000000000000000000037421503426445100262500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7b7717c26fca6eb0d644963773b5281d3d356b649bd48dd0110c107999ef6450 file: - statement: alter_consumer_group_statement: - keyword: ALTER - keyword: CONSUMER - keyword: GROUP - naked_identifier: TEST_TEAM - keyword: SET - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'150'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10G'" - comma: ',' - consumer_group_parameter: keyword: SESSION_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'5G'" - comma: ',' - consumer_group_parameter: keyword: QUERY_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - comma: ',' - consumer_group_parameter: keyword: IDLE_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '3600' - statement_terminator: ; - statement: alter_consumer_group_statement: - keyword: ALTER - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - keyword: SET - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_role.sql000066400000000000000000000002061503426445100241300ustar00rootroot00000000000000ALTER ROLE role1 SET CONSUMER_GROUP = CEO; ALTER ROLE role2 SET CONSUMER_GROUP = NULL; ALTER ROLE "TABLE" SET CONSUMER_GROUP = "DaY"; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_role.yml000066400000000000000000000024251503426445100241370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b81ce1404060a5c313267addf68a77067bb678e07eef7b7b095baf9cab8a5d97 file: - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: role1 - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - naked_identifier: CEO - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: role2 - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"TABLE"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"DaY"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_schema_statement.sql000066400000000000000000000002421503426445100265130ustar00rootroot00000000000000ALTER SCHEMA s1 CHANGE OWNER user1; ALTER SCHEMA s1 CHANGE OWNER role1; ALTER SCHEMA s1 SET RAW_SIZE_LIMIT = 128*1024*1024; ALTER SCHEMA s1 CHANGE OWNER "role1"; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_schema_statement.yml000066400000000000000000000027401503426445100265220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6caef570ecdf6ec635d22eccd23434c239efca88596bf79cd99ab8f7db1be3ba file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: user1 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: role1 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: SET - keyword: RAW_SIZE_LIMIT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '128' - star: '*' - numeric_literal: '1024' - star: '*' - numeric_literal: '1024' - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - quoted_identifier: '"role1"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_session.sql000066400000000000000000000004731503426445100246600ustar00rootroot00000000000000ALTER SESSION SET TIME_ZONE='EUROPE/BERLIN'; ALTER SESSION SET QUERY_TIMEOUT=120; ALTER SESSION SET NLS_DATE_FORMAT='DDD-YYYY'; ALTER SESSION SET SESSION_TEMP_DB_RAM_LIMIT = '10240M'; ALTER SESSION SET SNAPSHOT_MODE = 'OFF'; ALTER SESSION SET SNAPSHOT_MODE = 'SYSTEM TABLES'; ALTER SESSION SET IDLE_TIMEOUT = 2400; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_session.yml000066400000000000000000000043461503426445100246650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 56d0d234590e9523861fd56dd27319d6e9134abf264d5d4777d74e84bc0d0b86 file: - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: TIME_ZONE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'EUROPE/BERLIN'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: QUERY_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '120' - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: NLS_DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DDD-YYYY'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SESSION_TEMP_DB_RAM_LIMIT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'10240M'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'OFF'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SYSTEM TABLES'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: IDLE_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2400' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_system.sql000066400000000000000000000003351503426445100245160ustar00rootroot00000000000000ALTER SYSTEM SET NLS_DATE_LANGUAGE='DEU'; ALTER SYSTEM SET NLS_FIRST_DAY_OF_WEEK=1; ALTER SYSTEM SET SNAPSHOT_MODE = 'SYSTEM TABLES'; ALTER SYSTEM SET IDLE_TIMEOUT = 3600; ALTER SYSTEM SET USER_TEMP_DB_RAM_LIMIT = '50G'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_system.yml000066400000000000000000000032761503426445100245270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7f31367ccb33b130784b4d551af5b1171b57321497dff55de71bae9d8ac0cc9d file: - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: NLS_DATE_LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DEU'" - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: NLS_FIRST_DAY_OF_WEEK - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SYSTEM TABLES'" - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: IDLE_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3600' - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: USER_TEMP_DB_RAM_LIMIT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'50G'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_column.sql000066400000000000000000000010631503426445100256350ustar00rootroot00000000000000ALTER TABLE t ADD COLUMN IF NOT EXISTS new_dec DECIMAL(18,0); ALTER TABLE t ADD (new_char CHAR(10) DEFAULT 'some text'); ALTER TABLE myschema.t DROP COLUMN i; ALTER TABLE t DROP j; ALTER TABLE t MODIFY (i DECIMAL(10,2)); ALTER TABLE t MODIFY (j VARCHAR(5) DEFAULT 'text'); ALTER TABLE t MODIFY k INTEGER IDENTITY(1000); ALTER TABLE t RENAME COLUMN i TO j; ALTER TABLE t ALTER COLUMN v SET DEFAULT CURRENT_USER; ALTER TABLE "SCHEMA"."TABLE" ALTER COLUMN v DROP DEFAULT; ALTER TABLE t ALTER COLUMN id SET IDENTITY 1000; ALTER TABLE t ALTER COLUMN id DROP IDENTITY; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_column.yml000066400000000000000000000146121503426445100256430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8680e95465f367bf955e272bb11d300b23c79d34b289c34a3425bfe2ddb1a023 file: - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_add_column: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: column_datatype_definition: naked_identifier: new_dec data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_add_column: keyword: ADD bracketed: start_bracket: ( column_definition: column_datatype_definition: naked_identifier: new_char data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'some text'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: t - alter_table_drop_column: - keyword: DROP - keyword: COLUMN - naked_identifier: i - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_drop_column: keyword: DROP naked_identifier: j - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY bracketed: start_bracket: ( naked_identifier: i data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY bracketed: start_bracket: ( naked_identifier: j data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'text'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY naked_identifier: k data_type: keyword: INTEGER column_constraint_segment: keyword: IDENTITY bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_rename_column: - keyword: RENAME - keyword: COLUMN - naked_identifier: i - keyword: TO - naked_identifier: j - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: v - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_USER - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: 
TABLE - table_reference: - quoted_identifier: '"SCHEMA"' - dot: . - quoted_identifier: '"TABLE"' - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: v - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: id - keyword: SET - keyword: IDENTITY - numeric_literal: '1000' - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: id - keyword: DROP - keyword: IDENTITY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_constraint.sql000066400000000000000000000005221503426445100265230ustar00rootroot00000000000000ALTER TABLE t1 ADD CONSTRAINT my_primary_key PRIMARY KEY (a); ALTER TABLE t2 ADD CONSTRAINT my_foreign_key FOREIGN KEY (x) REFERENCES t1; ALTER TABLE t2 MODIFY CONSTRAINT my_foreign_key DISABLE; ALTER TABLE t2 RENAME CONSTRAINT my_foreign_key TO my_fk; ALTER TABLE t2 DROP CONSTRAINT my_fk; ALTER TABLE t1 DROP CONSTRAINT IF EXISTS PK_X; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_constraint.yml000066400000000000000000000054361503426445100265360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cb4515d6700cb2e56cf988b801092be38560e0ea53536094c561ecf5d0b68d98 file: - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: my_primary_key - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: ADD - table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: x end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: MODIFY - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: DISABLE - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: RENAME - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: TO - naked_identifier: my_fk - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: DROP - keyword: CONSTRAINT - naked_identifier: my_fk - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: DROP - keyword: CONSTRAINT - keyword: IF - keyword: EXISTS - naked_identifier: PK_X - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_distribute_partition.sql000066400000000000000000000005421503426445100306100ustar00rootroot00000000000000ALTER TABLE my_table DROP DISTRIBUTION KEYS; ALTER TABLE my_table DROP DISTRIBUTION AND PARTITION KEYS; ALTER TABLE my_table DISTRIBUTE BY shop_id, PARTITION BY order_date; ALTER TABLE my_table PARTITION BY order_date, DISTRIBUTE BY shop_id, branch_no; ALTER TABLE my_table PARTITION BY order_date; ALTER TABLE my_table DISTRIBUTE BY shop_id, branch_no; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_table_distribute_partition.yml000066400000000000000000000057501503426445100306200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 70378d3650e57431fc47fb48dc6e68eae04241f3a5e101bc929d22134e967574 file: - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: DISTRIBUTION - keyword: KEYS - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: DISTRIBUTION - keyword: AND - keyword: PARTITION - keyword: KEYS - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - comma: ',' - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - column_reference: naked_identifier: branch_no - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - column_reference: naked_identifier: branch_no - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_user.sql000066400000000000000000000012021503426445100241420ustar00rootroot00000000000000ALTER USER user_1 IDENTIFIED BY "h22_xhz" REPLACE "h12_xhz"; ALTER USER user_1 IDENTIFIED BY "h12_xhz"; ALTER USER user_2 IDENTIFIED AT LDAP AS 'cn=user_2,dc=authorization,dc=exasol,dc=com'; ALTER USER user_3 PASSWORD_EXPIRY_POLICY = '42 days'; ALTER USER user_4 PASSWORD EXPIRE; ALTER USER user_5 RESET FAILED LOGIN ATTEMPTS; ALTER USER userx SET CONSUMER_GROUP = CEO; ALTER USER userx SET CONSUMER_GROUP = NULL; ALTER USER "ADMIN" SET CONSUMER_GROUP = "TABLE"; ALTER USER [admin] SET CONSUMER_GROUP = "DAY"; ALTER USER "ADMIN" SET CONSUMER_GROUP = [day]; ALTER USER oidctestuser IDENTIFIED BY OPENID SUBJECT 'database-user@exasol.example'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_user.yml000066400000000000000000000075331503426445100241610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ed080507b847481a7cd266cc2a80cfdc1266d4f83114102d536424d831bca17a file: - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h22_xhz"' - keyword: REPLACE - password_literal: '"h12_xhz"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h12_xhz"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_2 - keyword: IDENTIFIED - ldap_auth: - keyword: AT - keyword: LDAP - keyword: AS - quoted_literal: "'cn=user_2,dc=authorization,dc=exasol,dc=com'" - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_3 - keyword: PASSWORD_EXPIRY_POLICY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42 days'" - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_4 - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_5 - keyword: RESET - keyword: FAILED - keyword: LOGIN - keyword: ATTEMPTS - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: userx - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - naked_identifier: CEO - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: userx - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"TABLE"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: identifier: '[admin]' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"DAY"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - identifier: '[day]' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: oidctestuser - keyword: IDENTIFIED - openid_auth: - keyword: BY - keyword: OPENID - keyword: SUBJECT - quoted_literal: "'database-user@exasol.example'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_virtual_schema_statement.sql000066400000000000000000000003271503426445100302650ustar00rootroot00000000000000ALTER VIRTUAL SCHEMA s2 SET CONNECTION_STRING = 'jdbc:hive2://localhost:10000/default'; ALTER VIRTUAL SCHEMA s2 REFRESH; ALTER VIRTUAL SCHEMA s1 CHANGE OWNER "role1"; ALTER VIRTUAL SCHEMA s1 CHANGE OWNER role1; 
sqlfluff-3.4.2/test/fixtures/dialects/exasol/alter_virtual_schema_statement.yml000066400000000000000000000030261503426445100302660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8e9b562d1996955899207b557976b85d1f4d75781bebcf53666f31c9fad4269 file: - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s2 - keyword: SET - column_reference: naked_identifier: CONNECTION_STRING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'jdbc:hive2://localhost:10000/default'" - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s2 - keyword: REFRESH - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - quoted_identifier: '"role1"' - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: role1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/comment_statement.sql000066400000000000000000000004071503426445100255310ustar00rootroot00000000000000COMMENT ON SCHEMA s1 IS 'My first schema'; COMMENT ON TABLE a_schema.t1 IS 'My first table'; COMMENT ON t1 (id IS 'Identity column', zip IS 'Zip code'); COMMENT ON SCRIPT script1 IS 'My first script'; COMMENT ON CONSUMER GROUP admin_group IS 'VERY important!!!'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/comment_statement.yml000066400000000000000000000035051503426445100255350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d9224ab0e91c1e7b176afa31c09b18fc81334533b189f4548a7a930065f2510d file: - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: s1 - keyword: IS - quoted_literal: "'My first schema'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: - naked_identifier: a_schema - dot: . 
- naked_identifier: t1 - keyword: IS - quoted_literal: "'My first table'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - naked_identifier: id - keyword: IS - quoted_literal: "'Identity column'" - comma: ',' - naked_identifier: zip - keyword: IS - quoted_literal: "'Zip code'" - end_bracket: ) - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: SCRIPT - object_reference: naked_identifier: script1 - keyword: IS - quoted_literal: "'My first script'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: CONSUMER - keyword: GROUP - object_reference: naked_identifier: admin_group - keyword: IS - quoted_literal: "'VERY important!!!'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_adapter_script_statement.sql000066400000000000000000000004421503426445100304150ustar00rootroot00000000000000CREATE JAVA ADAPTER SCRIPT my_script AS %jar hive_jdbc_adapter.jar; / CREATE OR REPLACE PYTHON ADAPTER SCRIPT test.adapter_dummy AS def adapter_call(in_json): return "BLABLA" / CREATE OR REPLACE LUA ADAPTER SCRIPT test.adapter_dummy AS function adapter_call(in_json): return 'BLABLA' / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_adapter_script_statement.yml000066400000000000000000000035661503426445100304310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2876e9d4310906922147c390e5da057f0abdc05750e58d23dea16bdd7a91f99b file: - statement: create_adapter_script: - keyword: CREATE - keyword: JAVA - keyword: ADAPTER - keyword: SCRIPT - script_reference: naked_identifier: my_script - keyword: AS - script_content: - percent: '%' - word: jar - word: hive_jdbc_adapter - dot: . - word: jar - semicolon: ; - function_script_terminator: / - statement: create_adapter_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PYTHON - keyword: ADAPTER - keyword: SCRIPT - script_reference: - naked_identifier: test - dot: . - naked_identifier: adapter_dummy - keyword: AS - script_content: - word: def - word: adapter_call - bracketed: start_bracket: ( word: in_json end_bracket: ) - colon: ':' - word: return - double_quote: '"BLABLA"' - function_script_terminator: / - statement: create_adapter_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: ADAPTER - keyword: SCRIPT - script_reference: - naked_identifier: test - dot: . 
- naked_identifier: adapter_dummy - keyword: AS - script_content: - word: function - word: adapter_call - bracketed: start_bracket: ( word: in_json end_bracket: ) - colon: ':' - word: return - single_quote: "'BLABLA'" - function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_connection.sql000066400000000000000000000010571503426445100254670ustar00rootroot00000000000000CREATE CONNECTION ftp_connection TO 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret'; ---- CREATE CONNECTION exa_connection TO '192.168.6.11..14:8563'; ---- CREATE CONNECTION ora_connection TO '(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.6.54)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = orcl)))'; ---- CREATE CONNECTION jdbc_connection_1 TO 'jdbc:mysql://192.168.6.1/my_db'; ---- CREATE CONNECTION jdbc_connection_2 TO 'jdbc:postgresql://192.168.6.2:5432/my_db?stringtype=unspecified'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_connection.yml000066400000000000000000000036501503426445100254720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47ec33db1ebe241612043c1c9f8f787697c7fb28196577520d9087bc9e47b936 file: - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: ftp_connection - keyword: TO - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: exa_connection - keyword: TO - connection_definition: quoted_literal: "'192.168.6.11..14:8563'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: ora_connection - keyword: TO - connection_definition: quoted_literal: "'(DESCRIPTION =\n (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.6.54)(PORT\ \ = 1521))\n (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = orcl)))'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: jdbc_connection_1 - keyword: TO - connection_definition: quoted_literal: "'jdbc:mysql://192.168.6.1/my_db'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: jdbc_connection_2 - keyword: TO - connection_definition: quoted_literal: "'jdbc:postgresql://192.168.6.2:5432/my_db?stringtype=unspecified'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_consumer_group.sql000066400000000000000000000007541503426445100264020ustar00rootroot00000000000000CREATE CONSUMER GROUP CEO WITH PRECEDENCE = '1000', CPU_WEIGHT = '900'; CREATE CONSUMER GROUP BI_TEAM WITH PRECEDENCE = '900', CPU_WEIGHT = '500', GROUP_TEMP_DB_RAM_LIMIT = '200G', USER_TEMP_DB_RAM_LIMIT = '100G'; CREATE CONSUMER GROUP TEST_TEAM WITH PRECEDENCE = '800', CPU_WEIGHT = '100', GROUP_TEMP_DB_RAM_LIMIT = '10G', SESSION_TEMP_DB_RAM_LIMIT = '5G', QUERY_TIMEOUT = 60, IDLE_TIMEOUT = 3600; CREATE CONSUMER GROUP "ADMIN" WITH PRECEDENCE = '1000', CPU_WEIGHT = '900'; 
sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_consumer_group.yml000066400000000000000000000070741503426445100264060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b68e18fa0efb06fb9aa80a7508f74a475029217d3fc56c76649cd894b3ee62ab file: - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: CEO - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1000'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: BI_TEAM - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'500'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'200G'" - comma: ',' - consumer_group_parameter: keyword: USER_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'100G'" - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: TEST_TEAM - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'100'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10G'" - comma: ',' - consumer_group_parameter: keyword: SESSION_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'5G'" - comma: ',' - consumer_group_parameter: keyword: QUERY_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - comma: ',' - consumer_group_parameter: keyword: IDLE_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '3600' - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1000'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_function_statement.sql000066400000000000000000000040551503426445100272420ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION percentage ( fraction DECIMAL, entirety DECIMAL) RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := (100*fraction)/entirety; RETURN res || ' %'; END percentage; / ---- CREATE FUNCTION hello () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := 
hello.world("no"); RETURN 'HELLO'; END hello; / ---- CREATE FUNCTION case_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := CASE WHEN input_variable < 0 THEN 0 ELSE input_variable END; RETURN res; END case_function; / ---- CREATE FUNCTION assignment_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := 'Hello World'; RETURN res; END assignment_function; / ---- CREATE FUNCTION if_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN IF input_variable = 0 THEN res := NULL; ELSEIF input_variable = 1 THEN res := 'HELLO'; ELSEIF input_variable = 2 THEN res := 'HALLO'; ELSE res := input_variable; END IF; RETURN res; END if_function; / ---- CREATE FUNCTION for_loop_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN FOR cnt := 1 TO input_variable DO res := res*2; END FOR; RETURN res; END for_loop_function; / ---- CREATE FUNCTION for_loop_function2 () RETURN VARCHAR(10) AS res DECIMAL; BEGIN FOR cnt IN 1..10 LOOP res := res*2; END LOOP; RETURN res; END for_loop_function2; / ---- CREATE FUNCTION for_loop_function3 () RETURN VARCHAR(10) AS res DECIMAL; BEGIN WHILE cnt <= input_variable DO res := res*2; cnt := cnt+1; END WHILE; RETURN res; END for_loop_function3; / CREATE FUNCTION schem.func ( p1 VARCHAR(6), p2 VARCHAR(10) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 IS NOT NULL AND p2 IS NOT NULL THEN IF p1 = 1 THEN res:= 'Hello World'; ELSE IF p2 = 3 THEN res:= 'ABC'; END IF; res:= 'WOHOOOO'; END IF; END IF; RETURN res; END schem.func; / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_function_statement.yml000066400000000000000000000417501503426445100272470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6862f52d714a58a784a1389f908431c78e94275c89d4ecfbee6eae48da5a0350 file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_reference: naked_identifier: percentage - bracketed: - start_bracket: ( - naked_identifier: fraction - data_type: keyword: DECIMAL - comma: ',' - naked_identifier: entirety - data_type: keyword: DECIMAL - end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := expression: bracketed: start_bracket: ( expression: numeric_literal: '100' binary_operator: '*' column_reference: naked_identifier: fraction end_bracket: ) binary_operator: / column_reference: naked_identifier: entirety statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res binary_operator: - pipe: '|' - pipe: '|' quoted_literal: "' %'" - statement_terminator: ; - keyword: END - function_reference: naked_identifier: percentage - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: hello - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := function: function_name: naked_identifier: hello dot: . 
function_name_identifier: world function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '"no"' end_bracket: ) statement_terminator: ; - keyword: RETURN - expression: quoted_literal: "'HELLO'" - statement_terminator: ; - keyword: END - function_reference: naked_identifier: hello - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: case_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: < numeric_literal: '0' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: naked_identifier: input_variable - keyword: END statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: case_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: assignment_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'Hello World'" statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: assignment_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: if_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - function_body: function_assignment: - variable: res - assignment_operator: := - variable: 'NULL' - statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'HELLO'" statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := 
quoted_literal: "'HALLO'" statement_terminator: ; - keyword: ELSE - function_body: function_assignment: - variable: res - assignment_operator: := - variable: input_variable - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: if_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_for_loop: - keyword: FOR - naked_identifier: cnt - assignment_operator: := - expression: numeric_literal: '1' - keyword: TO - expression: column_reference: naked_identifier: input_variable - keyword: DO - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function2 - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_for_loop: - keyword: FOR - naked_identifier: cnt - keyword: IN - expression: numeric_literal: '1' - range_operator: .. 
- expression: numeric_literal: '10' - keyword: LOOP - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function2 - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function3 - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_while_loop: - keyword: WHILE - expression: - column_reference: naked_identifier: cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: input_variable - keyword: DO - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - function_body: function_assignment: variable: cnt assignment_operator: := expression: column_reference: naked_identifier: cnt binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: END - keyword: WHILE - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function3 - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: - naked_identifier: schem - dot: . 
- naked_identifier: func - bracketed: - start_bracket: ( - naked_identifier: p1 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - comma: ',' - naked_identifier: p2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - keyword: IS - variable: res - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - statement_terminator: ; - keyword: BEGIN - function_body: function_if_branch: - keyword: IF - expression: - column_reference: naked_identifier: p1 - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: p2 - keyword: IS - keyword: NOT - null_literal: 'NULL' - keyword: THEN - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: p1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'Hello World'" statement_terminator: ; - keyword: ELSE - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: p2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'ABC'" statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'WOHOOOO'" statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: - naked_identifier: schem - dot: . - naked_identifier: func - statement_terminator: ; - function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_lua_script_bracket.sql000066400000000000000000000003451503426445100271670ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT BRACKET.SCRIPT_EXAMPLE RETURNS ROWCOUNT AS local _stmt = [[SOME ASSIGNMENT WITH OPEN BRACKET ( ]] x = 1 local _stmt = _stmt .. [[ ) ]] local _nsted = [=[one ([[two]] one]=] return 1 / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_lua_script_bracket.yml000066400000000000000000000024051503426445100271700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 63bc4f1ece0f16208c2155dffd001eb6fc1a592a49e5655d7fdad60cb8a23ec6 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: BRACKET - dot: . 
- naked_identifier: SCRIPT_EXAMPLE - keyword: RETURNS - keyword: ROWCOUNT - keyword: AS - script_content: - word: local - word: _stmt - equals: '=' - lua_multiline_quotes: '[[SOME ASSIGNMENT WITH OPEN BRACKET ( ]]' - word: x - equals: '=' - numeric_literal: '1' - word: local - word: _stmt - equals: '=' - word: _stmt - range_operator: .. - lua_multiline_quotes: '[[ ) ]]' - word: local - word: _nsted - equals: '=' - lua_nested_quotes: '[=[one ([[two]] one]=]' - word: return - numeric_literal: '1' function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_python_scalar_script.sql000066400000000000000000000010341503426445100275550ustar00rootroot00000000000000CREATE OR REPLACE PYTHON3 SCALAR SCRIPT MYSCHEMA.MYPYTHONSCRIPT( JSON_STR VARCHAR(2000000), LANGUAGE_KEY VARCHAR(50), TXT_KEY VARCHAR(50) ) EMITS ( X VARCHAR(2000000) ) AS """ /*==================================================================== e.g.: SELECT MYSCHEMA.MYPYTHONSCRIPT( '[{"@lang":"de-DE","$":"Krztxt"}, {"@lang":"en-GB","$":"Shrttxt"}]', '@lang', '$' ); ====================================================================*/ """ def run (ctx): pass / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_python_scalar_script.yml000066400000000000000000000060741503426445100275700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bf499a2c379af9da9489d8882663427bb624b81625f7b0af1a448b1325630ce4 file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - naked_identifier: PYTHON3 - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: MYSCHEMA - dot: . 
- naked_identifier: MYPYTHONSCRIPT - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: JSON_STR data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000000' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: LANGUAGE_KEY data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: TXT_KEY data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( column_datatype_definition: naked_identifier: X data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000000' end_bracket: ) end_bracket: ) - keyword: AS - script_content: - double_quote: "\"\"\"\n/*====================================================================\n\ \ e.g.:\n SELECT MYSCHEMA.MYPYTHONSCRIPT(\n '[{\"" - at_sign_literal: '@lang' - double_quote: '":"' - word: de - minus: '-' - word: DE - double_quote: '","' - dollar_literal: $ - double_quote: '":"' - word: Krztxt - double_quote: '"}, {"' - at_sign_literal: '@lang' - double_quote: '":"' - word: en - minus: '-' - word: GB - double_quote: '","' - dollar_literal: $ - double_quote: '":"' - word: Shrttxt - double_quote: "\"}]',\n '@lang',\n '$'\n );\n\ \ ====================================================================*/\n\ \"\"\"" - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: pass function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_role.sql000066400000000000000000000001051503426445100242620ustar00rootroot00000000000000CREATE ROLE test_role; CREATE ROLE "test_role"; CREATE ROLE [admin]; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_role.yml000066400000000000000000000015731503426445100242760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bd7ffab0539bafd2abf5b7da5f9aaba82cf5c5bcb4056b0facfa1f3518cb1861 file: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: test_role - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: identifier: '[admin]' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_schema.sql000066400000000000000000000000621503426445100245630ustar00rootroot00000000000000CREATE SCHEMA s1; CREATE SCHEMA IF NOT EXISTS s2; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_schema.yml000066400000000000000000000014171503426445100245720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 39c90362dd81f364e7ea8dead40e4456c183e67d257b4a15f6cf83378ce117cb file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement1.sql000066400000000000000000000002321503426445100317160ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT aschema.hello AS return 'HELLO' / -- and a second one CREATE OR REPLACE LUA SCRIPT aschema.world AS return 'WORLD' / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement1.yml000066400000000000000000000022021503426445100317170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 308917c43e3849e1bbab88f68f5713a47a6c8e4073b1abfef96e14496ad39ef9 file: - statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . - naked_identifier: hello - keyword: AS - script_content: word: return single_quote: "'HELLO'" - function_script_terminator: / - statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . - naked_identifier: world - keyword: AS - script_content: word: return single_quote: "'WORLD'" - function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement2.sql000066400000000000000000000001051503426445100317160ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT aschema.hello() AS return 'HELLO' / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement2.yml000066400000000000000000000015071503426445100317270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 65c1bbceecda9d10ec0f3fd093c6960d53987b947bf61e047e730d0c564d2370 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . 
- naked_identifier: hello - bracketed: start_bracket: ( end_bracket: ) - keyword: AS - script_content: word: return single_quote: "'HELLO'" function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement3.sql000066400000000000000000000003751503426445100317300ustar00rootroot00000000000000CREATE SCRIPT insert_low_high (param1, param2, param3) AS import('function_lib') -- accessing external function lowest, highest = function_lib.min_max(param1, param2, param3) query([[INSERT INTO t VALUES (:x, :y)]], {x=lowest, y=highest}) / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_scripting_lua_script_statement3.yml000066400000000000000000000032211503426445100317230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 196d20e7cb5e7a97af84c414c18bdeb92db09867d2e4325c19d251a1117a4fa5 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: SCRIPT - script_reference: naked_identifier: insert_low_high - bracketed: - start_bracket: ( - naked_identifier: param1 - comma: ',' - naked_identifier: param2 - comma: ',' - naked_identifier: param3 - end_bracket: ) - keyword: AS - script_content: - word: import - bracketed: start_bracket: ( single_quote: "'function_lib'" end_bracket: ) - word: lowest - comma: ',' - word: highest - equals: '=' - word: function_lib - dot: . - word: min_max - bracketed: - start_bracket: ( - word: param1 - comma: ',' - word: param2 - comma: ',' - word: param3 - end_bracket: ) - word: query - bracketed: - start_bracket: ( - lua_multiline_quotes: '[[INSERT INTO t VALUES (:x, :y)]]' - comma: ',' - start_curly_bracket: '{' - word: x - equals: '=' - word: lowest - comma: ',' - word: y - equals: '=' - word: highest - end_curly_bracket: '}' - end_bracket: ) function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_table_statement.sql000066400000000000000000000034051503426445100265020ustar00rootroot00000000000000CREATE TABLE myschema.t1 ( a VARCHAR(20) UTF8, b DECIMAL(24,4) NOT NULL COMMENT IS 'The B column', c DECIMAL DEFAULT 122, d DOUBLE, e TIMESTAMP DEFAULT CURRENT_TIMESTAMP, f BOOL); ---- CREATE TABLE "MYSCHEMA"."T2" AS (SELECT * FROM t1) WITH NO DATA; ---- CREATE OR REPLACE TABLE "MYSCHEMA".T2 AS SELECT a,b,c+1 AS c FROM t1; ---- CREATE TABLE t3 AS (SELECT count(*) AS my_count FROM t1) WITH NO DATA; ---- CREATE TABLE t4 LIKE t1; ---- CREATE TABLE t5 ( id int IDENTITY PRIMARY KEY DISABLE, LIKE t1 INCLUDING DEFAULTS, g DOUBLE, DISTRIBUTE BY a,b ); ---- CREATE TABLE t6 ( order_id INT, sales_id INT, order_price DOUBLE, order_date DATE, country VARCHAR(40), CONSTRAINT t6_pk PRIMARY KEY (order_id,sales_id), DISTRIBUTE BY order_id, PARTITION BY order_date) COMMENT IS 'a great table'; ---- CREATE OR REPLACE TABLE t8 (ref_id int CONSTRAINT FK_T5 REFERENCES t5 (id) DISABLE, b VARCHAR(20)); ---- CREATE TABLE IF NOT EXISTS SCHEM.TAB ( ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name' ) COMMENT IS 'a nice table'; ---- CREATE TABLE SCHEM.TAB ( ID DECIMAL(18, 0), C1 CHAR(1), CONSTRAINT PRIMARY KEY (id) ); ---- CREATE TABLE SCHEM.TAB ( ID DECIMAL(18, 0), C1 CHAR(1), CONSTRAINT "ADMIN" PRIMARY KEY (id) ); ---- CREATE TABLE SCHEM.TAB ( C1 CHAR(1) CONSTRAINT "ADMIN" PRIMARY KEY 
); ---- CREATE TABLE T AS SELECT * FROM A COMMENT IS 'BLABLA'; ---- CREATE TABLE "MYSCHEMA"."T2" AS SELECT * FROM t1 WITH NO DATA; ---- CREATE TABLE "MYSCHEMA"."T2" AS SELECT * FROM t1 WITH NO DATA COMMENT IS 'ABC'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_table_statement.yml000066400000000000000000000442701503426445100265110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74c9cc5c509062c54c2714634479bdf68a2c9df9ab8d38eb4cbc6d9e0530303e file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: a data_type: - keyword: VARCHAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - keyword: UTF8 - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: b data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '24' - comma: ',' - numeric_literal: '4' - end_bracket: ) column_constraint_segment: table_constraint_definition: - keyword: NOT - keyword: 'NULL' comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'The B column'" - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: c data_type: keyword: DECIMAL column_constraint_segment: keyword: DEFAULT numeric_literal: '122' - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: d data_type: keyword: DOUBLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: e data_type: keyword: TIMESTAMP column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: f data_type: keyword: BOOL - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: quoted_identifier: '"MYSCHEMA"' dot: . 
naked_identifier: T2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: c binary_operator: + numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t3 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t4 - table_like_clause: keyword: LIKE table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t5 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: id data_type: keyword: int column_constraint_segment: keyword: IDENTITY table_constraint_definition: - keyword: PRIMARY - keyword: KEY - keyword: DISABLE - comma: ',' - table_content_definition: table_like_clause: - keyword: LIKE - table_reference: naked_identifier: t1 - keyword: INCLUDING - keyword: DEFAULTS - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: g data_type: keyword: DOUBLE - comma: ',' - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t6 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_id data_type: keyword: INT - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: sales_id data_type: keyword: INT - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_price data_type: keyword: DOUBLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_date data_type: keyword: DATE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: country data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: t6_pk - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_id - comma: ',' - column_reference: 
naked_identifier: sales_id - end_bracket: ) - comma: ',' - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: order_id - comma: ',' - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'a great table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t8 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ref_id data_type: keyword: int column_constraint_segment: table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: FK_T5 - keyword: REFERENCES - table_reference: naked_identifier: t5 - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DISABLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: b data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) column_constraint_segment: keyword: IDENTITY table_constraint_definition: - keyword: CONSTRAINT - keyword: PRIMARY - keyword: KEY - keyword: DISABLE comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'without constraint name'" end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'a nice table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . 
- naked_identifier: TAB - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - quoted_identifier: '"ADMIN"' - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: table_constraint_definition: - keyword: CONSTRAINT - quoted_identifier: '"ADMIN"' - keyword: PRIMARY - keyword: KEY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: T - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'BLABLA'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'ABC'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_dot_syntax.sql000066400000000000000000000001461503426445100276050ustar00rootroot00000000000000CREATE PYTHON SCALAR SCRIPT sample_simple (...) EMITS (...) AS def run(ctx): ctx.emit(True, False) / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_dot_syntax.yml000066400000000000000000000023111503426445100276030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d331951eb3a334aad835fd5a4bc37ce40faf6927445dc9e90b48b67ad452fe9 file: statement: create_udf_script: - keyword: CREATE - keyword: PYTHON - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: sample_simple - bracketed: start_bracket: ( identifier: '...' end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( identifier: '...' end_bracket: ) - keyword: AS - script_content: - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: ctx - dot: . - word: emit - bracketed: - start_bracket: ( - word: 'True' - comma: ',' - word: 'False' - end_bracket: ) function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement1.sql000066400000000000000000000010331503426445100274720ustar00rootroot00000000000000CREATE LUA SCALAR SCRIPT my_average (a DOUBLE, b DOUBLE ORDER BY 1 desc) RETURNS DOUBLE AS function run(ctx) if ctx.a == nil or ctx.b==nil then return NULL end return (ctx.a+ctx.b)/2 end / CREATE LUA SCALAR SCRIPT my_average (a DOUBLE, b DOUBLE ORDER BY 1 desc) RETURNS DOUBLE AS function run(ctx) if ctx.a == nil or ctx.b==nil then return NULL end x = 10 / 2 return (ctx.a+ctx.b) / 2 end / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement1.yml000066400000000000000000000066671503426445100275160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0035382ef47bde6ebf1104b4b6b199d904ed536514329a25e2355d4a48b84cfb file: - statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: my_average - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: a data_type: keyword: DOUBLE - comma: ',' - column_datatype_definition: naked_identifier: b data_type: keyword: DOUBLE - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - keyword: desc - end_bracket: ) - keyword: RETURNS - data_type: keyword: DOUBLE - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: if - word: ctx - dot: . - word: a - equals: '=' - equals: '=' - word: nil - word: or - word: ctx - dot: . - word: b - equals: '=' - equals: '=' - word: nil - word: then - word: return - word: 'NULL' - word: end - word: return - bracketed: - start_bracket: ( - word: ctx - dot: . - word: a - plus: + - word: ctx - dot: . 
- word: b - end_bracket: ) - divide: / - numeric_literal: '2' - word: end - function_script_terminator: / - statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: my_average - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: a data_type: keyword: DOUBLE - comma: ',' - column_datatype_definition: naked_identifier: b data_type: keyword: DOUBLE - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - keyword: desc - end_bracket: ) - keyword: RETURNS - data_type: keyword: DOUBLE - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: if - word: ctx - dot: . - word: a - equals: '=' - equals: '=' - word: nil - word: or - word: ctx - dot: . - word: b - equals: '=' - equals: '=' - word: nil - word: then - word: return - word: 'NULL' - word: end - word: x - equals: '=' - numeric_literal: '10' - divide: / - numeric_literal: '2' - word: return - bracketed: - start_bracket: ( - word: ctx - dot: . - word: a - plus: + - word: ctx - dot: . - word: b - end_bracket: ) - divide: / - numeric_literal: '2' - word: end - function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement2.sql000066400000000000000000000004171503426445100275000ustar00rootroot00000000000000CREATE LUA SCALAR SCRIPT map_words(w varchar(10000)) EMITS (words varchar(100)) AS function run(ctx) local word = ctx.w if (word ~= null) then for i in unicode.utf8.gmatch(word,'([%w%p]+)') do ctx.emit(i) end end end / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement2.yml000066400000000000000000000044611503426445100275050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b1e70457ff1c5cb4148fdf5c1508ca843e899206e54ce96645df3a7082f04c8a file: statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: map_words - bracketed: start_bracket: ( column_datatype_definition: naked_identifier: w data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10000' end_bracket: ) end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( column_datatype_definition: naked_identifier: words data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: local - word: word - equals: '=' - word: ctx - dot: . - word: w - word: if - bracketed: - start_bracket: ( - word: word - like_operator: '~' - equals: '=' - word: 'null' - end_bracket: ) - word: then - word: for - word: i - word: in - word: unicode - dot: . - word: utf8 - dot: . - word: gmatch - bracketed: start_bracket: ( word: word comma: ',' single_quote: "'([%w%p]+)'" end_bracket: ) - word: do - word: ctx - dot: . 
- word: emit - bracketed: start_bracket: ( word: i end_bracket: ) - word: end - word: end - word: end function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement3.sql000066400000000000000000000001711503426445100274760ustar00rootroot00000000000000CREATE OR REPLACE PYTHON3 SCALAR SCRIPT LIB.MYLIB() RETURNS INT AS def helloWorld(): return "Hello Python3 World!" / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement3.yml000066400000000000000000000020551503426445100275030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 30a734ddc52c073e74df7b0740c178aa2be3a7e4a512ee8fa5d1362bab60984b file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - naked_identifier: PYTHON3 - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: LIB - dot: . - naked_identifier: MYLIB - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: INT - keyword: AS - script_content: - word: def - word: helloWorld - bracketed: start_bracket: ( end_bracket: ) - colon: ':' - word: return - double_quote: '"Hello Python3 World!"' function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement4.sql000066400000000000000000000002411503426445100274750ustar00rootroot00000000000000CREATE OR REPLACE PYTHON SCALAR SCRIPT TEST.MYHELLOWORLD() RETURNS VARCHAR(2000) AS l = exa.import_script('LIB.MYLIB') def run(ctx): return l.helloWorld() / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement4.yml000066400000000000000000000027501503426445100275060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fe65b28e9d608d823864e97397366c52b9e5b10f3947892f610afb4ac742d50 file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PYTHON - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: TEST - dot: . - naked_identifier: MYHELLOWORLD - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: AS - script_content: - word: l - equals: '=' - word: exa - dot: . - word: import_script - bracketed: start_bracket: ( single_quote: "'LIB.MYLIB'" end_bracket: ) - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: return - word: l - dot: . 
- word: helloWorld - bracketed: start_bracket: ( end_bracket: ) function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement5.sql000066400000000000000000000002521503426445100275000ustar00rootroot00000000000000CREATE OR REPLACE JAVA SCALAR SCRIPT LIB.MYLIB() RETURNS VARCHAR(2000) AS class MYLIB { static String helloWorld(){ return "Hello Java World!"; } } / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_udfscript_statement5.yml000066400000000000000000000025621503426445100275100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eda93f93740e93130882f680bf0e81885097e099232b612ead8f976e26c15e7c file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: JAVA - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: LIB - dot: . - naked_identifier: MYLIB - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: AS - script_content: - word: class - word: MYLIB - start_curly_bracket: '{' - word: static - word: String - word: helloWorld - bracketed: start_bracket: ( end_bracket: ) - start_curly_bracket: '{' - word: return - double_quote: '"Hello Java World!"' - semicolon: ; - end_curly_bracket: '}' - end_curly_bracket: '}' function_script_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_user.sql000066400000000000000000000004401503426445100243010ustar00rootroot00000000000000CREATE USER user_1 IDENTIFIED BY "h12_xhz"; CREATE USER user_2 IDENTIFIED AT LDAP AS 'cn=user_2,dc=authorization,dc=exasol,dc=com'; CREATE USER user_3 IDENTIFIED BY KERBEROS PRINCIPAL '@'; CREATE USER oidctestuser IDENTIFIED BY OPENID SUBJECT 'database-user@exasol.example'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_user.yml000066400000000000000000000032041503426445100243040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3e212857d697e4668fd07bc5525d9faf36abec4852435c9703a88df3dc0bea1f file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h12_xhz"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_2 - keyword: IDENTIFIED - ldap_auth: - keyword: AT - keyword: LDAP - keyword: AS - quoted_literal: "'cn=user_2,dc=authorization,dc=exasol,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_3 - keyword: IDENTIFIED - kerberos_auth: - keyword: BY - keyword: KERBEROS - keyword: PRINCIPAL - quoted_literal: "'@'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: oidctestuser - keyword: IDENTIFIED - openid_auth: - keyword: BY - keyword: OPENID - keyword: SUBJECT - quoted_literal: "'database-user@exasol.example'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_view_statement.sql000066400000000000000000000007331503426445100263660ustar00rootroot00000000000000CREATE VIEW my_view as (select x from t) COMMENT IS 'nice view'; CREATE VIEW my_view (col1 ) as (select x from t); CREATE OR REPLACE FORCE VIEW my_view as select y from t; CREATE OR REPLACE VIEW my_view (col_1 COMMENT IS 'something important',col2) as select max(y) from t; CREATE VIEW schem.few (col1 ) /* some view header */ as (select x from t); CREATE VIEW schem.few (col1 ) --- single line as (select x from t); --- CREATE VIEW T AS SELECT * FROM A COMMENT IS 'BLABLA'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_view_statement.yml000066400000000000000000000137171503426445100263760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e1e4c5c3d1717fbaffed773eb2113c9d604f7f1755f56e9418accf67563d0dd9 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: my_view - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'nice view'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: my_view - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FORCE - keyword: VIEW - view_reference: naked_identifier: my_view - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: y from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - view_reference: naked_identifier: my_view - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'something important'" - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: - naked_identifier: schem - dot: . - naked_identifier: few - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: - naked_identifier: schem - dot: . 
- naked_identifier: few - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: T - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'BLABLA'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_virtual_schema_statement.sql000066400000000000000000000003721503426445100304210ustar00rootroot00000000000000CREATE VIRTUAL SCHEMA hive USING adapter.jdbc_adapter WITH SQL_DIALECT = 'HIVE' CONNECTION_STRING = 'jdbc:hive2://localhost:10000/default' SCHEMA_NAME = 'default' USERNAME = 'hive-usr' PASSWORD = 'hive-pwd'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/create_virtual_schema_statement.yml000066400000000000000000000026001503426445100304170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54145d683ff084dc4eb4827435723a1529e7f21cdb1fef9795b7f59127a24159 file: statement: create_virtual_schema_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: hive - keyword: USING - object_reference: - naked_identifier: adapter - dot: . 
- naked_identifier: jdbc_adapter - keyword: WITH - parameter: SQL_DIALECT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'HIVE'" - parameter: CONNECTION_STRING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'jdbc:hive2://localhost:10000/default'" - parameter: SCHEMA_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: USERNAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive-usr'" - parameter: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive-pwd'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/data_type_test.sql000066400000000000000000000024441503426445100250170ustar00rootroot00000000000000CREATE TABLE t (c1 DECIMAL); CREATE TABLE t (c1 DECIMAL(10)); CREATE TABLE t (c1 DECIMAL(10,2)); CREATE TABLE t (c1 DEC(10,2)); CREATE TABLE t (c1 NUMERIC(10)); CREATE TABLE t (c1 NUMBER(10,2)); CREATE TABLE t (c1 BIGINT); CREATE TABLE t (c1 DOUBLE); CREATE TABLE t (c1 DOUBLE PRECISION); CREATE TABLE t (c1 FLOAT); CREATE TABLE t (c1 INT); CREATE TABLE t (c1 INTEGER); CREATE TABLE t (c1 REAL); CREATE TABLE t (c1 SHORTINT); CREATE TABLE t (c1 TINYINT); CREATE TABLE t (c1 SMALLINT); CREATE TABLE t (c1 BOOL); CREATE TABLE t (c1 BOOLEAN); CREATE TABLE t (c1 DATE); CREATE TABLE t (c1 TIMESTAMP); CREATE TABLE t (c1 TIMESTAMP WITH LOCAL TIME ZONE); CREATE TABLE t (c1 INTERVAL YEAR (1) TO MONTH); CREATE TABLE t (c1 INTERVAL DAY (2) TO SECOND(1)); CREATE TABLE t (c1 GEOMETRY(1000)); CREATE TABLE t (c1 HASHTYPE); CREATE TABLE t (c1 HASHTYPE(8 BYTE)); CREATE TABLE t (c1 HASHTYPE(8 BIT)); CREATE TABLE t (c1 CHAR(1)); CREATE TABLE t (c1 CHAR VARYING (1)); CREATE TABLE t (c1 VARCHAR(2000 CHAR)); CREATE TABLE t (c1 VARCHAR2(2000)); CREATE TABLE t (c1 VARCHAR(2000 BYTE)); CREATE TABLE t (c1 LONG VARCHAR); CREATE TABLE t (c1 CHARACTER LARGE OBJECT(1000)); CREATE TABLE t (c1 CHARACTER VARYING(1000)); CREATE TABLE t (c1 CLOB(2000)); CREATE TABLE t (c1 CLOB(2000) ASCII); CREATE TABLE t (c1 VARCHAR(2000 CHAR) UTF8); sqlfluff-3.4.2/test/fixtures/dialects/exasol/data_type_test.yml000066400000000000000000000512261503426445100250230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1145859a4e145cf9c986fec7cf06740385f18279926d2013bca49b17377c8f26 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DEC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: NUMERIC bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BIGINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DOUBLE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: DOUBLE - keyword: PRECISION end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: 
column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: FLOAT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: INT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: INTEGER end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: REAL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: SHORTINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: TINYINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: SMALLINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BOOL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BOOLEAN end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DATE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: TIMESTAMP - keyword: WITH - keyword: LOCAL - keyword: TIME - keyword: ZONE end_bracket: ) - statement_terminator: ; - statement: 
create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: INTERVAL - keyword: YEAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - keyword: TO - keyword: MONTH end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: INTERVAL - keyword: DAY - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - keyword: TO - keyword: SECOND - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: GEOMETRY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' keyword: BYTE end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' keyword: BIT end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHAR - keyword: VARYING - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR bracketed_arguments: bracketed: 
start_bracket: ( numeric_literal: '2000' keyword: CHAR end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' keyword: BYTE end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: LONG - keyword: VARCHAR end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHARACTER - keyword: LARGE - keyword: OBJECT - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHARACTER - keyword: VARYING - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: CLOB bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CLOB - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: ASCII end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: VARCHAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' keyword: CHAR end_bracket: ) - keyword: UTF8 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/delete_statement.sql000066400000000000000000000001771503426445100253350ustar00rootroot00000000000000DELETE FROM staff WHERE name='SMITH'; DELETE * FROM staff; DELETE FROM staff PREFERRING (LOW 
change_date) PARTITION BY emp_no; sqlfluff-3.4.2/test/fixtures/dialects/exasol/delete_statement.yml000066400000000000000000000030021503426445100253250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 034c7f7b3ce888d4d4e8a09f77a3cb0097277f05725d1368a694096a26b6235b file: - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: staff - where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SMITH'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - star: '*' - keyword: FROM - table_reference: naked_identifier: staff - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: staff - preferring_clause: keyword: PREFERRING bracketed: start_bracket: ( preference_term: keyword: LOW column_reference: naked_identifier: change_date end_bracket: ) partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: emp_no - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_cascade_restrict_statement.sql000066400000000000000000000001111503426445100304050ustar00rootroot00000000000000DROP VIEW IF EXISTS my_view RESTRICT; DROP FUNCTION my_function CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_cascade_restrict_statement.yml000066400000000000000000000014641503426445100304230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0e330e5f9012ac663311a479e2de220a366ba3e3b494fc360efbdf734d33a5d file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - view_reference: naked_identifier: my_view - keyword: RESTRICT - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: my_function - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_cascade_statement.sql000066400000000000000000000001561503426445100264770ustar00rootroot00000000000000DROP USER test_user1;DROP USER test_user2 CASCADE; DROP ROLE myrole; DROP USER "ADMIN"; DROP ROLE "Important" sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_cascade_statement.yml000066400000000000000000000022661503426445100265050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d9fd30c72dc770f52417ead9cd818a71d9634e0c07cd5c668b91b5bce95d9c6c file: - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: test_user1 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: test_user2 - keyword: CASCADE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - role_reference: naked_identifier: myrole - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - role_reference: quoted_identifier: '"Important"' sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_connection_statement.sql000066400000000000000000000001411503426445100272450ustar00rootroot00000000000000DROP CONNECTION my_connection; DROP CONNECTION IF EXISTS my_connection; DROP CONNECTION "myCon"; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_connection_statement.yml000066400000000000000000000015751503426445100272630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be64518afe0906f729329eabb5c72bea9de42b01a1f0f30421213d9b1073ffee file: - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - naked_identifier: my_connection - statement_terminator: ; - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - naked_identifier: my_connection - statement_terminator: ; - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - quoted_identifier: '"myCon"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_consumer_group.sql000066400000000000000000000000661503426445100260770ustar00rootroot00000000000000DROP CONSUMER GROUP CEO; DROP CONSUMER GROUP "ADMIN"; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_consumer_group.yml000066400000000000000000000013371503426445100261030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a883a865ea068fef2ebbb05389eee862981840f9dd1ff7c602154440c7f9988d file: - statement: drop_consumer_group_statement: - keyword: DROP - keyword: CONSUMER - keyword: GROUP - naked_identifier: CEO - statement_terminator: ; - statement: drop_consumer_group_statement: - keyword: DROP - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_schema_statement.sql000066400000000000000000000002041503426445100263460ustar00rootroot00000000000000DROP FORCE SCHEMA my_schema; DROP SCHEMA IF EXISTS my_schema; DROP SCHEMA my_schema CASCADE; DROP VIRTUAL SCHEMA my_virtual_schema; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_schema_statement.yml000066400000000000000000000022351503426445100263560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c929968024c9d11e1906f33bb69d97f8fdfde93c2de59a3ca4f4c3895234ac48 file: - statement: drop_schema_statement: - keyword: DROP - keyword: FORCE - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: my_virtual_schema - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_script_statement.sql000066400000000000000000000001171503426445100264150ustar00rootroot00000000000000DROP SCRIPT my_script; DROP ADAPTER SCRIPT IF EXISTS my_schema.ADAPTER_SCRIPT; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_script_statement.yml000066400000000000000000000015211503426445100264170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc2412a6f4b64cadb5905abeb2c69cfdd39803647f0ed2f75c2a2bc4957c3bb3 file: - statement: drop_script_statement: - keyword: DROP - keyword: SCRIPT - script_reference: naked_identifier: my_script - statement_terminator: ; - statement: drop_script_statement: - keyword: DROP - keyword: ADAPTER - keyword: SCRIPT - keyword: IF - keyword: EXISTS - script_reference: - naked_identifier: my_schema - dot: . - naked_identifier: ADAPTER_SCRIPT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_table_statement.sql000066400000000000000000000001361503426445100262010ustar00rootroot00000000000000DROP TABLE my_table; DROP TABLE IF EXISTS "MY_SCHEMA"."MY_TABLE" CASCADE CASCADE CONSTRAINTS; sqlfluff-3.4.2/test/fixtures/dialects/exasol/drop_table_statement.yml000066400000000000000000000016001503426445100262000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 822913cd2e24616f0fced92ef4c7e9950759d8c638e53e6c556f8afb4362cc6d file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - quoted_identifier: '"MY_SCHEMA"' - dot: . - quoted_identifier: '"MY_TABLE"' - keyword: CASCADE - keyword: CASCADE - keyword: CONSTRAINTS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/execute_script.sql000066400000000000000000000002271503426445100250310ustar00rootroot00000000000000EXECUTE SCRIPT script_1; EXECUTE SCRIPT script_1 WITH OUTPUT; EXECUTE SCRIPT script_2 (1,3,'ABC') WITH OUTPUT; EXECUTE SCRIPT script_3 (ARRAY(3,4,5)); sqlfluff-3.4.2/test/fixtures/dialects/exasol/execute_script.yml000066400000000000000000000036761503426445100250460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2bd159c1a5962d6a4ad92c04d3939f74ac676348858475f8fb146166f77169a file: - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_1 - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_1 - keyword: WITH - keyword: OUTPUT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_2 - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'ABC'" - end_bracket: ) - keyword: WITH - keyword: OUTPUT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_3 - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/explain_virtual.sql000066400000000000000000000003241503426445100252070ustar00rootroot00000000000000SELECT pushdown_id, pushdown_involved_tables, pushdown_sql FROM (EXPLAIN VIRTUAL SELECT * FROM vs_impala.sample_07 WHERE total_emp>10000); EXPLAIN VIRTUAL SELECT * FROM vs_impala.sample_07 WHERE total_emp>10000; sqlfluff-3.4.2/test/fixtures/dialects/exasol/explain_virtual.yml000066400000000000000000000060021503426445100252100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d88e69d20b6a87900b65adc3bb71901858ca2796d0deedf0bf8cff02b159bee file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: pushdown_id - comma: ',' - select_clause_element: column_reference: naked_identifier: pushdown_involved_tables - comma: ',' - select_clause_element: column_reference: naked_identifier: pushdown_sql from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: explain_virtual_statement: - keyword: EXPLAIN - keyword: VIRTUAL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: vs_impala - dot: . - naked_identifier: sample_07 where_clause: keyword: WHERE expression: column_reference: naked_identifier: total_emp comparison_operator: raw_comparison_operator: '>' numeric_literal: '10000' end_bracket: ) - statement_terminator: ; - statement: explain_virtual_statement: - keyword: EXPLAIN - keyword: VIRTUAL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: vs_impala - dot: . - naked_identifier: sample_07 where_clause: keyword: WHERE expression: column_reference: naked_identifier: total_emp comparison_operator: raw_comparison_operator: '>' numeric_literal: '10000' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/export_statement.sql000066400000000000000000000025411503426445100254110ustar00rootroot00000000000000EXPORT tab1 INTO CSV AT 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1.csv' COLUMN SEPARATOR = ';' ENCODING = 'Latin1' WITH COLUMN NAMES; ---- EXPORT tab1 INTO CSV AT 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1.csv' ( 1 FORMAT='DD.MM.YYYY', 2..3 DELIMIT=NEVER ) COLUMN SEPARATOR = ';' ENCODING = 'Latin1' WITH COLUMN NAMES; ---- EXPORT (SELECT * FROM T WHERE id=3295) INTO FBV AT my_connection FILE 't1.fbv' FILE 't2.fbv' REPLACE; ---- EXPORT (SELECT * FROM my_view) INTO EXA AT '192.168.6.11..14:8563' USER 'my_user' IDENTIFIED BY 'my_secret' TABLE my_schema.my_table CREATED BY 'CREATE TABLE my_table(order_id INT, price DEC(18,2))'; ---- EXPORT tab1 INTO JDBC DRIVER='MSSQL' AT 'jdbc:sqlserver://dbserver;databaseName=testdb' USER 'agent_007' IDENTIFIED BY 'secret' TABLE my_schema.tab1; ---- EXPORT tab1 INTO CSV AT 'http://HadoopNode:50070/webhdfs/v1/tmp' FILE 'file.csv?op=CREATE&user.name=user'; ---- EXPORT tab1 INTO CSV AT 'https://testbucket.s3.amazonaws.com' USER '' IDENTIFIED BY '' FILE 'file.csv'; ---- EXPORT tab1 INTO SCRIPT etl.export_hcat_table WITH HCAT_DB = 'default' HCAT_TABLE = 'my_hcat_table' HCAT_ADDRESS = 'hcatalog-server:50111' HDFS_USER = 'hdfs'; ---- EXPORT tab1 INTO LOCAL CSV FILE '/tmp/my_table.csv' COLUMN SEPARATOR = ';'; ---- sqlfluff-3.4.2/test/fixtures/dialects/exasol/export_statement.yml000066400000000000000000000210031503426445100254050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9c961da0bce1e2ea88c7c2c6b701f1016f38f93aa7b6b1d03642ae7e5b87d034 file: - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Latin1'" - keyword: WITH - keyword: COLUMN - keyword: NAMES - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1.csv'" - csv_cols: bracketed: - start_bracket: ( - numeric_literal: '1' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD.MM.YYYY'" - comma: ',' - numeric_literal: '2' - range_operator: .. - numeric_literal: '3' - keyword: DELIMIT - comparison_operator: raw_comparison_operator: '=' - keyword: NEVER - end_bracket: ) - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Latin1'" - keyword: WITH - keyword: COLUMN - keyword: NAMES - statement_terminator: ; - statement: export_statement: keyword: EXPORT bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '3295' end_bracket: ) export_into_clause: keyword: INTO import_file: - keyword: FBV - keyword: AT - connection_definition: naked_identifier: my_connection - keyword: FILE - quoted_literal: "'t1.fbv'" - keyword: FILE - quoted_literal: "'t2.fbv'" - file_opts: keyword: REPLACE - statement_terminator: ; - statement: export_statement: keyword: EXPORT bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_view end_bracket: ) export_into_clause: keyword: INTO import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: - quoted_literal: "'192.168.6.11..14:8563'" - keyword: USER - quoted_literal: "'my_user'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'my_secret'" - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - keyword: CREATED - keyword: BY - quoted_literal: "'CREATE TABLE my_table(order_id INT, price DEC(18,2))'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_export_dbsrc: - keyword: JDBC - keyword: DRIVER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MSSQL'" - keyword: AT - connection_definition: - quoted_literal: "'jdbc:sqlserver://dbserver;databaseName=testdb'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: tab1 - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: quoted_literal: "'http://HadoopNode:50070/webhdfs/v1/tmp'" - keyword: FILE - quoted_literal: "'file.csv?op=CREATE&user.name=user'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'https://testbucket.s3.amazonaws.com'" - keyword: USER - quoted_literal: "''" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "''" - keyword: FILE - quoted_literal: "'file.csv'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_script: - keyword: SCRIPT - object_reference: - naked_identifier: etl - dot: . - naked_identifier: export_hcat_table - keyword: WITH - parameter: HCAT_DB - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: HCAT_TABLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_hcat_table'" - parameter: HCAT_ADDRESS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hcatalog-server:50111'" - parameter: HDFS_USER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: LOCAL - keyword: CSV - keyword: FILE - quoted_literal: "'/tmp/my_table.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/flush_statistics.sql000066400000000000000000000000221503426445100253670ustar00rootroot00000000000000FLUSH STATISTICS; sqlfluff-3.4.2/test/fixtures/dialects/exasol/flush_statistics.yml000066400000000000000000000010001503426445100253660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
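#
# Note on the tree shape below: fixtures whose source SQL contains a single
# statement (as here, `FLUSH STATISTICS;`) nest a lone `statement:` mapping
# directly under `file:`, whereas the multi-statement fixtures in this
# directory use a YAML list of `- statement:` entries.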
_hash: cf3dae76baed74833328c2789c63a1361844b4f116d93ce1dfe781f9969f0f53 file: statement: flush_statistics_statement: - keyword: FLUSH - keyword: STATISTICS statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/import_statement.sql000066400000000000000000000035431503426445100254050ustar00rootroot00000000000000IMPORT INTO table_3 (col1, col2, col4) FROM ORA AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' STATEMENT ' SELECT * FROM orders WHERE order_state=''OK'' ' ERRORS INTO error_table (CURRENT_TIMESTAMP) REJECT LIMIT 10 ; ---- IMPORT INTO table_3 (col1, col2, col4) FROM ORA AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' TABLE a.tab (c1,c2,c3) ERRORS INTO error_table (CURRENT_TIMESTAMP) REJECT LIMIT 10 ; ---- IMPORT INTO table_1 FROM CSV AT 'http://192.168.1.1:8080/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1_part1.csv' FILE 'tab1_part2.csv' ( 1 FORMAT='DD-MM-YYYY', 2..4 FORMAT='YYYYMMDD' ) COLUMN SEPARATOR = ';' SKIP = 5; ---- IMPORT INTO table_2 FROM FBV AT my_fileserver FILE 'tab2_part1.fbv' ( SIZE=8 PADDING='+' ALIGN=RIGHT, SIZE=4, SIZE=8, SIZE=32 FORMAT='DD-MM-YYYY' ) TRIM ; ---- IMPORT INTO table_7 FROM SCRIPT etl.import_hcat_table AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' WITH HCAT_DB = 'default' HCAT_TABLE = 'my_hcat_table' HCAT_ADDRESS = 'hcatalog-server:50111' HDFS_USER = 'hdfs'; ---- IMPORT INTO table_4 FROM JDBC DRIVER='MSSQL' AT 'jdbc:sqlserver://dbserver;databaseName=testdb' USER 'agent_008' IDENTIFIED BY 'secret' STATEMENT ' SELECT * FROM orders WHERE order_state=''OK'' '; ---- IMPORT INTO table_5 FROM CSV AT 'http://HadoopNode:50070/webhdfs/v1/tmp' FILE 'file.csv?op=OPEN&user.name=user'; ---- IMPORT INTO table_6 FROM EXA AT my_exasol TABLE MY_SCHEMA.MY_TABLE; ---- IMPORT INTO (LIKE CAT) FROM EXA AT my_exa_conn STATEMENT ' SELECT OBJECT_NAME, OBJECT_TYPE FROM EXA_USER_OBJECTS WHERE OBJECT_TYPE IN (''TABLE'', ''VIEW'') '; ---- IMPORT INTO table_8 FROM LOCAL CSV FILE '~/my_table.csv' COLUMN SEPARATOR = ';' SKIP = 5; ---- IMPORT INTO table_1 FROM CSV AT 'https://.s3-.amazonaws.com/' USER '' IDENTIFIED BY '' FILE 'file.csv'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/import_statement.yml000066400000000000000000000264001503426445100254040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
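#
# A tree like the one below can also be inspected ad hoc with the SQLFluff
# CLI, e.g.:
#   sqlfluff parse --dialect exasol test/fixtures/dialects/exasol/import_statement.sql
# (the CLI renders the tree differently from this YAML serialisation, but the
# segment names match).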
_hash: fe303913b29b3c4ce818088db1202612f82f8da278b5c417aca917c057aa90ad file: - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_3 - bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col4 end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: ORA - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: STATEMENT - quoted_literal: "' SELECT * FROM orders WHERE order_state=''OK'' '" import_errors_clause: - keyword: ERRORS - keyword: INTO - import_error_destination: table_reference: naked_identifier: error_table - bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - reject_clause: - keyword: REJECT - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_3 - bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col4 end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: ORA - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: TABLE - table_reference: - naked_identifier: a - dot: . - naked_identifier: tab - bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - comma: ',' - naked_identifier: c3 end_bracket: ) import_errors_clause: - keyword: ERRORS - keyword: INTO - import_error_destination: table_reference: naked_identifier: error_table - bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - reject_clause: - keyword: REJECT - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_1 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'http://192.168.1.1:8080/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1_part1.csv'" - keyword: FILE - quoted_literal: "'tab1_part2.csv'" - csv_cols: bracketed: - start_bracket: ( - numeric_literal: '1' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD-MM-YYYY'" - comma: ',' - numeric_literal: '2' - range_operator: .. 
- numeric_literal: '4' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYYMMDD'" - end_bracket: ) - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: SKIP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_2 - import_from_clause: keyword: FROM import_file: - keyword: FBV - keyword: AT - connection_definition: naked_identifier: my_fileserver - keyword: FILE - quoted_literal: "'tab2_part1.fbv'" - fbv_cols: bracketed: - start_bracket: ( - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8' - keyword: PADDING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'+'" - keyword: ALIGN - comparison_operator: raw_comparison_operator: '=' - keyword: RIGHT - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8' - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '32' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD-MM-YYYY'" - end_bracket: ) - file_opts: keyword: TRIM - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_7 - import_from_clause: keyword: FROM import_script: - keyword: SCRIPT - object_reference: - naked_identifier: etl - dot: . - naked_identifier: import_hcat_table - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: WITH - parameter: HCAT_DB - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: HCAT_TABLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_hcat_table'" - parameter: HCAT_ADDRESS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hcatalog-server:50111'" - parameter: HDFS_USER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs'" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_4 - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: JDBC - keyword: DRIVER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MSSQL'" - keyword: AT - connection_definition: - quoted_literal: "'jdbc:sqlserver://dbserver;databaseName=testdb'" - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: STATEMENT - quoted_literal: "' SELECT * FROM orders WHERE order_state=''OK'' '" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_5 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: quoted_literal: "'http://HadoopNode:50070/webhdfs/v1/tmp'" - keyword: FILE - quoted_literal: "'file.csv?op=OPEN&user.name=user'" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_6 - import_from_clause: keyword: FROM 
import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_exasol - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - bracketed: start_bracket: ( import_columns: table_like_clause: keyword: LIKE table_reference: naked_identifier: CAT end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_exa_conn - keyword: STATEMENT - quoted_literal: "' SELECT OBJECT_NAME, OBJECT_TYPE FROM EXA_USER_OBJECTS\ \ WHERE OBJECT_TYPE IN (''TABLE'', ''VIEW'') '" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_8 - import_from_clause: keyword: FROM import_file: - keyword: LOCAL - keyword: CSV - keyword: FILE - quoted_literal: "'~/my_table.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: SKIP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_1 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'https://.s3-.amazonaws.com/'" - keyword: USER - quoted_literal: "''" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "''" - keyword: FILE - quoted_literal: "'file.csv'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/insert_statement.sql000066400000000000000000000012221503426445100253670ustar00rootroot00000000000000INSERT INTO t (n1, n2, t1) VALUES (1, 2.34, 'abc'); INSERT INTO t VALUES (2, 1.56, 'ghi'), (3, 5.92, 'pqr'); INSERT INTO t VALUES (4, DEFAULT, 'xyz'); INSERT INTO t (i,k) SELECT * FROM u; INSERT INTO t (i) SELECT max(j) FROM u; INSERT INTO t DEFAULT VALUES; INSERT INTO t (SELECT * FROM u); INSERT INTO s.t(c1, c2, c3) VALUES((SELECT x FROM y), 'val1', 'val2'); INSERT INTO t (adate) values(current_timestamp); INSERT INTO t VALUES BETWEEN 1 AND 100; INSERT INTO t (i) VALUES BETWEEN 1 AND 100 WITH STEP 4; INSERT INTO t (d1, n1, v1) values (add_days(current_date, -1), 15.0, 'myvalue'); INSERT INTO t (d1, n1, v1) values (current_date +1, 15.0, 'myvalue'); sqlfluff-3.4.2/test/fixtures/dialects/exasol/insert_statement.yml000066400000000000000000000220361503426445100253770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
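#
# Two notable shapes appear below: a bare `DEFAULT` inside a VALUES tuple
# parses as a plain keyword rather than an expression, and the
# Exasol-specific `INSERT ... VALUES BETWEEN x AND y [WITH STEP n]` form is
# captured by the dedicated `values_range_clause` segment.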
_hash: 5ac3604a02fb0eefccdc77be014f90b164910266af4104f4933ce38b2531cfa9 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: n1 - comma: ',' - column_reference: naked_identifier: n2 - comma: ',' - column_reference: naked_identifier: t1 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2.34' - comma: ',' - quoted_literal: "'abc'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '1.56' - comma: ',' - quoted_literal: "'ghi'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5.92' - comma: ',' - quoted_literal: "'pqr'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - keyword: DEFAULT - comma: ',' - quoted_literal: "'xyz'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - comma: ',' - column_reference: naked_identifier: k - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s - dot: . 
- naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: y end_bracket: ) - comma: ',' - quoted_literal: "'val1'" - comma: ',' - quoted_literal: "'val2'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: adate end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '100' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '100' - keyword: WITH - keyword: STEP - numeric_literal: '4' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: d1 - comma: ',' - column_reference: naked_identifier: n1 - comma: ',' - column_reference: naked_identifier: v1 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: add_days function_contents: bracketed: - start_bracket: ( - expression: bare_function: current_date - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - numeric_literal: '15.0' - comma: ',' - quoted_literal: "'myvalue'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: d1 - comma: ',' - column_reference: naked_identifier: n1 - comma: ',' - column_reference: naked_identifier: v1 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: bare_function: current_date binary_operator: + numeric_literal: '1' - comma: ',' - numeric_literal: '15.0' - comma: ',' - quoted_literal: "'myvalue'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/kill_statement.sql000066400000000000000000000002771503426445100250270ustar00rootroot00000000000000KILL STATEMENT IN SESSION 7792436882684342285; KILL SESSION 7792436882684342285; KILL SESSION CURRENT_SESSION; KILL STATEMENT 1234 IN SESSION 7792436882684342285 WITH MESSAGE 'not allowed!'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/kill_statement.yml000066400000000000000000000022161503426445100250240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and 
should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d25d38d3533e5f94df45860f2deeaf79a70862202b10e93cb83cfc6fa702f738 file: - statement: kill_statement: - keyword: KILL - keyword: STATEMENT - keyword: IN - keyword: SESSION - numeric_literal: '7792436882684342285' - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: SESSION - numeric_literal: '7792436882684342285' - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: SESSION - keyword: CURRENT_SESSION - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: STATEMENT - numeric_literal: '1234' - keyword: IN - keyword: SESSION - numeric_literal: '7792436882684342285' - keyword: WITH - keyword: MESSAGE - quoted_literal: "'not allowed!'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/merge_statement.sql000066400000000000000000000010051503426445100251610ustar00rootroot00000000000000MERGE INTO staff T USING changes U ON T.name = U.name WHEN MATCHED THEN UPDATE SET T.salary = U.salary, T.lastChange = CURRENT_DATE WHERE T.salary < U.salary WHEN NOT MATCHED THEN INSERT VALUES (U.name,U.salary,CURRENT_DATE); ---- MERGE INTO staff T USING (SELECT name FROM X) U ON T.name = U.name WHEN MATCHED THEN DELETE; --- MERGE INTO staff T USING (SELECT name FROM X) U ON T.name = U.name WHEN NOT MATCHED THEN INSERT VALUES (1,2,3) WHEN MATCHED THEN DELETE; sqlfluff-3.4.2/test/fixtures/dialects/exasol/merge_statement.yml000066400000000000000000000145641503426445100252010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4d138880f1919176af50be3073357c3b570fd5e695c4e10bceafe0075c0e799 file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - table_reference: naked_identifier: changes - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: T - dot: . - naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary - comma: ',' - set_clause: column_reference: - naked_identifier: T - dot: . - naked_identifier: lastChange comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_DATE where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: salary - comparison_operator: raw_comparison_operator: < - column_reference: - naked_identifier: U - dot: . 
- naked_identifier: salary merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: name - comma: ',' - expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary - comma: ',' - expression: bare_function: CURRENT_DATE - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: X end_bracket: ) - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: X end_bracket: ) - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/open_close_schema.sql000066400000000000000000000000641503426445100254500ustar00rootroot00000000000000OPEN SCHEMA test; OPEN SCHEMA "test"; CLOSE SCHEMA; sqlfluff-3.4.2/test/fixtures/dialects/exasol/open_close_schema.yml000066400000000000000000000015031503426445100254510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
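#
# The pair of OPEN SCHEMA statements below illustrates the identifier split
# used throughout these fixtures: an unquoted name parses as
# `naked_identifier`, while a double-quoted name is kept verbatim (quotes
# included) as `quoted_identifier`.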
_hash: c35b9191fd265edef08cbb13dbc510e4c3887e8f5ddb58a18e60337dedd4c446 file: - statement: open_schema_statement: - keyword: OPEN - keyword: SCHEMA - schema_reference: naked_identifier: test - statement_terminator: ; - statement: open_schema_statement: - keyword: OPEN - keyword: SCHEMA - schema_reference: quoted_identifier: '"test"' - statement_terminator: ; - statement: close_schema_statement: - keyword: CLOSE - keyword: SCHEMA - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/preload_statement.sql000066400000000000000000000003141503426445100255120ustar00rootroot00000000000000PRELOAD TABLE t(i); PRELOAD DATABASE; PRELOAD TABLES t1,t2; PRELOAD SCHEMAS s1,s2; PRELOAD SCHEMA s1; TRUNCATE AUDIT LOGS; TRUNCATE AUDIT LOGS KEEP LAST MONTH; TRUNCATE AUDIT LOGS KEEP FROM '2019-01-01'; sqlfluff-3.4.2/test/fixtures/dialects/exasol/preload_statement.yml000066400000000000000000000036431503426445100255240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 78fd1d18e0207bf9ceed985e36e263b31aecfb436bdfae86a07ae6bacf754792 file: - statement: preload_statement: - keyword: PRELOAD - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: DATABASE - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: TABLES - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: SCHEMAS - schema_reference: naked_identifier: s1 - comma: ',' - schema_reference: naked_identifier: s2 - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - keyword: KEEP - keyword: LAST - keyword: MONTH - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - keyword: KEEP - keyword: FROM - quoted_literal: "'2019-01-01'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/recompress_statement.sql000066400000000000000000000001011503426445100262400ustar00rootroot00000000000000RECOMPRESS TABLE t1 (column_1); RECOMPRESS TABLES t2,t3 ENFORCE; sqlfluff-3.4.2/test/fixtures/dialects/exasol/recompress_statement.yml000066400000000000000000000017051503426445100262550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
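#
# As the segment name suggests, `recompress_reorganize_statement` below
# appears to be shared by RECOMPRESS and the analogous REORGANIZE statement;
# only the RECOMPRESS form is exercised by this fixture's SQL.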
_hash: b87f2a72fec13d09f3cbdb1612300c8e0a8180987f9d1dfcdcf15ca928899fa1 file: - statement: recompress_reorganize_statement: - keyword: RECOMPRESS - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_reference: naked_identifier: column_1 end_bracket: ) - statement_terminator: ; - statement: recompress_reorganize_statement: - keyword: RECOMPRESS - keyword: TABLES - table_reference: naked_identifier: t2 - comma: ',' - table_reference: naked_identifier: t3 - keyword: ENFORCE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/rename_statement.sql000066400000000000000000000001321503426445100253310ustar00rootroot00000000000000RENAME SCHEMA s1 TO s2; RENAME TABLE t1 TO t2; RENAME s2.t3 TO t4; RENAME s2.t3 TO s2.t4; sqlfluff-3.4.2/test/fixtures/dialects/exasol/rename_statement.yml000066400000000000000000000025651503426445100253470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ce3aa039eb41a592cc11abb2b60f23315a74ffa3a705616f63f2b3223fa5c590 file: - statement: rename_statement: - keyword: RENAME - keyword: SCHEMA - object_reference: naked_identifier: s1 - keyword: TO - object_reference: naked_identifier: s2 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: TO - object_reference: naked_identifier: t2 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - object_reference: - naked_identifier: s2 - dot: . - naked_identifier: t3 - keyword: TO - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - object_reference: - naked_identifier: s2 - dot: . - naked_identifier: t3 - keyword: TO - object_reference: - naked_identifier: s2 - dot: . 
- naked_identifier: t4 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/select_statement.sql000066400000000000000000000112071503426445100253460ustar00rootroot00000000000000SELECT last_name, employee_id id, manager_id mgr_id, CONNECT_BY_ISLEAF leaf, LEVEL, LPAD(' ', 2*LEVEL-1)||SYS_CONNECT_BY_PATH(last_name, '/') "PATH" FROM employees CONNECT BY PRIOR employee_id = manager_id AND dept_no = dno START WITH last_name = 'Clark' ORDER BY employee_id; ---- SELECT store, SUM(price) AS volume FROM sales GROUP BY store ORDER BY store DESC; ---- SELECT name, SUM(price) AS volume FROM customers JOIN sales USING (c_id) GROUP BY name ORDER BY name; ---- WITH tmp_view AS (SELECT name, price, store FROM customers, sales WHERE customers.c_id=sales.c_id) SELECT sum(price) AS volume, name, store FROM tmp_view GROUP BY GROUPING SETS (name,store,()); ---- SELECT * FROM (IMPORT INTO (v VARCHAR(1)) FROM EXA AT my_connection TABLE sys.dual); ---- SELECT aschema.afunction('hello', 123) FROM aschema.mytable WHERE (a,2,substr(c,1,3)) IN (SELECT a,b,c FROM bschema.yourtable); ---- WITH mylist AS ( VALUES ('a','b','c'), ('d','e','f'), (f1('a'),'b','d') AS mylist (a,b,c) ) SELECT * from mylist; ---- SELECT rowid, ROW_NUMBER () OVER ( PARTITION BY ( col1, col2 ) ORDER BY col1 DESC, col2 DESC ); ---- SELECT rowid, ROW_NUMBER () OVER ( PARTITION BY ( col1, col2 )) ORDER BY col1 DESC, col2 DESC; ---- SELECT x WITH INVALID UNIQUE(myid) FROM t; ---- SELECT * FROM values('x', 'y'); ---- SELECT * FROM values('x', 'y') AS x(c1,c2); ---- SELECT * FROM values(('x','2'), ('y','2')) AS x(c1,c2); ---- SELECT * FROM(VALUES 1,2,3); ---- SELECT * FROM(VALUES 1,2,3) AS xs(n1); ---- SELECT * FROM VALUES BETWEEN 1 AND 15 WITH STEP 4; ---- SELECT first_name,name WITH INVALID FOREIGN KEY (nr) from T1 REFERENCING T2 (id); ---- SELECT * WITH INVALID FOREIGN KEY (first_name,name) from T1 REFERENCING T2; ---- SELECT INVALID FOREIGN KEY (nr,first_name,name) from T1 REFERENCING T2 (id, first_name,name); ---- SELECT * INTO TABLE t2 FROM t1 ORDER BY 1; ---- SELECT date'2021-09-21' FROM dual; ---- SELECT INVALID PRIMARY KEY (first_name) from T1; ---- SELECT JSON_EXTRACT(json_str, '$."@id"', '$.error()') EMITS ( id VARCHAR(2000), error_column VARCHAR(2000000) ) FROM t; ---- SELECT 10 / 2; ---- select count(*) as a, local.a*10 from x; ---- SELECT ABS(x) AS x FROM t WHERE local.x>10; ---- SELECT c1 as cx, count(*) as cc FROM x GROUP BY local.cx; ---- SELECT c1 as cx FROM x ORDER BY local.cx; ---- SELECT c1, count(*) as c FROM x GROUP BY 1 HAVING local.c > 1; ---- SELECT S_ID, C_ID, PRICE, ROW_NUMBER() OVER (PARTITION BY C_ID ORDER BY PRICE DESC) NUM FROM SALES QUALIFY local.NUM = 1; SELECT [day] FROM T; ---- SELECT "day" FROM T; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PRIOR TO LOW LOCAL.budget PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PLUS LOW LOCAL.budget PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PRIOR TO LOW LOCAL.budget INVERSE col20 PARTITION BY local.c1; ---- SELECT * FROM T WHERE (LOCAL.c1, LOCAL.c2) NOT IN (SELECT c1,c2 FROM b); ---- SELECT 'ABC' as c1 FROM dual WHERE local.c1 = 'ABC'; SELECT a, b, c FROM x union SELECT a, b, c FROM y ORDER BY a; ---- SELECT -1 * row_number() OVER() AS nummer FROM sys.exa_sql_keywords CROSS JOIN sys.exa_sql_keywords UNION ALL SELECT 0; -- SELECT INTERVAL '5' MONTH, INTERVAL '130' MONTH (3), INTERVAL '27' YEAR, INTERVAL '100-1' YEAR(3) TO MONTH, 
INTERVAL '2-1' YEAR TO MONTH, INTERVAL '10:20' HOUR TO MINUTE, INTERVAL '2 23:10:59' DAY TO SECOND, INTERVAL '6' MINUTE, INTERVAL '5' DAY , INTERVAL '100' HOUR(3) , INTERVAL '1.99999' SECOND(2,2) , INTERVAL '23:10:59.123' HOUR(2) TO SECOND(3); -- SELECT v, DATE'2020-10-26' + v * INTERVAL'7'DAY AS late_2020_mondays, 5 * v AS five_times_table FROM VALUES BETWEEN 1 AND 9 AS v(v); -- with v as ( select RANGE_VALUE from VALUES between 0 and days_between(current_date -6, current_date -1) ) select * from v; -- with v as ( select RANGE_VALUE from VALUES between abs(-5) and days_between(current_date -6, current_date -1) ) select * from v; -- SELECT 'abcd' LIKE 'a_d' AS res1, '%bcd' like '%%d' AS res2; -- SELECT 'abcd' NOT LIKE 'a_d' AS res1, '%bcd' like '%%d' AS res2; -- SELECT 'My mail address is my_mail@exasol.com' REGEXP_LIKE '(?i).*[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,4}.*' AS contains_email; -- SELECT 'My mail address is my_mail@exasol.com' NOT REGEXP_LIKE '(?i).*[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,4}.*' AS contains_email; -- SELECT current_date -1 as dt from dual; sqlfluff-3.4.2/test/fixtures/dialects/exasol/select_statement.yml000066400000000000000000001676631503426445100253720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ed63b494837b3e04680e3fd2fe69a5d776274ab689dd31550247f6d74a979c33 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id alias_expression: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id alias_expression: naked_identifier: mgr_id - comma: ',' - select_clause_element: bare_function: CONNECT_BY_ISLEAF alias_expression: naked_identifier: leaf - comma: ',' - select_clause_element: bare_function: LEVEL - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: LPAD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "' '" - comma: ',' - expression: - numeric_literal: '2' - binary_operator: '*' - bare_function: LEVEL - binary_operator: '-' - numeric_literal: '1' - end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"PATH"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connect_by_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - column_reference: naked_identifier: dept_no - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: dno - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: 
"'Clark'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: volume from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: store orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: store - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: volume from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: sales - keyword: USING - bracketed: start_bracket: ( naked_identifier: c_id end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tmp_view keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sales - dot: . 
- naked_identifier: c_id end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: volume - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_view groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: store - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: import_statement: - keyword: IMPORT - keyword: INTO - bracketed: start_bracket: ( import_columns: column_datatype_definition: naked_identifier: v data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_connection - keyword: TABLE - table_reference: - naked_identifier: sys - dot: . - naked_identifier: dual end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: naked_identifier: aschema dot: . function_name_identifier: afunction function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'hello'" - comma: ',' - expression: numeric_literal: '123' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: aschema - dot: . - naked_identifier: mytable where_clause: keyword: WHERE expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - numeric_literal: '2' - comma: ',' - function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bschema - dot: . 
- naked_identifier: yourtable end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mylist keyword: AS bracketed: start_bracket: ( values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - quoted_literal: "'d'" - comma: ',' - quoted_literal: "'e'" - comma: ',' - quoted_literal: "'f'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: f1 function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'d'" - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: mylist bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mylist - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: rowid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: column_reference: naked_identifier: col2 - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - keyword: DESC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: rowid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: column_reference: naked_identifier: col2 - end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - keyword: DESC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x with_invalid_unique_pk_clause: - keyword: WITH - keyword: INVALID - keyword: UNIQUE - bracketed: start_bracket: ( column_reference: naked_identifier: myid end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'y'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'y'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'2'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'y'" - comma: ',' - quoted_literal: "'2'" - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: xs bracketed: start_bracket: ( identifier_list: naked_identifier: n1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '15' - keyword: WITH - keyword: STEP - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: name - with_invalid_foreign_key_clause: - keyword: WITH - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: nr end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: naked_identifier: T2 bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' with_invalid_foreign_key_clause: - keyword: WITH - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: naked_identifier: T2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT with_invalid_foreign_key_clause: - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: nr - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: naked_identifier: T2 bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_table_clause: - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: date date_constructor_literal: "'2021-09-21'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT with_invalid_unique_pk_clause: - keyword: INVALID - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: first_name end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_EXTRACT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: json_str - comma: ',' - expression: quoted_literal: "'$.\"@id\"'" - comma: ',' - expression: quoted_literal: "'$.error()'" - end_bracket: ) emits_segment: keyword: EMITS bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: id data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: error_column data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: 
( numeric_literal: '2000000' end_bracket: ) - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '10' - binary_operator: / - numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: a - comma: ',' - select_clause_element: expression: local_alias_segment: keyword: local dot: . naked_identifier: a binary_operator: '*' numeric_literal: '10' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ABS function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: local_alias_segment: keyword: local dot: . naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c1 alias_expression: alias_operator: keyword: as naked_identifier: cx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: cc from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x groupby_clause: - keyword: GROUP - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: cx - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c1 alias_expression: alias_operator: keyword: as naked_identifier: cx from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x orderby_clause: - keyword: ORDER - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: cx - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' having_clause: keyword: HAVING expression: local_alias_segment: keyword: local dot: . 
naked_identifier: c comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: S_ID - comma: ',' - select_clause_element: column_reference: naked_identifier: C_ID - comma: ',' - select_clause_element: column_reference: naked_identifier: PRICE - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: C_ID orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: PRICE - keyword: DESC end_bracket: ) alias_expression: naked_identifier: NUM from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: SALES qualify_clause: keyword: QUALIFY expression: local_alias_segment: keyword: local dot: . naked_identifier: NUM comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: identifier: '[day]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '"day"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T preferring_clause: keyword: PREFERRING preference_term: keyword: HIGH local_alias_segment: keyword: LOCAL dot: . naked_identifier: ranking partitionby_clause: - keyword: PARTITION - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T preferring_clause: keyword: PREFERRING preference_term: keyword: HIGH local_alias_segment: keyword: LOCAL dot: . naked_identifier: ranking plus_prior_inverse: - keyword: PRIOR - keyword: TO - preference_term: keyword: LOW local_alias_segment: keyword: LOCAL dot: . naked_identifier: budget partitionby_clause: - keyword: PARTITION - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T preferring_clause: keyword: PREFERRING preference_term: keyword: HIGH local_alias_segment: keyword: LOCAL dot: . 
naked_identifier: ranking plus_prior_inverse: keyword: PLUS preference_term: keyword: LOW local_alias_segment: keyword: LOCAL dot: . naked_identifier: budget partitionby_clause: - keyword: PARTITION - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T preferring_clause: keyword: PREFERRING preference_term: keyword: HIGH local_alias_segment: keyword: LOCAL dot: . naked_identifier: ranking plus_prior_inverse: - keyword: PRIOR - keyword: TO - preference_term: keyword: LOW local_alias_segment: keyword: LOCAL dot: . naked_identifier: budget plus_prior_inverse: keyword: INVERSE preference_term: column_reference: naked_identifier: col20 partitionby_clause: - keyword: PARTITION - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T where_clause: keyword: WHERE expression: - bracketed: - start_bracket: ( - local_alias_segment: keyword: LOCAL dot: . naked_identifier: c1 - comma: ',' - local_alias_segment: keyword: LOCAL dot: . naked_identifier: c2 - end_bracket: ) - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c1 - comma: ',' - select_clause_element: column_reference: naked_identifier: c2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'ABC'" alias_expression: alias_operator: keyword: as naked_identifier: c1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: WHERE expression: local_alias_segment: keyword: local dot: . 
naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ABC'" - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x - set_operator: keyword: union - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: y - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' binary_operator: '*' function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: nummer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: exa_sql_keywords join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . 
- naked_identifier: exa_sql_keywords - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'5'" - keyword: MONTH - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'130'" - keyword: MONTH - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'27'" - keyword: YEAR - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'100-1'" - keyword: YEAR - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - keyword: TO - keyword: MONTH - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'2-1'" - keyword: YEAR - keyword: TO - keyword: MONTH - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'10:20'" - keyword: HOUR - keyword: TO - keyword: MINUTE - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'2 23:10:59'" - keyword: DAY - keyword: TO - keyword: SECOND - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'6'" - keyword: MINUTE - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'5'" - keyword: DAY - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'100'" - keyword: HOUR - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'1.99999'" - keyword: SECOND - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'23:10:59.123'" - keyword: HOUR - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - keyword: TO - keyword: SECOND - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: v - comma: ',' - select_clause_element: expression: - keyword: DATE - date_constructor_literal: "'2020-10-26'" - binary_operator: + - column_reference: naked_identifier: v - binary_operator: '*' - interval_expression: - keyword: INTERVAL - quoted_literal: "'7'" - keyword: DAY alias_expression: alias_operator: keyword: AS naked_identifier: late_2020_mondays - comma: ',' - select_clause_element: expression: numeric_literal: '5' binary_operator: '*' column_reference: naked_identifier: v alias_expression: alias_operator: keyword: AS naked_identifier: five_times_table from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '9' alias_expression: alias_operator: keyword: AS naked_identifier: v bracketed: start_bracket: ( identifier_list: naked_identifier: v end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: v keyword: as bracketed: 
start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: RANGE_VALUE from_clause: keyword: from from_expression: from_expression_element: table_expression: values_range_clause: - keyword: VALUES - keyword: between - numeric_literal: '0' - keyword: and - function: function_name: function_name_identifier: days_between function_contents: bracketed: - start_bracket: ( - expression: bare_function: current_date binary_operator: '-' numeric_literal: '6' - comma: ',' - expression: bare_function: current_date binary_operator: '-' numeric_literal: '1' - end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: v keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: RANGE_VALUE from_clause: keyword: from from_expression: from_expression_element: table_expression: values_range_clause: - keyword: VALUES - keyword: between - function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: numeric_literal: sign_indicator: '-' numeric_literal: '5' end_bracket: ) - keyword: and - function: function_name: function_name_identifier: days_between function_contents: bracketed: - start_bracket: ( - expression: bare_function: current_date binary_operator: '-' numeric_literal: '6' - comma: ',' - expression: bare_function: current_date binary_operator: '-' numeric_literal: '1' - end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - quoted_literal: "'abcd'" - keyword: LIKE - quoted_literal: "'a_d'" alias_expression: alias_operator: keyword: AS naked_identifier: res1 - comma: ',' - select_clause_element: expression: - quoted_literal: "'%bcd'" - keyword: like - quoted_literal: "'%%d'" alias_expression: alias_operator: keyword: AS naked_identifier: res2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - quoted_literal: "'abcd'" - keyword: NOT - keyword: LIKE - quoted_literal: "'a_d'" alias_expression: alias_operator: keyword: AS naked_identifier: res1 - comma: ',' - select_clause_element: expression: - quoted_literal: "'%bcd'" - keyword: like - quoted_literal: "'%%d'" alias_expression: alias_operator: keyword: AS naked_identifier: res2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'My mail address is my_mail@exasol.com'" - keyword: REGEXP_LIKE - quoted_literal: "'(?i).*[a-z0-9._%+-]+@[a-z0-9.-]+\\.[a-z]{2,4}.*'" alias_expression: alias_operator: keyword: AS naked_identifier: contains_email - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'My mail address is 
my_mail@exasol.com'" - keyword: NOT - keyword: REGEXP_LIKE - quoted_literal: "'(?i).*[a-z0-9._%+-]+@[a-z0-9.-]+\\.[a-z]{2,4}.*'" alias_expression: alias_operator: keyword: AS naked_identifier: contains_email - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: '-' numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: dt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/truncate_statement.sql000066400000000000000000000000251503426445100257100ustar00rootroot00000000000000TRUNCATE TABLE test; sqlfluff-3.4.2/test/fixtures/dialects/exasol/truncate_statement.yml000066400000000000000000000010501503426445100257110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7f00bfe35abda3c5f894dcbed8f1790f94822f36c9e94a05e0ebbd4be83a5a3 file: statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/update_statement.sql000066400000000000000000000007451503426445100253560ustar00rootroot00000000000000UPDATE staff SET salary=salary*1.1 WHERE name='SMITH'; ---- UPDATE staff AS U SET U.salary=U.salary/1.95583, U.currency='EUR' WHERE U.currency='DM'; ---- UPDATE staff AS U SET U.salary=V.salary, U.currency=V.currency FROM staff AS U, staff_updates AS V WHERE U.name=V.name; ---- UPDATE order_pos SET stocks=stocks*10 PREFERRING HIGH (order_date) PARTITION BY (shop_id, order_id); ---- UPDATE t1 SET x=t2.c1, w=t4.c2 FROM t2 JOIN t3 g ON t2.c1=t3.c2 LEFT JOIN t4 ON t4.c3=t3.c1 ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/update_statement.yml000066400000000000000000000175461503426445100253670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 844f587cdd69a70a2c9083ca30e79ab076ae34a69e2f5db31336b7f29aa35761 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: staff set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '1.1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SMITH'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: staff alias_expression: alias_operator: keyword: AS naked_identifier: U set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: U - dot: . 
- naked_identifier: salary binary_operator: / numeric_literal: '1.95583' - comma: ',' - set_clause: column_reference: - naked_identifier: U - dot: . - naked_identifier: currency comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'EUR'" where_clause: keyword: WHERE expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: currency comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DM'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: staff alias_expression: alias_operator: keyword: AS naked_identifier: U set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: V - dot: . - naked_identifier: salary - comma: ',' - set_clause: column_reference: - naked_identifier: U - dot: . - naked_identifier: currency comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: V - dot: . - naked_identifier: currency from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staff alias_expression: alias_operator: keyword: AS naked_identifier: U - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staff_updates alias_expression: alias_operator: keyword: AS naked_identifier: V where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: V - dot: . - naked_identifier: name - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: order_pos set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: stocks comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: stocks binary_operator: '*' numeric_literal: '10' preferring_clause: keyword: PREFERRING preference_term: function: function_name: function_name_identifier: HIGH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: order_date end_bracket: ) partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: shop_id - comma: ',' - expression: column_reference: naked_identifier: order_id - end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c1 - comma: ',' - set_clause: column_reference: naked_identifier: w comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: t4 - dot: . - naked_identifier: c2 from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: t3 alias_expression: naked_identifier: g join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: c1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: c2 - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t4 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t4 - dot: . - naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: c1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/exasol/values_in_subquery.sql000066400000000000000000000003671503426445100257340ustar00rootroot00000000000000WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt; WITH txt AS ( VALUES (1, 2), (3, 4) AS t (c1, c2) ) SELECT * FROM txt; SELECT * FROM ( VALUES (1) ) AS t(id); SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2); sqlfluff-3.4.2/test/fixtures/dialects/exasol/values_in_subquery.yml000066400000000000000000000121001503426445100257220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2128fc09d7141ee0db981a81abec4e88bfffd79d0aa77bd4e13de2009a77154 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: txt keyword: AS bracketed: start_bracket: ( values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: id end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: txt - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: txt keyword: AS bracketed: start_bracket: ( values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: txt - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: id end_bracket: ) - statement_terminator: ; - 
statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/000077500000000000000000000000001503426445100210715ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/flink/create_catalog.sql000066400000000000000000000001441503426445100245460ustar00rootroot00000000000000CREATE CATALOG my_catalog WITH ( 'type' = 'hive', 'hive-conf-dir' = '/path/to/hive/conf' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_catalog.yml000066400000000000000000000016521503426445100245550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 12173fabfd93c0b20d0d8fe34ee47a7a098dbc318cdfda6dc985615f93c7c523 file: statement: create_catalog_statement: - keyword: CREATE - keyword: CATALOG - naked_identifier: my_catalog - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'type'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive'" - comma: ',' - quoted_literal: "'hive-conf-dir'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/path/to/hive/conf'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_database.sql000066400000000000000000000001321503426445100246750ustar00rootroot00000000000000CREATE DATABASE IF NOT EXISTS my_db COMMENT 'My database' WITH ( 'key1' = 'value1' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_database.yml000066400000000000000000000016131503426445100247040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
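# For reference: the tree below parses the statement from create_database.sql,
#   CREATE DATABASE IF NOT EXISTS my_db COMMENT 'My database' WITH ('key1' = 'value1');
# As a hedged sketch (not itself in the fixture), the same statement without the
# optional COMMENT clause would be expected to yield the same
# create_database_statement node minus the COMMENT keyword and its literal:
#   CREATE DATABASE IF NOT EXISTS my_db WITH ('key1' = 'value1');
# To regenerate after changing the SQL, run the command named in the header,
#   python test/generate_parse_fixture_yml.py
# (whether it accepts a per-dialect argument is an assumption; check its help first).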
_hash: 83a469e372fda883c652be19730d3c6b28aa90b7f9f9d4fcd6c501ae8176127a file: statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_db - keyword: COMMENT - quoted_literal: "'My database'" - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'key1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_basic.sql000066400000000000000000000003171503426445100253660ustar00rootroot00000000000000CREATE TABLE my_table ( id INT, name STRING, age INT ) WITH ( 'connector' = 'kafka', 'topic' = 'my-topic', 'properties.bootstrap.servers' = 'localhost:9092', 'format' = 'json' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_basic.yml000066400000000000000000000033231503426445100253700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8ab2d9fd1649074b1756ed123662fe387e859e156d60a43fb47ce0d4335344b4 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kafka'" - comma: ',' - quoted_literal: "'topic'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-topic'" - comma: ',' - quoted_literal: "'properties.bootstrap.servers'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'localhost:9092'" - comma: ',' - quoted_literal: "'format'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'json'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_complex.sql000066400000000000000000000013231503426445100257520ustar00rootroot00000000000000CREATE TABLE complex_table ( session_id STRING, session_ts TIMESTAMP(3), source_name STRING, service STRING, category STRING, category_id STRING, type STRING, type_id STRING, identifier STRING, identifier_id STRING, event_type STRING, action_type STRING, resource_type STRING, value DOUBLE, quantity DOUBLE, request_url STRING, is_deleted BOOLEAN, item_count INT, created_ts TIMESTAMP(3), updated_ts TIMESTAMP(3), processed_ts TIMESTAMP(3), received_ts TIMESTAMP(3), sequence_ts TIMESTAMP(3) ) WITH ( 'connector' = 'test-connector', 'project' = 'test-project', 'dataset' = 'test-dataset', 'table' = 'test-table' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_complex.yml000066400000000000000000000127411503426445100257620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c8280b26f5aaf76c2368c0350ed6487850b2ac3aafc95b41b1968cdda1a3c84 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: complex_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: session_id data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: session_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: source_name data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: service data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: category data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: category_id data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: type data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: type_id data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: identifier data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: identifier_id data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: event_type data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: action_type data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: resource_type data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: value data_type: data_type_identifier: DOUBLE - comma: ',' - column_definition: naked_identifier: quantity data_type: data_type_identifier: DOUBLE - comma: ',' - column_definition: naked_identifier: request_url data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: is_deleted data_type: data_type_identifier: BOOLEAN - comma: ',' - column_definition: naked_identifier: item_count data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: created_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: updated_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: processed_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: received_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: sequence_ts data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test-connector'" - comma: ',' - quoted_literal: "'project'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test-project'" - comma: ',' - quoted_literal: "'dataset'" - comparison_operator: raw_comparison_operator: '=' - 
quoted_literal: "'test-dataset'" - comma: ',' - quoted_literal: "'table'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test-table'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_computed_metadata.sql000066400000000000000000000002641503426445100277660ustar00rootroot00000000000000CREATE TABLE my_table ( id INT, name STRING, full_name AS CONCAT(name, '_suffix'), kafka_offset BIGINT METADATA FROM 'offset' ) WITH ( 'connector' = 'kafka' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_computed_metadata.yml000066400000000000000000000034421503426445100277710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 28ec502ee6e713c63e9c3261c31b67173da001e3a083cf1b3a5f38904966c906 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - comma: ',' - naked_identifier: full_name - keyword: AS - expression: function: function_name: function_name_identifier: CONCAT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "'_suffix'" - end_bracket: ) - comma: ',' - naked_identifier: kafka_offset - data_type: data_type_identifier: BIGINT - keyword: METADATA - keyword: FROM - quoted_literal: "'offset'" - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kafka'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_simple.sql000066400000000000000000000001361503426445100255750ustar00rootroot00000000000000CREATE TABLE simple_table ( id INT, name STRING ) WITH ( 'connector' = 'kafka' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_simple.yml000066400000000000000000000021321503426445100255750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
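# For reference: the tree below parses the statement from create_table_simple.sql,
#   CREATE TABLE simple_table (id INT, name STRING) WITH ('connector' = 'kafka');
# Each column maps to a column_definition node, and each 'key' = 'value' pair in
# the WITH clause to a quoted_literal / comparison_operator / quoted_literal
# triple, exactly as in the basic and complex fixtures above.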
_hash: 1e5a8a0d4ecb636d6ebe8989be2af05f37ddb429b0142a77d01dc48d2b9bf916 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: simple_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: STRING - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kafka'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_timestamp_precision.sql000066400000000000000000000002511503426445100303600ustar00rootroot00000000000000CREATE TABLE my_table ( id INT, event_time TIMESTAMP(3), processing_time TIMESTAMP_LTZ(3), updated_at TIMESTAMP(6) ) WITH ( 'connector' = 'kafka' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_timestamp_precision.yml000066400000000000000000000035271503426445100303730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 645b42013fc2d234e70d05daaad1e8f3919f2698e6f5fbb9f51dca750acf1c80 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: event_time data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: processing_time data_type: data_type_identifier: TIMESTAMP_LTZ bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_definition: naked_identifier: updated_at data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kafka'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_watermark.sql000066400000000000000000000002501503426445100262760ustar00rootroot00000000000000CREATE TABLE my_table ( id INT, event_time TIMESTAMP(3), WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND ) WITH ( 'connector' = 'kafka' ); sqlfluff-3.4.2/test/fixtures/dialects/flink/create_table_watermark.yml000066400000000000000000000031761503426445100263120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
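# For reference: the tree below parses the statement from create_table_watermark.sql,
#   CREATE TABLE my_table (
#       id INT,
#       event_time TIMESTAMP(3),
#       WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
#   ) WITH ('connector' = 'kafka');
# Note how the watermark expression is captured as an expression node containing
# a binary_operator '-' and an interval_expression (keyword INTERVAL, quoted
# literal '5', date_part SECOND).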
_hash: f937e1f5a9656b64c6e89425ebbe5d53f922d02769798ba99eaf5e4ffe006080 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: event_time data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - keyword: WATERMARK - keyword: FOR - column_reference: naked_identifier: event_time - keyword: AS - expression: column_reference: naked_identifier: event_time binary_operator: '-' interval_expression: keyword: INTERVAL quoted_literal: "'5'" date_part: SECOND - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - quoted_literal: "'connector'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kafka'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/describe_statement.sql000066400000000000000000000000231503426445100254510ustar00rootroot00000000000000DESCRIBE my_table; sqlfluff-3.4.2/test/fixtures/dialects/flink/describe_statement.yml000066400000000000000000000010331503426445100254550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e1f477bd09af2c33558beb6ba5428dc3c8a336f4d4e809d29ee6f515ab61c14b file: statement: describe_statement: keyword: DESCRIBE table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/explain_statement.sql000066400000000000000000000000401503426445100253300ustar00rootroot00000000000000EXPLAIN SELECT * FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/flink/explain_statement.yml000066400000000000000000000016021503426445100253370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dea94fdb22c312fe84eb59a24a8db028c354ad757a08e9aab955f57ef7cb120b file: statement: explain_statement: keyword: EXPLAIN select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/select_basic.sql000066400000000000000000000000301503426445100242230ustar00rootroot00000000000000SELECT * FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/flink/select_basic.yml000066400000000000000000000014701503426445100242360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
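# For reference: the tree below parses the statement from select_basic.sql,
#   SELECT * FROM my_table;
# The wildcard projects as wildcard_expression -> wildcard_identifier -> star,
# and the table lands under from_clause -> from_expression ->
# from_expression_element -> table_expression -> table_reference.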
_hash: f0970df67f3eee4f9a0164980a2ce15f843193f781bb1041e387ce3debc3c982 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/show_statements.sql000066400000000000000000000001631503426445100250410ustar00rootroot00000000000000SHOW CATALOGS; SHOW DATABASES; SHOW TABLES; SHOW VIEWS; SHOW FUNCTIONS; SHOW MODULES; SHOW JARS; SHOW JOBS; sqlfluff-3.4.2/test/fixtures/dialects/flink/show_statements.yml000066400000000000000000000022661503426445100250510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2029734dec4a5e9fcf7d988f6c889f599f99cf0b6ae8c3a30fec1c82d18327d4 file: - statement: show_statement: - keyword: SHOW - keyword: CATALOGS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: MODULES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: JARS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: JOBS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/flink/use_statements.sql000066400000000000000000000001071503426445100246530ustar00rootroot00000000000000USE CATALOG my_catalog; USE my_database; USE my_catalog.my_database; sqlfluff-3.4.2/test/fixtures/dialects/flink/use_statements.yml000066400000000000000000000015651503426445100246660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4608da031dc0658c66f5f7883fa529d8ab68c4ed36e9cb1cc9d663fa38f223cf file: - statement: use_statement: - keyword: USE - keyword: CATALOG - object_reference: naked_identifier: my_catalog - statement_terminator: ; - statement: use_statement: keyword: USE object_reference: naked_identifier: my_database - statement_terminator: ; - statement: use_statement: keyword: USE object_reference: - naked_identifier: my_catalog - dot: . 
- naked_identifier: my_database - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/000077500000000000000000000000001503426445100217645ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/greenplum/.sqlfluff000066400000000000000000000000371503426445100236070ustar00rootroot00000000000000[sqlfluff] dialect = greenplum sqlfluff-3.4.2/test/fixtures/dialects/greenplum/analize.sql000066400000000000000000000000551503426445100241300ustar00rootroot00000000000000ANALYSE ROOTPARTITION ALL; ANALYZE mytable; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/analize.yml000066400000000000000000000012351503426445100241330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 58ab4230d9611bc8e65c1ecc0f0795b1d4cfb34e3b44a53c3105d4e63fb6a446 file: - statement: analize_statement: - keyword: ANALYSE - keyword: ROOTPARTITION - keyword: ALL - statement_terminator: ; - statement: analyze_statement: keyword: ANALYZE table_reference: naked_identifier: mytable - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/close.sql000066400000000000000000000000331503426445100236060ustar00rootroot00000000000000CLOSE portala; CLOSE ALL; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/close.yml000066400000000000000000000011701503426445100236130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 54461de95799a05e8b2dc343ec093c8933b1c718dfde355e970c5009574a9f11 file: - statement: close_statement: keyword: CLOSE table_reference: naked_identifier: portala - statement_terminator: ; - statement: close_statement: - keyword: CLOSE - keyword: ALL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/copy.sql000066400000000000000000000025631503426445100234650ustar00rootroot00000000000000COPY "schema_1"."table_1" ("field_1", ) FROM STDIN WITH CSV NULL '' DELIMITER ' ' ESCAPE '\\' ; COPY country TO STDOUT (DELIMITER '|'); COPY country FROM '/home/usr1/sql/country_data'; COPY (SELECT * FROM country WHERE country_name LIKE 'A%') TO '/home/usr1/sql/a_list_countries.copy'; COPY sales FROM '/home/usr1/sql/sales_data' LOG ERRORS SEGMENT REJECT LIMIT 10 ROWS; COPY mytable TO '/gpbackup.txt' ON SEGMENT; COPY (SELECT * FROM testtbl) TO '/tmp/mytst' ON SEGMENT; COPY LINEITEM TO PROGRAM 'cat > /tmp/lineitem.csv' CSV; COPY LINEITEM_4 FROM PROGRAM 'cat /tmp/lineitem_program.csv' ON SEGMENT CSV; COPY table_name FROM '/path/to/file.csv' DELIMITER ',' CSV HEADER; COPY schema_name.table_name FROM '/path/to/file.csv' DELIMITER ';' CSV HEADER; COPY table_name (column1, column2, column3) FROM '/path/to/file.csv' DELIMITER ',' CSV HEADER; COPY table_name FROM PROGRAM 'cat /path/to/file.csv' DELIMITER ',' CSV HEADER; COPY table_name FROM '/path/to/file.csv' DELIMITER ',' CSV QUOTE '"' ESCAPE ''; COPY table_name FROM '/path/to/file.csv' DELIMITER ',' CSV NULL 'NA' HEADER; COPY table_name (column1, column2, column3) FROM '/path/to/file.csv' DELIMITER ',' CSV HEADER QUOTE '"'; COPY table_name FROM '/path/to/file.csv' DELIMITER ',' CSV ESCAPE ''; COPY table_name FROM '/path/to/file.csv' DELIMITER ',' CSV HEADER ENCODING 'UTF8'; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/copy.yml000066400000000000000000000172321503426445100234660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d2ae360051180cccb538130f9d0441d2c1de265768a305d7f0ef83a9d3ffbff file: - statement: copy_statement: - keyword: COPY - table_reference: - quoted_identifier: '"schema_1"' - dot: . 
- quoted_identifier: '"table_1"' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"field_1"' comma: ',' end_bracket: ) - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: 'NULL' - quoted_literal: "''" - keyword: DELIMITER - quoted_literal: "'\t'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: country - keyword: TO - keyword: STDOUT - bracketed: start_bracket: ( keyword: DELIMITER quoted_literal: "'|'" end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: country - keyword: FROM - quoted_literal: "'/home/usr1/sql/country_data'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: country where_clause: keyword: WHERE expression: column_reference: naked_identifier: country_name keyword: LIKE quoted_literal: "'A%'" end_bracket: ) - keyword: TO - quoted_literal: "'/home/usr1/sql/a_list_countries.copy'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: sales - keyword: FROM - quoted_literal: "'/home/usr1/sql/sales_data'" - keyword: LOG - keyword: ERRORS - keyword: SEGMENT - keyword: REJECT - keyword: LIMIT - numeric_literal: '10' - keyword: ROWS - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: mytable - keyword: TO - quoted_literal: "'/gpbackup.txt'" - keyword: 'ON' - keyword: SEGMENT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtbl end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/mytst'" - keyword: 'ON' - keyword: SEGMENT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: LINEITEM - keyword: TO - keyword: PROGRAM - quoted_literal: "'cat > /tmp/lineitem.csv'" - keyword: CSV - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: LINEITEM_4 - keyword: FROM - keyword: PROGRAM - quoted_literal: "'cat /tmp/lineitem_program.csv'" - keyword: 'ON' - keyword: SEGMENT - keyword: CSV - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: - naked_identifier: schema_name - dot: . 
- naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "';'" - keyword: CSV - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - keyword: PROGRAM - quoted_literal: "'cat /path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: QUOTE - quoted_literal: "'\"'" - keyword: ESCAPE - quoted_literal: "''" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: 'NULL' - quoted_literal: "'NA'" - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: HEADER - keyword: QUOTE - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: ESCAPE - quoted_literal: "''" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: table_name - keyword: FROM - quoted_literal: "'/path/to/file.csv'" - keyword: DELIMITER - quoted_literal: "','" - keyword: CSV - keyword: HEADER - keyword: ENCODING - quoted_literal: "'UTF8'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/create_table.sql000066400000000000000000000052151503426445100251220ustar00rootroot00000000000000CREATE TABLE measurement ( city_id int NOT NULL, logdate date NOT NULL, peaktemp int, unitsales int ) WITH (appendoptimized=true, compresslevel=5) DISTRIBUTED BY (txn_id, other_field); CREATE TABLE measurement ( city_id int NOT NULL, logdate date NOT NULL, peaktemp int, unitsales int ) WITH (appendoptimized=true, orientation="column") DISTRIBUTED BY (txn_id); CREATE TEMP TABLE test ( test_id int NOT NULL, logdate date NOT NULL, test_text int ) DISTRIBUTED BY (txn_id); CREATE TABLE test_randomly ( test_id int NOT NULL, logdate date NOT NULL, test_text int ) DISTRIBUTED RANDOMLY; CREATE TABLE test_replicated ( test_id int NOT NULL, logdate date NOT NULL, test_text int ) DISTRIBUTED REPLICATED; create table table1 ( column1 int , column2 varchar , column3 boolean ) with 
(appendoptimized = true, compresstype = zstd) distributed by (column1, column2); CREATE TABLE sales (id int, year int, qtr int, c_rank int, code char(1), region text) DISTRIBUTED BY (id) PARTITION BY LIST (code) ( PARTITION sales VALUES ('S'), PARTITION returns VALUES ('R') ); CREATE TABLE sales (id int, year int, qtr int, c_rank int, code char(1), region text) DISTRIBUTED BY (id) PARTITION BY LIST (code) SUBPARTITION BY RANGE (c_rank) SUBPARTITION by LIST (region) ( PARTITION sales VALUES ('S') ( SUBPARTITION cr1 START (1) END (2) ( SUBPARTITION ca VALUES ('CA') ), SUBPARTITION cr2 START (3) END (4) ( SUBPARTITION ca VALUES ('CA') ) ), PARTITION returns VALUES ('R') ( SUBPARTITION cr1 START (1) END (2) ( SUBPARTITION ca VALUES ('CA') ), SUBPARTITION cr2 START (3) END (4) ( SUBPARTITION ca VALUES ('CA') ) ) ); CREATE TABLE sales1 (id int, year int, qtr int, c_rank int, code char(1), region text) DISTRIBUTED BY (id) PARTITION BY LIST (code) SUBPARTITION BY RANGE (c_rank) SUBPARTITION TEMPLATE ( SUBPARTITION cr1 START (1) END (2), SUBPARTITION cr2 START (3) END (4) ) SUBPARTITION BY LIST (region) SUBPARTITION TEMPLATE ( SUBPARTITION ca VALUES ('CA') ) ( PARTITION sales VALUES ('S'), PARTITION returns VALUES ('R') ); CREATE TABLE sales (id int, year int, qtr int, c_rank int, code char(1), region text) DISTRIBUTED BY (id) PARTITION BY RANGE (year) SUBPARTITION BY RANGE (qtr) SUBPARTITION TEMPLATE ( START (1) END (5) EVERY (1), DEFAULT SUBPARTITION bad_qtr ) SUBPARTITION BY LIST (region) SUBPARTITION TEMPLATE ( SUBPARTITION usa VALUES ('usa'), SUBPARTITION europe VALUES ('europe'), SUBPARTITION asia VALUES ('asia'), DEFAULT SUBPARTITION other_regions) ( START (2009) END (2011) EVERY (1), DEFAULT PARTITION outlying_years); sqlfluff-3.4.2/test/fixtures/dialects/greenplum/create_table.yml000066400000000000000000000511071503426445100251250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
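# The dialect for every fixture in this directory comes from the
# directory-level `.sqlfluff` file above (`dialect = greenplum`). As a rough
# sketch of making that selection explicitly (assuming the `sqlfluff` Python
# package is installed; the exact violation format may vary by version):
#
#     import sqlfluff
#
#     sql = "CREATE TABLE t (a int) DISTRIBUTED RANDOMLY;"
#     # `lint` returns a list describing any violations found; the
#     # Greenplum-specific DISTRIBUTED clause parses only in this dialect.
#     violations = sqlfluff.lint(sql, dialect="greenplum")
#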
_hash: 1879c4ed396cb8893d523c19d4502a7ccbcfe71eb87e9b19abd1087b9f0a4387 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement - bracketed: - start_bracket: ( - column_reference: naked_identifier: city_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: peaktemp - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: unitsales - data_type: keyword: int - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - parameter: appendoptimized - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: compresslevel - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: txn_id - comma: ',' - column_reference: naked_identifier: other_field - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement - bracketed: - start_bracket: ( - column_reference: naked_identifier: city_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: peaktemp - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: unitsales - data_type: keyword: int - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - parameter: appendoptimized - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: orientation - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"column"' - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: txn_id end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: test_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: test_text - data_type: keyword: int - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: txn_id end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_randomly - bracketed: - start_bracket: ( - column_reference: naked_identifier: test_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: 
NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: test_text - data_type: keyword: int - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: RANDOMLY - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_replicated - bracketed: - start_bracket: ( - column_reference: naked_identifier: test_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: test_text - data_type: keyword: int - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: REPLICATED - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: column2 - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: column3 - data_type: keyword: boolean - end_bracket: ) - keyword: with - bracketed: - start_bracket: ( - parameter: appendoptimized - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: compresstype - comparison_operator: raw_comparison_operator: '=' - naked_identifier: zstd - end_bracket: ) - distributed_by: - keyword: distributed - keyword: by - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: year - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: qtr - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c_rank - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: code - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - column_reference: naked_identifier: region - data_type: keyword: text - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: code end_bracket: ) - bracketed: - start_bracket: ( - word: PARTITION - word: sales - word: VALUES - bracketed: start_bracket: ( single_quote: "'S'" end_bracket: ) - comma: ',' - word: PARTITION - word: returns - word: VALUES - bracketed: start_bracket: ( single_quote: "'R'" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: year - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: qtr - data_type: keyword: int - comma: ',' - column_reference: 
naked_identifier: c_rank - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: code - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - column_reference: naked_identifier: region - data_type: keyword: text - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: code end_bracket: ) - keyword: SUBPARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: c_rank end_bracket: ) - keyword: SUBPARTITION - keyword: by - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: region end_bracket: ) - bracketed: - start_bracket: ( - word: PARTITION - word: sales - word: VALUES - bracketed: start_bracket: ( single_quote: "'S'" end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: cr1 - word: START - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: ca - word: VALUES - start_bracket: ( - single_quote: "'CA'" - end_bracket: ) - end_bracket: ) - comma: ',' - word: SUBPARTITION - word: cr2 - word: START - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: ca - word: VALUES - start_bracket: ( - single_quote: "'CA'" - end_bracket: ) - end_bracket: ) - end_bracket: ) - comma: ',' - word: PARTITION - word: returns - word: VALUES - bracketed: start_bracket: ( single_quote: "'R'" end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: cr1 - word: START - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: ca - word: VALUES - start_bracket: ( - single_quote: "'CA'" - end_bracket: ) - end_bracket: ) - comma: ',' - word: SUBPARTITION - word: cr2 - word: START - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - bracketed: - start_bracket: ( - word: SUBPARTITION - word: ca - word: VALUES - start_bracket: ( - single_quote: "'CA'" - end_bracket: ) - end_bracket: ) - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sales1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: year - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: qtr - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c_rank - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: code - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - column_reference: naked_identifier: region - data_type: keyword: text - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: 
naked_identifier: id end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: code end_bracket: ) - keyword: SUBPARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: c_rank end_bracket: ) - keyword: SUBPARTITION - keyword: TEMPLATE - bracketed: - start_bracket: ( - word: SUBPARTITION - word: cr1 - word: START - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - comma: ',' - word: SUBPARTITION - word: cr2 - word: START - bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - end_bracket: ) - keyword: SUBPARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: region end_bracket: ) - keyword: SUBPARTITION - keyword: TEMPLATE - bracketed: - start_bracket: ( - word: SUBPARTITION - word: ca - word: VALUES - bracketed: start_bracket: ( single_quote: "'CA'" end_bracket: ) - end_bracket: ) - bracketed: - start_bracket: ( - word: PARTITION - word: sales - word: VALUES - bracketed: start_bracket: ( single_quote: "'S'" end_bracket: ) - comma: ',' - word: PARTITION - word: returns - word: VALUES - bracketed: start_bracket: ( single_quote: "'R'" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: year - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: qtr - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c_rank - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: code - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - column_reference: naked_identifier: region - data_type: keyword: text - end_bracket: ) - distributed_by: - keyword: DISTRIBUTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: year end_bracket: ) - keyword: SUBPARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: qtr end_bracket: ) - keyword: SUBPARTITION - keyword: TEMPLATE - bracketed: - start_bracket: ( - word: START - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - word: EVERY - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - word: DEFAULT - word: SUBPARTITION - word: bad_qtr - end_bracket: ) - keyword: SUBPARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( column_reference: naked_identifier: region end_bracket: ) - keyword: SUBPARTITION - keyword: TEMPLATE - bracketed: - start_bracket: ( - word: SUBPARTITION - word: usa - word: VALUES - bracketed: start_bracket: ( single_quote: "'usa'" end_bracket: ) - comma: ',' - word: SUBPARTITION - word: europe - word: VALUES - bracketed: start_bracket: ( single_quote: "'europe'" end_bracket: ) - comma: ',' - word: SUBPARTITION - word: asia - word: VALUES - bracketed: 
start_bracket: ( single_quote: "'asia'" end_bracket: ) - comma: ',' - word: DEFAULT - word: SUBPARTITION - word: other_regions - end_bracket: ) - bracketed: - start_bracket: ( - word: START - bracketed: start_bracket: ( numeric_literal: '2009' end_bracket: ) - word: END - bracketed: start_bracket: ( numeric_literal: '2011' end_bracket: ) - word: EVERY - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - word: DEFAULT - word: PARTITION - word: outlying_years - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/create_table_as.sql000066400000000000000000000005111503426445100255770ustar00rootroot00000000000000create table new_table as select * from old_table distributed randomly; create table new_table as select * from old_table; create table test_with_union as select distinct f1, f2 from table_1 union all select unnest(array['1', '2']), unnest(array['_total_', '_total_']) distributed randomly; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/create_table_as.yml000066400000000000000000000101321503426445100256010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0fff97667f8db5de83e3ce1d78cbc5a4230fdd19f256040f0e71045e38d066c8 file: - statement: create_table_as_statement: - keyword: create - keyword: table - table_reference: naked_identifier: new_table - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: old_table - distributed_by: - keyword: distributed - keyword: randomly - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: table - table_reference: naked_identifier: new_table - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: old_table - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test_with_union - keyword: as - set_expression: - select_statement: select_clause: - keyword: select - select_clause_modifier: keyword: distinct - select_clause_element: column_reference: naked_identifier: f1 - comma: ',' - select_clause_element: column_reference: naked_identifier: f2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'1'" - comma: ',' - quoted_literal: "'2'" - end_square_bracket: ']' end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: 
array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'_total_'" - comma: ',' - quoted_literal: "'_total_'" - end_square_bracket: ']' end_bracket: ) - distributed_by: - keyword: distributed - keyword: randomly - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/declare.sql000066400000000000000000000001731503426445100241050ustar00rootroot00000000000000DECLARE mycursor CURSOR FOR SELECT * FROM mytable; DECLARE myprcursor PARALLEL RETRIEVE CURSOR FOR SELECT * FROM mytable; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/declare.yml000066400000000000000000000033121503426445100241050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a07a11aa3304729f51260f14ee0a82f8089ec786087a2ebc54d6109bf806c338 file: - statement: declare_statement: - keyword: DECLARE - table_reference: naked_identifier: mycursor - keyword: CURSOR - keyword: FOR - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable - statement_terminator: ; - statement: declare_statement: - keyword: DECLARE - table_reference: naked_identifier: myprcursor - keyword: PARALLEL - keyword: RETRIEVE - keyword: CURSOR - keyword: FOR - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/fetch.sql000066400000000000000000000000371503426445100235760ustar00rootroot00000000000000FETCH FORWARD 5 FROM mycursor; sqlfluff-3.4.2/test/fixtures/dialects/greenplum/fetch.yml000066400000000000000000000011301503426445100235730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e2431496d1f31aa2cb84ea8874df65aeeb71581cee64584f60d4c210d1f0714e file: statement: fetch_clause: - keyword: FETCH - keyword: FORWARD - numeric_literal: '5' - keyword: FROM - table_reference: naked_identifier: mycursor statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/000077500000000000000000000000001503426445100207215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/hive/.sqlfluff000066400000000000000000000000321503426445100225370ustar00rootroot00000000000000[sqlfluff] dialect = hive sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_database.sql000066400000000000000000000002541503426445100243760ustar00rootroot00000000000000ALTER DATABASE foo SET DBPROPERTIES ("prop1"="val1", "prop2"="val2"); ALTER DATABASE bar SET LOCATION "hdfs://alternate_path"; ALTER DATABASE foo2 SET OWNER USER "user"; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_database.yml000066400000000000000000000027201503426445100244000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e471a0e0892ed298475a0ee64729fffe5b1b2144edcf454d92f384e0f8e79a2b file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: foo - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"prop1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - quoted_literal: '"prop2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: bar - keyword: SET - keyword: LOCATION - quoted_literal: '"hdfs://alternate_path"' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: foo2 - keyword: SET - keyword: OWNER - keyword: USER - quoted_literal: '"user"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_schema.sql000066400000000000000000000001531503426445100240700ustar00rootroot00000000000000ALTER DATABASE foo SET OWNER ROLE "role"; ALTER DATABASE bar SET MANAGEDLOCATION "hdfs://alternate_path"; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_schema.yml000066400000000000000000000016331503426445100240760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
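# A short, hedged illustration of why fixtures are grouped per dialect
# (assuming the `sqlfluff` Python package is installed): MANAGEDLOCATION is
# Hive-specific syntax, so the statement below is only expected to parse
# cleanly when the hive dialect is requested.
#
#     import sqlfluff
#
#     sqlfluff.parse(
#         'ALTER DATABASE bar SET MANAGEDLOCATION "hdfs://alternate_path";',
#         dialect="hive",
#     )
#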
_hash: 01fbe10e33e9d07cea2ff2a3357ec3730607c4d29d3f169c31f6d322c7e59715 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: foo - keyword: SET - keyword: OWNER - keyword: ROLE - quoted_literal: '"role"' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: bar - keyword: SET - keyword: MANAGEDLOCATION - quoted_literal: '"hdfs://alternate_path"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_table.sql000066400000000000000000000002521503426445100237170ustar00rootroot00000000000000ALTER TABLE schema.table1 rename TO schema.table2; ALTER TABLE schema.table1 rename TO schema.table2; ALTER TABLE table2 EXCHANGE PARTITION (ds='1') WITH TABLE table1; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_table.yml000066400000000000000000000031441503426445100237240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b89b43cb250c5f36cd9d41367b1441847f2612c8ae691d5968d89dc6d0b5953 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: schema - dot: . - naked_identifier: table1 - keyword: rename - keyword: TO - table_reference: - naked_identifier: schema - dot: . - naked_identifier: table2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: schema - dot: . - naked_identifier: table1 - keyword: rename - keyword: TO - table_reference: - naked_identifier: schema - dot: . - naked_identifier: table2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: EXCHANGE - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: ds comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" end_bracket: ) - keyword: WITH - keyword: TABLE - table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_view.sql000066400000000000000000000001541503426445100236030ustar00rootroot00000000000000ALTER VIEW db.foo AS SELECT col1 FROM db.bar; ALTER VIEW foo SET TBLPROPERTIES ('bar' = '1', 'baz' = '2'); sqlfluff-3.4.2/test/fixtures/dialects/hive/alter_view.yml000066400000000000000000000031211503426445100236020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 291f3e8adeae5c4684b7ae01000ae38e8d15ccda951d356f51026b763f02996e file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . 
- naked_identifier: bar - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: foo - keyword: SET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'bar'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - comma: ',' - quoted_literal: "'baz'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/array_types.sql000066400000000000000000000006511503426445100240060ustar00rootroot00000000000000-- simple select array[a, b, c] as arr from sch.tbl; -- bit harder select t.a from unnest(array[1, 3, 6, 12]) as t(f); -- complex select map_from_entries(array[ row('pending.freebet', pending_fb), row('bonus.balance', bonus) ]) from sch.tbl; -- string consts select array['a', 'b', 'c'] as arr from sch.tbl; -- null select array['a', null] as arr from sch.tbl; -- empty array select array[] as arr from sch.tbl; sqlfluff-3.4.2/test/fixtures/dialects/hive/array_types.yml000066400000000000000000000162051503426445100240120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11f375ef0e00859a6d630ea67b75fa1289161dabf3bc7ccaa8c973988b36db82 file: - statement: select_statement: select_clause: keyword: select select_clause_element: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: arr from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '6' - comma: ',' - numeric_literal: '12' - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: f end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: map_from_entries function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - function: function_name: function_name_identifier: row function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'pending.freebet'" - comma: ',' - expression: column_reference: naked_identifier: pending_fb - end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: row function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'bonus.balance'" - comma: ',' - expression: column_reference: naked_identifier: bonus - end_bracket: ) - end_square_bracket: ']' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: arr from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: typed_array_literal: array_type: keyword: array array_literal: start_square_bracket: '[' quoted_literal: "'a'" comma: ',' null_literal: 'null' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: arr from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: typed_array_literal: array_type: keyword: array array_literal: start_square_bracket: '[' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: arr from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . 
- naked_identifier: tbl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_database.sql000066400000000000000000000001211503426445100245230ustar00rootroot00000000000000CREATE DATABASE foo LOCATION 'hdfs://path' WITH DBPROPERTIES ("a"="1", "b"="2"); sqlfluff-3.4.2/test/fixtures/dialects/hive/create_database.yml000066400000000000000000000017701503426445100245400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8801c03e7efcddeaddf35bb0f6ed630b57534832b82ce29bccd06a854a3afca file: statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: foo - keyword: LOCATION - quoted_literal: "'hdfs://path'" - keyword: WITH - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"a"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"1"' - comma: ',' - quoted_literal: '"b"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"2"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_schema.sql000066400000000000000000000001251503426445100242230ustar00rootroot00000000000000CREATE SCHEMA IF NOT EXISTS foo COMMENT 'test schema' MANAGEDLOCATION 'hdfs://path'; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_schema.yml000066400000000000000000000013611503426445100242300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b843809c663bd1d1dc9a0524ea0edef5345e0803e2bb0d04400307c33323772 file: statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: foo - keyword: COMMENT - quoted_literal: "'test schema'" - keyword: MANAGEDLOCATION - quoted_literal: "'hdfs://path'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_as_select.sql000066400000000000000000000002671503426445100261030ustar00rootroot00000000000000CREATE TABLE new_foo ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" STORED AS RCFile AS SELECT (col1 % 1024) col, concat(col1, col2) col12 FROM foo; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_as_select.yml000066400000000000000000000040771503426445100261100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 14b630222252bb4882c7cd41f7fe954a96dd5c3ba5f809961cdf4778b6673758 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: new_foo - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: '"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"' - keyword: STORED - keyword: AS - file_format: RCFile - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 binary_operator: '%' numeric_literal: '1024' end_bracket: ) alias_expression: naked_identifier: col - comma: ',' - select_clause_element: function: function_name: function_name_identifier: concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: column_reference: naked_identifier: col2 - end_bracket: ) alias_expression: naked_identifier: col12 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_clustered_by.sql000066400000000000000000000001751503426445100266230ustar00rootroot00000000000000CREATE TABLE IF NOT EXISTS foo ( col1 string, col2 float ) CLUSTERED BY (col2) SORTED BY (col1 DESC) INTO 5 BUCKETS; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_clustered_by.yml000066400000000000000000000025701503426445100266260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a459e89000ef9b718b41f3b3bb840a5ea04a49545ba3b2628f60731d35ad97b5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: float - end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 keyword: DESC end_bracket: ) - keyword: INTO - numeric_literal: '5' - keyword: BUCKETS statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_constraints.sql000066400000000000000000000016371503426445100265120ustar00rootroot00000000000000CREATE TABLE foo( col1 INT PRIMARY KEY, col2 INTEGER NOT NULL, col3 BIGINT NOT NULL, col4 STRING, col5 STRING COMMENT 'Column 5' ) COMMENT 'This is a test table' STORED AS ORC; CREATE TABLE product ( product_id INTEGER, product_vendor_id INTEGER, PRIMARY KEY (product_id) DISABLE NOVALIDATE, CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id) DISABLE NOVALIDATE ); CREATE TABLE vendor ( vendor_id INTEGER, PRIMARY KEY (vendor_id) DISABLE NOVALIDATE RELY ); CREATE TABLE product ( product_id INTEGER, product_vendor_id INTEGER, PRIMARY KEY (product_id) DISABLE NOVALIDATE, CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id) DISABLE NOVALIDATE ); CREATE TABLE vendor ( vendor_id INTEGER, PRIMARY KEY (vendor_id) DISABLE NOVALIDATE NORELY ); sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_constraints.yml000066400000000000000000000143101503426445100265040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bfe287a81cc0b7da43dc9d173d420da1d99dcb42b8d55a1f605f7ffbca094379 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: INT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: BIGINT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col5 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'Column 5'" - end_bracket: ) - keyword: COMMENT - quoted_literal: "'This is a test table'" - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: product_id data_type: primitive_type: keyword: INTEGER - comma: ',' - column_definition: naked_identifier: product_vendor_id data_type: primitive_type: keyword: INTEGER - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: product_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: product_fk_1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: product_vendor_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: vendor - bracketed: start_bracket: ( column_reference: naked_identifier: vendor_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: vendor - bracketed: start_bracket: ( column_definition: naked_identifier: vendor_id data_type: primitive_type: keyword: INTEGER comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: vendor_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - keyword: RELY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: product_id data_type: primitive_type: keyword: INTEGER - comma: ',' - column_definition: naked_identifier: product_vendor_id data_type: primitive_type: keyword: INTEGER - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: product_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: product_fk_1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: product_vendor_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: vendor - bracketed: start_bracket: ( 
column_reference: naked_identifier: vendor_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: vendor - bracketed: start_bracket: ( column_definition: naked_identifier: vendor_id data_type: primitive_type: keyword: INTEGER comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: vendor_id end_bracket: ) - keyword: DISABLE - keyword: NOVALIDATE - keyword: NORELY end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_datatypes.sql000066400000000000000000000004571503426445100261400ustar00rootroot00000000000000CREATE TABLE db.foo ( col1 string, col2 int, col3 decimal, col4 decimal(10, 2), col5 ARRAY, col6 MAP, col7 STRUCT< field1: boolean, field2: ARRAY, field3: UNIONTYPE>, col8 UNIONTYPE> ); sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_datatypes.yml000066400000000000000000000104411503426445100261340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 224d06e8d35e55927c093e397084485a36d7cdf2f67f972db87effd738945841 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: decimal - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: col5 data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: double end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: col6 data_type: keyword: MAP start_angle_bracket: < primitive_type: keyword: varchar comma: ',' data_type: primitive_type: keyword: date end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: col7 data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: field1 - colon: ':' - data_type: primitive_type: keyword: boolean - comma: ',' - naked_identifier: field2 - colon: ':' - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: - keyword: double - keyword: precision end_angle_bracket: '>' - comma: ',' - naked_identifier: field3 - colon: ':' - data_type: - keyword: UNIONTYPE - start_angle_bracket: < - data_type: primitive_type: keyword: string - comma: ',' - data_type: primitive_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: col8 data_type: - keyword: UNIONTYPE - start_angle_bracket: < - data_type: 
primitive_type: keyword: string - comma: ',' - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: char end_angle_bracket: '>' - end_angle_bracket: '>' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_external_partitioned.sql000066400000000000000000000002121503426445100303530ustar00rootroot00000000000000CREATE EXTERNAL TABLE IF NOT EXISTS foo ( col1 int, col2 string ) PARTITIONED BY (col3 string, col4 date) LOCATION 'hdfs://path'; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_external_partitioned.yml000066400000000000000000000026721503426445100303710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 52e99569eb9d2f2655b832a5d6eb4d5ec7c4eaaf4482c0acad37af3fb53bab7c file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: date - end_bracket: ) - keyword: LOCATION - quoted_literal: "'hdfs://path'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_like.sql000066400000000000000000000001231503426445100250540ustar00rootroot00000000000000CREATE TABLE db.new_foo LIKE foo TBLPROPERTIES ("property_name"="property_value"); sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_like.yml000066400000000000000000000016351503426445100250670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 21d89aac8d7e7c77878bd528bcada3dee345999498ff51dbaf3687b618a2edd8 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: new_foo - keyword: LIKE - table_reference: naked_identifier: foo - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"property_name"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"property_value"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_row_format_delimited.sql000066400000000000000000000005531503426445100303360ustar00rootroot00000000000000 CREATE TABLE foo( col1 INT PRIMARY KEY, col2 BIGINT NOT NULL, col3 STRING, col4 STRING COMMENT 'Column 4') COMMENT 'This is a test table' ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' COLLECTION ITEMS TERMINATED BY '\002' MAP KEYS TERMINATED BY '\003' LINES TERMINATED BY '\004' NULL DEFINED AS '\005' STORED AS SEQUENCEFILE; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_row_format_delimited.yml000066400000000000000000000044731503426445100303450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2d6f8816319d86ded1fe93d5cf969ec7b49812373d0e058ebb07a60df9fa1ebf file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: INT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: BIGINT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'Column 4'" - end_bracket: ) - keyword: COMMENT - quoted_literal: "'This is a test table'" - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\001'" - keyword: COLLECTION - keyword: ITEMS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\002'" - keyword: MAP - keyword: KEYS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\003'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\004'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'\\005'" - keyword: STORED - keyword: AS - file_format: SEQUENCEFILE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_row_format_serde.sql000066400000000000000000000005461503426445100275020ustar00rootroot00000000000000CREATE TABLE foo ( col1 int, col2 string ) PARTITIONED BY (col3 string, col4 date) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.RegexSerDe' WITH SERDEPROPERTIES ( "input.regex" = "([^]*) ([^]*) ([^]*) (-|\\[^\\]*\\]) ([^ \"]*|\"[^\"]*\") (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\".*\") ([^ \"]*|\".*\"))?" ) STORED AS TEXTFILE LOCATION 'hdfs://path'; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_row_format_serde.yml000066400000000000000000000037361503426445100275100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ffe814785db2a2c5967d32556c1def3c828422d965554a8ed2b37bb234a1ed5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: date - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.RegexSerDe'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"input.regex"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"([^]*) ([^]*) ([^]*) (-|\\[^\\]*\\]) ([^ \"]*|\"[^\"]*\") (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\".*\") ([^ \"]*|\".*\"))?"' - end_bracket: ) - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: LOCATION - quoted_literal: "'hdfs://path'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_skewed_by.sql000066400000000000000000000002261503426445100261100ustar00rootroot00000000000000CREATE TABLE foo (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) STORED AS DIRECTORIES; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_skewed_by.yml000066400000000000000000000043311503426445100261130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ce74da5f794983b9878af79414b0775e1887ef765352c442b53dbdaf0707d319 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: STRING - end_bracket: ) - skewed_by_clause: - keyword: SKEWED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: 'ON' - bracketed: - start_bracket: ( - bracketed: start_bracket: ( quoted_literal: "'s1'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'s3'" comma: ',' numeric_literal: '3' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'s13'" comma: ',' numeric_literal: '13' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'s78'" comma: ',' numeric_literal: '78' end_bracket: ) - end_bracket: ) - keyword: STORED - keyword: AS - keyword: DIRECTORIES statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_temporary_external.sql000066400000000000000000000002241503426445100300560ustar00rootroot00000000000000CREATE TEMPORARY EXTERNAL TABLE IF NOT EXISTS foo ( col1 int, col2 string ) PARTITIONED BY (col3 string, col4 date) LOCATION 'hdfs://path'; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_temporary_external.yml000066400000000000000000000027231503426445100300660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8197c65999fe69d84ead3d6cc459330c2257773ffca3df94b5a7c725b74a10c8 file: statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col4 data_type: primitive_type: keyword: date - end_bracket: ) - keyword: LOCATION - quoted_literal: "'hdfs://path'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_temporary_properties.sql000066400000000000000000000001731503426445100304330ustar00rootroot00000000000000CREATE TEMPORARY TABLE foo ( col1 string, col2 float ) TBLPROPERTIES ("property1"="value1", "property2"="value2"); sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_temporary_properties.yml000066400000000000000000000024721503426445100304410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f888d36a12373e5d91926cdff2333e67fe22096024b892a00e1226a415b7e48c file: statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: float - end_bracket: ) - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"property1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"value1"' - comma: ',' - quoted_literal: '"property2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"value2"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_with.sql000066400000000000000000000002071503426445100251060ustar00rootroot00000000000000CREATE TABLE masonboro_sandbox.test AS WITH us_sales AS ( SELECT rev FROM masonboro_sales.us_2021 ) SELECT rev FROM us_sales; sqlfluff-3.4.2/test/fixtures/dialects/hive/create_table_with.yml000066400000000000000000000034471503426445100251210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9b6e5c9ad01dd44a93dbd5dac68c1f42b3a7984d1b2c7eae9bc8ac0460e1a03e file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: masonboro_sandbox - dot: . - naked_identifier: test - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: us_sales keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: rev from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: masonboro_sales - dot: . - naked_identifier: us_2021 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: rev from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: us_sales statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/double_equals.sql000066400000000000000000000000551503426445100242660ustar00rootroot00000000000000SELECT a FROM t WHERE t.a == t.b sqlfluff-3.4.2/test/fixtures/dialects/hive/double_equals.yml000066400000000000000000000022111503426445100242640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0bc2fd65471cb9bbacd80e9410496fc3e63bb95cd2e5c52af735d6f87e1da34d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: a - comparison_operator: - raw_comparison_operator: '=' - raw_comparison_operator: '=' - column_reference: - naked_identifier: t - dot: . - naked_identifier: b sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_database.sql000066400000000000000000000000601503426445100242260ustar00rootroot00000000000000DROP DATABASE foo; DROP DATABASE bar RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_database.yml000066400000000000000000000013621503426445100242360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 627698553969b8096046732155df04afb6d8d25701c26005ce447a34b08aab71 file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_schema.sql000066400000000000000000000000431503426445100237230ustar00rootroot00000000000000DROP SCHEMA IF EXISTS foo CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_schema.yml000066400000000000000000000011531503426445100237300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 05bb4fc104ce95efd334d8ee7bfe45d835542a27ed67b9081412c605d076ff1d file: statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: foo - keyword: CASCADE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_table.sql000066400000000000000000000000611503426445100235520ustar00rootroot00000000000000DROP TABLE foo; DROP TABLE IF exists bar PURGE; sqlfluff-3.4.2/test/fixtures/dialects/hive/drop_table.yml000066400000000000000000000014051503426445100235570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1907fbb08c02a5acfea04c7cee28cc55bb58d4b259c93a1b66f841f33aee718c file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: exists - table_reference: naked_identifier: bar - keyword: PURGE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table.sql000066400000000000000000000001231503426445100251420ustar00rootroot00000000000000INSERT INTO TABLE foo SELECT a, b FROM bar; INSERT INTO foo SELECT a, b FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table.yml000066400000000000000000000033031503426445100251470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2a26381ae66bfbdbfbac6d6ec79085122f726a8b22522b87c638c76e5bc8fc68 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table_partition.sql000066400000000000000000000001311503426445100272320ustar00rootroot00000000000000INSERT INTO TABLE foo PARTITION (a='test_foo', b='test_bar') SELECT a, b, c, d FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table_partition.yml000066400000000000000000000033141503426445100272420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 96837e9ccdd944eda65ed9eb2fe07bae2a344a30327d4b73312d1aea59a4f42c file: statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_bar'" - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table_values.sql000066400000000000000000000000751503426445100265270ustar00rootroot00000000000000INSERT INTO TABLE foo VALUES ('foo', 'bar'), ('rab', 'oof'); sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_into_table_values.yml000066400000000000000000000016731503426445100265360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 44a534b8d5b2c63d4f3426be4a68161a3b90809b51552e08966de51c18badb28 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - quoted_literal: "'rab'" - comma: ',' - quoted_literal: "'oof'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_directory.sql000066400000000000000000000000771503426445100271440ustar00rootroot00000000000000INSERT OVERWRITE LOCAL DIRECTORY '/path' SELECT a, b FROM foo; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_directory.yml000066400000000000000000000020721503426445100271430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4ab32463b975833e5d4a94a1f8dec95ecc47b76016380902007df8baa2cfcd6e file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: LOCAL - keyword: DIRECTORY - quoted_literal: "'/path'" - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table.sql000066400000000000000000000000611503426445100262200ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo SELECT a, b FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table.yml000066400000000000000000000020661503426445100262310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9222db3adb9b7864b9638c8ffe6ec8858d02af5032632400b1f43d3fc785f410 file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table_dynamic_partition.sql000066400000000000000000000003301503426445100320140ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo PARTITION (a = 'test_foo', b) IF NOT EXISTS SELECT a, 'test_bar' AS b FROM bar; INSERT OVERWRITE TABLE foo PARTITION (a, b) IF NOT EXISTS SELECT 'test_foo' AS a, 'test_bar' AS b FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table_dynamic_partition.yml000066400000000000000000000052441503426445100320270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e7f09d3cdaeb8d9b972203ab62e00dea948a5619c76ced3e7f5085c6b3dbd2e4 file: - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'test_bar'" alias_expression: alias_operator: keyword: AS naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'test_foo'" alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'test_bar'" alias_expression: alias_operator: keyword: AS naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table_partition.sql000066400000000000000000000001541503426445100303140ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo PARTITION (a='test_foo', b='test_bar') IF NOT EXISTS SELECT a, b, c, d FROM bar; sqlfluff-3.4.2/test/fixtures/dialects/hive/insert_overwrite_table_partition.yml000066400000000000000000000034141503426445100303200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e5386e4860e0aa6d386ebf042c49b0c72e13a2a31c22ff8e2372822eef781d18 file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_bar'" - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/msck_repair_table.sql000066400000000000000000000006151503426445100251120ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK REPAIR TABLE table_identifier ADD PARTITIONS; MSCK REPAIR TABLE table_identifier DROP PARTITIONS; MSCK REPAIR TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK REPAIR TABLE table_identifier; -- run MSCK REPAIR TABLE to recovers all the partitions MSCK REPAIR TABLE t1; MSCK REPAIR TABLE emp_part DROP PARTITIONS; sqlfluff-3.4.2/test/fixtures/dialects/hive/msck_repair_table.yml000066400000000000000000000033411503426445100251130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 04d14147ab5e6a91ad4684a54a2eb40029b7ff458e480c86896bc441e03b0e01 file: - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: emp_part - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/msck_table.sql000066400000000000000000000005431503426445100235500ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK TABLE table_identifier ADD PARTITIONS; MSCK TABLE table_identifier DROP PARTITIONS; MSCK TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK TABLE table_identifier; -- run MSCK REPAIR TABLE to recovers all the partitions MSCK TABLE t1; MSCK TABLE emp_part DROP PARTITIONS; sqlfluff-3.4.2/test/fixtures/dialects/hive/msck_table.yml000066400000000000000000000030631503426445100235520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2453ec227853be4f8cd172f70bedf92bdc447d41ed96b716b1aa01e5dc191bb3 file: - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: emp_part - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/quoted_literal.sql000066400000000000000000000001571503426445100244620ustar00rootroot00000000000000SELECT result, `timestamp` as `timestamp` FROM student WHERE name = "John Smith" OR name = 'Jane Doe'; sqlfluff-3.4.2/test/fixtures/dialects/hive/quoted_literal.yml000066400000000000000000000026641503426445100244710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3110c7704ed9a135aadd7ec7263fc164de36b29aba91c5b56a363b002172118 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: result - comma: ',' - select_clause_element: quoted_literal: '`timestamp`' alias_expression: alias_operator: keyword: as quoted_identifier: '`timestamp`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John Smith"' - binary_operator: OR - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Jane Doe'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_cast.sql000066400000000000000000000001701503426445100237310ustar00rootroot00000000000000select cast(row(col1, col2) as row(a bigint, b decimal(23, 2))) from sch.tbl; select cast(a as json) from sch.tbl; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_cast.yml000066400000000000000000000061131503426445100237360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 31d077932bb174b9ba3e9986901e0bb73478f84da92f761da6a2c3b8e10bb794 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: function: - function_name: row - function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: as - keyword: row - bracketed: - start_bracket: ( - naked_identifier: a - data_type: primitive_type: keyword: bigint - comma: ',' - naked_identifier: b - data_type: primitive_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '23' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a keyword: as data_type: primitive_type: keyword: json end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_cluster_distribute_sort_by.sql000066400000000000000000000006331503426445100304630ustar00rootroot00000000000000SELECT col1, col2 FROM t1 ORDER BY col1; SELECT col1, col2 FROM t1 CLUSTER BY col1; SELECT col1, col2 FROM t1 ORDER BY col1 CLUSTER BY col1; SELECT key, value FROM src SORT BY key ASC, value DESC; SELECT col1, col2 FROM t1 DISTRIBUTE BY col1; SELECT col1, col2 FROM t1 DISTRIBUTE BY col1 SORT BY col1 ASC, col2 DESC; SELECT col1, col2 FROM t1 ORDER BY col1 DISTRIBUTE BY col1 SORT BY col1 ASC, col2 DESC; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_cluster_distribute_sort_by.yml000066400000000000000000000131521503426445100304650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 687d8bd77aea2b7099c821762f7a783d5e459ac8c2f220f1d7856a73a89b1124 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 clusterby_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 clusterby_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: key - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: key - keyword: ASC - comma: ',' - column_reference: naked_identifier: value - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - 
column_reference: naked_identifier: col1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_interval.sql000066400000000000000000000013611503426445100246260ustar00rootroot00000000000000SELECT current_date + INTERVAL '2' DAY; SELECT current_date - INTERVAL '1' DAY AS yesterday; SELECT current_date + INTERVAL '12' HOUR; -- These examples are from: -- https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals SELECT INTERVAL '1' DAY; SELECT INTERVAL '1-2' YEAR TO MONTH; SELECT INTERVAL '1' YEAR + INTERVAL '2' MONTH; SELECT INTERVAL '1 2:3:4.000005' DAY; SELECT INTERVAL '1' DAY+ INTERVAL '2' HOUR + INTERVAL '3' MINUTE + INTERVAL '4' SECOND + INTERVAL '5' NANO; SELECT INTERVAL 1 DAY; SELECT INTERVAL (1+dt) DAY; SELECT 1 DAY; SELECT INTERVAL 1 DAY; SELECT '1-2' YEAR TO MONTH; SELECT INTERVAL '1-2' YEARS TO MONTH; SELECT 2 SECONDS; SELECT 2 SECOND; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_interval.yml000066400000000000000000000137571503426445100246440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8921156b6951ee4c4821b361bff5a53c5c7b13acb992653a2c3210b5e896846c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: + interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: '-' interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY alias_expression: alias_operator: keyword: AS naked_identifier: yesterday - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: + interval_expression: keyword: INTERVAL quoted_literal: "'12'" date_part: HOUR - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'1-2'" - date_part: YEAR - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: YEAR - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1 2:3:4.000005'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: 
expression: - interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: HOUR - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'3'" date_part: MINUTE - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'4'" date_part: SECOND - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'5'" date_part: NANO - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL bracketed: start_bracket: ( expression: numeric_literal: '1' binary_operator: + column_reference: naked_identifier: dt end_bracket: ) date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - quoted_literal: "'1-2'" - date_part: YEAR - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'1-2'" - date_part: YEARS - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '2' date_part: SECONDS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '2' date_part: SECOND - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_lateral_view.sql000066400000000000000000000017321503426445100254620ustar00rootroot00000000000000SELECT pageid, adid FROM pageAds LATERAL VIEW explode(adid_list) adTable AS adid; SELECT adid, count(1) FROM pageAds LATERAL VIEW explode(adid_list) adTable AS adid GROUP BY adid; SELECT * FROM exampleTable LATERAL VIEW explode(col1) myTable1 AS myCol1 LATERAL VIEW explode(myCol1) myTable2 AS myCol2; SELECT myCol1, myCol2 FROM baseTable LATERAL VIEW explode(col1) myTable1 AS myCol1 LATERAL VIEW explode(col2) myTable2 AS myCol2; SELECT * FROM src LATERAL VIEW explode(array()) C AS a limit 10; SELECT * FROM src LATERAL VIEW OUTER explode(array()) C AS a limit 10; -- besides as a part of LATERAL VIEW, UDTF can also be used in the SELECT expression SELECT explode(map('A', 10, 'B', 20, 'C', 30)) AS (key,value); SELECT posexplode(array('A', 'B', 'C')) AS (pos,val); SELECT inline(array(struct('A', 10, DATE '2015-01-01'), struct('B', 20, DATE '2016-02-02'))) AS (col1,col2,col3); SELECT stack(2, 'A', 10, DATE '2015-01-01', 'B', 20, DATE '2016-01-01') AS (col0,col1,col2); sqlfluff-3.4.2/test/fixtures/dialects/hive/select_lateral_view.yml000066400000000000000000000364671503426445100255010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ece8c9771bc0d48dcfdd99c5aa11b89b0f824cafaa96c17cd51565c15b28df8e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: pageid - comma: ',' - select_clause_element: column_reference: naked_identifier: adid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pageAds lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: adid_list end_bracket: ) - naked_identifier: adTable - keyword: AS - naked_identifier: adid - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: adid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pageAds lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: adid_list end_bracket: ) - naked_identifier: adTable - keyword: AS - naked_identifier: adid groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: adid - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: exampleTable - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) - naked_identifier: myTable1 - keyword: AS - naked_identifier: myCol1 - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: myCol1 end_bracket: ) - naked_identifier: myTable2 - keyword: AS - naked_identifier: myCol2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: myCol1 - comma: ',' - select_clause_element: column_reference: naked_identifier: myCol2 from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: baseTable - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) - naked_identifier: myTable1 - keyword: AS - naked_identifier: myCol1 - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: 
bracketed: start_bracket: ( expression: column_reference: naked_identifier: col2 end_bracket: ) - naked_identifier: myTable2 - keyword: AS - naked_identifier: myCol2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: C - keyword: AS - naked_identifier: a limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: C - keyword: AS - naked_identifier: a limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: map function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'A'" - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'B'" - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: quoted_literal: "'C'" - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS bracketed: start_bracket: ( identifier_list: - naked_identifier: key - comma: ',' - naked_identifier: value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: posexplode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'A'" - comma: ',' - expression: quoted_literal: "'B'" - comma: ',' - expression: quoted_literal: "'C'" - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS bracketed: start_bracket: ( identifier_list: - naked_identifier: pos - comma: ',' - naked_identifier: val end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: inline function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: 
quoted_literal: "'A'" - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2015-01-01'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'B'" - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2016-02-02'" - end_bracket: ) - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col3 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: stack function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'A'" - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2015-01-01'" - comma: ',' - expression: quoted_literal: "'B'" - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2016-01-01'" - end_bracket: ) alias_expression: alias_operator: keyword: AS bracketed: start_bracket: ( identifier_list: - naked_identifier: col0 - comma: ',' - naked_identifier: col1 - comma: ',' - naked_identifier: col2 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/hive/select_left_semi_join.sql000066400000000000000000000000771503426445100257730ustar00rootroot00000000000000SELECT a.key, a.val FROM a LEFT SEMI JOIN b ON (a.key = b.key) sqlfluff-3.4.2/test/fixtures/dialects/hive/select_left_semi_join.yml000066400000000000000000000033561503426445100260000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8d293e9a5291804e22108e9862a1832a4a92dce82c84efadf59d77bdcd5fe03 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: key - comma: ',' - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: val from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . 
                  - naked_identifier: key
                end_bracket: )

sqlfluff-3.4.2/test/fixtures/dialects/hive/select_regexp.sql

SELECT * FROM example WHERE url REGEXP '(/click)';
SELECT *, url REGEXP '(/click)' FROM example;
SELECT * FROM example WHERE url IREGEXP '(/click)';
SELECT *, url IREGEXP '(/click)' FROM example;
SELECT * FROM example WHERE url RLIKE '(/click)';
SELECT *, url RLIKE '(/click)' FROM example;

sqlfluff-3.4.2/test/fixtures/dialects/hive/select_regexp.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c294424ca7b8697304dbbce8bebd05e438c39db94b93994fc68f622b8b38db9f
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: url
          keyword: REGEXP
          quoted_literal: "'(/click)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: url
            keyword: REGEXP
            quoted_literal: "'(/click)'"
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: url
          keyword: IREGEXP
          quoted_literal: "'(/click)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: url
            keyword: IREGEXP
            quoted_literal: "'(/click)'"
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: url
          keyword: RLIKE
          quoted_literal: "'(/click)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: url
            keyword: RLIKE
            quoted_literal: "'(/click)'"
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: example
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/hive/select_sampling_table.sql

SELECT * FROM source TABLESAMPLE(BUCKET 3 OUT OF 32 ON id) s;
SELECT * FROM source TABLESAMPLE(BUCKET 3 OUT OF 32 ON rand()) s;
SELECT * FROM source TABLESAMPLE(0.1 PERCENT) AS s;
SELECT * FROM source TABLESAMPLE(100M) s;
SELECT * FROM source TABLESAMPLE(10 ROWS);

sqlfluff-3.4.2/test/fixtures/dialects/hive/select_sampling_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0173e5f44c0b1971d037c21f76cfb49fce18c693f61980bc46459095d07efe72
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: source
            sample_expression:
              keyword: TABLESAMPLE
              bracketed:
              - start_bracket: (
              - keyword: BUCKET
              - numeric_literal: '3'
              - keyword: OUT
              - keyword: OF
              - numeric_literal: '32'
              - keyword: 'ON'
              - naked_identifier: id
              - end_bracket: )
            alias_expression:
              naked_identifier: s
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: source
            sample_expression:
              keyword: TABLESAMPLE
              bracketed:
              - start_bracket: (
              - keyword: BUCKET
              - numeric_literal: '3'
              - keyword: OUT
              - keyword: OF
              - numeric_literal: '32'
              - keyword: 'ON'
              - function:
                  function_name:
                    function_name_identifier: rand
                  function_contents:
                    bracketed:
                      start_bracket: (
                      end_bracket: )
              - end_bracket: )
            alias_expression:
              naked_identifier: s
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: source
            sample_expression:
              keyword: TABLESAMPLE
              bracketed:
                start_bracket: (
                numeric_literal: '0.1'
                keyword: PERCENT
                end_bracket: )
            alias_expression:
              alias_operator:
                keyword: AS
              naked_identifier: s
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: source
            sample_expression:
              keyword: TABLESAMPLE
              bracketed:
                start_bracket: (
                byte_length_literal: 100M
                end_bracket: )
            alias_expression:
              naked_identifier: s
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: source
            sample_expression:
              keyword: TABLESAMPLE
              bracketed:
                start_bracket: (
                numeric_literal: '10'
                keyword: ROWS
                end_bracket: )
- statement_terminator: ;
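
The fixture pairs above follow a simple contract: each .sql file holds example statements for a dialect, and the sibling .yml file records the parse tree SQLFluff is expected to produce for them, fingerprinted by the _hash field. As the generated headers say, the YAML side is regenerated with `python test/generate_parse_fixture_yml.py` rather than edited by hand. The sketch below shows one way to inspect such a parse tree interactively; it is a minimal sketch assuming only the public Linter API from sqlfluff.core, and its stringify() output is a debug rendering rather than the exact fixture YAML format.

# A minimal sketch: parse one of the Hive TABLESAMPLE statements from the
# fixture above and dump the segment tree that the YAML files snapshot.
# Assumes the public Linter API from sqlfluff.core; the printed format is
# a debug view, not the fixture YAML itself.
from sqlfluff.core import Linter

sql = "SELECT * FROM source TABLESAMPLE(BUCKET 3 OUT OF 32 ON id) s;\n"

linter = Linter(dialect="hive")
parsed = linter.parse_string(sql)

# parsed.tree is the root "file" segment; its nesting (statement ->
# select_statement -> from_clause -> ... -> sample_expression) is the
# structure the YAML fixtures above serialise.
print(parsed.tree.stringify())

The command line gives the same view: `sqlfluff parse --dialect hive test/fixtures/dialects/hive/select_sampling_table.sql` prints the tree for every statement in the file, which is exactly what the YAML fixtures pin down.
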
sqlfluff-3.4.2/test/fixtures/dialects/hive/set.sql

set;
set -v;
set foo = 2;
set foo = 'bar';
set hivevar:cat="Chloe";
set mapreduce.reduce.memory.mb=12000;

sqlfluff-3.4.2/test/fixtures/dialects/hive/set.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 825cbeca8cc53fd1ed3f6a7bb81b242abb9d2b6c42ab3848b10306ba8119c8f2
file:
- statement:
    set_statement:
      keyword: set
- statement_terminator: ;
- statement:
    set_statement:
      keyword: set
      option_indicator: '-'
      option: v
- statement_terminator: ;
- statement:
    set_statement:
      keyword: set
      parameter: foo
      raw_comparison_operator: '='
      numeric_literal: '2'
- statement_terminator: ;
- statement:
    set_statement:
      keyword: set
      parameter: foo
      raw_comparison_operator: '='
      quoted_literal: "'bar'"
- statement_terminator: ;
- statement:
    set_statement:
    - keyword: set
    - parameter: hivevar
    - colon_delimiter: ':'
    - parameter: cat
    - raw_comparison_operator: '='
    - quoted_literal: '"Chloe"'
- statement_terminator: ;
- statement:
    set_statement:
    - keyword: set
    - parameter: mapreduce
    - dot: .
    - parameter: reduce
    - dot: .
    - parameter: memory
    - dot: .
    - parameter: mb
    - raw_comparison_operator: '='
    - numeric_literal: '12000'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/hive/truncate_table.sql

TRUNCATE TABLE foo;
TRUNCATE bar PARTITION (col='val');

sqlfluff-3.4.2/test/fixtures/dialects/hive/truncate_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 279328c3f03c864440999770ac5c82449a103eb1f81e265adddf2a5d1611d7a3
file:
- statement:
    truncate_table:
    - keyword: TRUNCATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    truncate_table:
    - keyword: TRUNCATE
    - table_reference:
        naked_identifier: bar
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'val'"
        end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/hive/use.sql

USE db;

sqlfluff-3.4.2/test/fixtures/dialects/hive/use.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/impala/000077500000000000000000000000001503426445100212315ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/impala/.sqlfluff000066400000000000000000000000341503426445100230510ustar00rootroot00000000000000[sqlfluff] dialect = impala sqlfluff-3.4.2/test/fixtures/dialects/impala/compute_stats.sql000066400000000000000000000000711503426445100246420ustar00rootroot00000000000000COMPUTE STATS db.foo; COMPUTE INCREMENTAL STATS db.foo; sqlfluff-3.4.2/test/fixtures/dialects/impala/compute_stats.yml000066400000000000000000000015071503426445100246510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9cfb94b39d2db1240481cf4e1ed5067989948657fb1ef63d1f7ccffe85933f75 file: - statement: compute_stats_statement: - keyword: COMPUTE - keyword: STATS - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - statement_terminator: ; - statement: compute_stats_statement: - keyword: COMPUTE - keyword: INCREMENTAL - keyword: STATS - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/impala/create_table.sql000066400000000000000000000002351503426445100243640ustar00rootroot00000000000000CREATE TABLE db.foo (col1 integer, col2 string); CREATE TABLE db.foo ( col1 INT, col2 STRING, col3 DECIMAL(10,2) ) PARTITIONED BY (col4 INT); sqlfluff-3.4.2/test/fixtures/dialects/impala/create_table.yml000066400000000000000000000041751503426445100243750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 31109752b72e9e04185357c787940f20b3a9e350e5d532c5b0cac132c1687c27 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: integer - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: string - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: col2 data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col3 data_type: primitive_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: col4 data_type: primitive_type: keyword: INT end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/impala/create_table_as_select.sql000066400000000000000000000007501503426445100264100ustar00rootroot00000000000000CREATE TABLE db.foo AS SELECT col1, col2 FROM db.foo1; CREATE TABLE db.foo AS SELECT (col1, col2) FROM db.foo1; CREATE EXTERNAL TABLE IF NOT EXISTS db.foo PARTITIONED BY (col1) SORT BY (col2) COMMENT 'table_comment' ROW FORMAT DELIMITED WITH SERDEPROPERTIES ('key1'='value1', 'key2'='value2') STORED AS PARQUET LOCATION 'hdfs://host/path/to/location' TBLPROPERTIES ('key1'='value1', 'key2'='value2') AS SELECT col1, col2, col3, col4 FROM db.baz; sqlfluff-3.4.2/test/fixtures/dialects/impala/create_table_as_select.yml000066400000000000000000000110411503426445100264050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9950a4866a16732f21f915cbcef42d68386296dae555f2accf28d659c68c00f7 file: - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo1 - statement_terminator: ; - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo1 - statement_terminator: ; - statement: create_table_as_select_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( naked_identifier: col1 end_bracket: ) - keyword: SORT - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: COMMENT - quoted_literal: "'table_comment'" - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'key1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'key2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'hdfs://host/path/to/location'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'key1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'key2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 - comma: ',' - select_clause_element: column_reference: naked_identifier: col4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: baz - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/impala/insert_into.sql000066400000000000000000000007641503426445100243160ustar00rootroot00000000000000INSERT INTO TABLE db.foo SELECT col1, col2 FROM db.foo2; INSERT INTO TABLE db.foo VALUES ((1, 'a'), (2, 'b')); INSERT INTO TABLE db.foo PARTITION (col1, col2) SELECT col1, col2, col3 FROM db.foo2; INSERT INTO TABLE db.foo PARTITION (col1=1, col2='a') SELECT col3 FROM db.foo2; INSERT INTO TABLE db.foo [SHUFFLE] SELECT col1, col2 FROM db.foo2; INSERT INTO TABLE db.foo [NOSHUFFLE] SELECT col1, col2 FROM db.foo2; INSERT INTO db.foo (col1, col2) SELECT col1, col2 FROM db.foo2 WHERE col2 > 100; sqlfluff-3.4.2/test/fixtures/dialects/impala/insert_into.yml000066400000000000000000000157771503426445100243320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f329c571ad013c3e97ffecf4b547b7961027d6bd5dcec7460a38b64f2834549b file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a'" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - start_square_bracket: '[' - keyword: SHUFFLE - end_square_bracket: ']' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - start_square_bracket: '[' - keyword: NOSHUFFLE - end_square_bracket: ']' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/impala/insert_overwrite.sql000066400000000000000000000007211503426445100253640ustar00rootroot00000000000000INSERT OVERWRITE TABLE db.foo SELECT col1, col2 FROM db.foo2; INSERT OVERWRITE TABLE db.foo PARTITION (col1, col2) SELECT col1, col2, col3 FROM db.foo2; INSERT OVERWRITE TABLE db.foo PARTITION (col1=1, col2='a') SELECT col3 FROM db.foo2; INSERT OVERWRITE TABLE db.foo [SHUFFLE] SELECT col1, col2 FROM db.foo2; INSERT OVERWRITE TABLE db.foo [NOSHUFFLE] SELECT col1, col2 FROM db.foo2; INSERT OVERWRITE TABLE db.foo IF NOT EXISTS SELECT col1, col2 FROM db.foo2; sqlfluff-3.4.2/test/fixtures/dialects/impala/insert_overwrite.yml000066400000000000000000000136641503426445100254000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c5fd3168404ff878a8f5e6352ed9ed475328adfb1f07ecd3d0e9c39e335068dc file: - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a'" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - start_square_bracket: '[' - keyword: SHUFFLE - end_square_bracket: ']' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - start_square_bracket: '[' - keyword: NOSHUFFLE - end_square_bracket: ']' - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: foo2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: foo - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . 
- naked_identifier: foo2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/000077500000000000000000000000001503426445100213655ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/mariadb/.sqlfluff000066400000000000000000000000351503426445100232060ustar00rootroot00000000000000[sqlfluff] dialect = mariadb sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_database.sql000066400000000000000000000007561503426445100250510ustar00rootroot00000000000000ALTER DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; ALTER DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N'; ALTER SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; ALTER DATABASE my_database READ ONLY DEFAULT; ALTER DATABASE my_database READ ONLY 0; ALTER DATABASE my_database READ ONLY 1; ALTER DATABASE READ ONLY DEFAULT; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_database.yml000066400000000000000000000063431503426445100250510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1c4b4182cc44aace3c98db61b2527155aad4526958fbad5a5d52dc0be2e7079 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - comparison_operator: raw_comparison_operator: '=' - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE comparison_operator: raw_comparison_operator: '=' collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - numeric_literal: '0' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - 
database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - alter_option_segment: - keyword: READ - keyword: ONLY - keyword: DEFAULT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_table.sql000066400000000000000000000051501503426445100243650ustar00rootroot00000000000000ALTER TABLE `users` MODIFY COLUMN `name` varchar(255) NOT NULL, COMMENT "name of user"; ALTER TABLE `users` MODIFY `name` varchar(255) NOT NULL FIRST; ALTER TABLE `users` RENAME TO `user`; ALTER TABLE `user` RENAME AS `users`; ALTER TABLE `users` RENAME `user`; ALTER TABLE `users` RENAME COLUMN `col_1` TO `del_col_1`; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NULL DEFAULT NULL; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NOT NULL; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) FIRST; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) AFTER `name`; ALTER TABLE `users` DROP COLUMN `age`; ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD UNIQUE `index_name`(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE INDEX (`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD UNIQUE INDEX `index_name`(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE = 8; ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8; ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8 COMMENT 'index for col_1, col_2, col_3'; ALTER TABLE `foo`.`bar` DROP INDEX `index_name`; ALTER TABLE `foo`.`bar` RENAME INDEX `index_name` to `new_index_name`; ALTER TABLE `foo`.`bar` RENAME KEY `key_name` to `new_key_name`; ALTER TABLE `x` ADD CONSTRAINT FOREIGN KEY(`xk`) REFERENCES `y`(`yk`); ALTER TABLE `users` ADD COLUMN `active` tinyint(1) DEFAULT '0'; ALTER TABLE `users` ADD COLUMN IF NOT EXISTS `active` tinyint(1) DEFAULT '0'; ALTER TABLE `foo` ADD `bar` INT FIRST; ALTER TABLE `foo` ADD COLUMN d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL; ALTER TABLE `foo` ADD COLUMN e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED; ALTER TABLE `foo` ADD COLUMN e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) PERSISTENT; ALTER TABLE `foo` ADD COLUMN d INT AS (a*abs(b)); ALTER TABLE `foo` ADD COLUMN e TEXT AS (substr(c,b,b+1)) STORED; ALTER TABLE `foo` ADD COLUMN e TEXT AS (substr(c,b,b+1)) PERSISTENT; ALTER TABLE `foo` CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; ALTER TABLE `foo` CONVERT TO CHARACTER SET `utf8mb4` COLLATE `utf8mb4_unicode_ci`; ALTER TABLE `foo` CONVERT TO CHARACTER SET 'utf8mb4' COLLATE 'utf8mb4_unicode_ci'; ALTER TABLE `foo` CONVERT TO CHARACTER SET "utf8mb4" COLLATE "utf8mb4_unicode_ci"; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_table.yml000066400000000000000000000560721503426445100244000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b90c9ec3a2f7f7bf9bdaeb74300fe26abe56c1e27d543efff2fbb838655ec927 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: MODIFY - keyword: COLUMN - column_definition: quoted_identifier: '`name`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - parameter: COMMENT - quoted_literal: '"name of user"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: MODIFY - column_definition: quoted_identifier: '`name`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - keyword: TO - table_reference: quoted_identifier: '`user`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`user`' - keyword: RENAME - keyword: AS - table_reference: quoted_identifier: '`users`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - table_reference: quoted_identifier: '`user`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - keyword: COLUMN - column_reference: quoted_identifier: '`col_1`' - keyword: TO - column_reference: quoted_identifier: '`del_col_1`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: - quoted_identifier: '`date_of_birth`' - data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT null_literal: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: 
quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - keyword: AFTER - column_reference: quoted_identifier: '`name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: DROP - keyword: COLUMN - column_reference: quoted_identifier: '`age`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '`index_name`' - keyword: UNIQUE - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: UNIQUE index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '`index_name`' - keyword: UNIQUE - keyword: INDEX - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE comparison_operator: raw_comparison_operator: '=' numeric_literal: '8' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE numeric_literal: '8' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE numeric_literal: '8' comment_clause: keyword: COMMENT quoted_literal: "'index for col_1, col_2, col_3'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: RENAME - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - keyword: to - index_reference: quoted_identifier: '`new_index_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: RENAME - keyword: KEY - index_reference: quoted_identifier: '`key_name`' - keyword: to - index_reference: quoted_identifier: '`new_key_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`x`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`xk`' end_bracket: ) - keyword: REFERENCES - column_reference: quoted_identifier: '`y`' - bracketed: start_bracket: ( column_reference: quoted_identifier: '`yk`' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: ADD - keyword: COLUMN - column_definition: quoted_identifier: '`active`' data_type: data_type_identifier: tinyint bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'0'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: quoted_identifier: '`active`' data_type: data_type_identifier: tinyint bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'0'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - keyword: ADD - column_definition: quoted_identifier: '`bar`' data_type: data_type_identifier: INT - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - keyword: VIRTUAL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: 
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: substr
                function_contents:
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: c
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                      binary_operator: +
                      numeric_literal: '1'
                  - end_bracket: )
            end_bracket: )
        - keyword: PERSISTENT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: d
        data_type:
          data_type_identifier: INT
        column_constraint_segment:
          keyword: AS
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: a
              binary_operator: '*'
              function:
                function_name:
                  function_name_identifier: abs
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: b
                    end_bracket: )
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: e
        data_type:
          data_type_identifier: TEXT
        column_constraint_segment:
        - keyword: AS
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: substr
                function_contents:
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: c
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                      binary_operator: +
                      numeric_literal: '1'
                  - end_bracket: )
            end_bracket: )
        - keyword: STORED
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: e
        data_type:
          data_type_identifier: TEXT
        column_constraint_segment:
        - keyword: AS
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: substr
                function_contents:
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: c
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                      binary_operator: +
                      numeric_literal: '1'
                  - end_bracket: )
            end_bracket: )
        - keyword: PERSISTENT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_unicode_ci
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: '`utf8mb4`'
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_identifier: '`utf8mb4_unicode_ci`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: "'utf8mb4'"
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_literal: "'utf8mb4_unicode_ci'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: '"utf8mb4"'
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_literal: '"utf8mb4_unicode_ci"'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_view.sql
ALTER VIEW v2 AS SELECT c, d FROM v1;
ALTER VIEW v2 AS (SELECT c, d FROM v1);
ALTER VIEW v1 (c,d) AS SELECT a,max(b) FROM t1 GROUP BY a;
ALTER VIEW v2 AS SELECT * FROM t2 WHERE s1 IN (SELECT s1 FROM t1) WITH CHECK OPTION;
ALTER VIEW v2 AS SELECT 1 UNION SELECT 2;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/alter_view.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5f84a1c4fd2ee5090b3c44289a6320ca69fd9c9a01acbf4f8ec76278e3e1a909
file:
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: c
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: d
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: v1
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - bracketed:
        start_bracket: (
        select_statement:
          select_clause:
          - keyword: SELECT
          - select_clause_element:
              column_reference:
                naked_identifier: c
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: d
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: v1
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v1
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: c
      - comma: ','
      - column_reference:
          naked_identifier: d
      - end_bracket: )
    - keyword: AS
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: a
        - comma: ','
        - select_clause_element:
            function:
              function_name:
                function_name_identifier: max
              function_contents:
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: b
                  end_bracket: )
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t1
        groupby_clause:
        - keyword: GROUP
        - keyword: BY
        - column_reference:
            naked_identifier: a
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t2
        where_clause:
          keyword: WHERE
          expression:
            column_reference:
              naked_identifier: s1
            keyword: IN
            bracketed:
              start_bracket: (
              select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    column_reference:
                      naked_identifier: s1
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: t1
              end_bracket: )
    - with_check_options:
      - keyword: WITH
      - keyword: CHECK
      - keyword: OPTION
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - set_expression:
      - select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              numeric_literal: '1'
      - set_operator:
          keyword: UNION
      - select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              numeric_literal: '2'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/analyze_table.sql
ANALYZE TABLE some_table;
ANALYZE TABLE some_table1, some_table2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2;
ANALYZE LOCAL TABLE some_table;
ANALYZE LOCAL TABLE some_table1, some_table2;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col WITH 10 BUCKETS;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2 WITH 10 BUCKETS;
ANALYZE TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;
ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/analyze_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: da31a67c607ae72d4f415cdc0446249963c84a488dfa30c0355b264bc81d221c
file:
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
    - keyword: WITH
    - numeric_literal: '10'
    - keyword: BUCKETS
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
    - keyword: WITH
    - numeric_literal: '10'
    - keyword: BUCKETS
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/begin.sql
blocks:BEGIN
select 1;
END blocks~

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/begin.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c17580c773c4cd4a54b495af7c9b0c627eeecc3755e81f3e64bcf2954d9fc267
file:
- statement:
    transaction_statement:
      naked_identifier: blocks
      colon: ':'
      keyword: BEGIN
      statement:
        select_statement:
          select_clause:
            keyword: select
            select_clause_element:
              numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
      naked_identifier: blocks
- statement_terminator: '~'

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/bit_value_literal.sql
SELECT b'01';
SELECT B'01';
SELECT 0b01;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/bit_value_literal.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: df122f68f5da6f8cb4ec03be5a3c8c0337afa15f4c06a19cd3d96ef603abec6b
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "b'01'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "B'01'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '0b01'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/call_statement.sql
CALL somefunction('a');
CALL somefunction(test('something'));
CALL somefunction('test', @test1, test2, test3('test'), "test4");
CALL somefunction();
CALL `somefunction`('a');
CALL testdb.testfunc(123);

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/call_statement.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d38fa4a8a76f50f82728466e6980cf4b3da33742edc200256a9b5dfca1eb3c1a
file:
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a'"
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: test
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      quoted_literal: "'something'"
                    end_bracket: )
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
          - start_bracket: (
          - expression:
              quoted_literal: "'test'"
          - comma: ','
          - expression:
              variable: '@test1'
          - comma: ','
          - expression:
              column_reference:
                naked_identifier: test2
          - comma: ','
          - expression:
              function:
                function_name:
                  function_name_identifier: test3
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      quoted_literal: "'test'"
                    end_bracket: )
          - comma: ','
          - expression:
              quoted_literal: '"test4"'
          - end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          quoted_identifier: '`somefunction`'
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a'"
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          naked_identifier: testdb
          dot: .
          function_name_identifier: testfunc
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              numeric_literal: '123'
            end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/check_constraint.sql
CREATE TABLE t1 (
    CHECK (c1 <> c2),
    c1 INT CHECK (c1 > 10),
    c2 INT CONSTRAINT c2_positive CHECK (c2 > 0),
    c3 INT CHECK (c3 < 100),
    CONSTRAINT c1_nonzero CHECK (c1 <> 0),
    CHECK (c1 > c3)
);
ALTER TABLE t1 ALTER CHECK c2_positive NOT ENFORCED;
ALTER TABLE t1 DROP CONSTRAINT c1_nonzero;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/check_constraint.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 819548e097fc750b37e657682d090047d51f01a458161bb97c18b26796a51181
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
      - start_bracket: (
      - table_constraint:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: c1
            - comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: c2
            end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c1
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c1
                comparison_operator:
                  raw_comparison_operator: '>'
                numeric_literal: '10'
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c2
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
          - keyword: CONSTRAINT
          - object_reference:
              naked_identifier: c2_positive
          - keyword: CHECK
          - bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c2
                comparison_operator:
                  raw_comparison_operator: '>'
                numeric_literal: '0'
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c3
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c3
                comparison_operator:
                  raw_comparison_operator: <
                numeric_literal: '100'
              end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: c1_nonzero
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: c1
              comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
              numeric_literal: '0'
            end_bracket: )
      - comma: ','
      - table_constraint:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: c1
            - comparison_operator:
                raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: c3
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - keyword: ALTER
    - keyword: CHECK
    - object_reference:
        naked_identifier: c2_positive
    - keyword: NOT
    - keyword: ENFORCED
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - keyword: DROP
    - keyword: CONSTRAINT
    - object_reference:
        naked_identifier: c1_nonzero
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/check_table.sql
CHECK TABLE some_table FOR UPGRADE;
CHECK TABLE some_table1, some_table2 FOR UPGRADE;
CHECK TABLE some_table QUICK;
CHECK TABLE some_table1, some_table2 QUICK;
CHECK TABLE some_table FAST;
CHECK TABLE some_table1, some_table2 FAST;
CHECK TABLE some_table MEDIUM;
CHECK TABLE some_table1, some_table2 MEDIUM;
CHECK TABLE some_table EXTENDED;
CHECK TABLE some_table1, some_table2 EXTENDED;
CHECK TABLE some_table CHANGED;
CHECK TABLE some_table1, some_table2 CHANGED;
CHECK TABLE some_table FAST QUICK;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/check_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5db8cdf9f2d5bbf23fbb9a36401f9236af4d0b54707dcbbfaa0a41d12905720b
file:
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FOR
    - keyword: UPGRADE
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: FOR
    - keyword: UPGRADE
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: QUICK
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: QUICK
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FAST
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: FAST
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: MEDIUM
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: MEDIUM
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: CHANGED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: CHANGED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FAST
    - keyword: QUICK
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/checksum_table.sql
CHECKSUM TABLE some_table QUICK;
CHECKSUM TABLE some_table1, some_table2 QUICK;
CHECKSUM TABLE some_table EXTENDED;
CHECKSUM TABLE some_table1, some_table2 EXTENDED;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/checksum_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 59a13f3bdf5aff9ad8d165d083d825c9d4897885a57ea598ca91a825143195fe
file:
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: QUICK
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: QUICK
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: EXTENDED
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/close.sql
CLOSE curcursor;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/close.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: be7b55beb5bbee604a34bad94304e061d9d9078a0755be0f543c4104ba563e86
file:
  statement:
    cursor_open_close_segment:
      keyword: CLOSE
      naked_identifier: curcursor
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/close_qualified.sql
CLOSE `curcursor`;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/close_qualified.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0ff3a6055061615df1ede7802e3b9da4ce61b68f2c3adb5ead2883c43efe7e5d
file:
  statement:
    cursor_open_close_segment:
      keyword: CLOSE
      quoted_identifier: '`curcursor`'
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/column_alias.sql
SELECT 1 AS `one`;
SELECT 2 AS 'two';
SELECT 3 AS "three";
SELECT 4 AS "four""_with_escaped_double_quotes";

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/column_alias.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7fe1e4deda95ada9f20c59d9a235144ef8343ae59730e55350693659095a0c3c
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
          alias_expression:
            alias_operator:
              keyword: AS
            quoted_identifier: '`one`'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '2'
          alias_expression:
            alias_operator:
              keyword: AS
            quoted_identifier: "'two'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '3'
          alias_expression:
            alias_operator:
              keyword: AS
            quoted_identifier: '"three"'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '4'
          alias_expression:
            alias_operator:
              keyword: AS
            quoted_identifier: '"four""_with_escaped_double_quotes"'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_database.sql
CREATE DATABASE my_database;
CREATE DATABASE IF NOT EXISTS my_database;
CREATE DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N';
CREATE DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N';
CREATE SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N';
CREATE DATABASE IF NOT EXISTS xxx CHARACTER SET "utf8mb4" COLLATE "utf8mb4_bin";

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_database.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 28d48d2ecab94ea00fef8f5f3e64a0c0f392dab3b36b7d29548cfdd50bb2a0b2
file:
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
- statement_terminator: ;
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - database_reference:
        naked_identifier: my_database
- statement_terminator: ;
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - create_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - comparison_operator:
          raw_comparison_operator: '='
      - naked_identifier: utf8mb4
    - create_option_segment:
        keyword: COLLATE
        comparison_operator:
          raw_comparison_operator: '='
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - database_reference:
        naked_identifier: my_database
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - create_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - create_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - database_reference:
        naked_identifier: xxx
    - create_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_literal: '"utf8mb4"'
    - create_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_literal: '"utf8mb4_bin"'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_index.sql
CREATE INDEX idx ON tbl (col);
CREATE UNIQUE INDEX idx ON tbl (col);
CREATE FULLTEXT INDEX idx ON tbl (col);
CREATE SPATIAL INDEX idx ON tbl (col);
CREATE INDEX idx USING BTREE ON tbl (col);
CREATE INDEX idx USING HASH ON tbl (col);
CREATE INDEX idx ON tbl (col ASC);
CREATE INDEX idx ON tbl (col DESC);
CREATE INDEX part_of_name ON customer (name(10));
CREATE INDEX idx ON tbl (col) ALGORITHM DEFAULT;
CREATE INDEX idx ON tbl (col) ALGORITHM NOCOPY;
CREATE INDEX idx ON tbl (col) ALGORITHM INSTANT;
CREATE INDEX idx ON tbl (col) LOCK DEFAULT;
CREATE INDEX idx ON tbl ((col1 + col2), (col1 - col2), col1);

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_index.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5ba008b762b05319be63ea1b01262c62e054e8ba120801da8589dc396cd1fdd3
file:
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: UNIQUE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: FULLTEXT
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: SPATIAL
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - index_type:
      - keyword: USING
      - keyword: BTREE
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - index_type:
      - keyword: USING
      - keyword: HASH
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        keyword: ASC
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        keyword: DESC
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: part_of_name
    - keyword: 'ON'
    - table_reference:
        naked_identifier: customer
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: name
        bracketed:
          start_bracket: (
          numeric_literal: '10'
          end_bracket: )
        end_bracket: )
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
    - keyword: ALGORITHM
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
    - keyword: ALGORITHM
    - keyword: NOCOPY
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
    - keyword: ALGORITHM
    - keyword: INSTANT
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col
        end_bracket: )
    - keyword: LOCK
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    create_index_statement:
    - keyword: CREATE
    - keyword: INDEX
    - index_reference:
        naked_identifier: idx
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tbl
    - bracketed:
      - start_bracket: (
      - bracketed:
          start_bracket: (
          expression:
          - column_reference:
              naked_identifier: col1
          - binary_operator: +
          - column_reference:
              naked_identifier: col2
          end_bracket: )
      - comma: ','
      - bracketed:
          start_bracket: (
          expression:
          - column_reference:
              naked_identifier: col1
          - binary_operator: '-'
          - column_reference:
              naked_identifier: col2
          end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: col1
      - end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_or_replace_table.sql
CREATE OR REPLACE TABLE `db_name`.tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TABLE `db_name`.tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TABLE tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TABLE tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TABLE IF NOT EXISTS `db_name`.tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TABLE IF NOT EXISTS `db_name`.tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TABLE IF NOT EXISTS tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TABLE IF NOT EXISTS tbl_name SELECT * FROM tbl;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_or_replace_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8faaa71decf610d6943ce530b13f80bf702cdec7d0bc1ef1a46d999de27ec28a
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_or_replace_temporary_table.sql
CREATE OR REPLACE TEMPORARY TABLE `db_name`.tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE `db_name`.tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE IF NOT EXISTS `db_name`.tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE IF NOT EXISTS `db_name`.tbl_name SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE IF NOT EXISTS tbl_name AS SELECT * FROM tbl;
CREATE OR REPLACE TEMPORARY TABLE IF NOT EXISTS tbl_name SELECT * FROM tbl;
--
-- CREATE OR REPLACE TEMPORARY TABLE `db_name`.tbl_name ENGINE=MyISAM AS
-- SELECT * FROM tbl;
--
-- CREATE OR REPLACE TEMPORARY TABLE tbl_name ENGINE=InnoDB
-- SELECT * FROM tbl;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_or_replace_temporary_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 866f29089e98eb407c08c382cc3cc56d51c361e06a7f596aa6f6b0c0c10fccfd
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        quoted_identifier: '`db_name`'
        dot: .
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: tbl_name
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TEMPORARY
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: tbl_name
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_role.sql
CREATE ROLE IF NOT EXISTS 'example-role';

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_role.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3ab69fdfaf7a2669b62e188941317fb7f7d67caea7c4cf122dbe07fd4001e936
file:
  statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - role_reference:
        quoted_identifier: "'example-role'"
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table.sql
CREATE TABLE `foo` (
    b VARCHAR(255) BINARY,
    `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARACTER SET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
CREATE TABLE `foo` (
    b VARCHAR(255) BINARY,
    `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=`utf8mb4` COLLATE=`utf8mb4_unicode_ci`;
CREATE TABLE `foo` (
    b VARCHAR(255) BINARY,
    `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET='utf8mb4' COLLATE='utf8mb4_unicode_ci';
CREATE TABLE `foo` (
    b VARCHAR(255) BINARY,
    `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET="utf8mb4" COLLATE="utf8mb4_unicode_ci";
create table `tickets` (
    `id` serial primary key,
    `material_number` varchar(255) default null,
    `material_name` varchar(255) default null,
    `date_created` date not null default (current_date),
    `date_closed` date default null
);
create table _ (a int);
create table y LIKE x;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3e08bc7a73f03cacf29b609751f0a8e855668f158467d7b90fe72ac5c1ec0368
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: b
          data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
            keyword: BINARY
      - comma: ','
      - column_definition:
        - quoted_identifier: '`id`'
        - data_type:
            data_type_identifier: int
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '11'
                end_bracket: )
            keyword: unsigned
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: AUTO_INCREMENT
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              quoted_identifier: '`id`'
            end_bracket: )
      - end_bracket: )
    - parameter: ENGINE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: InnoDB
    - keyword: DEFAULT
    - keyword: CHARACTER
    - keyword: SET
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: utf8mb4
    - parameter: COLLATE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: utf8mb4_unicode_ci
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: b
          data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
            keyword: BINARY
      - comma: ','
      - column_definition:
        - quoted_identifier: '`id`'
        - data_type:
            data_type_identifier: int
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '11'
                end_bracket: )
            keyword: unsigned
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: AUTO_INCREMENT
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              quoted_identifier: '`id`'
            end_bracket: )
      - end_bracket: )
    - parameter: ENGINE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: InnoDB
    - keyword: DEFAULT
    - parameter: CHARSET
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: '`utf8mb4`'
    - parameter: COLLATE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: '`utf8mb4_unicode_ci`'
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: b
          data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
            keyword: BINARY
      - comma: ','
      - column_definition:
        - quoted_identifier: '`id`'
        - data_type:
            data_type_identifier: int
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '11'
                end_bracket: )
            keyword: unsigned
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: AUTO_INCREMENT
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              quoted_identifier: '`id`'
            end_bracket: )
      - end_bracket: )
    - parameter: ENGINE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: InnoDB
    - keyword: DEFAULT
    - parameter: CHARSET
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'utf8mb4'"
    - parameter: COLLATE
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'utf8mb4_unicode_ci'"
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: b
          data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
            keyword: BINARY
      - comma: ','
      - column_definition:
        - quoted_identifier: '`id`'
        - data_type:
            data_type_identifier: int
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '11'
                end_bracket: )
            keyword: unsigned
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: AUTO_INCREMENT
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              quoted_identifier: '`id`'
            end_bracket: )
      - end_bracket: )
    - parameter: ENGINE
    - comparison_operator:
        raw_comparison_operator: '='
    - parameter: InnoDB
    - keyword: DEFAULT
    - parameter: CHARSET
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: '"utf8mb4"'
    - parameter: COLLATE
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: '"utf8mb4_unicode_ci"'
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        quoted_identifier: '`tickets`'
    - bracketed:
      - start_bracket: (
      - column_definition:
          quoted_identifier: '`id`'
          data_type:
            data_type_identifier: serial
          column_constraint_segment:
          - keyword: primary
          - keyword: key
      - comma: ','
      - column_definition:
          quoted_identifier: '`material_number`'
          data_type:
            data_type_identifier: varchar
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
          column_constraint_segment:
            keyword: default
            null_literal: 'null'
      - comma: ','
      - column_definition:
          quoted_identifier: '`material_name`'
          data_type:
            data_type_identifier: varchar
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '255'
                end_bracket: )
          column_constraint_segment:
            keyword: default
            null_literal: 'null'
      - comma: ','
      - column_definition:
        - quoted_identifier: '`date_created`'
        - data_type:
            data_type_identifier: date
        - column_constraint_segment:
          - keyword: not
          - keyword: 'null'
        - column_constraint_segment:
            keyword: default
            bracketed:
              start_bracket: (
              bare_function: current_date
              end_bracket: )
      - comma: ','
      - column_definition:
          quoted_identifier: '`date_closed`'
          data_type:
            data_type_identifier: date
          column_constraint_segment:
            keyword: default
            null_literal: 'null'
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        naked_identifier: _
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: a
          data_type:
            data_type_identifier: int
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        naked_identifier: y
    - keyword: LIKE
    - table_reference:
        naked_identifier: x
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_column_charset.sql
CREATE TABLE t1 (
    col1 VARCHAR(5) CHARACTER SET latin1 COLLATE latin1_german1_ci
);
CREATE TABLE t1 (
    col1 VARCHAR(5) CHARACTER SET `latin1` COLLATE `latin1_german1_ci`
);
CREATE TABLE t1 (
    col1 VARCHAR(5) CHARACTER SET 'latin1' COLLATE 'latin1_german1_ci'
);
CREATE TABLE t1 (
    col1 VARCHAR(5) CHARACTER SET "latin1" COLLATE "latin1_german1_ci"
);
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_column_charset.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3d383e37b65631c852af5deffd0bdde1132fbaef0e98b247fd01bbc2cece2e8c
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
        start_bracket: (
        column_definition:
        - naked_identifier: col1
        - data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '5'
                end_bracket: )
        - column_constraint_segment:
          - keyword: CHARACTER
          - keyword: SET
          - naked_identifier: latin1
        - column_constraint_segment:
            keyword: COLLATE
            collation_reference:
              naked_identifier: latin1_german1_ci
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
        start_bracket: (
        column_definition:
        - naked_identifier: col1
        - data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '5'
                end_bracket: )
        - column_constraint_segment:
          - keyword: CHARACTER
          - keyword: SET
          - quoted_identifier: '`latin1`'
        - column_constraint_segment:
            keyword: COLLATE
            collation_reference:
              quoted_identifier: '`latin1_german1_ci`'
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
        start_bracket: (
        column_definition:
        - naked_identifier: col1
        - data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '5'
                end_bracket: )
        - column_constraint_segment:
          - keyword: CHARACTER
          - keyword: SET
          - quoted_identifier: "'latin1'"
        - column_constraint_segment:
            keyword: COLLATE
            collation_reference:
              quoted_literal: "'latin1_german1_ci'"
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
        start_bracket: (
        column_definition:
        - naked_identifier: col1
        - data_type:
            data_type_identifier: VARCHAR
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '5'
                end_bracket: )
        - column_constraint_segment:
          - keyword: CHARACTER
          - keyword: SET
          - quoted_identifier: '"latin1"'
        - column_constraint_segment:
            keyword: COLLATE
            collation_reference:
              quoted_literal: '"latin1_german1_ci"'
        end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_constraint_unique.sql
CREATE TABLE a(
    a INT NOT NULL,
    UNIQUE (a),
    UNIQUE idx_c(a),
    UNIQUE KEY (a),
    UNIQUE KEY idx_a(a),
    UNIQUE INDEX (a),
    UNIQUE INDEX idx_b(a)
)

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_constraint_unique.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dc9e42ca7dac29e96e7c93596f533e5ccef4a0db1fe35bb1f46c8aaff86b57f5
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: a
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: a
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - table_constraint:
          keyword: UNIQUE
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - comma: ','
      - table_constraint:
          keyword: UNIQUE
          index_reference:
            naked_identifier: idx_c
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: UNIQUE
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: UNIQUE
        - keyword: KEY
        - index_reference:
            naked_identifier: idx_a
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: UNIQUE
        - keyword: INDEX
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: UNIQUE
        - keyword: INDEX
        - index_reference:
            naked_identifier: idx_b
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: a
            end_bracket: )
      - end_bracket: )

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_datetime.sql
CREATE TABLE `foo` (
    created_date DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL,
    ts1 TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    dt1 DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    ts2 TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    dt2 DATETIME DEFAULT CURRENT_TIMESTAMP,
    ts3 TIMESTAMP DEFAULT 0,
    dt3 DATETIME DEFAULT 0,
    ts4 TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP,
    dt4 DATETIME DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP,
    ts5 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, -- default 0
    ts6 TIMESTAMP NULL ON UPDATE CURRENT_TIMESTAMP, -- default NULL
    dt5 DATETIME ON UPDATE CURRENT_TIMESTAMP, -- default NULL
    dt6 DATETIME NOT NULL ON UPDATE CURRENT_TIMESTAMP, -- default 0
    ts7 TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
    ts8 TIMESTAMP NULL DEFAULT NULL,
    ts9 TIMESTAMP NULL DEFAULT 0,
    ts10 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP,
    ts11 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP(),
    ts12 TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00',
    ts13 TIMESTAMP NULL DEFAULT NOW ON UPDATE NOW,
    ts14 TIMESTAMP NULL DEFAULT NOW() ON UPDATE NOW(),
    ts15 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    ts16 TIMESTAMP NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP
)

sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_datetime.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 04711fd76a3d8efbe053513b9a50ce0e2aaf4af87a96e105f521c58ca30810c6
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - bracketed:
      - start_bracket: (
      - column_definition:
        - naked_identifier: created_date
        - keyword: DATETIME
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_definition:
        - naked_identifier: ts1
        - keyword: TIMESTAMP
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: dt1
        - keyword: DATETIME
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts2
        - keyword: TIMESTAMP
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: dt2
        - keyword: DATETIME
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts3
        - keyword: TIMESTAMP
        - keyword: DEFAULT
        - numeric_literal: '0'
      - comma: ','
      - column_definition:
        - naked_identifier: dt3
        - keyword: DATETIME
        - keyword: DEFAULT
        - numeric_literal: '0'
      - comma: ','
      - column_definition:
        - naked_identifier: ts4
        - keyword: TIMESTAMP
        - keyword: DEFAULT
        - numeric_literal: '0'
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: dt4
        - keyword: DATETIME
        - keyword: DEFAULT
        - numeric_literal: '0'
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts5
        - keyword: TIMESTAMP
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts6
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: dt5
        - keyword: DATETIME
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: dt6
        - keyword: DATETIME
        - keyword: NOT
        - keyword: 'NULL'
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts7
        - keyword: TIMESTAMP
        - bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '6'
              end_bracket: )
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
        - bracketed:
            start_bracket: (
            numeric_literal: '6'
            end_bracket: )
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CURRENT_TIMESTAMP
        - bracketed:
            start_bracket: (
            numeric_literal: '6'
            end_bracket: )
      - comma: ','
      - column_definition:
        - naked_identifier: ts8
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - keyword: 'NULL'
      - comma: ','
      - column_definition:
        - naked_identifier: ts9
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - numeric_literal: '0'
      - comma: ','
      - column_definition:
        - naked_identifier: ts10
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
      - comma: ','
      - column_definition:
        - naked_identifier: ts11
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - keyword: CURRENT_TIMESTAMP
        - bracketed:
            start_bracket: (
            end_bracket: )
      - comma: ','
      - column_definition:
        - naked_identifier: ts12
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - quoted_literal: "'0000-00-00 00:00:00'"
      - comma: ','
      - column_definition:
        - naked_identifier: ts13
        - keyword: TIMESTAMP
        - keyword: 'NULL'
        - keyword: DEFAULT
        - keyword: NOW
        -
keyword: 'ON' - keyword: UPDATE - keyword: NOW - comma: ',' - column_definition: - naked_identifier: ts14 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts15 - keyword: TIMESTAMP - keyword: NOT - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts16 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_equals_optional.sql000066400000000000000000000000671503426445100300020ustar00rootroot00000000000000CREATE TABLE a ( id INT ) COLLATE utf8_general_ci; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_equals_optional.yml000066400000000000000000000014331503426445100300020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 58a605d729c2c4dda00b1d884f06b88b929780984ff660f716d2173b480119dc file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT end_bracket: ) - parameter: COLLATE - parameter: utf8_general_ci statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_generated_column.sql000066400000000000000000000012231503426445100301110ustar00rootroot00000000000000CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL, e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED, PRIMARY KEY (a) ); CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL, e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) PERSISTENT, PRIMARY KEY (a) ); CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT AS (a*abs(b)), e TEXT AS (substr(c,b,b+1)) STORED COMMENT 'foo', PRIMARY KEY (a) ); CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT AS (a*abs(b)), e TEXT AS (substr(c,b,b+1)) PERSISTENT COMMENT 'foo', PRIMARY KEY (a) ); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_generated_column.yml000066400000000000000000000272701503426445100301250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d20c4b39ccdb4cf19f45ab9d8ebd9df1fe1532519f3c5a9bab593861f1d59c23 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - keyword: VIRTUAL - comma: ',' - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - keyword: VIRTUAL - comma: ',' - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: PERSISTENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( 
- column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: keyword: AS bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - comma: ',' - column_definition: - naked_identifier: e - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'foo'" - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: keyword: AS bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - comma: ',' - column_definition: - naked_identifier: e - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: PERSISTENT - column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'foo'" - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_index.sql000066400000000000000000000005171503426445100257120ustar00rootroot00000000000000CREATE TABLE foo ( id INT UNSIGNED AUTO_INCREMENT NOT NULL, a TEXT(500), b INT, c INT, PRIMARY KEY (id) COMMENT 'primary key (id)', FULLTEXT `idx_a` (a) COMMENT 'index (a)', INDEX `idx_prefix_a` (a(20)), INDEX `idx_b` (b) COMMENT 'index 
(b)', INDEX `idx_desc_b` (b DESC), INDEX `idx_asc_c` (c ASC) ) ENGINE=InnoDB; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_index.yml000066400000000000000000000073541503426445100257220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99426d9a1d7e06af48cd81e5c09abd4f275ecd5dec99e2f398195bd08c14f018 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT keyword: UNSIGNED - column_constraint_segment: keyword: AUTO_INCREMENT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: TEXT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '500' end_bracket: ) - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: INT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - index_option: comment_clause: keyword: COMMENT quoted_literal: "'primary key (id)'" - comma: ',' - table_constraint: keyword: FULLTEXT index_reference: quoted_identifier: '`idx_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (a)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_prefix_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (b)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_desc_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b keyword: DESC end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_asc_c`' bracketed: start_bracket: ( column_reference: naked_identifier: c keyword: ASC end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_null_position.sql000066400000000000000000000005431503426445100275000ustar00rootroot00000000000000CREATE TABLE IF NOT EXISTS db_name.table_name ( updated_at1 timestamp default CURRENT_TIMESTAMP not null on update CURRENT_TIMESTAMP, updated_at2 timestamp not null default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, updated_at3 timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP not null, updated_at4 timestamp ); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_null_position.yml000066400000000000000000000033621503426445100275040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should 
not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 15f782abb1331d16b725b8b668192875e574c8c085ef79377398499dd95efbf1 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: updated_at1 - keyword: timestamp - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: not - keyword: 'null' - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: updated_at2 - keyword: timestamp - keyword: not - keyword: 'null' - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: updated_at3 - keyword: timestamp - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - keyword: not - keyword: 'null' - comma: ',' - column_definition: naked_identifier: updated_at4 keyword: timestamp - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_options.sql000066400000000000000000000026541503426445100263020ustar00rootroot00000000000000CREATE TABLE t1 ( year_col INT, some_data INT ) PARTITION BY RANGE (year_col) ( PARTITION p0 VALUES LESS THAN (1991), PARTITION p1 VALUES LESS THAN (1995), PARTITION p2 VALUES LESS THAN (1999), PARTITION p3 VALUES LESS THAN (2002), PARTITION p4 VALUES LESS THAN (2006), PARTITION p5 VALUES LESS THAN MAXVALUE ); CREATE TABLE tbl_name ( id INT PRIMARY KEY AUTO_INCREMENT, c DATETIME NOT NULL, INDEX(col) ) ENGINE = InnoDB AUTO_INCREMENT = 1 AVG_ROW_LENGTH = 10 DEFAULT CHARACTER SET = utf8_unicode_ci CHECKSUM = 1 DEFAULT COLLATE = utf8mb4_unicode_ci COMMENT = 'comment' CONNECTION = 'connection_string' DATA_DIRECTORY = 'path/to/dir' DELAY_KEY_WRITE = 0 ENCRYPTED = NO ENCRYPTION_KEY_ID = 1234 IETF_QUOTES = YES INDEX DIRECTORY = 'path/to/dir' INSERT_METHOD = LAST KEY_BLOCK_SIZE = 1024 MAX_ROWS = 100000 MIN_ROWS = 1 PACK_KEYS = 1 PAGE_CHECKSUM = 1 PAGE_COMPRESSED = 0 PAGE_COMPRESSION_LEVEL = 9 PASSWORD = 'password' ROW_FORMAT = DYNAMIC SEQUENCE = 1 STATS_AUTO_RECALC = 1 STATS_PERSISTENT = 1 STATS_SAMPLE_PAGES = 4 TABLESPACE tablespace_name TRANSACTIONAL = 1 UNION = (tbl1,tbl2) WITH SYSTEM VERSIONING PARTITION BY LINEAR HASH(YEAR(c)) PARTITIONS 1 SUBPARTITION BY KEY(id, c) SUBPARTITIONS 1 PARTITION first_name VALUES LESS THAN 50 ENGINE = InnoDB COMMENT='comment' ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_options.yml000066400000000000000000000216341503426445100263030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6f13045ccbc00225038a5fa145915f080a65c52eeece14812367dd3fbfaa7015 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: year_col data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: some_data data_type: data_type_identifier: INT - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: year_col end_bracket: ) - bracketed: - start_bracket: ( - keyword: PARTITION - column_reference: naked_identifier: p0 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( expression: numeric_literal: '1991' end_bracket: ) - comma: ',' - keyword: PARTITION - column_reference: naked_identifier: p1 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( expression: numeric_literal: '1995' end_bracket: ) - comma: ',' - keyword: PARTITION - column_reference: naked_identifier: p2 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( expression: numeric_literal: '1999' end_bracket: ) - comma: ',' - keyword: PARTITION - column_reference: naked_identifier: p3 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( expression: numeric_literal: '2002' end_bracket: ) - comma: ',' - keyword: PARTITION - column_reference: naked_identifier: p4 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( expression: numeric_literal: '2006' end_bracket: ) - comma: ',' - keyword: PARTITION - column_reference: naked_identifier: p5 - keyword: VALUES - keyword: LESS - keyword: THAN - keyword: MAXVALUE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - column_definition: - naked_identifier: c - keyword: DATETIME - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - parameter: AUTO_INCREMENT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: AVG_ROW_LENGTH - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - comparison_operator: raw_comparison_operator: '=' - parameter: utf8_unicode_ci - parameter: CHECKSUM - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: DEFAULT - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - parameter: utf8mb4_unicode_ci - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'comment'" - parameter: CONNECTION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'connection_string'" - parameter: DATA_DIRECTORY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'path/to/dir'" - parameter: DELAY_KEY_WRITE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - parameter: ENCRYPTED - 
comparison_operator: raw_comparison_operator: '=' - parameter: 'NO' - parameter: ENCRYPTION_KEY_ID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1234' - parameter: IETF_QUOTES - comparison_operator: raw_comparison_operator: '=' - parameter: 'YES' - keyword: INDEX - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'path/to/dir'" - parameter: INSERT_METHOD - comparison_operator: raw_comparison_operator: '=' - parameter: LAST - parameter: KEY_BLOCK_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1024' - parameter: MAX_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100000' - parameter: MIN_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: PACK_KEYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: PAGE_CHECKSUM - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: PAGE_COMPRESSED - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - parameter: PAGE_COMPRESSION_LEVEL - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '9' - parameter: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'password'" - parameter: ROW_FORMAT - comparison_operator: raw_comparison_operator: '=' - parameter: DYNAMIC - parameter: SEQUENCE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: STATS_AUTO_RECALC - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: STATS_PERSISTENT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: STATS_SAMPLE_PAGES - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - parameter: TABLESPACE - parameter: tablespace_name - parameter: TRANSACTIONAL - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - parameter: UNION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - table_reference: naked_identifier: tbl1 - comma: ',' - table_reference: naked_identifier: tbl2 - end_bracket: ) - keyword: WITH - keyword: SYSTEM - parameter: VERSIONING - keyword: PARTITION - keyword: BY - keyword: LINEAR - keyword: HASH - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: YEAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) end_bracket: ) - keyword: PARTITIONS - numeric_literal: '1' - parameter: SUBPARTITION - parameter: BY - parameter: KEY - bracketed: - start_bracket: ( - table_reference: naked_identifier: id - comma: ',' - table_reference: naked_identifier: c - end_bracket: ) - parameter: SUBPARTITIONS - numeric_literal: '1' - parameter: PARTITION - parameter: first_name - parameter: VALUES - parameter: LESS - parameter: THAN - numeric_literal: '50' - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'comment'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_primary_foreign_keys.sql000066400000000000000000000021461503426445100310320ustar00rootroot00000000000000CREATE TABLE parent ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE child ( id INT, parent_id INT, INDEX par_ind (parent_id), FOREIGN KEY (parent_id) REFERENCES parent(id) ON 
DELETE CASCADE ); CREATE TABLE product ( category INT NOT NULL, id INT NOT NULL, price DECIMAL, PRIMARY KEY(category, id) ); CREATE TABLE customer ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE product_order ( product_category INT NOT NULL, product_id INT NOT NULL, customer_id INT NOT NULL, PRIMARY KEY(no), -- INDEX (product_category, product_id), -- INDEX (customer_id), FOREIGN KEY (product_category, product_id) REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT, FOREIGN KEY (customer_id) REFERENCES customer(id) ); CREATE TABLE source_tag_assoc ( source_id INT UNSIGNED NOT NULL, tag_id INT UNSIGNED NOT NULL, PRIMARY KEY (source_id, tag_id), FOREIGN KEY (source_id) REFERENCES source (id) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES source_tag (id) ON DELETE CASCADE ); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_primary_foreign_keys.yml000066400000000000000000000213131503426445100310310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fb91070986ad8b8e82dd3bd9b793200a0ed9398192aa045f0361fdcae06af34b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: parent - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: child - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: INT - comma: ',' - table_constraint: keyword: INDEX index_reference: naked_identifier: par_ind bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: parent - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: price data_type: data_type_identifier: DECIMAL - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - end_bracket: ) - statement_terminator: ; - 
statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: customer - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product_order - bracketed: - start_bracket: ( - column_definition: naked_identifier: product_category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: product_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: customer_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: 'no' end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_category - comma: ',' - column_reference: naked_identifier: product_id - end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: customer_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: customer - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: source_tag_assoc - bracketed: - start_bracket: ( - column_definition: naked_identifier: source_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: tag_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: source_id - comma: ',' - column_reference: naked_identifier: tag_id - end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: source_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: tag_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source_tag - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: 
) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_unique_key.sql000066400000000000000000000000501503426445100267510ustar00rootroot00000000000000create table a( b int unique key ); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_table_unique_key.yml000066400000000000000000000014721503426445100267640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: af44e65430301a281d5baa8730323d7ad9c796dcb0140ca1771982c21ee78e69 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: b data_type: data_type_identifier: int column_constraint_segment: - keyword: unique - keyword: key end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_temporary_table.sql000066400000000000000000000011431503426445100266210ustar00rootroot00000000000000CREATE TEMPORARY TABLE tbl_name AS SELECT * FROM table_name; CREATE TEMPORARY TABLE tbl_name SELECT * FROM table_name; CREATE TEMPORARY TABLE `db_name`.tbl_name AS SELECT * FROM table_name; CREATE TEMPORARY TABLE `db_name`.tbl_name SELECT * FROM table_name; CREATE TEMPORARY TABLE IF NOT EXISTS `db_name`.tbl_name AS SELECT * FROM table_name; CREATE TEMPORARY TABLE IF NOT EXISTS `db_name`.tbl_name SELECT * FROM table_name; CREATE TEMPORARY TABLE IF NOT EXISTS tbl_name AS SELECT * FROM table_name; CREATE TEMPORARY TABLE IF NOT EXISTS tbl_name SELECT * FROM table_name; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_temporary_table.yml000066400000000000000000000133061503426445100266270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 255acc8eb3b0b682ef483ee0aa8ed2c7a42407e7a41896ad89c772f0299aec04 file: - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: quoted_identifier: '`db_name`' dot: . 
naked_identifier: tbl_name - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: quoted_identifier: '`db_name`' dot: . naked_identifier: tbl_name - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '`db_name`' dot: . naked_identifier: tbl_name - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '`db_name`' dot: . naked_identifier: tbl_name - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: tbl_name - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: tbl_name - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_temporary_table_ddl.sql000066400000000000000000000007631503426445100274530ustar00rootroot00000000000000CREATE TEMPORARY TABLE tbl_name ( id INT PRIMARY KEY AUTO_INCREMENT, col VARCHAR(255) DEFAULT '' NOT NULL, INDEX(col) ) AS SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name ( id INT PRIMARY KEY AUTO_INCREMENT, col VARCHAR(255) DEFAULT '' NOT NULL, INDEX(col) ) SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name (INDEX(col)) AS SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name (INDEX(col)) SELECT id, col FROM table_name; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_temporary_table_ddl.yml000066400000000000000000000133541503426445100274550ustar00rootroot00000000000000# 
YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ff62a8039e0f57c9ebc19e6cff094e7e9ee9a2e3b69882a960a0d98868bc66b9 file: - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - column_definition: - naked_identifier: col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "''" - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - column_definition: - naked_identifier: col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "''" - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: start_bracket: ( table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - 
table_reference: naked_identifier: tbl_name - bracketed: start_bracket: ( table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_trigger.sql000066400000000000000000000030221503426445100250710ustar00rootroot00000000000000CREATE TRIGGER delete_members_after_transactions AFTER DELETE ON transactions FOR EACH ROW DELETE FROM members WHERE username NOT IN (SELECT UNIQUE(username) FROM transactions); CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW BEGIN DELETE FROM some_table; INSERT INTO some_table; END; CREATE TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER IF NOT EXISTS some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW FOLLOWS some_other_trigger DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW PRECEDES some_other_trigger DELETE FROM other_table; CREATE DEFINER=`root`@`127.0.0.1` TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE DEFINER=CURRENT_USER TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_trigger.yml000066400000000000000000000271721503426445100251070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 39f80e632ae1f4f2b0670e5fa3a867f82fe804b31d64045847cef827094eabd6 file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: delete_members_after_transactions - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: transactions - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: members where_clause: keyword: WHERE expression: - column_reference: naked_identifier: username - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: UNIQUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: username end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: transactions end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: transaction_statement: keyword: BEGIN statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - keyword: IF - keyword: NOT - keyword: EXISTS - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - 
keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: FOLLOWS - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: PRECEDES - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`root`' - at_sign_literal: '@' - quoted_identifier: '`127.0.0.1`' - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . - naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: keyword: CURRENT_USER - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: amount - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_user.sql000066400000000000000000000161231503426445100244120ustar00rootroot00000000000000CREATE USER jeffrey; CREATE USER IF NOT EXISTS jeffrey; CREATE USER 'prj_svc' IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS'; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'password'; CREATE USER "jeffrey"@"localhost" IDENTIFIED BY "password"; CREATE USER `jeffrey`@`localhost` IDENTIFIED BY "password"; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'new_password' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password' PASSWORD EXPIRE INTERVAL 180 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 2; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'new_password1', 'jeanne'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password2' REQUIRE X509 WITH MAX_QUERIES_PER_HOUR 60 PASSWORD HISTORY 5 ACCOUNT LOCK; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'password'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com' AND IDENTIFIED WITH authentication_fido; CREATE USER user IDENTIFIED WITH authentication_fido INITIAL AUTHENTICATION IDENTIFIED BY RANDOM PASSWORD; CREATE USER 'joe'@'10.0.0.1' DEFAULT ROLE administrator, developer; CREATE USER 'jeffrey'@'localhost' REQUIRE NONE; CREATE USER 'jeffrey'@'localhost' REQUIRE SSL; CREATE USER 'jeffrey'@'localhost' REQUIRE X509; CREATE USER 'jeffrey'@'localhost' REQUIRE ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com' AND ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com' AND CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' WITH MAX_QUERIES_PER_HOUR 500 MAX_UPDATES_PER_HOUR 100; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE NEVER; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE INTERVAL 180 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY 6; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL 360 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT OPTIONAL; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT DEFAULT; CREATE USER 'jeffrey'@'localhost' FAILED_LOGIN_ATTEMPTS 4 PASSWORD_LOCK_TIME 2; CREATE USER 'jon'@'localhost' COMMENT 'Some information about Jon'; CREATE USER 'jim'@'localhost' ATTRIBUTE '{"fname": "James", "lname": "Scott", "phone": "123-456-7890"}'; CREATE OR REPLACE USER jeffrey; CREATE OR REPLACE USER IF NOT EXISTS jeffrey; CREATE OR REPLACE 
USER 'prj_svc' IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS'; CREATE OR REPLACE USER 'jeffrey'@'localhost' IDENTIFIED BY 'password'; CREATE OR REPLACE USER "jeffrey"@"localhost" IDENTIFIED BY "password"; CREATE OR REPLACE USER `jeffrey`@`localhost` IDENTIFIED BY "password"; CREATE OR REPLACE USER 'jeffrey'@'localhost' IDENTIFIED BY 'new_password' PASSWORD EXPIRE; CREATE OR REPLACE USER 'jeffrey'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password' PASSWORD EXPIRE INTERVAL 180 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 2; CREATE OR REPLACE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'new_password1', 'jeanne'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password2' REQUIRE X509 WITH MAX_QUERIES_PER_HOUR 60 PASSWORD HISTORY 5 ACCOUNT LOCK; CREATE OR REPLACE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'password'; CREATE OR REPLACE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com'; CREATE OR REPLACE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com' AND IDENTIFIED WITH authentication_fido; CREATE OR REPLACE USER user IDENTIFIED WITH authentication_fido INITIAL AUTHENTICATION IDENTIFIED BY RANDOM PASSWORD; CREATE OR REPLACE USER 'joe'@'10.0.0.1' DEFAULT ROLE administrator, developer; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE NONE; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE SSL; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE X509; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com'; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com'; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE OR REPLACE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com' AND ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com' AND CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE OR REPLACE USER 'jeffrey'@'localhost' WITH MAX_QUERIES_PER_HOUR 500 MAX_UPDATES_PER_HOUR 100; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD EXPIRE; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD EXPIRE DEFAULT; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD EXPIRE NEVER; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD EXPIRE INTERVAL 180 DAY; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD HISTORY DEFAULT; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD HISTORY 6; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL DEFAULT; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL 360 DAY; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT OPTIONAL; CREATE OR REPLACE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT DEFAULT; CREATE OR REPLACE USER 'jeffrey'@'localhost' FAILED_LOGIN_ATTEMPTS 4 PASSWORD_LOCK_TIME 2; CREATE OR REPLACE USER 'jon'@'localhost' COMMENT 'Some information about Jon'; CREATE OR REPLACE USER 'jim'@'localhost' ATTRIBUTE '{"fname": "James", "lname": "Scott", "phone": 
"123-456-7890"}'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_user.yml000066400000000000000000000721761503426445100244260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9f2e8d3839abebe5ce821bdd713a832ce22ddfaddb47afca8d044e321e902592 file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: "'prj_svc'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: AWSAuthenticationPlugin - keyword: AS - quoted_literal: "'RDS'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_literal: '"jeffrey"' - at_sign_literal: '@' - quoted_literal: '"localhost"' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: '`jeffrey`' - at_sign_literal: '@' - quoted_identifier: '`localhost`' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '3' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'new_password1'" - comma: ',' - role_reference: - quoted_identifier: "'jeanne'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password2'" - keyword: REQUIRE - keyword: X509 - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '60' - 
keyword: PASSWORD - keyword: HISTORY - numeric_literal: '5' - keyword: ACCOUNT - keyword: LOCK - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - keyword: INITIAL - keyword: AUTHENTICATION - keyword: IDENTIFIED - keyword: BY - keyword: RANDOM - keyword: PASSWORD - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'joe'" - at_sign_literal: '@' - quoted_identifier: "'10.0.0.1'" - keyword: DEFAULT - keyword: ROLE - role_reference: naked_identifier: administrator - comma: ',' - role_reference: naked_identifier: developer - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: NONE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SSL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: X509 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: 
'@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - keyword: AND - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - keyword: AND - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '500' - keyword: MAX_UPDATES_PER_HOUR - numeric_literal: '100' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: NEVER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - numeric_literal: '6' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: 
INTERVAL - numeric_literal: '360' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: OPTIONAL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '4' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jon'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: COMMENT - quoted_literal: "'Some information about Jon'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jim'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: ATTRIBUTE - quoted_literal: "'{\"fname\": \"James\", \"lname\": \"Scott\", \"phone\": \"\ 123-456-7890\"}'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: quoted_identifier: "'prj_svc'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: AWSAuthenticationPlugin - keyword: AS - quoted_literal: "'RDS'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_literal: '"jeffrey"' - at_sign_literal: '@' - quoted_literal: '"localhost"' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: '`jeffrey`' - at_sign_literal: '@' - quoted_identifier: '`localhost`' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - 
quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '3' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'new_password1'" - comma: ',' - role_reference: - quoted_identifier: "'jeanne'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password2'" - keyword: REQUIRE - keyword: X509 - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '60' - keyword: PASSWORD - keyword: HISTORY - numeric_literal: '5' - keyword: ACCOUNT - keyword: LOCK - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: naked_identifier: user - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - 
keyword: INITIAL - keyword: AUTHENTICATION - keyword: IDENTIFIED - keyword: BY - keyword: RANDOM - keyword: PASSWORD - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'joe'" - at_sign_literal: '@' - quoted_identifier: "'10.0.0.1'" - keyword: DEFAULT - keyword: ROLE - role_reference: naked_identifier: administrator - comma: ',' - role_reference: naked_identifier: developer - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: NONE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SSL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: X509 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - keyword: AND - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - keyword: AND - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '500' - keyword: MAX_UPDATES_PER_HOUR - numeric_literal: '100' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - 
quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: NEVER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - numeric_literal: '6' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL - numeric_literal: '360' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: OPTIONAL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '4' - keyword: PASSWORD_LOCK_TIME - 
numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jon'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: COMMENT - quoted_literal: "'Some information about Jon'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: USER - role_reference: - quoted_identifier: "'jim'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: ATTRIBUTE - quoted_literal: "'{\"fname\": \"James\", \"lname\": \"Scott\", \"phone\": \"\ 123-456-7890\"}'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_view.sql000066400000000000000000000005151503426445100244040ustar00rootroot00000000000000CREATE VIEW v1 (c,d) AS SELECT a,b FROM t1; CREATE OR REPLACE VIEW v1 (c,d,e,f) AS SELECT a,b, a IN (SELECT a+2 FROM t1), a = all (SELECT a FROM t1) FROM t1; CREATE VIEW v2 AS SELECT a FROM t1 WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS (SELECT a FROM t1) WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS SELECT 1 UNION SELECT 2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/create_view.yml000066400000000000000000000142631503426445100244130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42c04987e5868917da4918868f7292265f4e705a615649117afe33e16159a425 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - comma: ',' - column_reference: naked_identifier: e - comma: ',' - column_reference: naked_identifier: f - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a binary_operator: + numeric_literal: '2' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: all function_contents: 
bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/deallocate_prepare.sql000066400000000000000000000000321503426445100257140ustar00rootroot00000000000000DEALLOCATE PREPARE dynam; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/deallocate_prepare.yml000066400000000000000000000010301503426445100257150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47c2b5531df3cb85f6eb69f699db36588afa339a5cde82a5058f928718671932 file: statement: deallocate_segment: - keyword: DEALLOCATE - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_condition.sql000066400000000000000000000000621503426445100255510ustar00rootroot00000000000000DECLARE random_condition_name CONDITION FOR 1051; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_condition.yml000066400000000000000000000011271503426445100255560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7bdce909905e143f2896cccb3ca99f0b52c2f36ae44e5cbbbf19ab71e621efc1
file:
  statement:
    declare_statement:
    - keyword: DECLARE
    - naked_identifier: random_condition_name
    - keyword: CONDITION
    - keyword: FOR
    - numeric_literal: '1051'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_continue_handler_sqlexception.sql000066400000000000000000000000771503426445100317100ustar00rootroot00000000000000
DECLARE continue handler for sqlexception begin select 1; end;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_continue_handler_sqlexception.yml000066400000000000000000000016311503426445100317070ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0ef9e8623111d364c44fed8e8f3660ed551cf7158ff3dc87e02ab81cd98a02e0
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: continue
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_cursor.sql000066400000000000000000000000421503426445100250760ustar00rootroot00000000000000
DECLARE test CURSOR FOR SELECT 1;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_cursor.yml000066400000000000000000000013131503426445100251020ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 55c9581409fbc9d064dd492a2842e4df7d4102077d475bb9f50a7a33906204cd
file:
  statement:
    declare_statement:
    - keyword: DECLARE
    - naked_identifier: test
    - keyword: CURSOR
    - keyword: FOR
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              numeric_literal: '1'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_default_numeric.sql000066400000000000000000000000331503426445100267300ustar00rootroot00000000000000
DECLARE abc int default 1;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_default_numeric.yml000066400000000000000000000011301503426445100267300ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a302920845b75b85f450540a7283e49735d86a7878f913048cc3adf4efc5adfc
file:
  statement:
    declare_statement:
    - keyword: DECLARE
    - variable: abc
    - data_type:
        data_type_identifier: int
    - keyword: default
    - numeric_literal: '1'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_default_quotedliteral.sql000066400000000000000000000000451503426445100301460ustar00rootroot00000000000000
DECLARE abc longtext default 'test';
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_default_quotedliteral.yml000066400000000000000000000011411503426445100301470ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6d67b2c5474e3a36857b12ef0b488a847015db1c9b9e0f45ea154871bf736428
file:
  statement:
    declare_statement:
    - keyword: DECLARE
    - variable: abc
    - data_type:
        data_type_identifier: longtext
    - keyword: default
    - quoted_literal: "'test'"
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_exit_handler_sqlexception.sql000066400000000000000000000000731503426445100310310ustar00rootroot00000000000000
DECLARE exit handler for sqlexception begin select 1; end;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_exit_handler_sqlexception.yml000066400000000000000000000016251503426445100310370ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fa28b2deb8b2ba8bbc00b29368ae51e09b2c142b0654a70ae093cb9e37d507b0
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_local_variable.sql000066400000000000000000000000211503426445100265170ustar00rootroot00000000000000
DECLARE abc int;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_local_variable.yml000066400000000000000000000010461503426445100265270ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b9f8d9b37c576c621698357759133c61224aedf19208cfaa65ef8ac6ca395b35
file:
  statement:
    declare_statement:
      keyword: DECLARE
      variable: abc
      data_type:
        data_type_identifier: int
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_undo_handler_sqlexception.sql000066400000000000000000000000731503426445100310250ustar00rootroot00000000000000
DECLARE undo handler for sqlexception begin select 1; end;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/declare_undo_handler_sqlexception.yml000066400000000000000000000016251503426445100310330ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9ec056f47e16db2062f9cfe150451dd9dc8bb603e0c028f404f7b7cd77a7ee3a
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: undo
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delete_multitable.sql000066400000000000000000000017721503426445100256010ustar00rootroot00000000000000
DELETE a FROM a JOIN b USING (id) WHERE b.name = 'example';
DELETE FROM somelog WHERE user = 'jcole' ORDER BY timestamp_column LIMIT 1;
DELETE LOW_PRIORITY QUICK IGNORE a FROM a JOIN b USING (id) WHERE b.name = 'example';
DELETE FROM a PARTITION (p) WHERE b.name = 'example';
-- Multiple-Table Syntax 1
DELETE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
DELETE LOW_PRIORITY QUICK IGNORE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
-- Multiple-Table Syntax 2
DELETE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
DELETE LOW_PRIORITY QUICK IGNORE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
DELETE a1, a2 FROM t1 AS a1 INNER JOIN t2 AS a2 WHERE a1.id=a2.id;
-- .* after table name
DELETE t1.*, t2.* FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
DELETE FROM t1.*, t2.* USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id;
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delete_multitable.yml000066400000000000000000000353531503426445100256030ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3dea0ed0320789e1e32f39116489553f21b5f9bdd65386036c72ea39db7b7aed file: - statement: delete_statement: keyword: DELETE delete_target_table: table_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: somelog where_clause: keyword: WHERE expression: column_reference: naked_identifier: user comparison_operator: raw_comparison_operator: '=' quoted_literal: "'jcole'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: timestamp_column limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - delete_target_table: table_reference: naked_identifier: a - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: p end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: a1 - comma: ',' - delete_target_table: table_reference: naked_identifier: a2 - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: alias_operator: keyword: AS naked_identifier: a1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 alias_expression: alias_operator: keyword: AS naked_identifier: a2 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: t1 dot: . star: '*' - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 dot: . star: '*' - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 dot: . star: '*' - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 dot: . star: '*' - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delete_returning.sql000066400000000000000000000001551503426445100254460ustar00rootroot00000000000000DELETE FROM a WHERE a.foo = 'bar' RETURNING a.*; DELETE FROM a WHERE a.foo = 'bar' RETURNING a.baz AS abaz; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delete_returning.yml000066400000000000000000000040111503426445100254430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f8779993eb8e2f23295b978d7f9a442a0cbf29ea8c5551e4d5316d600e8e72f5 file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a where_clause: keyword: WHERE expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: foo comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" returning_clause: keyword: RETURNING select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a where_clause: keyword: WHERE expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: foo comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" returning_clause: keyword: RETURNING select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: baz alias_expression: alias_operator: keyword: AS naked_identifier: abaz - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_function.sql000066400000000000000000000002371503426445100257730ustar00rootroot00000000000000DELIMITER ~ CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_function.yml000066400000000000000000000032611503426445100257750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e86cfba356795fe4599cef806bd5c5c782881b14785deacab9bef35c96bef07b file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_procedure.sql000066400000000000000000000002311503426445100261300ustar00rootroot00000000000000DELIMITER ~ CREATE PROCEDURE `testprocedure`(test int) DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_procedure.yml000066400000000000000000000031571503426445100261440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f8885557a4ee5d4d90b6fc6f07983158a33d5c9eaadcf2cdcae85be72280973c file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_select.sql000066400000000000000000000000421503426445100254170ustar00rootroot00000000000000DELIMITER ~ SELECT 1~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/delimiter_select.yml000066400000000000000000000013501503426445100254240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a7b5e6298ad0f76791911956fe2dcb7762a8483f6dd646d30160c4e0c39a9b2e file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_function.sql000066400000000000000000000000501503426445100247520ustar00rootroot00000000000000DROP FUNCTION IF EXISTS `testfunction`; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_function.yml000066400000000000000000000011431503426445100247600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36f8ff2a93f64a4996cc8a997d1048029f86ffcd5114b0db81ca5b4c984860bf file: statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: quoted_identifier: '`testfunction`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index.sql000066400000000000000000000000451503426445100242400ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo`; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index.yml000066400000000000000000000012621503426445100242440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55a1c03846735c33fdff186577506b3f00b63f73d930c2bf8974f73d4d0f7cea file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index_with_algorithm.sql000066400000000000000000000005121503426445100273400ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo` ALGORITHM = DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = COPY; DROP INDEX `test` ON `table1`.`foo` ALGORITHM DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM COPY; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index_with_algorithm.yml000066400000000000000000000051241503426445100273460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2f5fbf257ef1ab0490d88f2c0a9840283f4ea5595471e662572591e7fb25dd6b file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . 
- quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: COPY - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: COPY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index_with_lock.sql000066400000000000000000000005441503426445100263070ustar00rootroot00000000000000DROP INDEX `test` ON `table1` LOCK = DEFAULT; DROP INDEX `test` ON `table1` LOCK = NONE; DROP INDEX `test` ON `table1` LOCK = SHARED; DROP INDEX `test` ON `table1` LOCK = EXCLUSIVE; DROP INDEX `test` ON `table1` LOCK DEFAULT; DROP INDEX `test` ON `table1` LOCK NONE; DROP INDEX `test` ON `table1` LOCK SHARED; DROP INDEX `test` ON `table1` LOCK EXCLUSIVE; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_index_with_lock.yml000066400000000000000000000056161503426445100263160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0c91a2276d5d611e727906a9c2b014e808dc5fb97fe6f238268b7fed429adb32 file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: EXCLUSIVE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: EXCLUSIVE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_prepare.sql000066400000000000000000000000241503426445100245640ustar00rootroot00000000000000DROP PREPARE dynam; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_prepare.yml000066400000000000000000000010221503426445100245650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6fb0ffbafdc168dc91f61b1dc45a6ae2bb23726da0391e6a2bb85fb0c72e481d file: statement: deallocate_segment: - keyword: DROP - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_procedure.sql000066400000000000000000000000521503426445100251170ustar00rootroot00000000000000DROP PROCEDURE IF EXISTS `testprocedure`; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_procedure.yml000066400000000000000000000011511503426445100251220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6226dad29f08fa6bc945f7ffbd3ee824cfb9513fec02f752efccc635d8bfc0e6 file: statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - object_reference: quoted_identifier: '`testprocedure`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_table.sql000066400000000000000000000002141503426445100242160ustar00rootroot00000000000000DROP TEMPORARY TABLE IF EXISTS t; DROP TEMPORARY TABLE IF EXISTS t, t2; DROP TABLE IF EXISTS t RESTRICT; DROP TABLE IF EXISTS t CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_table.yml000066400000000000000000000025041503426445100242240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 079d162da6241fd53a7106c843d2c5b885c9fa93adf3e4e1fedc73f706bc00c4 file: - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: RESTRICT - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_trigger.sql000066400000000000000000000001511503426445100245720ustar00rootroot00000000000000DROP TRIGGER trigger_name; DROP TRIGGER schema_name.trigger_name; DROP TRIGGER IF EXISTS trigger_name; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/drop_trigger.yml000066400000000000000000000017231503426445100246020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ea2814154a2f377dfa41e981612ca02ec575bbb9384d4356a6f87d69d8367df file: - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: - naked_identifier: schema_name - dot: . 
- naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt.sql000066400000000000000000000000161503426445100264760ustar00rootroot00000000000000execute test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt.yml000066400000000000000000000007721503426445100265110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 645d3d2f6b6a99f0b56f9e61558eddbd807982b88dd4b5a85926d08ad6fde973 file: statement: execute_segment: keyword: execute naked_identifier: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt_using.sql000066400000000000000000000000321503426445100277010ustar00rootroot00000000000000execute test using @test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt_using.yml000066400000000000000000000010471503426445100277120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c84c2b918a06a90ade5b2628eaf69ac7ee1220e235e947be52d9663ea1717e60 file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt_using_multiple_variable.sql000066400000000000000000000000421503426445100334620ustar00rootroot00000000000000execute test using @test, @test1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/execute_prepared_stmt_using_multiple_variable.yml000066400000000000000000000011211503426445100334630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e883899cefef9dcaa1775b3ebae0ba07aa11a5b9fb3c10843c74b452f7e9921c file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' - comma: ',' - variable: '@test1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/explain.sql000066400000000000000000000002271503426445100235470ustar00rootroot00000000000000explain select 1; explain update tbl set foo = 1 where bar = 2; explain delete from tbl where foo = 1; explain insert into tbl (col1) values (123); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/explain.yml000066400000000000000000000045331503426445100235550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 77b5d16c2cddca62f5fb8d333728260a2f33be4f0e46cecfdc10d68bf5e4d542 file: - statement: explain_statement: keyword: explain select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain update_statement: keyword: update table_reference: naked_identifier: tbl set_clause_list: keyword: set set_clause: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' where_clause: keyword: where expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: explain_statement: keyword: explain delete_statement: keyword: delete from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch.sql000066400000000000000000000000331503426445100231730ustar00rootroot00000000000000fetch curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch.yml000066400000000000000000000010531503426445100232000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ffb648d6082513210891e9d343ab35d75b56ddc97fee1356ec805b821eb52fbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_from.sql000066400000000000000000000000401503426445100242140ustar00rootroot00000000000000fetch from curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_from.yml000066400000000000000000000010771503426445100242310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ac2545f40f18314609db52b54bcf5af85eeb4a464c012915e157261c76d0acdb file: statement: cursor_fetch_segment: - keyword: fetch - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_multiple.sql000066400000000000000000000000421503426445100251060ustar00rootroot00000000000000fetch curcursor into test, test2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_multiple.yml000066400000000000000000000011221503426445100251100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8f2e64f6c4bfeb9e324e9f8fbf9ab2024c805ba70341e9763ca33556c367cbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test - comma: ',' - variable: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_next_from.sql000066400000000000000000000000451503426445100252570ustar00rootroot00000000000000fetch next from curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_next_from.yml000066400000000000000000000011231503426445100252570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5799de98d4fbb26b52d336da9b39a4a58ecbce7623c1a8aefa4ccf02ba3ec0bc file: statement: cursor_fetch_segment: - keyword: fetch - keyword: next - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_session.sql000066400000000000000000000000341503426445100247370ustar00rootroot00000000000000fetch curcursor into @test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_session.yml000066400000000000000000000010561503426445100247460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0282fb848f6361a6299ff7b64ab47fb43966975f758c1e30e5ed1f55e66ee7ff file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_session_multiple.sql000066400000000000000000000000441503426445100266530ustar00rootroot00000000000000fetch curcursor into @test, @test2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/fetch_session_multiple.yml000066400000000000000000000011301503426445100266520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b21f217493fdc862ebf564013c0b16d8d4139163bc5a9910cb3f586f45c04114 file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' - comma: ',' - variable: '@test2' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/flush.sql000066400000000000000000000007111503426445100232260ustar00rootroot00000000000000FLUSH LOGS; FLUSH NO_WRITE_TO_BINLOG BINARY LOGS, ENGINE LOGS, ERROR LOGS, GENERAL LOGS, HOSTS; FLUSH LOCAL HOSTS, QUERY CACHE, TABLE_STATISTICS, INDEX_STATISTICS, USER_STATISTICS; FLUSH STATUS; FLUSH RELAY LOGS my_channel; FLUSH RELAY LOGS FOR CHANNEL my_channel; FLUSH TABLES; FLUSH TABLES WITH READ LOCK; FLUSH TABLES table1; FLUSH TABLES table1, `foo`.`bar`; FLUSH TABLES table1, `foo`.`bar` WITH READ LOCK; FLUSH TABLES table1, `foo`.`bar` FOR EXPORT; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/flush.yml000066400000000000000000000060141503426445100232320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8f22cb8fd34961fa5b7befb14683818ae9bf139e7d2c24e85db74c4a4731e714 file: - statement: flush_statement: - keyword: FLUSH - keyword: LOGS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: NO_WRITE_TO_BINLOG - keyword: BINARY - keyword: LOGS - comma: ',' - keyword: ENGINE - keyword: LOGS - comma: ',' - keyword: ERROR - keyword: LOGS - comma: ',' - keyword: GENERAL - keyword: LOGS - comma: ',' - keyword: HOSTS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: LOCAL - keyword: HOSTS - comma: ',' - keyword: QUERY - keyword: CACHE - comma: ',' - keyword: TABLE_STATISTICS - comma: ',' - keyword: INDEX_STATISTICS - comma: ',' - keyword: USER_STATISTICS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: STATUS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: RELAY - keyword: LOGS - object_reference: naked_identifier: my_channel - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: RELAY - keyword: LOGS - keyword: FOR - keyword: CHANNEL - object_reference: naked_identifier: my_channel - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: FOR - keyword: EXPORT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index.sql000066400000000000000000000000601503426445100243670ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX (idx_index); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index.yml000066400000000000000000000020121503426445100243700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 76f81441b5f5b820ccbdc7248648433a05eac5d802017015e52e7d2adbfe9444 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_group_by.sql000066400000000000000000000000661503426445100271510ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_group_by.yml000066400000000000000000000021031503426445100271450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 87773b30b37f699859714e573179e58c6f5dafb6191d5538e681e5831418af67 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_join.sql000066400000000000000000000000621503426445100262560ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_join.yml000066400000000000000000000020561503426445100262650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b6b94cfaf91b378bf96312ae5b473616021fb82e21522046a4e9543d1191e027 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_order_by.sql000066400000000000000000000000661503426445100271300ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_for_order_by.yml000066400000000000000000000021031503426445100271240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d0c09a9cf14085e00ca694807253593f891828a1f101948491a5ad7083d4546e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_left_join.sql000066400000000000000000000001671503426445100264300ustar00rootroot00000000000000SELECT onetable.f1, twotable.f1 FROM onetable left join twotable FORCE INDEX (idx_index) on onetable.f1 = twotable.f1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_index_left_join.yml000066400000000000000000000035031503426445100264270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7156f479a87ebed7b8e34b4cd7016aadd3dab7e0bd6b0ad14bc7d4795fd2de90 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: onetable - dot: . - naked_identifier: f1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: twotable - dot: . - naked_identifier: f1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable join_clause: - keyword: left - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: twotable index_hint_clause: - keyword: FORCE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: onetable - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: twotable - dot: . 
- naked_identifier: f1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_key.sql000066400000000000000000000000561503426445100240550ustar00rootroot00000000000000SELECT * FROM onetable FORCE KEY (idx_index); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/force_key.yml000066400000000000000000000020101503426445100240470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4141b5f48dff30ccb10e63d5fffe11c7ba21cecf1f39f273af9cd2de9e2ef10f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_comment.sql000066400000000000000000000001571503426445100254600ustar00rootroot00000000000000CREATE FUNCTION `testfunction`(var1 int) RETURNS longtext COMMENT 'this is a comment' DETERMINISTIC BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_comment.yml000066400000000000000000000022151503426445100254570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f07d63ae8143264331610358ea3a6e91b3340a65e311f82bffdc8d05dc5f1438 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( parameter: var1 data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - comment_clause: keyword: COMMENT quoted_literal: "'this is a comment'" - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_definer.sql000066400000000000000000000001501503426445100254230ustar00rootroot00000000000000CREATE DEFINER=`test`@`%` FUNCTION `testfunction`() RETURNS longtext DETERMINISTIC BEGIN SELECT 1; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_definer.yml000066400000000000000000000026171503426445100254370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 63402a8d737600b4ceec102c8982aca0423da4d2e24e7cd7893a8d394c15091c file: - statement: create_function_statement: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`test`' - at_sign_literal: '@' - quoted_identifier: '`%`' - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_modifies_sql.sql000066400000000000000000000001521503426445100264670ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC MODIFIES SQL DATA BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_modifies_sql.yml000066400000000000000000000026341503426445100265000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c950f8fdbb6c66257b3ecd2a33e1db18ef6b958973246fbc0e17486c6e30758e file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: MODIFIES - keyword: SQL - keyword: DATA - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_no_sql.sql000066400000000000000000000001371503426445100253070ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC NO SQL BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_no_sql.yml000066400000000000000000000026021503426445100253100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7cf6b2f6629057890ce653e7f421bdab2d9b541afed3344da33927c25e3f2d4e file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: 'NO' - keyword: SQL - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_notdeterministic.sql000066400000000000000000000001341503426445100273750ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext NOT DETERMINISTIC BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_notdeterministic.yml000066400000000000000000000025541503426445100274070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a419b4154858e8c36166a450bc605ade73c8c8bd9cf192c2581831c5ba5213d file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: NOT - keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_reads_sql.sql000066400000000000000000000001471503426445100257720ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC READS SQL DATA BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_reads_sql.yml000066400000000000000000000026311503426445100257740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4f21236c7ba039e1347af4a274638f535a951ab5775f1bd9ced459f5a19dd5d8 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: READS - keyword: SQL - keyword: DATA - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_return.sql000066400000000000000000000001411503426445100253260ustar00rootroot00000000000000CREATE FUNCTION `testfunction`(var1 int) RETURNS int DETERMINISTIC BEGIN RETURN (var1 + 1); END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_return.yml000066400000000000000000000026701503426445100253410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f900c91d9372a026ebd6e47b33e72d76158191583040ff73d52264c797df7317 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( parameter: var1 data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: int - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: return_statement: keyword: RETURN expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: var1 binary_operator: + numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_sql_security_definer.sql000066400000000000000000000001551503426445100302360ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_sql_security_definer.yml000066400000000000000000000026371503426445100302470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 74abdb518948c19065ad637d90fbcfff31c9b6cfde82cf79d4586f4605f9a2a6 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_sql_security_invoker.sql000066400000000000000000000001551503426445100302770ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY INVOKER BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/function_sql_security_invoker.yml000066400000000000000000000026371503426445100303100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c69f593c8af3fc5de0f6d04a417a91f5c128e29bb2d4cde483edd95f1799b39b file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: SQL - keyword: SECURITY - keyword: INVOKER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_local_variable.sql000066400000000000000000000000621503426445100333520ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_local_variable.yml000066400000000000000000000012331503426445100333550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f1f183ac1cbee5d2382d84afc9c8151c8037a58872cbf2315d25442242ae7a3e file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: _test - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_multiple_variable.sql000066400000000000000000000001061503426445100341120ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN, @test = TABLE_NAME; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_multiple_variable.yml000066400000000000000000000014361503426445100341230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a774db06cd2a028f74beaf8e4fbc800bf95454357357782ca3dad888a9580e8b file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: _test - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN - comma: ',' - variable: '@test' - comparison_operator: raw_comparison_operator: '=' - keyword: TABLE_NAME statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_session_variable.sql000066400000000000000000000000621503426445100337430ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 @test = CLASS_ORIGIN; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_info_session_variable.yml000066400000000000000000000012351503426445100337500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 57cdf831e601b7330259c2fb2a9896682094508706204a3dfc889ab5ee7de24d file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: '@test' - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_local_variable.sql000066400000000000000000000000411503426445100323340ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION _test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_local_variable.yml000066400000000000000000000010531503426445100323420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 42bce6b496e8e1802b7fb8dd2b7d3869ae2de167e39670d4b0d24007814e9441 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: _test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_numeric.sql000066400000000000000000000000351503426445100310420ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_numeric.yml000066400000000000000000000010511503426445100310430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7dd3117703135613c7197d4be59c7a2de1398f0d20291fe13aaa129cb5df786 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_session_variable.sql000066400000000000000000000000411503426445100327250ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION @test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_condition_session_variable.yml000066400000000000000000000010551503426445100327350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac7218df685e13ae13347608b37dce89733405707037b02020f696a4b448ed6f file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_number.sql000066400000000000000000000000511503426445100266200ustar00rootroot00000000000000GET DIAGNOSTICS @a = NUMBER CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_number.yml000066400000000000000000000012241503426445100266250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 796518d65a6e3a9acb2574ac1162af0a4b26e7801d0169c1912a973abf831243 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - variable: '@a' - comparison_operator: raw_comparison_operator: '=' - keyword: NUMBER - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_row_count.sql000066400000000000000000000000541503426445100273520ustar00rootroot00000000000000GET DIAGNOSTICS @a = ROW_COUNT CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/get_diagnostics_row_count.yml000066400000000000000000000012271503426445100273570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2e7f5ba8bf71309f0b86e1e2945924f9cc6523a1f063b04a44cd734b1213f7c file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - variable: '@a' - comparison_operator: raw_comparison_operator: '=' - keyword: ROW_COUNT - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/grant.sql000066400000000000000000000013721503426445100232240ustar00rootroot00000000000000GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO prj_svc; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc'; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc"; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc'@'%'; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc"@"%"; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`@`%`; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc` @`%`; GRANT ALL ON db1.* TO 'prj_svc'@'%'; GRANT ALL PRIVILEGES ON db1.* TO 'prj_svc'@'localhost'; GRANT ALL PRIVILEGES ON *.* TO 'prj_svc'@'%'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/grant.yml000066400000000000000000000117741503426445100232350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df855ae682e4d1b57088f24c78ffb933010ac6eb4fd31fa7a6fc6f6f6466ee06 file: - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: naked_identifier: prj_svc - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_identifier: "'prj_svc'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_literal: '"prj_svc"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_identifier: '`prj_svc`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - 
at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_literal: '"prj_svc"' - at_sign_literal: '@' - quoted_literal: '"%"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: '`prj_svc`' - at_sign_literal: '@' - quoted_identifier: '`%`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: '`prj_svc`' - at_sign_literal: '@' - quoted_identifier: '`%`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - wildcard_identifier: naked_identifier: db1 dot: . star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - wildcard_identifier: naked_identifier: db1 dot: . star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - wildcard_identifier: - star: '*' - dot: . - star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/group_by.sql000066400000000000000000000007271503426445100237420ustar00rootroot00000000000000SELECT a, b FROM foo GROUP BY a; SELECT a, b FROM foo GROUP BY a,b; SELECT a, b FROM foo GROUP BY a ASC; SELECT a, b FROM foo GROUP BY a DESC; SELECT a, b FROM foo GROUP BY a WITH ROLLUP; SELECT a, b FROM foo GROUP BY a ASC WITH ROLLUP; SELECT a, b FROM foo GROUP BY a DESC WITH ROLLUP; SELECT a, b FROM foo GROUP BY a ASC, b DESC WITH ROLLUP; SELECT a, b FROM foo GROUP BY a WITH ROLLUP ORDER BY a; SELECT a, b FROM foo GROUP BY a ASC WITH ROLLUP HAVING a > 0; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/group_by.yml000066400000000000000000000171571503426445100237510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 61985073946a0e97a4c3a5b353052e0ffadb3ec7f572ada7a235adf9179348ce file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: ASC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - with_rollup_clause: - keyword: WITH - keyword: ROLLUP - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: ASC - with_rollup_clause: - keyword: WITH - keyword: ROLLUP - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: DESC - with_rollup_clause: - keyword: WITH - keyword: ROLLUP - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: ASC - comma: ',' - column_reference: naked_identifier: b - keyword: DESC - with_rollup_clause: - keyword: WITH - keyword: ROLLUP - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - with_rollup_clause: - keyword: WITH - keyword: ROLLUP orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - keyword: ASC - with_rollup_clause: - keyword: WITH - keyword: ROLLUP having_clause: keyword: HAVING expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_condition_name.sql000066400000000000000000000000741503426445100265720ustar00rootroot00000000000000DECLARE exit handler for conditionName begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_condition_name.yml000066400000000000000000000016371503426445100266020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 414ccaf762a7703b813181a3ad0d367889e5491629f899021b28dca8767b320d file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - naked_identifier: conditionName - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_error_code.sql000066400000000000000000000000631503426445100257250ustar00rootroot00000000000000DECLARE exit handler for 1051 begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_error_code.yml000066400000000000000000000016271503426445100257360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d34172436001fa633eaf25d34177ba7a6c903cbd232b5a83f7146fca0964287 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - numeric_literal: '1051' - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_not_found.sql000066400000000000000000000000701503426445100255730ustar00rootroot00000000000000DECLARE exit handler for not found begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_not_found.yml000066400000000000000000000016411503426445100256020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c939949a50ffc6352dc498e1144348bcb1ae7813f5e690feaa32fffe9abf39b0 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: not - keyword: found - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlstate.sql000066400000000000000000000000731503426445100254430ustar00rootroot00000000000000DECLARE exit handler for SQLSTATE '1' begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlstate.yml000066400000000000000000000016551503426445100254540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efc0bd7a7a41d0f364d01e8bd2477733f99322c5f713df571ae464635cc55893 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: SQLSTATE - quoted_literal: "'1'" - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlstate_value.sql000066400000000000000000000001011503426445100266270ustar00rootroot00000000000000DECLARE exit handler for SQLSTATE VALUE '1' begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlstate_value.yml000066400000000000000000000017021503426445100266410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ada5a017cc63b573515dfe48d409c682a6e71ecca29d0cce9cea35e446085a83 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: SQLSTATE - keyword: VALUE - quoted_literal: "'1'" - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlwarning.sql000066400000000000000000000000711503426445100257660ustar00rootroot00000000000000DECLARE exit handler for sqlwarning begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/handler_sqlwarning.yml000066400000000000000000000016231503426445100257740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ec2fecbc723f160082a61a9b6f8a2bca85e98f87c99cfc923e06a591905870c7 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: sqlwarning - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/help_statement.sql000066400000000000000000000001501503426445100251160ustar00rootroot00000000000000HELP 'contents'; HELP 'data types'; HELP 'ascii'; HELP 'create table'; HELP 'status'; HELP 'functions'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/help_statement.yml000066400000000000000000000020701503426445100251230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 975fb491721232f31c36d1c6f435d0a640fd60e56b4014ae471e397267475c3b file: - statement: help_statement: keyword: HELP quoted_literal: "'contents'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'data types'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'ascii'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'create table'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'status'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'functions'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/hexadecimal_literal.sql000066400000000000000000000001361503426445100260660ustar00rootroot00000000000000SELECT X'01AF'; SELECT X'01af'; SELECT x'01AF'; SELECT x'01af'; SELECT 0x01AF; SELECT 0x01af; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/hexadecimal_literal.yml000066400000000000000000000026341503426445100260750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ef38518cd78f06a19aff28ba8e786d3ce515fb506666dddfb9614d1ec9a5b52a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "X'01AF'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "X'01af'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "x'01AF'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "x'01af'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0x01AF' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0x01af' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if.sql000066400000000000000000000000641503426445100225040ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if.yml000066400000000000000000000023311503426445100225050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da9fd2e77d3348ffe9c07370381c002cd0968f76c1863adb19a260b7f9aa396a file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_else.sql000066400000000000000000000000711503426445100235120ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; else end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_else.yml000066400000000000000000000024411503426445100235170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 69efae2cc2e5a1387423945d404086dba866559c76178f3462f598157ce7bb77 file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: keyword: else statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_elseif.sql000066400000000000000000000001461503426445100240340ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; elseif (x = 1) then set _test = 1; else select 2; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_elseif.yml000066400000000000000000000040251503426445100240360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b03d179afec3edaf60243ad42bbd2ff4372a446e28fd033ec04c0b17e74adf7e file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: elseif - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: _test comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: keyword: else statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_multiple_expression.sql000066400000000000000000000001321503426445100266720ustar00rootroot00000000000000if ((select count(*) from table1) = 0 and x = 1) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_multiple_expression.yml000066400000000000000000000043241503426445100267030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e29aea27c8f69eaf0019d88861f60a0204978037b4d672f5d74daa3054b5db49 file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - binary_operator: and - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_nested.sql000066400000000000000000000001261503426445100240450ustar00rootroot00000000000000if (x = 0) then select 0; if (y = 1) then set @errmsg = ''; select 1; end if; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_nested.yml000066400000000000000000000035421503426445100240540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b374d03f02f5c7eb800e9262fd5075764b7775db085d5eb2fe21e78c8d5ab96d file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '0' - statement_terminator: ; - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_session_variable.sql000066400000000000000000000000601503426445100261100ustar00rootroot00000000000000if (@x = 0) then set @b = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_session_variable.yml000066400000000000000000000022571503426445100261240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdd4b0c9117b6b228762f9b46873ba13b9207f58d2cdeb6a2ebadc2068dea99f file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: variable: '@x' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@b' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_subquery_expression.sql000066400000000000000000000001201503426445100267130ustar00rootroot00000000000000if ((select count(*) from table1) = 0) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/if_subquery_expression.yml000066400000000000000000000040061503426445100267240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5626b33570f2e08309801628d9fd5dc612419586627f73721e2abb1adc1028cc file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index.sql000066400000000000000000000000521503426445100245550ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index.yml000066400000000000000000000020041503426445100245560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2e1f60338262df41c57f37eece235e168d74a57176116c55c9c443126c8373a5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_group_by.sql000066400000000000000000000000671503426445100273370ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_group_by.yml000066400000000000000000000021041503426445100273330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 26e55b2b5d567db8e63bd7802f436aeec190ab9f4c94603820231c9c8026ae89 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_join.sql000066400000000000000000000000631503426445100264440ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_join.yml000066400000000000000000000020571503426445100264530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 60cc4d293f24f927ac5fc8ed7e486d7604260516c11b3fb241c6d9d7a2362f05 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_order_by.sql000066400000000000000000000000671503426445100273160ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_index_for_order_by.yml000066400000000000000000000021041503426445100273120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e4537df9e55c6973d4495dc1a329920562069731c0838927483119d8588b25e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_key.sql000066400000000000000000000000501503426445100242340ustar00rootroot00000000000000SELECT * FROM onetable IGNORE KEY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/ignore_key.yml000066400000000000000000000020021503426445100242350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3033d4c9bc24004b84b6b337e6a280aa6d576857c8b2df2e6bac02a4392ac050 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/insert.sql000066400000000000000000000014621503426445100234150ustar00rootroot00000000000000INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6); INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE a = VALUES(a), b = VALUES(b); INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 SET a=1,b=2,c=3 AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 SET a=1,b=2,c=3 AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n, b = n+p; INSERT INTO T1 (A) WITH X AS (SELECT 1 AS A) SELECT A FROM X; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/insert.yml000066400000000000000000000406231503426445100234210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9cf3e0246a7e2e05177e23574d12d98afc6804de789b57724cce8345f6d387ea file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: 
raw_comparison_operator: '=' - column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: T1 - bracketed: start_bracket: ( column_reference: naked_identifier: A end_bracket: ) - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: X keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: A end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: X - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/insert_returning.sql000066400000000000000000000016251503426445100255130ustar00rootroot00000000000000INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) RETURNING a, b, c; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt RETURNING a, b, c; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b RETURNING a, b, c; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE a = VALUES(a), b = VALUES(b) RETURNING a, b, c; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n RETURNING a, b, c; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt ON DUPLICATE KEY UPDATE b = e RETURNING a, b, c; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) ON DUPLICATE KEY UPDATE b = e RETURNING a, b, c; INSERT INTO t1 SET a=1,b=2,c=3 AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b RETURNING a, b, c; INSERT INTO t1 SET a=1,b=2,c=3 AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n, b = n+p RETURNING a, b, c; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/insert_returning.yml000066400000000000000000000446171503426445100255250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74380c5cd4a865ff29c9f7c38733d32fcddd47b0f6520dbb10935f0462d61784 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: e - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: e - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/interval.sql000066400000000000000000000007471503426445100237420ustar00rootroot00000000000000SELECT DATE_ADD(CURDATE(), INTERVAL -30 DAY); SELECT SUBDATE('2008-01-02', INTERVAL 31 DAY); SELECT ADDDATE(CURDATE(), INTERVAL -30 DAY); SELECT DATE_SUB('1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND); SELECT DATE_ADD('2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND); SELECT DATE_ADD(CURDATE(), INTERVAL 7 * 4 DAY); SELECT ADDDATE(CURDATE(), INTERVAL col1 DAY) FROM tbl1 ; SELECT SUBDATE(CURDATE(), INTERVAL col1 + col2 DAY) FROM tbl1 ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/interval.yml000066400000000000000000000172061503426445100237420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 73a7c191d4f98f8820605af6e84e8c2afa8b19957574d4573dcaba5e9e580e47 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'2008-01-02'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '31' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_SUB function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'1992-12-31 23:59:59.000002'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1.999999'" date_part: SECOND_MICROSECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'2100-12-31 23:59:59'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1:1'" date_part: MINUTE_SECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - numeric_literal: '7' - binary_operator: '*' - numeric_literal: '4' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: column_reference: naked_identifier: col1 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/json.sql000066400000000000000000000006661503426445100230670ustar00rootroot00000000000000CREATE TABLE facts (sentence JSON); INSERT INTO facts VALUES (JSON_OBJECT("mascot", "Our mascot is a dolphin named \"Sakila\".")); SELECT sentence->"$.mascot" FROM facts; SELECT sentence->'$.mascot' FROM facts; SELECT sentence->>"$.mascot" FROM facts; SELECT sentence->>'$.mascot' FROM facts; SELECT sentence FROM facts WHERE JSON_TYPE(sentence->"$.mascot") = "NULL"; SELECT sentence FROM facts WHERE sentence->"$.mascot" IS NULL; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/json.yml000066400000000000000000000121121503426445100230560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 64f1b276000ceac9ed592190bf0f896fe2a725875d55f487b8eba62b597cbc33 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: facts - bracketed: start_bracket: ( column_definition: naked_identifier: sentence data_type: data_type_identifier: JSON end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: facts - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"mascot"' - comma: ',' - expression: quoted_literal: '"Our mascot is a dolphin named \"Sakila\"."' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: ->> quoted_literal: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: ->> quoted_literal: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: JSON_TYPE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' end_bracket: ) comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NULL"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' keyword: IS null_literal: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/line_comment.sql000066400000000000000000000002371503426445100245610ustar00rootroot00000000000000-- Hello --Hello --From Curaçao # hashtag comment #hashtag comment without space USE db; #inline USE db; # inline w/ space USE db; --inline USE db; -- inline sqlfluff-3.4.2/test/fixtures/dialects/mariadb/line_comment.yml000066400000000000000000000016321503426445100245630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 92fc654926aea37fbb646fd0584c11a92986a27e489be3af95c8ecbe2e8954d4 file: - statement: use_statement: keyword: USE database_reference: naked_identifier: db - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: db - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: db - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: db - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/load_data.sql000066400000000000000000000024401503426445100240160ustar00rootroot00000000000000LOAD DATA INFILE '/var/lib/mysql-files/libaccess.csv' INTO TABLE libaccess FIELDS TERMINATED BY '\t' OPTIONALLY ENCLOSED BY '"' IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table PARTITION (partition_name); LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test FIELDS TERMINATED BY ',' LINES STARTING BY 'xxx'; LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY ','; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY '\t'; LOAD DATA INFILE 'data.txt' INTO TABLE tbl_name FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\r\n' IGNORE 1 LINES; LOAD DATA INFILE '/tmp/jokes.txt' INTO TABLE jokes FIELDS TERMINATED BY '' LINES TERMINATED BY '\n%%\n' (joke); LOAD DATA INFILE 'persondata.txt' INTO TABLE persondata; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @var1) SET column2 = @var1/100; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, column2) SET column3 = CURRENT_TIMESTAMP; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @dummy, column2, @dummy, column3); LOAD DATA INFILE '/local/access_log' INTO TABLE tbl_name FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/load_data.yml000066400000000000000000000160231503426445100240220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f068e4f34b1a5b03afb39dd77f1cf1dfdaa97fefef25d35dbd6c3e7288ed55b file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/var/lib/mysql-files/libaccess.csv'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: libaccess - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . 
- naked_identifier: my_table - partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: STARTING - keyword: BY - quoted_literal: "'xxx'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\r\\n'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/jokes.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: jokes - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "''" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n%%\\n'" - bracketed: start_bracket: ( column_reference: naked_identifier: joke end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'persondata.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: persondata - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@var1' - end_bracket: ) - keyword: SET - column_reference: naked_identifier: column2 - comparison_operator: raw_comparison_operator: '=' - variable: '@var1' - binary_operator: / - numeric_literal: '100' - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: SET - column_reference: 
naked_identifier: column3 - comparison_operator: raw_comparison_operator: '=' - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/local/access_log'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_label.sql000066400000000000000000000001211503426445100242100ustar00rootroot00000000000000iteration:loop select 1; iterate iteration; leave iteration; end loop iteration; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_label.yml000066400000000000000000000020461503426445100242220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ee6c612bc68b46b0637885e6c2d45996d0657a723d4c8aaf116166d7483357b file: - statement: loop_statement: naked_identifier: iteration colon: ':' keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: iterate_statement: keyword: iterate naked_identifier: iteration - statement_terminator: ; - statement: transaction_statement: keyword: leave naked_identifier: iteration - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_multiple_statements.sql000066400000000000000000000000611503426445100272360ustar00rootroot00000000000000loop select 1; select * from onetable; end loop; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_multiple_statements.yml000066400000000000000000000022171503426445100272450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1f4385380b2be8c5e17f459866e0ad060d6cdb7b1a3aabcac090d59502b9d0a8 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_no_label.sql000066400000000000000000000000311503426445100247040ustar00rootroot00000000000000loop select 1; end loop; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/loop_no_label.yml000066400000000000000000000013411503426445100247130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5257dc626087e74ee817942fa42136a8f5b1fa385f436cb278a903375ababa10 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/nested_begin.sql000066400000000000000000000000741503426445100245350ustar00rootroot00000000000000blocks:BEGIN nest:begin set @abc = 1; end nest; END blocks~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/nested_begin.yml000066400000000000000000000021251503426445100245360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4c8251d83f6e7828cccb5ba0c8f5e3670ba41d4a06660193eed35d37ba6d0f8b file: - statement: transaction_statement: naked_identifier: blocks colon: ':' keyword: BEGIN statement: transaction_statement: naked_identifier: nest colon: ':' keyword: begin statement: set_statement: keyword: set variable: '@abc' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end naked_identifier: nest - statement_terminator: ; - statement: transaction_statement: keyword: END naked_identifier: blocks - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/open.sql000066400000000000000000000000201503426445100230370ustar00rootroot00000000000000open curcursor; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/open.yml000066400000000000000000000010061503426445100230460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e1aebd2df56d3ec79fea9159ce9c4448a2f2f46696461f7886d3b27f321ed502 file: statement: cursor_open_close_segment: keyword: open naked_identifier: curcursor statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/open_qualified.sql000066400000000000000000000000221503426445100250640ustar00rootroot00000000000000open `curcursor`; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/open_qualified.yml000066400000000000000000000010131503426445100250670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3adadcefab1078bb37b5f960ec6344ab8391a808cb512db12cedeeaf6bc8612d file: statement: cursor_open_close_segment: keyword: open quoted_identifier: '`curcursor`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/optimize_table.sql000066400000000000000000000004031503426445100251120ustar00rootroot00000000000000OPTIMIZE TABLE some_table; OPTIMIZE TABLE some_table1, some_table2; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; OPTIMIZE LOCAL TABLE some_table; OPTIMIZE LOCAL TABLE some_table1, some_table2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/optimize_table.yml000066400000000000000000000033711503426445100251230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: feffb21d789f331a3e487e1b8d5d71deb4fbfc668e677fd34995e77e3cd87c4d file: - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_local_variable.sql000066400000000000000000000000311503426445100265550ustar00rootroot00000000000000PREPARE test FROM _test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_local_variable.yml000066400000000000000000000010441503426445100265640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 25b6e93e2262f01eaf66c26d2f5644717653ed3e01b5bd341134513ded5c1b38 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: _test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_session_variable.sql000066400000000000000000000000311503426445100271460ustar00rootroot00000000000000PREPARE test FROM @test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_session_variable.yml000066400000000000000000000010461503426445100271570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 465db8acb36a650a88c7dd35993d5321557e4b9b2241b2ee99a97b8ef7f39058 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_statement.sql000066400000000000000000000000371503426445100256300ustar00rootroot00000000000000PREPARE test FROM 'select 1;'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/prepare_statement.yml000066400000000000000000000010621503426445100256310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6a27f8f2efa35c42ccfda413a81d24920035e09430118145ee749b06f2d61285 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - quoted_literal: "'select 1;'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_definer.sql000066400000000000000000000001011503426445100255620ustar00rootroot00000000000000CREATE DEFINER=`test`@`%` PROCEDURE `testprocedure`() BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_definer.yml000066400000000000000000000021051503426445100255720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4a9679dce024353f3845d4f1b920ea507add980899b88e70c45c5864f274f9ce file: statement: create_procedure_statement: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`test`' - at_sign_literal: '@' - quoted_identifier: '`%`' - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_in_param.sql000066400000000000000000000000711503426445100257420ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(in test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_in_param.yml000066400000000000000000000017101503426445100257450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2d6aefd94f35d5599b705fa4c4dfe7a3a8a0da4c29696e30d4d648c15bc5042 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: in parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_inout_param.sql000066400000000000000000000000741503426445100264750ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(inout test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_inout_param.yml000066400000000000000000000017131503426445100265000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fbe00aeccc4e8664fcef735462688e8f15ee4edc02e12cd9e6e372c85a2514d3 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: inout parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_out_param.sql000066400000000000000000000000721503426445100261440ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(out test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mariadb/procedure_out_param.yml000066400000000000000000000017111503426445100261470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b743b1d3c50497e605139814614c4b3b16413ada9a280ad66415562fa6b89121 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: out parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/purge_binary_logs.sql000066400000000000000000000003561503426445100256240ustar00rootroot00000000000000PURGE BINARY LOGS TO 'mysql-bin.010'; PURGE BINARY LOGS BEFORE '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE TIMESTAMP '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE 19830905132800; PURGE BINARY LOGS BEFORE TIMESTAMP 19830905132800; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/purge_binary_logs.yml000066400000000000000000000027731503426445100256330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 539e0add6ce7c2a4e4e22378a706fc247a3c09b0fe8fd0d36ab241fe4220ca07 file: - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: TO - quoted_literal: "'mysql-bin.010'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: quoted_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP date_constructor_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: numeric_literal: '19830905132800' - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP numeric_literal: '19830905132800' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/quoted_literal.sql000066400000000000000000000014361503426445100251270ustar00rootroot00000000000000SELECT ''; SELECT ""; SELECT ''''; SELECT """"; SELECT ' '; SELECT " "; SELECT '''aaa'''; SELECT """aaa"""; SELECT ' '' '; SELECT " "" "; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' -- some comment 'bar'; SELECT "foo" -- some comment "bar"; SELECT 'foo' /* some comment */ 'bar'; SELECT "foo" /* some comment */ "bar"; UPDATE table1 SET column1 = 'baz\'s'; UPDATE table1 SET column1 = "baz\"s"; SELECT 'terminating MySQL-y escaped single-quote bazs\''; SELECT "terminating MySQL-y escaped double-quote bazs\""; SELECT 'terminating ANSI-ish escaped single-quote '''; SELECT "terminating ANSI-ish escaped double-quote """; SELECT '\\'; SELECT "\\"; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/quoted_literal.yml000066400000000000000000000142671503426445100251370ustar00rootroot00000000000000# YML test 
files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b60a070dbc152e87be49e05488538fa0d1bf418b18f296b7ba01eb24b4cb8937 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"""aaa"""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\"\"\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: 
update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'baz\\'s'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: '"baz\"s"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating MySQL-y escaped single-quote bazs\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating MySQL-y escaped double-quote bazs\""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating ANSI-ish escaped single-quote '''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating ANSI-ish escaped double-quote """' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"\\"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/rename_table.sql000066400000000000000000000001461503426445100245250ustar00rootroot00000000000000RENAME TABLE old_table TO new_table; RENAME TABLE old_table1 TO new_table1, old_table2 TO new_table2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/rename_table.yml000066400000000000000000000020231503426445100245230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c90b566ed72ba3be4bcb5183c0ff3089ab6ea7ce8b37783931b52e0aa1d855c6 file: - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table - keyword: TO - table_reference: naked_identifier: new_table - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table1 - keyword: TO - table_reference: naked_identifier: new_table1 - comma: ',' - table_reference: naked_identifier: old_table2 - keyword: TO - table_reference: naked_identifier: new_table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repair_table.sql000066400000000000000000000006151503426445100245410ustar00rootroot00000000000000REPAIR TABLE some_table; REPAIR TABLE some_table1, some_table2; REPAIR NO_WRITE_TO_BINLOG TABLE some_table; REPAIR NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; REPAIR LOCAL TABLE some_table; REPAIR LOCAL TABLE some_table1, some_table2; REPAIR TABLE some_table QUICK; REPAIR TABLE some_table EXTENDED; REPAIR TABLE some_table USE_FRM; REPAIR TABLE some_table QUICK EXTENDED USE_FRM; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repair_table.yml000066400000000000000000000050211503426445100245370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1faf15f84e873b3e2be5b00bf51587fe7e641b27ecc10fdfd77ea0c3bc5e56df file: - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: EXTENDED - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: USE_FRM - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - keyword: EXTENDED - keyword: USE_FRM - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_label.sql000066400000000000000000000001051503426445100245210ustar00rootroot00000000000000iteration:repeat set @a = @a + 1; until @a > 5 end repeat iteration; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_label.yml000066400000000000000000000021101503426445100245210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6202885f33bcd0a346d2436056aa723837e6127704fddff948205986aad344dc file: - statement: repeat_statement: naked_identifier: iteration colon: ':' keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_multiple_statements.sql000066400000000000000000000001051503426445100275440ustar00rootroot00000000000000repeat set @a = @a + 1; select 1; until @a > 5 and x = 1 end repeat; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_multiple_statements.yml000066400000000000000000000025211503426445100275520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d51657630a73b8ae3d1fb36fe5494b43d57b3da9ba2fa3be71f3e3ddae4bea06 file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: - variable: '@a' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: and - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_no_label.sql000066400000000000000000000000611503426445100252160ustar00rootroot00000000000000repeat set @a = @a + 1; until @a > 5 end repeat; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/repeat_no_label.yml000066400000000000000000000017631503426445100252320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a907fbb417b57c4bcd31c689236a595b55b81c6367bf4c1bead609bf102f5568 file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/replace.sql000066400000000000000000000014241503426445100235220ustar00rootroot00000000000000REPLACE tbl_name VALUES (1, 2); REPLACE tbl_name VALUES (DEFAULT, DEFAULT); REPLACE tbl_name VALUES (1, 2), (11, 22); REPLACE tbl_name VALUE (1, 2), (11, 22); REPLACE tbl_name (col1, col2) VALUES (1, 2); REPLACE tbl_name (col1, col2) VALUES ROW(1, 2), ROW(11, 22); REPLACE LOW_PRIORITY tbl_name VALUES (1, 2); REPLACE DELAYED tbl_name VALUES (1, 2); REPLACE LOW_PRIORITY INTO tbl_name VALUES (1, 2); REPLACE tbl_name PARTITION (partition_name) VALUES (1, 2); REPLACE tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY tbl_name SET col1 = 1, col2 = 2; REPLACE DELAYED tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY INTO tbl_name SET col1 = 1, col2 = 2; REPLACE tbl_name PARTITION (partition_name) SET col1 = 1, col2 = 2; REPLACE tbl_name SELECT * FROM table_name; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/replace.yml000066400000000000000000000211621503426445100235250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b343d858f9614e81cf80ec421c746686ad3ce2ba4c4e22f2b4a39b3dd619e817 file: - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUE - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - 
statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/replace_returning.sql000066400000000000000000000021441503426445100256170ustar00rootroot00000000000000REPLACE tbl_name VALUES (1, 2) RETURNING col1, col2; REPLACE tbl_name VALUES (DEFAULT, DEFAULT) RETURNING col1, col2; REPLACE tbl_name VALUES (1, 2), (11, 22) RETURNING col1, col2; REPLACE tbl_name VALUE (1, 2), (11, 22) RETURNING col1, col2; REPLACE tbl_name (col1, col2) VALUES (1, 2) RETURNING col1, col2; REPLACE tbl_name (col1, col2) VALUES ROW(1, 2), ROW(11, 22) RETURNING col1, col2; REPLACE LOW_PRIORITY tbl_name VALUES (1, 2) RETURNING col1, col2; REPLACE DELAYED tbl_name VALUES (1, 2) RETURNING col1, col2; REPLACE LOW_PRIORITY INTO tbl_name VALUES (1, 2) RETURNING col1, col2; REPLACE tbl_name PARTITION (partition_name) VALUES (1, 2) RETURNING col1, col2; REPLACE tbl_name SET col1 
= 1, col2 = 2 RETURNING col1, col2; REPLACE LOW_PRIORITY tbl_name SET col1 = 1, col2 = 2 RETURNING col1, col2; REPLACE DELAYED tbl_name SET col1 = 1, col2 = 2 RETURNING col1, col2; REPLACE LOW_PRIORITY INTO tbl_name SET col1 = 1, col2 = 2 RETURNING col1, col2; REPLACE tbl_name PARTITION (partition_name) SET col1 = 1, col2 = 2 RETURNING col1, col2; REPLACE tbl_name SELECT * FROM table_name RETURNING col1, col2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/replace_returning.yml000066400000000000000000000312221503426445100256200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 357ea7858359406444885989629b162b12316baca80c9dcb6c18aff0e66a46e7 file: - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUE - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - 
statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 
comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name returning_clause: - keyword: RETURNING - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/reset_master.sql000066400000000000000000000000441503426445100246010ustar00rootroot00000000000000RESET MASTER; RESET MASTER TO 1234; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/reset_master.yml000066400000000000000000000012261503426445100246060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c1e5dac0655f6e9471d3cab9d0b711be754e37c946b8099192ccd30e6ed4608 file: - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - statement_terminator: ; - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - keyword: TO - numeric_literal: '1234' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal.sql000066400000000000000000000000121503426445100237030ustar00rootroot00000000000000resignal; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal.yml000066400000000000000000000007371503426445100237230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_name.sql000066400000000000000000000000301503426445100267510ustar00rootroot00000000000000resignal testcondition; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_name.yml000066400000000000000000000010051503426445100267560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_sqlstate.sql000066400000000000000000000000331503426445100276740ustar00rootroot00000000000000resignal sqlstate '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_sqlstate.yml000066400000000000000000000010271503426445100277020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9e5f8fee59dc88d1372330944c9d54873fa0887ccc3c70659e4f52157a87aff file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_sqlstate_value.sql000066400000000000000000000000411503426445100310670ustar00rootroot00000000000000resignal sqlstate value '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_condition_sqlstate_value.yml000066400000000000000000000010541503426445100310760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7b6936c92a4757c398e5999d01c2144a086621ab6f1cb6003576f46ab6bacf3d file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_set_signal_info.sql000066400000000000000000000000541503426445100271340ustar00rootroot00000000000000resignal set message_text = 'test message'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_set_signal_info.yml000066400000000000000000000011651503426445100271420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42689aa92182e2de491abf166e1e269b06ae2dbf027126ac7bdf35f04481ed44 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_set_signal_info_multiple.sql000066400000000000000000000001041503426445100310430ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/resignal_set_signal_info_multiple.yml000066400000000000000000000014021503426445100310470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e0e7a0de0aa07cfecf1c1dd4411eef615077c791427559e4c0dfb6095cc0b53a file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_boolean_operators.sql000066400000000000000000000001741503426445100271640ustar00rootroot00000000000000SELECT !1; SELECT 1 && 1; SELECT 1 && 0; SELECT 1 XOR 1; SELECT 1 || 1; SELECT col_1 && 1; SELECT (col_1 = col_2) || col_3; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_boolean_operators.yml000066400000000000000000000047671503426445100272020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d21bd7c1927fa694dafe5d0812e99cabeeffb6fd21b122189c8c7dbb79328f70 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: not_operator: '!' 
numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: XOR - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '||' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: col_1 binary_operator: '&&' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col_1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col_2 end_bracket: ) binary_operator: '||' column_reference: naked_identifier: col_3 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_distinctrow.sql000066400000000000000000000000421503426445100260120ustar00rootroot00000000000000select distinctrow * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_distinctrow.yml000066400000000000000000000015651503426445100260270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9871a45f50a15605c76b50132724c5f346aaa8192ccb8d8808c5a46f5fd4d8b8 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinctrow select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_share.sql000066400000000000000000000000401503426445100254070ustar00rootroot00000000000000SELECT 1 FROM table1 FOR SHARE; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_share.yml000066400000000000000000000014731503426445100254240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 02225e92a0b90b53bdd96e50dc68defd5f0a350525db6500100b390bb2403fa3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: SHARE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update.sql000066400000000000000000000000411503426445100255700ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update.yml000066400000000000000000000014741503426445100256050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c93bb773f206085006bb91a60b1174791819d70a67b1d49f89d67f985871b092 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_lock_in_share_mode.sql000066400000000000000000000000511503426445100314550ustar00rootroot00000000000000SELECT 1 FROM table1 LOCK IN SHARE MODE; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_lock_in_share_mode.yml000066400000000000000000000015461503426445100314710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d370a7c336d97129568cc88f70c4b8d79cce6e3d13c34fcfba66e315f4bfd27f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: LOCK - keyword: IN - keyword: SHARE - keyword: MODE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_nowait.sql000066400000000000000000000000501503426445100271510ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE NOWAIT; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_nowait.yml000066400000000000000000000015241503426445100271620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d507810a93ebf607747effb7b223e1813effc969a3deba66c9bb62ea9c82be5e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: NOWAIT statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_of.sql000066400000000000000000000000511503426445100262550ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_of.yml000066400000000000000000000015571503426445100262730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efebbedd435ef3a626ed5e4556668b386152d98ece84affadbd1dab651a77223 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_of_multiple.sql000066400000000000000000000000611503426445100301710ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test1, test2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_of_multiple.yml000066400000000000000000000016431503426445100302020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 495a190960fe7a8c4ec218cd448be5b7af1c4f54cb09c25f058bc29a18356f48 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test1 - comma: ',' - naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_skip_locked.sql000066400000000000000000000000551503426445100301440ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE SKIP LOCKED; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_for_update_skip_locked.yml000066400000000000000000000015521503426445100301510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 54602cc7d83eb9cbaa73bb881f60b100d97193a89f31f284943f9b8b9da2dc5a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: SKIP - keyword: LOCKED statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_high_priority.sql000066400000000000000000000000441503426445100263230ustar00rootroot00000000000000select HIGH_PRIORITY * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_high_priority.yml000066400000000000000000000015671503426445100263400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3006519272c9151e4e617e0fbaf1af3e6f16b3e2880d360c38b8800a6cb0ca24 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: HIGH_PRIORITY select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_dumpfile.sql000066400000000000000000000000471503426445100263040ustar00rootroot00000000000000select * into dumpfile '' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_dumpfile.yml000066400000000000000000000016261503426445100263120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bcd9484202e1bc4f4c1f2d4680f63be211d5619a1db600890359fc5ecf5784ce file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: dumpfile - quoted_literal: "''" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_multiple_variable.sql000066400000000000000000000001501503426445100301720ustar00rootroot00000000000000select 1, @test2, _test3, 'test4', func(test5) into @test1, @test2, _test3, @test4, @test5 from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_multiple_variable.yml000066400000000000000000000032371503426445100302050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: baea11442609720c73b4ed747cd141b03b536e9cd40ce23820dbd2a07c910f5c file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: column_reference: variable: '@test2' - comma: ',' - select_clause_element: column_reference: naked_identifier: _test3 - comma: ',' - select_clause_element: quoted_literal: "'test4'" - comma: ',' - select_clause_element: function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: test5 end_bracket: ) into_clause: - keyword: into - variable: '@test1' - comma: ',' - variable: '@test2' - comma: ',' - variable: _test3 - comma: ',' - variable: '@test4' - comma: ',' - variable: '@test5' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile.sql000066400000000000000000000000471503426445100261460ustar00rootroot00000000000000select * into outfile 'a' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile.yml000066400000000000000000000016261503426445100261540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 505911514778724197f4931c1d669f0b50165d64f5621b820845c34104536d09 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_enclosed.sql000066400000000000000000000000761503426445100313520ustar00rootroot00000000000000select * into outfile 'a' fields enclosed by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_enclosed.yml000066400000000000000000000017731503426445100313610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 99ffe72d2f7edba62a6d43380981f86080c389bee2cda980e8c2fb8805cbc7d1 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_escaped.sql000066400000000000000000000000751503426445100311610ustar00rootroot00000000000000select * into outfile 'a' fields escaped by '-' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_escaped.yml000066400000000000000000000017711503426445100311670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f7c9103219a5b85415c22c586b419fd91382c37367dfe08abb3e461ee2efa3d2 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: escaped - keyword: by - quoted_literal: "'-'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_optionally_enclosed.sql000066400000000000000000000001111503426445100336120ustar00rootroot00000000000000select * into outfile 'a' fields optionally enclosed by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_optionally_enclosed.yml000066400000000000000000000020271503426445100336240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6bd9d4b9b538b8ebebfe52323b66faed25fc48881d0183faa98539853d10503f file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: optionally - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_terminated.sql000066400000000000000000000001001503426445100316760ustar00rootroot00000000000000select * into outfile 'a' fields terminated by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_fields_terminated.yml000066400000000000000000000017751503426445100317230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d5dfacac0b7fd70cddb0b5cf230f3682c824fb39d1025f11323848005c8ada5d file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: terminated - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_lines_starting.sql000066400000000000000000000000761503426445100312550ustar00rootroot00000000000000select * into outfile 'a' lines starting by '\n' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_lines_starting.yml000066400000000000000000000017731503426445100312640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a286d11eea201a33aee3a9b77876dbc4407ee4d786d5716796e77fcadb84be95 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: starting - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_lines_terminated.sql000066400000000000000000000001001503426445100315420ustar00rootroot00000000000000select * into outfile 'a' lines terminated by '\n' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_outfile_lines_terminated.yml000066400000000000000000000017751503426445100315670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0307c93cec46c6a3506a21fbb3f8c528694bfa2007f1d7a1ead818264363f29d file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: terminated - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_session_variable.sql000066400000000000000000000001771503426445100300330ustar00rootroot00000000000000select 1 into @dumpfile from table1; SELECT name INTO @name FROM t WHERE id = 1; SELECT name FROM t WHERE id = 1 INTO @name; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_into_session_variable.yml000066400000000000000000000042541503426445100300350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d90396fce3fe718549e0cb44f0a32d13f527711a1a3b6edddfb6f9f76d674dc file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' into_clause: keyword: into variable: '@dumpfile' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name into_clause: keyword: INTO variable: '@name' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' into_clause: keyword: INTO variable: '@name' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_local_variable.sql000066400000000000000000000000161503426445100264010ustar00rootroot00000000000000select test2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_local_variable.yml000066400000000000000000000011231503426445100264030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9af97739ef6ad17f592f5387bb4522bd1bf97e6b52ae98b460f76bf6ed3ffcdd file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_lock_in_share_mode.sql000066400000000000000000000000511503426445100272450ustar00rootroot00000000000000select 1 from table1 lock in share mode; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_lock_in_share_mode.yml000066400000000000000000000015461503426445100272610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9fbe45c2085ef72cbde8c14405f345140dc661d053add571a454c18ae3e79500 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: lock - keyword: in - keyword: share - keyword: mode statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_multiple_partition.sql000066400000000000000000000000561503426445100273720ustar00rootroot00000000000000select * from table1 PARTITION(part1, part2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_multiple_partition.yml000066400000000000000000000021051503426445100273710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fc300a18d46f88268099f98290a1499b9c66fa35604ee9cd2cdf6d278b6af14 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: - start_bracket: ( - object_reference: naked_identifier: part1 - comma: ',' - object_reference: naked_identifier: part2 - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_partition.sql000066400000000000000000000000471503426445100254570ustar00rootroot00000000000000select * from table1 PARTITION(part1); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_partition.yml000066400000000000000000000017601503426445100254640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c508e3cccfffae3b003ffd16e156f90f1ebed5bf0524be2a458da7a218c202c1 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: part1 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_session_variable.sql000066400000000000000000000000171503426445100267730ustar00rootroot00000000000000select @test2; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_session_variable.yml000066400000000000000000000011161503426445100267760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f9c4401b3123d25feb2c718e2b77f7f7af27bef874d71b33357b354614b80daa file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: variable: '@test2' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_big_result.sql000066400000000000000000000000451503426445100264620ustar00rootroot00000000000000select SQL_BIG_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_big_result.yml000066400000000000000000000015701503426445100264700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f4d35b2a7b796d378d6888b3b66a816977025435ad2330584bc4e0d0192c9bf file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BIG_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_buffer_result.sql000066400000000000000000000000501503426445100271660ustar00rootroot00000000000000select SQL_BUFFER_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_buffer_result.yml000066400000000000000000000015731503426445100272030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bebb36790002ef31a1e293fe5c53ae91060da9a684608734fd9ae851ff032493 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BUFFER_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_cache.sql000066400000000000000000000000401503426445100253610ustar00rootroot00000000000000select SQL_CACHE * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_cache.yml000066400000000000000000000015631503426445100253760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1ad574866e07fcfc72d05757ea620ff73184427caa3756357a5d8b13259ea4c7 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_calc_found_rows.sql000066400000000000000000000000521503426445100274700ustar00rootroot00000000000000select SQL_CALC_FOUND_ROWS * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_calc_found_rows.yml000066400000000000000000000015751503426445100275050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f50fd122c842bf90fce3cd99e66ed64121af15acd587b423c38251d0dfcaad51 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CALC_FOUND_ROWS select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_no_cache.sql000066400000000000000000000000431503426445100260600ustar00rootroot00000000000000select SQL_NO_CACHE * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_no_cache.yml000066400000000000000000000015661503426445100260750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6de0523b89aa7d305d9217f843e0fdd35e1e092140f7bb1d5b4d692c6247e4c5 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_NO_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_small_result.sql000066400000000000000000000000471503426445100270330ustar00rootroot00000000000000select SQL_SMALL_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_sql_small_result.yml000066400000000000000000000015721503426445100270410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5aec50a0e01748aaba96d940e14c71dd2f03e7d27d777d58c5392e2ed3dac7e file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_SMALL_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_straight_join.sql000066400000000000000000000000441503426445100263070ustar00rootroot00000000000000select STRAIGHT_JOIN * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_straight_join.yml000066400000000000000000000015671503426445100263240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6b9d5f425cf93327107ccb3168505d5544de5d89cdf9b82ce91cc9eaa06831fc file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: STRAIGHT_JOIN select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_with_regexp.sql000066400000000000000000000000631503426445100257710ustar00rootroot00000000000000SELECT * FROM `db`.tbl WHERE col REGEXP '^[0-9]*$' sqlfluff-3.4.2/test/fixtures/dialects/mariadb/select_with_regexp.yml000066400000000000000000000020301503426445100257670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a4ac85c17fee6271dc7e9de20ce2d8943e318d543a49d9179712bce0ff30e346 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`db`' dot: . naked_identifier: tbl where_clause: keyword: WHERE expression: column_reference: naked_identifier: col keyword: REGEXP quoted_literal: "'^[0-9]*$'" sqlfluff-3.4.2/test/fixtures/dialects/mariadb/set.sql000066400000000000000000000010251503426445100226770ustar00rootroot00000000000000SET max_error_count=128; SET skip_parallel_replication=ON; SET GLOBAL max_error_count=256; SET GLOBAL innodb_sync_spin_loops=120; SET some_bool_param = ON; SET some_bool_param = OFF; SET some_bool_param = TRUE; SET some_bool_param = FALSE; SET some_bool_param = 0; SET some_bool_param = 1; SET a = 1, b = 2; SET @abc = 1 + 2; SET @abc = (SELECT 1); SET @id = (SELECT id FROM table1 WHERE field = TRUE LIMIT 1); SET @abc = 1; SET @my_var = 1; SET @my$currency = 1; SET @sha256enabled = 1; SET sql_log_bin = ON; SET sql_log_bin = OFF; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/set.yml000066400000000000000000000131701503426445100227050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad22964072b2ca81c383e123d6db4d4c5529ed29ec5071aed19963ec3c6ad913 file: - statement: set_statement: keyword: SET variable: max_error_count comparison_operator: raw_comparison_operator: '=' numeric_literal: '128' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: skip_parallel_replication - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: GLOBAL - variable: max_error_count - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '256' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: GLOBAL - variable: innodb_sync_spin_loops - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '120' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'TRUE' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'FALSE' - statement_terminator: ; - statement: set_statement: keyword: SET variable: some_bool_param comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: set_statement: keyword: SET variable: some_bool_param comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: a - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - 
comma: ',' - variable: b - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@id' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: field comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@my_var' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@my$currency' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@sha256enabled' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/set_names.sql000066400000000000000000000002541503426445100240650ustar00rootroot00000000000000SET NAMES utf8mb4; SET NAMES 'utf8'; SET NAMES DEFAULT; SET NAMES ascii COLLATE ascii_bin; SET NAMES 'ascii' COLLATE 'ascii_bin'; SET NAMES ascii COLLATE 'ascii_bin'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/set_names.yml000066400000000000000000000026621503426445100240740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2ef00c1db7596009c1fc590674dde894e304d836a5f124b3f03d818ec82ca666 file: - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: utf8mb4 - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - quoted_literal: "'utf8'" - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - keyword: DEFAULT - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: ascii - keyword: COLLATE - collation_reference: naked_identifier: ascii_bin - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - quoted_literal: "'ascii'" - keyword: COLLATE - collation_reference: quoted_literal: "'ascii_bin'" - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: ascii - keyword: COLLATE - collation_reference: quoted_literal: "'ascii_bin'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal.sql000066400000000000000000000000121503426445100233540ustar00rootroot00000000000000resignal; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal.yml000066400000000000000000000007371503426445100233740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_name.sql000066400000000000000000000000301503426445100264220ustar00rootroot00000000000000resignal testcondition; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_name.yml000066400000000000000000000010051503426445100264270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_sqlstate.sql000066400000000000000000000000331503426445100273450ustar00rootroot00000000000000resignal sqlstate '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_sqlstate.yml000066400000000000000000000010271503426445100273530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b9e5f8fee59dc88d1372330944c9d54873fa0887ccc3c70659e4f52157a87aff file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_sqlstate_value.sql000066400000000000000000000000411503426445100305400ustar00rootroot00000000000000resignal sqlstate value '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_condition_sqlstate_value.yml000066400000000000000000000010541503426445100305470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b6936c92a4757c398e5999d01c2144a086621ab6f1cb6003576f46ab6bacf3d file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_set_signal_info.sql000066400000000000000000000000541503426445100266050ustar00rootroot00000000000000resignal set message_text = 'test message'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_set_signal_info.yml000066400000000000000000000011651503426445100266130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42689aa92182e2de491abf166e1e269b06ae2dbf027126ac7bdf35f04481ed44 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_set_signal_info_multiple.sql000066400000000000000000000001041503426445100305140ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500'; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/signal_set_signal_info_multiple.yml000066400000000000000000000014021503426445100305200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e0e7a0de0aa07cfecf1c1dd4411eef615077c791427559e4c0dfb6095cc0b53a file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/system_variables.sql000066400000000000000000000002441503426445100254620ustar00rootroot00000000000000SELECT @@global.time_zone; SELECT @@session.time_zone; SELECT @@global.version; SELECT @@session.rand_seed1; SELECT CONVERT_TZ(NOW(), @@global.time_zone, '+00:00') sqlfluff-3.4.2/test/fixtures/dialects/mariadb/system_variables.yml000066400000000000000000000036621503426445100254730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c58b17a9c3d4b1d3cf78b6a2333131c7a700e969cf454690e1336c7defc2c91a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.version' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.rand_seed1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONVERT_TZ function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: system_variable: '@@global.time_zone' - comma: ',' - expression: quoted_literal: "'+00:00'" - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/mariadb/update.sql000066400000000000000000000017471503426445100234010ustar00rootroot00000000000000UPDATE t1 SET col1 = col1 + 1; UPDATE t1 SET col1 = col1 + 1, col2 = col1; UPDATE items,month SET items.price=month.price WHERE items.id=month.id; UPDATE t SET id = id + 1 ORDER BY id DESC; UPDATE items SET retail = retail * 0.9 WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity > 100); UPDATE items, (SELECT id FROM items WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity < 100)) AS discounted SET items.retail = items.retail * 0.9 WHERE items.id = discounted.id; UPDATE items, (SELECT id, retail / wholesale AS markup, quantity FROM items) AS discounted SET items.retail = items.retail * 0.9 WHERE discounted.markup >= 1.3 AND discounted.quantity < 100 AND items.id = discounted.id; UPDATE LOW_PRIORITY foo SET bar = 7 LIMIT 4; UPDATE a, b SET a.name = b.name WHERE a.id = b.id; UPDATE a join b on a.id = b.id set a.type = b.type where a.type is null; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/update.yml000066400000000000000000000341771503426445100234060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not 
be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5250205eaf35f3295d87fa6bdb962764284a844456ed7a11726746db6f2dd411 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: items - comma: ',' - table_reference: naked_identifier: month - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: items - dot: . - naked_identifier: price - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . - naked_identifier: price - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: id binary_operator: + numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: DESC - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale alias_expression: alias_operator: keyword: AS naked_identifier: markup - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: markup - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' - binary_operator: AND - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: LOW_PRIORITY - table_reference: naked_identifier: foo - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '7' - limit_clause: keyword: LIMIT numeric_literal: '4' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: a - comma: ',' - table_reference: naked_identifier: b - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: a - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: name - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: b join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id set_clause_list: keyword: set set_clause: - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: type where_clause: keyword: where expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: type keyword: is null_literal: 'null' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_database.sql000066400000000000000000000000131503426445100245200ustar00rootroot00000000000000use my_db; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_database.yml000066400000000000000000000010211503426445100245220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3aaf2dd630ae2dbc3dde9cead50589cf7b516507c6ed959abf06d7a0ce7ba46b file: statement: use_statement: keyword: use database_reference: naked_identifier: my_db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index.sql000066400000000000000000000000461503426445100240710ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index.yml000066400000000000000000000020761503426445100241000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1cac348257a408e8fd1127849665a5ee7ed563c0a2aa93770b2fc29857958514 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_group_by.sql000066400000000000000000000000631503426445100266440ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_group_by.yml000066400000000000000000000021761503426445100266550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d4a1185294cbe267fc87d4cd099f2f22f3c83e590e106f237d37715f4f284107 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_join.sql000066400000000000000000000000571503426445100257600ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_join.yml000066400000000000000000000021511503426445100257570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9a6441958ff22d6693ae703886de81600e7bb086f08a61a24ea841a6792b7d8c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_order_by.sql000066400000000000000000000000631503426445100266230ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_index_for_order_by.yml000066400000000000000000000021761503426445100266340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a62cf5389e97666d68013628c8769ee03126c1ef12d7f64c506d8f5ef89a464a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_key.sql000066400000000000000000000000441503426445100235500ustar00rootroot00000000000000SELECT * FROM t1 test USE KEY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_key.yml000066400000000000000000000020741503426445100235570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0506aa4eb10450054ebb9990ae570d8cdb46ae6b2e7292aa2dd0e96f0ec7cd29 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_statement.sql000066400000000000000000000000101503426445100247550ustar00rootroot00000000000000USE db; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/use_statement.yml000066400000000000000000000010161503426445100247660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/values_statement.sql000066400000000000000000000002141503426445100254660ustar00rootroot00000000000000VALUES ROW ('a', 1), ROW ('b', 2); VALUES ROW ('a', 1), ROW (upper('b'), 2+1); VALUES ROW (CURRENT_DATE, '2020-06-04' + interval -5 day); sqlfluff-3.4.2/test/fixtures/dialects/mariadb/values_statement.yml000066400000000000000000000041301503426445100254710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 51d7d52e8904436e491ebc07912e37a2a3803e329c3f1df3f9c3e2ba9209651a file: - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'b'" comma: ',' numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: upper function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: - numeric_literal: '2' - binary_operator: + - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - expression: bare_function: CURRENT_DATE - comma: ',' - expression: quoted_literal: "'2020-06-04'" binary_operator: + interval_expression: keyword: interval expression: numeric_literal: sign_indicator: '-' numeric_literal: '5' date_part: day - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/variable_assignment.sql000066400000000000000000000001611503426445100261210ustar00rootroot00000000000000SELECT @var1:=COUNT(*) FROM t1; SET @var1:=0; SET @var1:=@var2:=0; UPDATE t1 SET c1 = 2 WHERE c1 = @var1:= 1; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/variable_assignment.yml000066400000000000000000000041071503426445100261270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 880ce0a8a6ed5735d5b6287673a5379103f16782534cac1816dccb8a05c8e30b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: variable: '@var1' assignment_operator: := function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@var1' assignment_operator: := numeric_literal: '0' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@var1' assignment_operator: := expression: variable: '@var2' assignment_operator: := numeric_literal: '0' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' where_clause: keyword: WHERE expression: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' variable: '@var1' assignment_operator: := numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/while_label.sql000066400000000000000000000001161503426445100243530ustar00rootroot00000000000000iteration:while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while iteration; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/while_label.yml000066400000000000000000000023241503426445100243600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb5d12b359790327f36cb23a2ea83afc299ff4e2662c3277729a7cfd4bc91795 file: - statement: while_statement: - naked_identifier: iteration - colon: ':' - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/while_no_label.sql000066400000000000000000000000721503426445100250500ustar00rootroot00000000000000while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while; sqlfluff-3.4.2/test/fixtures/dialects/mariadb/while_no_label.yml000066400000000000000000000021771503426445100250620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8a5b1995fc531a7204a5cefdb23104e3ac19d6cc33e85f9b7036d80e950a8887 file: - statement: while_statement: - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/000077500000000000000000000000001503426445100222745ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/materialize/.sqlfluff000066400000000000000000000000411503426445100241120ustar00rootroot00000000000000[sqlfluff] dialect = materialize sqlfluff-3.4.2/test/fixtures/dialects/materialize/alter_statements.sql000066400000000000000000000015161503426445100263760ustar00rootroot00000000000000 -- Alter connection rotate keys ALTER CONNECTION test rotate keys; -- Alter default privileges ALTER DEFAULT PRIVILEGES FOR ROLE mike GRANT SELECT ON TABLES TO joe; ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO managers; -- Alter name ALTER CONNECTION test RENAME TO test2; ALTER INDEX test RENAME TO test2; ALTER MATERIALIZED VIEW test RENAME TO test2; ALTER SOURCE test RENAME TO test2; ALTER SINK test RENAME TO test2; ALTER TABLE test RENAME TO test2; ALTER VIEW test RENAME TO test2; ALTER SECRET test RENAME TO test2; -- Alter index enable ALTER INDEX test_idx SET ENABLED; -- Alter secret value ALTER SECRET IF EXISTS name AS value; ALTER SECRET name AS value; -- Alter Source size ALTER SOURCE IF EXISTS sink_name SET ( SIZE 'xsmall' ); -- Alter Sink size ALTER SINK IF EXISTS source_name SET ( SIZE 'xsmall' ); sqlfluff-3.4.2/test/fixtures/dialects/materialize/alter_statements.yml000066400000000000000000000114031503426445100263740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: d6768381740897c18bd4d77635ea8afe62219c79f093d769df8066cf13948b87 file: - statement: alter_connection_rotate_keys: - keyword: ALTER - keyword: CONNECTION - object_reference: naked_identifier: test - keyword: rotate - keyword: keys - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: mike - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLES - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ALL - keyword: ROLES - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLES - keyword: TO - object_reference: naked_identifier: managers - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: CONNECTION - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SOURCE - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SINK - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: test - keyword: RENAME - keyword: TO - table_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: VIEW - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SECRET - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: SET - keyword: ENABLED - statement_terminator: ; - statement: alter_secret_statement: - keyword: ALTER - keyword: SECRET - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: alter_secret_statement: - keyword: ALTER - keyword: SECRET - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: alter_source_sink_size_statement: - keyword: ALTER - keyword: SOURCE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: sink_name - keyword: SET - bracketed: start_bracket: ( keyword: SIZE compression_type: "'xsmall'" end_bracket: ) - statement_terminator: ; - statement: alter_source_sink_size_statement: - keyword: 
ALTER - keyword: SINK - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: source_name - keyword: SET - bracketed: start_bracket: ( keyword: SIZE compression_type: "'xsmall'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/begin_close_statements.sql000066400000000000000000000000701503426445100275320ustar00rootroot00000000000000 BEGIN; CLOSE my_cursor; END; CLOSE CURSOR; COMMIT; sqlfluff-3.4.2/test/fixtures/dialects/materialize/begin_close_statements.yml000066400000000000000000000016451503426445100275450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0de000b9f06ec8d81805df41727cab47fcc79f2671969f20c7365d5d379ee2ae file: - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: close_statement: keyword: CLOSE object_reference: naked_identifier: my_cursor - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: close_statement: keyword: CLOSE object_reference: naked_identifier: CURSOR - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/copy_to_from_statements.sql000066400000000000000000000010071503426445100277610ustar00rootroot00000000000000 COPY (SELECT * FROM t1 WHERE value < 100) TO STDOUT; COPY (SELECT * FROM t1 WHERE value < 100) TO STDOUT WITH (FORMAT binary); COPY (SUBSCRIBE some_view) TO STDOUT; COPY (SUBSCRIBE some_view) TO STDOUT WITH (FORMAT binary); COPY (VALUES (1, '2'), (3, '4'), (5, '\\\t\n\rtest\\N'), (6, NULL) ORDER BY column1) TO STDOUT; COPY t FROM STDIN; COPY t FROM STDIN WITH (FORMAT CSV, DELIMITER '!', QUOTE '!'); COPY t FROM STDIN WITH (DELIMITER '|'); COPY t FROM STDIN (FORMAT CSV); COPY t FROM STDIN (DELIMITER '|'); sqlfluff-3.4.2/test/fixtures/dialects/materialize/copy_to_from_statements.yml000066400000000000000000000125021503426445100277650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 84e1fc600667de47a3321ada91cfbbd0173dafba5bd4067ca76d87af6c07b9be file: - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: binary - end_bracket: ) - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( keyword: SUBSCRIBE object_reference: naked_identifier: some_view end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( keyword: SUBSCRIBE object_reference: naked_identifier: some_view end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: binary - end_bracket: ) - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: - start_bracket: ( - keyword: VALUES - bracketed: start_bracket: ( numeric_literal: '1' comma: ',' single_quote: "'2'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '3' comma: ',' single_quote: "'4'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '5' comma: ',' single_quote: "'\\\\\\t\\n\\rtest\\\\N'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '6' comma: ',' word: 'NULL' end_bracket: ) - word: ORDER - word: BY - word: column1 - end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: CSV - comma: ',' - word: DELIMITER - single_quote: "'!'" - comma: ',' - word: QUOTE - single_quote: "'!'" - end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: start_bracket: ( word: DELIMITER single_quote: "'|'" end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - bracketed: - start_bracket: ( - word: FORMAT - word: CSV - end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - 
bracketed: start_bracket: ( word: DELIMITER single_quote: "'|'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_cluster_replica_statements.sql000066400000000000000000000007341503426445100317730ustar00rootroot00000000000000 CREATE CLUSTER mz_joe REPLICAS (r1 (size '1')); CREATE CLUSTER mz_joe REPLICAS (r1 (size '1'), r2 (size '1')); CREATE CLUSTER c1 SIZE = 'medium', REPLICATION FACTOR = 2; CREATE CLUSTER c SIZE = 'xsmall', INTROSPECTION INTERVAL = 0; CREATE CLUSTER c1 SIZE 'xsmall', REPLICATION FACTOR = 0; CREATE CLUSTER REPLICA default.size_1 SIZE 'large'; CREATE CLUSTER REPLICA c1.r1 SIZE = 'medium'; CREATE CLUSTER REPLICA default.replica AVAILABILITY ZONE 'a', AVAILABILITY ZONE 'b'; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_cluster_replica_statements.yml000066400000000000000000000063171503426445100320000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 72d7560a2b9989efd3cc3346918c571f1c4ce49d86ee6353d81f12fe4054cd93 file: - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: mz_joe - keyword: REPLICAS - bracketed: start_bracket: ( word: r1 bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: mz_joe - keyword: REPLICAS - bracketed: - start_bracket: ( - word: r1 - bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) - comma: ',' - word: r2 - bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c1 - word: SIZE - equals: '=' - single_quote: "'medium'" - comma: ',' - word: REPLICATION - word: FACTOR - equals: '=' - numeric_literal: '2' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c - word: SIZE - equals: '=' - single_quote: "'xsmall'" - comma: ',' - word: INTROSPECTION - word: INTERVAL - equals: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c1 - word: SIZE - single_quote: "'xsmall'" - comma: ',' - word: REPLICATION - word: FACTOR - equals: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: default - dot: . - word: size_1 - word: SIZE - single_quote: "'large'" - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: c1 - dot: . - word: r1 - word: SIZE - equals: '=' - single_quote: "'medium'" - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: default - dot: . 
- word: replica - word: AVAILABILITY - word: ZONE - single_quote: "'a'" - comma: ',' - word: AVAILABILITY - word: ZONE - single_quote: "'b'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_connection_statement.sql000066400000000000000000000031261503426445100305650ustar00rootroot00000000000000 CREATE SECRET IF NOT EXISTS name AS value; CREATE SECRET name AS value; CREATE CONNECTION privatelink_svc TO AWS PRIVATELINK ( SERVICE NAME 'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc', AVAILABILITY ZONES ('use1-az1', 'use1-az4') ); CREATE CONNECTION csr_ssl TO CONFLUENT SCHEMA REGISTRY ( URL 'https://rp-f00000bar.data.vectorized.cloud:30993', SSL KEY = SECRET csr_ssl_key, SSL CERTIFICATE = SECRET csr_ssl_crt, USERNAME = 'foo', PASSWORD = SECRET csr_password ); CREATE CONNECTION privatelink_svc TO AWS PRIVATELINK ( SERVICE NAME 'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc', AVAILABILITY ZONES ('use1-az1', 'use1-az4') ); CREATE CONNECTION csr_privatelink TO CONFLUENT SCHEMA REGISTRY ( URL 'http://my-confluent-schema-registry:8081', AWS PRIVATELINK privatelink_svc ); CREATE CONNECTION kafka_connection TO KAFKA ( BROKER 'rp-f00000bar.data.vectorized.cloud:30365', SSL KEY = SECRET kafka_ssl_key, SSL CERTIFICATE = SECRET kafka_ssl_crt ); CREATE CONNECTION kafka_connection TO KAFKA ( BROKERS ('broker1:9092', 'broker2:9092') ); CREATE CONNECTION pg_connection TO POSTGRES ( HOST 'instance.foo000.us-west-1.rds.amazonaws.com', PORT 5432, USER 'postgres', PASSWORD SECRET pgpass, SSL MODE 'require', DATABASE 'postgres' ); CREATE CONNECTION tunnel TO SSH TUNNEL ( HOST 'bastion-host', PORT 22, USER 'materialize', ); CREATE CONNECTION pg_connection TO POSTGRES ( HOST 'instance.foo000.us-west-1.rds.amazonaws.com', PORT 5432, SSH TUNNEL tunnel, DATABASE 'postgres' ); sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_connection_statement.yml000066400000000000000000000147341503426445100305760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0c7d732ae2c0a2f6008f208779232b37be772e259e75bc46f69e8f16968ef08c file: - statement: create_secret_statement: - keyword: CREATE - keyword: SECRET - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: create_secret_statement: - keyword: CREATE - keyword: SECRET - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: privatelink_svc - keyword: TO - keyword: AWS - keyword: PRIVATELINK - bracketed: - start_bracket: ( - word: SERVICE - word: NAME - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'" - comma: ',' - word: AVAILABILITY - word: ZONES - bracketed: - start_bracket: ( - single_quote: "'use1-az1'" - comma: ',' - single_quote: "'use1-az4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: csr_ssl - keyword: TO - keyword: CONFLUENT - keyword: SCHEMA - keyword: REGISTRY - bracketed: - start_bracket: ( - word: URL - single_quote: "'https://rp-f00000bar.data.vectorized.cloud:30993'" - comma: ',' - word: SSL - word: KEY - equals: '=' - word: SECRET - word: csr_ssl_key - comma: ',' - word: SSL - word: CERTIFICATE - equals: '=' - word: SECRET - word: csr_ssl_crt - comma: ',' - word: USERNAME - equals: '=' - single_quote: "'foo'" - comma: ',' - word: PASSWORD - equals: '=' - word: SECRET - word: csr_password - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: privatelink_svc - keyword: TO - keyword: AWS - keyword: PRIVATELINK - bracketed: - start_bracket: ( - word: SERVICE - word: NAME - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'" - comma: ',' - word: AVAILABILITY - word: ZONES - bracketed: - start_bracket: ( - single_quote: "'use1-az1'" - comma: ',' - single_quote: "'use1-az4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: csr_privatelink - keyword: TO - keyword: CONFLUENT - keyword: SCHEMA - keyword: REGISTRY - bracketed: - start_bracket: ( - word: URL - single_quote: "'http://my-confluent-schema-registry:8081'" - comma: ',' - word: AWS - word: PRIVATELINK - word: privatelink_svc - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - keyword: TO - keyword: KAFKA - bracketed: - start_bracket: ( - word: BROKER - single_quote: "'rp-f00000bar.data.vectorized.cloud:30365'" - comma: ',' - word: SSL - word: KEY - equals: '=' - word: SECRET - word: kafka_ssl_key - comma: ',' - word: SSL - word: CERTIFICATE - equals: '=' - word: SECRET - word: kafka_ssl_crt - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - keyword: TO - keyword: KAFKA - bracketed: start_bracket: ( word: BROKERS bracketed: - start_bracket: ( - single_quote: "'broker1:9092'" - comma: ',' - single_quote: "'broker2:9092'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: 
create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - keyword: TO - keyword: POSTGRES - bracketed: - start_bracket: ( - word: HOST - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'" - comma: ',' - word: PORT - numeric_literal: '5432' - comma: ',' - word: USER - single_quote: "'postgres'" - comma: ',' - word: PASSWORD - word: SECRET - word: pgpass - comma: ',' - word: SSL - word: MODE - single_quote: "'require'" - comma: ',' - word: DATABASE - single_quote: "'postgres'" - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: tunnel - keyword: TO - keyword: SSH - keyword: TUNNEL - bracketed: - start_bracket: ( - word: HOST - single_quote: "'bastion-host'" - comma: ',' - word: PORT - numeric_literal: '22' - comma: ',' - word: USER - single_quote: "'materialize'" - comma: ',' - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - keyword: TO - keyword: POSTGRES - bracketed: - start_bracket: ( - word: HOST - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'" - comma: ',' - word: PORT - numeric_literal: '5432' - comma: ',' - word: SSH - word: TUNNEL - word: tunnel - comma: ',' - word: DATABASE - single_quote: "'postgres'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_index.sql000066400000000000000000000003001503426445100254400ustar00rootroot00000000000000 CREATE INDEX active_customers_geo_idx ON active_customers (geo_id); CREATE INDEX active_customers_exp_idx ON active_customers (upper(guid)); CREATE INDEX i2 IN CLUSTER cluster2 ON t1 (f1); sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_index.yml000066400000000000000000000031231503426445100254500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0d5e8e3fb676ceada06a3127eb1fc1be40b9896c5b0f0dd82f01043d3ee2f6d3 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: active_customers_geo_idx - keyword: 'ON' - object_reference: naked_identifier: active_customers - bracketed: start_bracket: ( word: geo_id end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: active_customers_exp_idx - keyword: 'ON' - object_reference: naked_identifier: active_customers - bracketed: start_bracket: ( word: upper bracketed: start_bracket: ( word: guid end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: i2 - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: cluster2 - keyword: 'ON' - object_reference: naked_identifier: t1 - bracketed: start_bracket: ( word: f1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_sink_statements.sql000066400000000000000000000010151503426445100275500ustar00rootroot00000000000000CREATE SINK quotes_sink FROM quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM WITH (SIZE = '3xsmall'); CREATE SINK frank_quotes_sink FROM frank_quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'frank-quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM WITH (SIZE = '3xsmall'); CREATE SINK frank_quotes_cluster IN CLUSTER my_cluster FROM frank_quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'frank-quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_sink_statements.yml000066400000000000000000000046551503426445100275670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 454f298dd31e8681e994d0a8f8da924757e59347a6468218e1b173feb5fdcd36 file: - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: quotes_sink - keyword: FROM - object_reference: naked_identifier: quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: frank_quotes_sink - keyword: FROM - object_reference: naked_identifier: frank_quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'frank-quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: frank_quotes_cluster - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - object_reference: naked_identifier: frank_quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'frank-quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_source_statements.sql000066400000000000000000000057411503426445100301160ustar00rootroot00000000000000 CREATE SOURCE avro_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_connection WITH (SIZE = '3xsmall'); CREATE VIEW jsonified_kafka_source AS SELECT data->>'field1' AS field_1, data->>'field2' AS field_2, data->>'field3' AS field_3 FROM (SELECT CONVERT_FROM(data, 'utf8')::jsonb AS data FROM json_source); CREATE SOURCE proto_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT PROTOBUF USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_connection WITH (SIZE = '3xsmall'); CREATE SOURCE text_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT TEXT ENVELOPE UPSERT WITH (SIZE = '3xsmall'); CREATE SOURCE csv_source (col_foo, col_bar, col_baz) FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT CSV WITH 3 COLUMNS WITH (SIZE = '3xsmall'); CREATE SOURCE auction_house FROM LOAD GENERATOR AUCTION FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE marketing FROM LOAD GENERATOR MARKETING (SCALE FACTOR 1) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE marketing IN CLUSTER my_cluster FROM LOAD GENERATOR MARKETING FOR ALL TABLES; CREATE SOURCE tpch FROM LOAD GENERATOR TPCH (SCALE FACTOR 1) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE counter FROM LOAD GENERATOR COUNTER WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source IN CLUSTER my_cluster FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') FOR ALL TABLES; CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection 
(PUBLICATION 'mz_source') FOR TABLES (table_1, table_2 AS alias_table_2) WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection ( PUBLICATION 'mz_source', TEXT COLUMNS (table.column_of_unsupported_type) ) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') WITH (SIZE = '3xsmall'); CREATE SOURCE my_webhook_source IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON INCLUDE HEADERS ( NOT 'authorization', NOT 'x-api-key' ); CREATE SOURCE my_webhook_source IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON CHECK ( WITH ( HEADERS, BODY AS request_body, SECRET my_webhook_shared_secret ) decode(headers->'x-signature', 'base64') = hmac(request_body, my_webhook_shared_secret, 'sha256') ); CREATE SOURCE webhook_with_basic_auth IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON CHECK ( WITH ( HEADERS, BODY AS request_body, SECRET BASIC_HOOK_AUTH ) headers->'authorization' = BASIC_HOOK_AUTH ); CREATE TYPE type_name AS ( field_name field_type , field_name field_type ); CREATE TYPE row_type AS (a int, b text); CREATE TYPE nested_row_type AS (a row_type, b float8); sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_source_statements.yml000066400000000000000000000413111503426445100301110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83624626813f5a9ac5b981a054f749927531caac54b3f71cf1231a08b21b7300 file: - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: avro_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: AVRO - word: USING - word: CONFLUENT - word: SCHEMA - word: REGISTRY - word: CONNECTION - word: csr_connection - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: jsonified_kafka_source - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field1'" alias_expression: alias_operator: keyword: AS naked_identifier: field_1 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field2'" alias_expression: alias_operator: keyword: AS naked_identifier: field_2 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field3'" alias_expression: alias_operator: keyword: AS naked_identifier: field_3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: function: function_name: function_name_identifier: CONVERT_FROM function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: data - comma: ',' - expression: 
quoted_literal: "'utf8'" - end_bracket: ) casting_operator: '::' data_type: keyword: jsonb alias_expression: alias_operator: keyword: AS naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: json_source end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: proto_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: PROTOBUF - word: USING - word: CONFLUENT - word: SCHEMA - word: REGISTRY - word: CONNECTION - word: csr_connection - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: text_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: TEXT - word: ENVELOPE - word: UPSERT - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: csv_source - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_foo - comma: ',' - column_reference: naked_identifier: col_bar - comma: ',' - column_reference: naked_identifier: col_baz - end_bracket: ) - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: CSV - word: WITH - numeric_literal: '3' - word: COLUMNS - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: auction_house - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: AUCTION - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: marketing - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: MARKETING - bracketed: - start_bracket: ( - word: SCALE - word: FACTOR - numeric_literal: '1' - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: marketing - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: MARKETING - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: 
SOURCE - object_reference: naked_identifier: tpch - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: TPCH - bracketed: - start_bracket: ( - word: SCALE - word: FACTOR - numeric_literal: '1' - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: counter - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: COUNTER - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: TABLES - bracketed: - start_bracket: ( - word: table_1 - comma: ',' - word: table_2 - word: AS - word: alias_table_2 - end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: - start_bracket: ( - word: PUBLICATION - single_quote: "'mz_source'" - comma: ',' - word: TEXT - word: COLUMNS - bracketed: - start_bracket: ( - word: table - dot: . 
- word: column_of_unsupported_type - end_bracket: ) - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: my_webhook_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - keyword: INCLUDE - keyword: HEADERS - bracketed: - start_bracket: ( - word: NOT - single_quote: "'authorization'" - comma: ',' - word: NOT - single_quote: "'x-api-key'" - end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: my_webhook_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - word: CHECK - bracketed: - start_bracket: ( - word: WITH - bracketed: - start_bracket: ( - word: HEADERS - comma: ',' - word: BODY - word: AS - word: request_body - comma: ',' - word: SECRET - word: my_webhook_shared_secret - end_bracket: ) - word: decode - bracketed: - start_bracket: ( - word: headers - json_operator: -> - single_quote: "'x-signature'" - comma: ',' - single_quote: "'base64'" - end_bracket: ) - equals: '=' - word: hmac - bracketed: - start_bracket: ( - word: request_body - comma: ',' - word: my_webhook_shared_secret - comma: ',' - single_quote: "'sha256'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: webhook_with_basic_auth - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - word: CHECK - bracketed: - start_bracket: ( - word: WITH - bracketed: - start_bracket: ( - word: HEADERS - comma: ',' - word: BODY - word: AS - word: request_body - comma: ',' - word: SECRET - word: BASIC_HOOK_AUTH - end_bracket: ) - word: headers - json_operator: -> - single_quote: "'authorization'" - equals: '=' - word: BASIC_HOOK_AUTH - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: type_name - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: field_name - data_type: data_type_identifier: field_type - comma: ',' - object_reference: naked_identifier: field_name - data_type: data_type_identifier: field_type - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: row_type - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: a - data_type: keyword: int - comma: ',' 
- object_reference: naked_identifier: b - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: nested_row_type - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: a - data_type: data_type_identifier: row_type - comma: ',' - object_reference: naked_identifier: b - data_type: keyword: float8 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_views.sql000066400000000000000000000016021503426445100254740ustar00rootroot00000000000000 CREATE MATERIALIZED VIEW "test"."test" AS SELECT 1 AS "id"; CREATE VIEW "test"."test" AS SELECT 1 AS "id"; CREATE MATERIALIZED VIEW "test"."test" AS SELECT '{"a": 1}'::json AS "id"; CREATE MATERIALIZED VIEW active_customer_per_geo AS SELECT geo.name, count(*) FROM geo_regions AS geo JOIN active_customers ON active_customers.geo_id = geo.id GROUP BY geo.name; CREATE MATERIALIZED VIEW active_customers AS SELECT guid, geo_id, last_active_on FROM customer_source GROUP BY geo_id; CREATE VIEW purchase_sum_by_region AS SELECT sum(purchase.amount) AS region_sum, region.id AS region_id FROM region INNER JOIN user ON region.id = user.region_id INNER JOIN purchase ON purchase.user_id = user.id GROUP BY region.id; CREATE TEMP VIEW "test"."test" AS SELECT 1 AS "id"; CREATE TEMPORARY TABLE t (a int, b text NOT NULL); sqlfluff-3.4.2/test/fixtures/dialects/materialize/create_views.yml000066400000000000000000000154211503426445100255020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e20bdbd6051c0913632b730da28fc70cead8de08bb90922f1d1dddfc221dbb32 file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - word: SELECT - numeric_literal: '1' - word: AS - double_quote: '"id"' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS quoted_identifier: '"id"' - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - word: SELECT - single_quote: "'{\"a\": 1}'" - casting_operator: '::' - word: json - word: AS - double_quote: '"id"' - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: active_customer_per_geo - keyword: AS - word: SELECT - word: geo - dot: . - word: name - comma: ',' - word: count - bracketed: start_bracket: ( star: '*' end_bracket: ) - word: FROM - word: geo_regions - word: AS - word: geo - word: JOIN - word: active_customers - word: 'ON' - word: active_customers - dot: . - word: geo_id - equals: '=' - word: geo - dot: . 
- word: id - word: GROUP - word: BY - word: geo - dot: . - word: name - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: active_customers - keyword: AS - word: SELECT - word: guid - comma: ',' - word: geo_id - comma: ',' - word: last_active_on - word: FROM - word: customer_source - word: GROUP - word: BY - word: geo_id - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: purchase_sum_by_region - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: purchase - dot: . - naked_identifier: amount end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: region_sum - comma: ',' - select_clause_element: column_reference: - naked_identifier: region - dot: . - naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: region_id from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: region - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bare_function: user - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: region - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: user - dot: . - naked_identifier: region_id - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: purchase - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: purchase - dot: . - naked_identifier: user_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: user - dot: . - naked_identifier: id groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: region - dot: . - naked_identifier: id - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . 
- quoted_identifier: '"test"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS quoted_identifier: '"id"' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: text - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/drop_statements.sql000066400000000000000000000015111503426445100262260ustar00rootroot00000000000000DROP CONNECTION IF EXISTS name CASCADE; DROP CLUSTER IF EXISTS name CASCADE; DROP CLUSTER REPLICA IF EXISTS name CASCADE; DROP DATABASE IF EXISTS name CASCADE; DROP INDEX IF EXISTS name CASCADE; DROP MATERIALIZED VIEW IF EXISTS name CASCADE; DROP ROLE IF EXISTS name CASCADE; DROP SECRET IF EXISTS name CASCADE; DROP SCHEMA IF EXISTS name CASCADE; DROP SINK IF EXISTS name CASCADE; DROP SOURCE IF EXISTS name CASCADE; DROP TABLE IF EXISTS name CASCADE; DROP TYPE IF EXISTS name CASCADE; DROP VIEW IF EXISTS name CASCADE; DROP USER IF EXISTS name CASCADE; DROP CONNECTION name; DROP CLUSTER name; DROP CLUSTER REPLICA name; DROP DATABASE name; DROP INDEX name; DROP MATERIALIZED VIEW name; DROP ROLE name; DROP SECRET name; DROP SCHEMA name; DROP SINK name; DROP SOURCE name; DROP TABLE name; DROP TYPE name; DROP VIEW name; DROP USER name; sqlfluff-3.4.2/test/fixtures/dialects/materialize/drop_statements.yml000066400000000000000000000137721503426445100262440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 60d61fd583710b501d72748aa44e061fe48d833ac28dc786840388d1e463b0e4 file: - statement: drop_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: REPLICA - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SECRET - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SINK - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SOURCE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - data_type: data_type_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CONNECTION - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: REPLICA - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: INDEX - object_reference: naked_identifier: name - 
statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SECRET - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SINK - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SOURCE - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: name - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/explain_statements.sql000066400000000000000000000003361503426445100267260ustar00rootroot00000000000000 EXPLAIN SELECT * FROM t1; EXPLAIN SELECT * FROM t1 WHERE f1 = 1; EXPLAIN PHYSICAL PLAN FOR VIEW v1; EXPLAIN VIEW v; EXPLAIN WITH(arity, join_impls) VIEW foo; EXPLAIN OPTIMIZED PLAN WITH(arity) AS TEXT FOR VIEW test1; sqlfluff-3.4.2/test/fixtures/dialects/materialize/explain_statements.yml000066400000000000000000000051771503426445100267400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b038606aef92dbf2699f3794a67c439727bbfc8cde01ece39a63d8c8f0d3b2df file: - statement: explain_statement: keyword: EXPLAIN select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: f1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: PHYSICAL - keyword: PLAN - keyword: FOR - keyword: VIEW - object_reference: naked_identifier: v1 - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: VIEW - object_reference: naked_identifier: v - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: WITH - bracketed: - start_bracket: ( - word: arity - comma: ',' - word: join_impls - end_bracket: ) - keyword: VIEW - object_reference: naked_identifier: foo - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: OPTIMIZED - keyword: PLAN - keyword: WITH - bracketed: start_bracket: ( word: arity end_bracket: ) - keyword: AS - keyword: TEXT - keyword: FOR - keyword: VIEW - object_reference: naked_identifier: test1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/grant_statements.sql000066400000000000000000000002321503426445100263740ustar00rootroot00000000000000GRANT SELECT ON mv TO joe, mike; GRANT USAGE, CREATE ON DATABASE materialize TO joe; GRANT ALL ON CLUSTER dev TO joe; GRANT CREATEDB ON SYSTEM TO joe; sqlfluff-3.4.2/test/fixtures/dialects/materialize/grant_statements.yml000066400000000000000000000030201503426445100263740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 994ca2114abc64576e549d934ff7e6e1ef58be7efffacde90e0f385c5b84b108 file: - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: mv - keyword: TO - role_reference: naked_identifier: joe - comma: ',' - role_reference: naked_identifier: mike - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - comma: ',' - keyword: CREATE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: materialize - keyword: TO - role_reference: naked_identifier: joe - statement_terminator: ; - statement: grant_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: CLUSTER - object_reference: naked_identifier: dev - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; - statement: grant_statement: - keyword: GRANT - keyword: CREATEDB - keyword: 'ON' - object_reference: naked_identifier: SYSTEM - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/insert_statements.sql000066400000000000000000000006401503426445100265700ustar00rootroot00000000000000INSERT INTO kv VALUES ('A'); INSERT INTO kv (v) VALUES ('a'); INSERT INTO kv (k) VALUES ('nil1'); INSERT INTO kv (k) VALUES ('nil2'); INSERT INTO kv VALUES ('nil3', NULL); INSERT INTO kv VALUES ('nil4', NULL); INSERT INTO kv (k,v) VALUES ('a', 'b'), ('c', 'd'); -- RETURNING INSERT INTO t (a) VALUES (10) RETURNING b; INSERT INTO t VALUES (7, 8) RETURNING (SELECT 1); INSERT INTO t VALUES (7, 8) RETURNING z; sqlfluff-3.4.2/test/fixtures/dialects/materialize/insert_statements.yml000066400000000000000000000125221503426445100265740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 173ef80a92e232d685064d8fe7b1b8bfffd92028ffa232af0f9201afcb59f42a file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'A'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: v end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: k end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'nil1'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: k end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'nil2'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'nil3'" - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'nil4'" - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'c'" - comma: ',' - expression: quoted_literal: "'d'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - keyword: RETURNING - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: 
'7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: z - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/materialize/show_statements.sql000066400000000000000000000020741503426445100262470ustar00rootroot00000000000000 SHOW COLUMNS; SHOW CONNECTIONS; SHOW CLUSTERS; SHOW CLUSTER REPLICAS; SHOW DATABASES; SHOW INDEXES; SHOW MATERIALIZED VIEWS; SHOW SECRETS; SHOW SCHEMAS; SHOW SINKS; SHOW SOURCES; SHOW TABLES; SHOW TYPES; SHOW VIEWS; SHOW OBJECTS; SHOW CREATE CONNECTION connection_name; SHOW CREATE INDEX index_name; SHOW CREATE MATERIALIZED VIEW view_name; SHOW CREATE SINK sink_name; SHOW CREATE SOURCE source_name; SHOW CREATE TABLE table_name; SHOW CREATE VIEW view_name; SHOW TABLES LIKE 'v%'; SHOW DATABASES WHERE database_name = 'database_name'; SHOW SECRETS WHERE name='secret_name'; SHOW SECRETS LIKE 'secret_name%'; SHOW COLUMNS FROM some_table WHERE name='column_name'; SHOW SECRETS FROM some_schema WHERE name='secret_name'; SHOW SECRETS FROM some_schema WHERE LIKE 'name%'; SHOW SCHEMAS FROM database_name; SHOW SINKS FROM some_schema; SHOW SOURCES FROM some_schema; SHOW TABLES FROM some_schema; SHOW TYPES FROM some_schema; SHOW VIEWS FROM some_schema; SHOW OBJECTS FROM some_schema; SHOW INDEXES IN CLUSTER bar WHERE name NOT LIKE 'mz_%'; SHOW MATERIALIZED VIEWS IN CLUSTER other; sqlfluff-3.4.2/test/fixtures/dialects/materialize/show_statements.yml000066400000000000000000000161541503426445100262550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3a15be35a6e89c451fc42a13f335474208d1cf9af5aa778e64035de882852fb6 file: - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CONNECTIONS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CLUSTERS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CLUSTER - keyword: REPLICAS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: INDEXES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: MATERIALIZED - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SINKS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SOURCES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TYPES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: OBJECTS - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: connection_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: index_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: sink_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: source_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: LIKE - quoted_literal: "'v%'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: WHERE - expression: column_reference: naked_identifier: database_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'database_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: WHERE - expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'secret_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: LIKE - quoted_literal: "'secret_name%'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: FROM - object_reference: naked_identifier: some_table - keyword: WHERE - 
    - expression:
        column_reference:
          naked_identifier: name
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'column_name'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SECRETS
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
    - keyword: WHERE
    - expression:
        column_reference:
          naked_identifier: name
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'secret_name'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SECRETS
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
    - keyword: WHERE
    - expression:
        data_type:
          data_type_identifier: LIKE
        quoted_literal: "'name%'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SCHEMAS
    - keyword: FROM
    - object_reference:
        naked_identifier: database_name
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SINKS
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SOURCES
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TYPES
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: VIEWS
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: OBJECTS
    - keyword: FROM
    - object_reference:
        naked_identifier: some_schema
- statement_terminator: ;
- statement:
    show_indexes_statement:
    - keyword: SHOW
    - keyword: INDEXES
    - keyword: IN
    - keyword: CLUSTER
    - object_reference:
        naked_identifier: bar
    - keyword: WHERE
    - expression:
      - column_reference:
          naked_identifier: name
      - keyword: NOT
      - keyword: LIKE
      - quoted_literal: "'mz_%'"
- statement_terminator: ;
- statement:
    show_materialized_views_statement:
    - keyword: SHOW
    - keyword: MATERIALIZED
    - keyword: VIEWS
    - keyword: IN
    - keyword: CLUSTER
    - object_reference:
        naked_identifier: other
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/materialize/subscribe_fetch_statements.sql

FETCH 100 c WITH (timeout='1s');
FETCH ALL c1;
DECLARE c CURSOR FOR SUBSCRIBE fetch_during_ingest;
DECLARE c CURSOR FOR SUBSCRIBE (SELECT * FROM t1);

sqlfluff-3.4.2/test/fixtures/dialects/materialize/subscribe_fetch_statements.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
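# For example, after editing subscribe_fetch_statements.sql, a regeneration
# run from the repository root might look like the line below. The positional
# dialect filter is an assumption, not a documented interface -- check the
# script's --help output for its actual options:
#   python test/generate_parse_fixture_yml.py materialize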
_hash: 707e4a0f12e79ac3abfcad57ba1da95f7c6b6fbc48f4e5b16a7cf68c568cfa04
file:
- statement:
    fetch_statement:
    - keyword: FETCH
    - numeric_literal: '100'
    - object_reference:
        naked_identifier: c
    - keyword: WITH
    - bracketed:
        start_bracket: (
        word: timeout
        equals: '='
        single_quote: "'1s'"
        end_bracket: )
- statement_terminator: ;
- statement:
    fetch_statement:
    - keyword: FETCH
    - keyword: ALL
    - object_reference:
        naked_identifier: c1
- statement_terminator: ;
- statement:
    declare_statement:
    - keyword: DECLARE
    - object_reference:
        naked_identifier: c
    - keyword: CURSOR
    - keyword: FOR
    - word: SUBSCRIBE
    - word: fetch_during_ingest
- statement_terminator: ;
- statement:
    declare_statement:
    - keyword: DECLARE
    - object_reference:
        naked_identifier: c
    - keyword: CURSOR
    - keyword: FOR
    - word: SUBSCRIBE
    - bracketed:
      - start_bracket: (
      - word: SELECT
      - star: '*'
      - word: FROM
      - word: t1
      - end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/

sqlfluff-3.4.2/test/fixtures/dialects/mysql/.sqlfluff

[sqlfluff]
dialect = mysql

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_database.sql

ALTER DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N';
ALTER DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N';
ALTER SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N';
ALTER DATABASE my_database READ ONLY DEFAULT;
ALTER DATABASE my_database READ ONLY 0;
ALTER DATABASE my_database READ ONLY 1;
ALTER DATABASE READ ONLY DEFAULT;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_database.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a1c4b4182cc44aace3c98db61b2527155aad4526958fbad5a5d52dc0be2e7079
file:
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - comparison_operator:
          raw_comparison_operator: '='
      - naked_identifier: utf8mb4
    - alter_option_segment:
        keyword: COLLATE
        comparison_operator:
          raw_comparison_operator: '='
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: SCHEMA
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_0900_ai_ci
    - alter_option_segment:
      - keyword: DEFAULT
      - keyword: ENCRYPTION
      - quoted_literal: "'N'"
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: READ
      - keyword: ONLY
      - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: READ
      - keyword: ONLY
      - numeric_literal: '0'
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: my_database
    - alter_option_segment:
      - keyword: READ
      - keyword: ONLY
      - numeric_literal: '1'
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - alter_option_segment:
      - keyword: READ
      - keyword: ONLY
      - keyword: DEFAULT
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_event.sql

ALTER EVENT no_such_event ON SCHEDULE EVERY '2:3' DAY_HOUR;
ALTER EVENT myevent ON SCHEDULE EVERY 12 HOUR STARTS CURRENT_TIMESTAMP + INTERVAL 4 HOUR;
ALTER EVENT myevent ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 DAY DO TRUNCATE TABLE myschema.mytable;
ALTER EVENT myevent DISABLE;
ALTER EVENT myevent RENAME TO yourevent;
ALTER EVENT olddb.myevent RENAME TO newdb.myevent;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_event.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 958dd9ccd26155a2656b06b47b13b8456b2f3f1d7161a9d71b7520b12a3cb3ef
file:
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
        naked_identifier: no_such_event
    - keyword: 'ON'
    - keyword: SCHEDULE
    - keyword: EVERY
    - expression:
        quoted_literal: "'2:3'"
    - date_part: DAY_HOUR
- statement_terminator: ;
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
        naked_identifier: myevent
    - keyword: 'ON'
    - keyword: SCHEDULE
    - keyword: EVERY
    - expression:
        numeric_literal: '12'
    - date_part: HOUR
    - keyword: STARTS
    - expression:
        bare_function: CURRENT_TIMESTAMP
        binary_operator: +
        interval_expression:
          keyword: INTERVAL
          expression:
            numeric_literal: '4'
          date_part: HOUR
- statement_terminator: ;
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
        naked_identifier: myevent
    - keyword: 'ON'
    - keyword: SCHEDULE
    - keyword: AT
    - expression:
        bare_function: CURRENT_TIMESTAMP
        binary_operator: +
        interval_expression:
          keyword: INTERVAL
          expression:
            numeric_literal: '1'
          date_part: DAY
    - keyword: DO
    - statement:
        truncate_table:
        - keyword: TRUNCATE
        - keyword: TABLE
        - table_reference:
          - naked_identifier: myschema
          - dot: .
          - naked_identifier: mytable
- statement_terminator: ;
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
        naked_identifier: myevent
    - keyword: DISABLE
- statement_terminator: ;
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
        naked_identifier: myevent
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: yourevent
- statement_terminator: ;
- statement:
    alter_event_statement:
    - keyword: ALTER
    - keyword: EVENT
    - object_reference:
      - naked_identifier: olddb
      - dot: .
      - naked_identifier: myevent
    - keyword: RENAME
    - keyword: TO
    - object_reference:
      - naked_identifier: newdb
      - dot: .
      - naked_identifier: myevent
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_table.sql

ALTER TABLE `users` MODIFY COLUMN `name` varchar(255) NOT NULL, COMMENT "name of user";
ALTER TABLE `users` MODIFY `name` varchar(255) NOT NULL FIRST;
ALTER TABLE `users` RENAME TO `user`;
ALTER TABLE `user` RENAME AS `users`;
ALTER TABLE `users` RENAME `user`;
ALTER TABLE `users` RENAME COLUMN `col_1` TO `del_col_1`;
ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NULL DEFAULT NULL;
ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NOT NULL;
ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) FIRST;
ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) AFTER `name`;
ALTER TABLE `users` DROP COLUMN `age`;
ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE(`col_1`, `col_2`, `col_3`);
ALTER TABLE `foo`.`bar` ADD UNIQUE `index_name`(`col_1`, `col_2`, `col_3`);
ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE INDEX (`col_1`, `col_2`, `col_3`);
ALTER TABLE `foo`.`bar` ADD UNIQUE INDEX `index_name`(`col_1`, `col_2`, `col_3`);
ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`);
ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE = 8;
ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8;
ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8 COMMENT 'index for col_1, col_2, col_3';
ALTER TABLE `foo`.`bar` DROP INDEX `index_name`;
ALTER TABLE `foo`.`bar` RENAME INDEX `index_name` to `new_index_name`;
ALTER TABLE `foo`.`bar` RENAME KEY `key_name` to `new_key_name`;
ALTER TABLE `x` ADD CONSTRAINT FOREIGN KEY(`xk`) REFERENCES `y`(`yk`);
ALTER TABLE `users` ADD COLUMN `active` tinyint(1) DEFAULT '0';
ALTER TABLE `users` ADD COLUMN IF NOT EXISTS `active` tinyint(1) DEFAULT '0';
ALTER TABLE `foo` ADD `bar` INT FIRST;
ALTER TABLE `foo` ADD COLUMN d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL;
ALTER TABLE `foo` ADD COLUMN e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED;
ALTER TABLE `foo` ADD COLUMN d INT AS (a*abs(b));
ALTER TABLE `foo` ADD COLUMN e TEXT AS (substr(c,b,b+1)) STORED;
ALTER TABLE `foo` CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
ALTER TABLE `foo` CONVERT TO CHARACTER SET `utf8mb4` COLLATE `utf8mb4_unicode_ci`;
ALTER TABLE `foo` CONVERT TO CHARACTER SET 'utf8mb4' COLLATE 'utf8mb4_unicode_ci';
ALTER TABLE `foo` CONVERT TO CHARACTER SET "utf8mb4" COLLATE "utf8mb4_unicode_ci";
ALTER TABLE CUST TRUNCATE PARTITION data_dt;
ALTER TABLE `foo` ADD COLUMN geo GEOMETRY NOT NULL SRID 4326;
ALTER TABLE my_table MODIFY num INT SIGNED NOT NULL;
ALTER TABLE my_table MODIFY num INT UNSIGNED NOT NULL;
ALTER TABLE my_table MODIFY num INT ZEROFILL NOT NULL;
ALTER TABLE my_table MODIFY num INT UNSIGNED ZEROFILL NOT NULL;
ALTER TABLE my_table MODIFY num INT ZEROFILL UNSIGNED NOT NULL;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b39a62412d0e11181360096004daad25ba6ab5a6c4bf56c4c7c22e5b3cd307f5
file:
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: MODIFY
    - keyword: COLUMN
    - column_definition:
        quoted_identifier: '`name`'
        data_type:
          data_type_identifier: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '255'
              end_bracket: )
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
    - comma: ','
    - parameter: COMMENT
    - quoted_literal: '"name of user"'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: MODIFY
    - column_definition:
        quoted_identifier: '`name`'
        data_type:
          data_type_identifier: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '255'
              end_bracket: )
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: RENAME
    - keyword: TO
    - table_reference:
        quoted_identifier: '`user`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`user`'
    - keyword: RENAME
    - keyword: AS
    - table_reference:
        quoted_identifier: '`users`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: RENAME
    - table_reference:
        quoted_identifier: '`user`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`col_1`'
    - keyword: TO
    - column_reference:
        quoted_identifier: '`del_col_1`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: CHANGE
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`birthday`'
    - column_definition:
      - quoted_identifier: '`date_of_birth`'
      - data_type:
          data_type_identifier: INT
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '11'
              end_bracket: )
      - column_constraint_segment:
          keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          null_literal: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: CHANGE
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`birthday`'
    - column_definition:
        quoted_identifier: '`date_of_birth`'
        data_type:
          data_type_identifier: INT
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '11'
              end_bracket: )
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: CHANGE
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`birthday`'
    - column_definition:
        quoted_identifier: '`date_of_birth`'
        data_type:
          data_type_identifier: INT
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '11'
              end_bracket: )
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: CHANGE
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`birthday`'
    - column_definition:
        quoted_identifier: '`date_of_birth`'
        data_type:
          data_type_identifier: INT
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '11'
              end_bracket: )
    - keyword: AFTER
    - column_reference:
        quoted_identifier: '`name`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: DROP
    - keyword: COLUMN
    - column_reference:
        quoted_identifier: '`age`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
      - keyword: CONSTRAINT
      - object_reference:
          quoted_identifier: '`index_name`'
      - keyword: UNIQUE
      - bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
        keyword: UNIQUE
        index_reference:
          quoted_identifier: '`index_name`'
        bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
      - keyword: CONSTRAINT
      - object_reference:
          quoted_identifier: '`index_name`'
      - keyword: UNIQUE
      - keyword: INDEX
      - bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
      - keyword: UNIQUE
      - keyword: INDEX
      - index_reference:
          quoted_identifier: '`index_name`'
      - bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
        keyword: INDEX
        index_reference:
          quoted_identifier: '`index_name`'
        bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
        keyword: INDEX
        index_reference:
          quoted_identifier: '`index_name`'
        bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
        index_option:
          keyword: KEY_BLOCK_SIZE
          comparison_operator:
            raw_comparison_operator: '='
          numeric_literal: '8'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
        keyword: INDEX
        index_reference:
          quoted_identifier: '`index_name`'
        bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
        index_option:
          keyword: KEY_BLOCK_SIZE
          numeric_literal: '8'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: ADD
    - table_constraint:
        keyword: INDEX
        index_reference:
          quoted_identifier: '`index_name`'
        bracketed:
        - start_bracket: (
        - column_reference:
            quoted_identifier: '`col_1`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_2`'
        - comma: ','
        - column_reference:
            quoted_identifier: '`col_3`'
        - end_bracket: )
        index_option:
          keyword: KEY_BLOCK_SIZE
          numeric_literal: '8'
          comment_clause:
            keyword: COMMENT
            quoted_literal: "'index for col_1, col_2, col_3'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: DROP
    - keyword: INDEX
    - index_reference:
        quoted_identifier: '`index_name`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: RENAME
    - keyword: INDEX
    - index_reference:
        quoted_identifier: '`index_name`'
    - keyword: to
    - index_reference:
        quoted_identifier: '`new_index_name`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - quoted_identifier: '`foo`'
      - dot: .
      - quoted_identifier: '`bar`'
    - keyword: RENAME
    - keyword: KEY
    - index_reference:
        quoted_identifier: '`key_name`'
    - keyword: to
    - index_reference:
        quoted_identifier: '`new_key_name`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`x`'
    - keyword: ADD
    - table_constraint:
      - keyword: CONSTRAINT
      - keyword: FOREIGN
      - keyword: KEY
      - bracketed:
          start_bracket: (
          column_reference:
            quoted_identifier: '`xk`'
          end_bracket: )
      - keyword: REFERENCES
      - column_reference:
          quoted_identifier: '`y`'
      - bracketed:
          start_bracket: (
          column_reference:
            quoted_identifier: '`yk`'
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        quoted_identifier: '`active`'
        data_type:
          data_type_identifier: tinyint
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '1'
              end_bracket: )
        column_constraint_segment:
          keyword: DEFAULT
          quoted_literal: "'0'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`users`'
    - keyword: ADD
    - keyword: COLUMN
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - column_definition:
        quoted_identifier: '`active`'
        data_type:
          data_type_identifier: tinyint
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '1'
              end_bracket: )
        column_constraint_segment:
          keyword: DEFAULT
          quoted_literal: "'0'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - column_definition:
        quoted_identifier: '`bar`'
        data_type:
          data_type_identifier: INT
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: d
        data_type:
          data_type_identifier: INT
        column_constraint_segment:
        - keyword: GENERATED
        - keyword: ALWAYS
        - keyword: AS
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: a
              binary_operator: '*'
              function:
                function_name:
                  function_name_identifier: abs
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: b
                    end_bracket: )
            end_bracket: )
        - keyword: VIRTUAL
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: e
        data_type:
          data_type_identifier: TEXT
        column_constraint_segment:
        - keyword: GENERATED
        - keyword: ALWAYS
        - keyword: AS
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: substr
                function_contents:
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: c
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                      binary_operator: +
                      numeric_literal: '1'
                  - end_bracket: )
            end_bracket: )
        - keyword: STORED
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: d
        data_type:
          data_type_identifier: INT
        column_constraint_segment:
          keyword: AS
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: a
              binary_operator: '*'
              function:
                function_name:
                  function_name_identifier: abs
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: b
                    end_bracket: )
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: e
        data_type:
          data_type_identifier: TEXT
        column_constraint_segment:
        - keyword: AS
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: substr
                function_contents:
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: c
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                      binary_operator: +
                      numeric_literal: '1'
                  - end_bracket: )
            end_bracket: )
        - keyword: STORED
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - naked_identifier: utf8mb4
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          naked_identifier: utf8mb4_unicode_ci
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: '`utf8mb4`'
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_identifier: '`utf8mb4_unicode_ci`'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: "'utf8mb4'"
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_literal: "'utf8mb4_unicode_ci'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: CONVERT
    - keyword: TO
    - alter_option_segment:
      - keyword: CHARACTER
      - keyword: SET
      - quoted_identifier: '"utf8mb4"'
    - alter_option_segment:
        keyword: COLLATE
        collation_reference:
          quoted_literal: '"utf8mb4_unicode_ci"'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: CUST
    - keyword: TRUNCATE
    - keyword: PARTITION
    - naked_identifier: data_dt
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '`foo`'
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
      - naked_identifier: geo
      - data_type:
          data_type_identifier: GEOMETRY
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: SRID
          numeric_literal: '4326'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - keyword: MODIFY
    - column_definition:
        naked_identifier: num
        data_type:
          data_type_identifier: INT
          keyword: SIGNED
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - keyword: MODIFY
    - column_definition:
        naked_identifier: num
        data_type:
          data_type_identifier: INT
          keyword: UNSIGNED
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - keyword: MODIFY
    - column_definition:
        naked_identifier: num
        data_type:
          data_type_identifier: INT
          keyword: ZEROFILL
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - keyword: MODIFY
    - column_definition:
        naked_identifier: num
        data_type:
        - data_type_identifier: INT
        - keyword: UNSIGNED
        - keyword: ZEROFILL
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - keyword: MODIFY
    - column_definition:
        naked_identifier: num
        data_type:
        - data_type_identifier: INT
        - keyword: ZEROFILL
        - keyword: UNSIGNED
        column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_view.sql

ALTER VIEW v2 AS SELECT c, d FROM v1;
ALTER VIEW v2 AS (SELECT c, d FROM v1);
ALTER VIEW v1 (c,d) AS SELECT a,max(b) FROM t1 GROUP BY a;
ALTER VIEW v2 AS SELECT * FROM t2 WHERE s1 IN (SELECT s1 FROM t1) WITH CHECK OPTION;
ALTER VIEW v2 AS SELECT 1 UNION SELECT 2;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/alter_view.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5f84a1c4fd2ee5090b3c44289a6320ca69fd9c9a01acbf4f8ec76278e3e1a909
file:
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: c
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: d
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: v1
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - bracketed:
        start_bracket: (
        select_statement:
          select_clause:
          - keyword: SELECT
          - select_clause_element:
              column_reference:
                naked_identifier: c
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: d
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: v1
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v1
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: c
      - comma: ','
      - column_reference:
          naked_identifier: d
      - end_bracket: )
    - keyword: AS
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: a
        - comma: ','
        - select_clause_element:
            function:
              function_name:
                function_name_identifier: max
              function_contents:
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: b
                  end_bracket: )
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t1
        groupby_clause:
        - keyword: GROUP
        - keyword: BY
        - column_reference:
            naked_identifier: a
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t2
        where_clause:
          keyword: WHERE
          expression:
            column_reference:
              naked_identifier: s1
            keyword: IN
            bracketed:
              start_bracket: (
              select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    column_reference:
                      naked_identifier: s1
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: t1
              end_bracket: )
    - with_check_options:
      - keyword: WITH
      - keyword: CHECK
      - keyword: OPTION
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: v2
    - keyword: AS
    - set_expression:
      - select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              numeric_literal: '1'
      - set_operator:
          keyword: UNION
      - select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              numeric_literal: '2'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/analyze_table.sql

ANALYZE TABLE some_table;
ANALYZE TABLE some_table1, some_table2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2;
ANALYZE LOCAL TABLE some_table;
ANALYZE LOCAL TABLE some_table1, some_table2;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col;
ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col WITH 10 BUCKETS;
ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2 WITH 10 BUCKETS;
ANALYZE TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;
ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col;
ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col1, some_col2;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/analyze_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: da31a67c607ae72d4f415cdc0446249963c84a488dfa30c0355b264bc81d221c
file:
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
    - keyword: WITH
    - numeric_literal: '10'
    - keyword: BUCKETS
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: UPDATE
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
    - keyword: WITH
    - numeric_literal: '10'
    - keyword: BUCKETS
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: NO_WRITE_TO_BINLOG
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col
- statement_terminator: ;
- statement:
    analyze_table_statement:
    - keyword: ANALYZE
    - keyword: LOCAL
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: DROP
    - keyword: HISTOGRAM
    - keyword: 'ON'
    - column_reference:
        naked_identifier: some_col1
    - comma: ','
    - column_reference:
        naked_identifier: some_col2
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/begin.sql

blocks:BEGIN
select 1;
END blocks~
sqlfluff-3.4.2/test/fixtures/dialects/mysql/begin.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c17580c773c4cd4a54b495af7c9b0c627eeecc3755e81f3e64bcf2954d9fc267
file:
- statement:
    transaction_statement:
      naked_identifier: blocks
      colon: ':'
      keyword: BEGIN
      statement:
        select_statement:
          select_clause:
            keyword: select
            select_clause_element:
              numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
      naked_identifier: blocks
- statement_terminator: '~'

sqlfluff-3.4.2/test/fixtures/dialects/mysql/bit_value_literal.sql

SELECT b'01';
SELECT B'01';
SELECT 0b01;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/bit_value_literal.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: df122f68f5da6f8cb4ec03be5a3c8c0337afa15f4c06a19cd3d96ef603abec6b
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "b'01'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "B'01'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '0b01'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/call_statement.sql

CALL somefunction('a');
CALL somefunction(test('something'));
CALL somefunction('test', @test1, test2, test3('test'), "test4");
CALL somefunction();
CALL `somefunction`('a');
CALL testdb.testfunc(123);

sqlfluff-3.4.2/test/fixtures/dialects/mysql/call_statement.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d38fa4a8a76f50f82728466e6980cf4b3da33742edc200256a9b5dfca1eb3c1a
file:
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a'"
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: test
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      quoted_literal: "'something'"
                    end_bracket: )
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
          - start_bracket: (
          - expression:
              quoted_literal: "'test'"
          - comma: ','
          - expression:
              variable: '@test1'
          - comma: ','
          - expression:
              column_reference:
                naked_identifier: test2
          - comma: ','
          - expression:
              function:
                function_name:
                  function_name_identifier: test3
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      quoted_literal: "'test'"
                    end_bracket: )
          - comma: ','
          - expression:
              quoted_literal: '"test4"'
          - end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: somefunction
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          quoted_identifier: '`somefunction`'
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a'"
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          naked_identifier: testdb
          dot: .
          function_name_identifier: testfunc
        function_contents:
          bracketed:
            start_bracket: (
            expression:
              numeric_literal: '123'
            end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/check_constraint.sql

CREATE TABLE t1 (
    CHECK (c1 <> c2),
    c1 INT CHECK (c1 > 10),
    c2 INT CONSTRAINT c2_positive CHECK (c2 > 0),
    c3 INT CHECK (c3 < 100),
    CONSTRAINT c1_nonzero CHECK (c1 <> 0),
    CHECK (c1 > c3)
);
ALTER TABLE t1 ALTER CHECK c2_positive NOT ENFORCED;
ALTER TABLE t1 DROP CONSTRAINT c1_nonzero;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/check_constraint.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 819548e097fc750b37e657682d090047d51f01a458161bb97c18b26796a51181
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - bracketed:
      - start_bracket: (
      - table_constraint:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: c1
            - comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: c2
            end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c1
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c1
                comparison_operator:
                  raw_comparison_operator: '>'
                numeric_literal: '10'
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c2
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
          - keyword: CONSTRAINT
          - object_reference:
              naked_identifier: c2_positive
          - keyword: CHECK
          - bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c2
                comparison_operator:
                  raw_comparison_operator: '>'
                numeric_literal: '0'
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: c3
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: c3
                comparison_operator:
                  raw_comparison_operator: <
                numeric_literal: '100'
              end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: c1_nonzero
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: c1
              comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
              numeric_literal: '0'
            end_bracket: )
      - comma: ','
      - table_constraint:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: c1
            - comparison_operator:
                raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: c3
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - keyword: ALTER
    - keyword: CHECK
    - object_reference:
        naked_identifier: c2_positive
    - keyword: NOT
    - keyword: ENFORCED
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: t1
    - keyword: DROP
    - keyword: CONSTRAINT
    - object_reference:
        naked_identifier: c1_nonzero
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/check_table.sql

CHECK TABLE some_table FOR UPGRADE;
CHECK TABLE some_table1, some_table2 FOR UPGRADE;
CHECK TABLE some_table QUICK;
CHECK TABLE some_table1, some_table2 QUICK;
CHECK TABLE some_table FAST;
CHECK TABLE some_table1, some_table2 FAST;
CHECK TABLE some_table MEDIUM;
CHECK TABLE some_table1, some_table2 MEDIUM;
CHECK TABLE some_table EXTENDED;
CHECK TABLE some_table1, some_table2 EXTENDED;
CHECK TABLE some_table CHANGED;
CHECK TABLE some_table1, some_table2 CHANGED;
CHECK TABLE some_table FAST QUICK;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/check_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5db8cdf9f2d5bbf23fbb9a36401f9236af4d0b54707dcbbfaa0a41d12905720b
file:
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FOR
    - keyword: UPGRADE
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: FOR
    - keyword: UPGRADE
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: QUICK
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: QUICK
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FAST
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: FAST
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: MEDIUM
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: MEDIUM
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: CHANGED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: CHANGED
- statement_terminator: ;
- statement:
    check_table_statement:
    - keyword: CHECK
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: FAST
    - keyword: QUICK
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/checksum_table.sql

CHECKSUM TABLE some_table QUICK;
CHECKSUM TABLE some_table1, some_table2 QUICK;
CHECKSUM TABLE some_table EXTENDED;
CHECKSUM TABLE some_table1, some_table2 EXTENDED;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/checksum_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 59a13f3bdf5aff9ad8d165d083d825c9d4897885a57ea598ca91a825143195fe
file:
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: QUICK
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: QUICK
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table
    - keyword: EXTENDED
- statement_terminator: ;
- statement:
    checksum_table_statement:
    - keyword: CHECKSUM
    - keyword: TABLE
    - table_reference:
        naked_identifier: some_table1
    - comma: ','
    - table_reference:
        naked_identifier: some_table2
    - keyword: EXTENDED
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/close.sql

CLOSE curcursor;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/close.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: be7b55beb5bbee604a34bad94304e061d9d9078a0755be0f543c4104ba563e86
file:
  statement:
    cursor_open_close_segment:
      keyword: CLOSE
      naked_identifier: curcursor
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/close_qualified.sql

CLOSE `curcursor`;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/close_qualified.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0ff3a6055061615df1ede7802e3b9da4ce61b68f2c3adb5ead2883c43efe7e5d
file:
  statement:
    cursor_open_close_segment:
      keyword: CLOSE
      quoted_identifier: '`curcursor`'
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/mysql/collate.sql

SELECT "a string" COLLATE "utf8mb4_general_ci";

sqlfluff-3.4.2/test/fixtures/dialects/mysql/collate.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a45811c5f37876180a216a61d4fd2c31baf906e06f25659393e277ee50442028 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: '"a string"' keyword: COLLATE collation_reference: quoted_literal: '"utf8mb4_general_ci"' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/column_alias.sql000066400000000000000000000001541503426445100243220ustar00rootroot00000000000000SELECT 1 AS `one`; SELECT 2 AS 'two'; SELECT 3 AS "three"; SELECT 4 AS "four""_with_escaped_double_quotes"; sqlfluff-3.4.2/test/fixtures/dialects/mysql/column_alias.yml000066400000000000000000000030561503426445100243300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7fe1e4deda95ada9f20c59d9a235144ef8343ae59730e55350693659095a0c3c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS quoted_identifier: '`one`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS quoted_identifier: "'two'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: AS quoted_identifier: '"three"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '4' alias_expression: alias_operator: keyword: AS quoted_identifier: '"four""_with_escaped_double_quotes"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_database.sql000066400000000000000000000007511503426445100247460ustar00rootroot00000000000000CREATE DATABASE my_database; CREATE DATABASE IF NOT EXISTS my_database; CREATE DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; CREATE DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N'; CREATE SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; CREATE DATABASE IF NOT EXISTS xxx CHARACTER SET "utf8mb4" COLLATE "utf8mb4_bin"; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_database.yml000066400000000000000000000061261503426445100247520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 28d48d2ecab94ea00fef8f5f3e64a0c0f392dab3b36b7d29548cfdd50bb2a0b2 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - comparison_operator: raw_comparison_operator: '=' - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE comparison_operator: raw_comparison_operator: '=' collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'N'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: xxx - create_option_segment: - keyword: CHARACTER - keyword: SET - quoted_literal: '"utf8mb4"' - create_option_segment: keyword: COLLATE collation_reference: quoted_literal: '"utf8mb4_bin"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_event.sql000066400000000000000000000021231503426445100243160ustar00rootroot00000000000000CREATE EVENT myevent ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO UPDATE myschema.mytable SET mycol = mycol + 1; CREATE EVENT e_totals ON SCHEDULE AT '2006-02-10 23:59:00' DO INSERT INTO test.totals VALUES (NOW()); CREATE EVENT e_hourly ON SCHEDULE EVERY 1 HOUR COMMENT 'Clears out sessions table each hour.' 
DO DELETE FROM site_activity.sessions; CREATE EVENT e_daily ON SCHEDULE EVERY 1 DAY COMMENT 'Saves total number of sessions then clears the table each day' DO BEGIN INSERT INTO site_activity.totals (time, total) SELECT CURRENT_TIMESTAMP, COUNT(*) FROM site_activity.sessions; DELETE FROM site_activity.sessions; END; CREATE EVENT e_call_myproc ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 DAY DO CALL myproc(5, 27); CREATE EVENT e ON SCHEDULE EVERY interval SECOND STARTS CURRENT_TIMESTAMP + INTERVAL 10 SECOND ENDS CURRENT_TIMESTAMP + INTERVAL 2 MINUTE ON COMPLETION PRESERVE DO INSERT INTO d.t1 VALUES ROW(NULL, NOW(), FLOOR(RAND()*100)); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_event.yml000066400000000000000000000213571503426445100243320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c0b681abf8db8b52b35bb4927c019530edc4a0f60dd5f575a39032e35763315 file: - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: myevent - keyword: 'ON' - keyword: SCHEDULE - keyword: AT - expression: bare_function: CURRENT_TIMESTAMP binary_operator: + interval_expression: keyword: INTERVAL expression: numeric_literal: '1' date_part: HOUR - keyword: DO - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: mycol comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: mycol binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: e_totals - keyword: 'ON' - keyword: SCHEDULE - keyword: AT - expression: quoted_literal: "'2006-02-10 23:59:00'" - keyword: DO - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: test - dot: . - naked_identifier: totals - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: e_hourly - keyword: 'ON' - keyword: SCHEDULE - keyword: EVERY - expression: numeric_literal: '1' - date_part: HOUR - comment_clause: keyword: COMMENT quoted_literal: "'Clears out sessions table each hour.'" - keyword: DO - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: site_activity - dot: . 
- naked_identifier: sessions - statement_terminator: ; - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: e_daily - keyword: 'ON' - keyword: SCHEDULE - keyword: EVERY - expression: numeric_literal: '1' - date_part: DAY - comment_clause: keyword: COMMENT quoted_literal: "'Saves total number of sessions then clears the table each\ \ day'" - keyword: DO - statement: transaction_statement: keyword: BEGIN statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: site_activity - dot: . - naked_identifier: totals - bracketed: - start_bracket: ( - column_reference: naked_identifier: time - comma: ',' - column_reference: naked_identifier: total - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: CURRENT_TIMESTAMP - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: site_activity - dot: . - naked_identifier: sessions - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: site_activity - dot: . - naked_identifier: sessions - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: e_call_myproc - keyword: 'ON' - keyword: SCHEDULE - keyword: AT - expression: bare_function: CURRENT_TIMESTAMP binary_operator: + interval_expression: keyword: INTERVAL expression: numeric_literal: '1' date_part: DAY - keyword: DO - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: myproc function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '27' - end_bracket: ) - statement_terminator: ; - statement: create_event_statement: - keyword: CREATE - keyword: EVENT - object_reference: naked_identifier: e - keyword: 'ON' - keyword: SCHEDULE - keyword: EVERY - expression: interval_expression: keyword: interval date_part: SECOND - keyword: STARTS - expression: bare_function: CURRENT_TIMESTAMP binary_operator: + interval_expression: keyword: INTERVAL expression: numeric_literal: '10' date_part: SECOND - keyword: ENDS - expression: bare_function: CURRENT_TIMESTAMP binary_operator: + interval_expression: keyword: INTERVAL expression: numeric_literal: '2' date_part: MINUTE - keyword: 'ON' - keyword: COMPLETION - keyword: PRESERVE - keyword: DO - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: d - dot: . 
- naked_identifier: t1 - values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - null_literal: 'NULL' - comma: ',' - expression: function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: FLOOR function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RAND function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '*' numeric_literal: '100' end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_index.sql000066400000000000000000000011361503426445100243070ustar00rootroot00000000000000CREATE INDEX idx ON tbl (col); CREATE UNIQUE INDEX idx ON tbl (col); CREATE FULLTEXT INDEX idx ON tbl (col); CREATE SPATIAL INDEX idx ON tbl (col); CREATE INDEX idx USING BTREE ON tbl (col); CREATE INDEX idx USING HASH ON tbl (col); CREATE INDEX idx ON tbl (col ASC); CREATE INDEX idx ON tbl (col DESC); CREATE INDEX part_of_name ON customer (name(10)); CREATE INDEX idx ON tbl (col) ALGORITHM DEFAULT; CREATE INDEX idx ON tbl (col) ALGORITHM NOCOPY; CREATE INDEX idx ON tbl (col) ALGORITHM INSTANT; CREATE INDEX idx ON tbl (col) LOCK DEFAULT; CREATE INDEX idx ON tbl ((col1 + col2), (col1 - col2), col1); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_index.yml000066400000000000000000000145701503426445100243170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5ba008b762b05319be63ea1b01262c62e054e8ba120801da8589dc396cd1fdd3 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: SPATIAL - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - index_type: - keyword: USING - keyword: BTREE - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - index_type: - keyword: USING - keyword: HASH - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col keyword: ASC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col keyword: DESC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: part_of_name - keyword: 'ON' - table_reference: naked_identifier: customer - bracketed: start_bracket: ( column_reference: naked_identifier: name bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: DEFAULT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: NOCOPY - statement_terminator: ; - statement: create_index_statement: - keyword: 
CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: INSTANT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: LOCK - keyword: DEFAULT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: - start_bracket: ( - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col1 - binary_operator: '-' - column_reference: naked_identifier: col2 end_bracket: ) - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_role.sql000066400000000000000000000000521503426445100241350ustar00rootroot00000000000000CREATE ROLE IF NOT EXISTS 'example-role'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_role.yml000066400000000000000000000011631503426445100241430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ab69fdfaf7a2669b62e188941317fb7f7d67caea7c4cf122dbe07fd4001e936 file: statement: create_role_statement: - keyword: CREATE - keyword: ROLE - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: quoted_identifier: "'example-role'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table.sql000066400000000000000000000035341503426445100242730ustar00rootroot00000000000000CREATE TABLE `foo` ( b VARCHAR(255) BINARY, `id` int(11) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; CREATE TABLE `foo` ( b VARCHAR(255) BINARY, `id` int(11) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=`utf8mb4` COLLATE=`utf8mb4_unicode_ci`; CREATE TABLE `foo` ( b VARCHAR(255) BINARY, `id` int(11) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET='utf8mb4' COLLATE='utf8mb4_unicode_ci'; CREATE TABLE `foo` ( b VARCHAR(255) BINARY, `id` int(11) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET="utf8mb4" COLLATE="utf8mb4_unicode_ci"; create table `tickets` ( `id` serial primary key, `material_number` varchar(255) default null, `material_name` varchar(255) default null, `date_created` date not null default (current_date), `date_closed` date default null ); create table _ (a int); CREATE TABLE foo SELECT * FROM bar; CREATE TEMPORARY TABLE tbl_name ( id INT PRIMARY KEY AUTO_INCREMENT, col VARCHAR(255) DEFAULT '' NOT NULL, INDEX(col) ) AS SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name ( id INT PRIMARY KEY AUTO_INCREMENT, col VARCHAR(255) DEFAULT '' NOT NULL, INDEX(col) ) SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name (INDEX(col)) AS SELECT id, col FROM table_name; CREATE TEMPORARY TABLE tbl_name (INDEX(col)) SELECT id, col FROM table_name; CREATE TABLE geom ( p POINT SRID 0, g GEOMETRY NOT NULL SRID 4326 ); CREATE TABLE my_table (num INT(5) SIGNED); CREATE TABLE my_table (num INT(5) UNSIGNED); CREATE TABLE my_table (num INT(5) ZEROFILL); CREATE TABLE my_table (num INT(5) UNSIGNED ZEROFILL); CREATE TABLE my_table (num INT(5) ZEROFILL UNSIGNED); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table.yml000066400000000000000000000447731503426445100243070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2c6fe53182dfd033347e294bf471abfe987dc6390f23392a58b88dd8dd0d8a07 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) keyword: BINARY - comma: ',' - column_definition: - quoted_identifier: '`id`' - data_type: data_type_identifier: int bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) keyword: unsigned - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`id`' end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - keyword: DEFAULT - parameter: CHARSET - comparison_operator: raw_comparison_operator: '=' - parameter: utf8mb4 - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - parameter: utf8mb4_unicode_ci - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) keyword: BINARY - comma: ',' - column_definition: - quoted_identifier: '`id`' - data_type: data_type_identifier: int bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) keyword: unsigned - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`id`' end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - keyword: DEFAULT - parameter: CHARSET - comparison_operator: raw_comparison_operator: '=' - parameter: '`utf8mb4`' - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - parameter: '`utf8mb4_unicode_ci`' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) keyword: BINARY - comma: ',' - column_definition: - quoted_identifier: '`id`' - data_type: data_type_identifier: int bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) keyword: unsigned - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`id`' end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - keyword: DEFAULT - parameter: CHARSET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'utf8mb4'" - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: 
"'utf8mb4_unicode_ci'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) keyword: BINARY - comma: ',' - column_definition: - quoted_identifier: '`id`' - data_type: data_type_identifier: int bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) keyword: unsigned - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`id`' end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - keyword: DEFAULT - parameter: CHARSET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"utf8mb4"' - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"utf8mb4_unicode_ci"' - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: quoted_identifier: '`tickets`' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`id`' data_type: data_type_identifier: serial column_constraint_segment: - keyword: primary - keyword: key - comma: ',' - column_definition: quoted_identifier: '`material_number`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: keyword: default null_literal: 'null' - comma: ',' - column_definition: quoted_identifier: '`material_name`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: keyword: default null_literal: 'null' - comma: ',' - column_definition: - quoted_identifier: '`date_created`' - data_type: data_type_identifier: date - column_constraint_segment: - keyword: not - keyword: 'null' - column_constraint_segment: keyword: default bracketed: start_bracket: ( bare_function: current_date end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`date_closed`' data_type: data_type_identifier: date column_constraint_segment: keyword: default null_literal: 'null' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: _ - bracketed: start_bracket: ( column_definition: naked_identifier: a data_type: data_type_identifier: int end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - 
column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - column_definition: - naked_identifier: col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "''" - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - column_definition: - naked_identifier: col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "''" - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: start_bracket: ( table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tbl_name - bracketed: start_bracket: ( table_constraint: keyword: INDEX bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: geom - bracketed: - start_bracket: ( - 
column_definition: naked_identifier: p data_type: data_type_identifier: POINT column_constraint_segment: keyword: SRID numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: g - data_type: data_type_identifier: GEOMETRY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: SRID numeric_literal: '4326' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) keyword: SIGNED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) keyword: UNSIGNED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) keyword: ZEROFILL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: - data_type_identifier: INT - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: UNSIGNED - keyword: ZEROFILL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: - data_type_identifier: INT - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: ZEROFILL - keyword: UNSIGNED end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_column_charset.sql000066400000000000000000000006371503426445100273620ustar00rootroot00000000000000CREATE TABLE t1 ( col1 VARCHAR(5) CHARACTER SET latin1 COLLATE latin1_german1_ci ); CREATE TABLE t1 ( col1 VARCHAR(5) CHARACTER SET `latin1` COLLATE `latin1_german1_ci` ); CREATE TABLE t1 ( col1 VARCHAR(5) CHARACTER SET 'latin1' COLLATE 'latin1_german1_ci' ); CREATE TABLE t1 ( col1 VARCHAR(5) CHARACTER SET "latin1" COLLATE "latin1_german1_ci" ); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_column_charset.yml000066400000000000000000000067621503426445100273710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d383e37b65631c852af5deffd0bdde1132fbaef0e98b247fd01bbc2cece2e8c file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: col1 - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: CHARACTER - keyword: SET - naked_identifier: latin1 - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: latin1_german1_ci end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: col1 - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: CHARACTER - keyword: SET - quoted_identifier: '`latin1`' - column_constraint_segment: keyword: COLLATE collation_reference: quoted_identifier: '`latin1_german1_ci`' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: col1 - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: CHARACTER - keyword: SET - quoted_identifier: "'latin1'" - column_constraint_segment: keyword: COLLATE collation_reference: quoted_literal: "'latin1_german1_ci'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: col1 - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: CHARACTER - keyword: SET - quoted_identifier: '"latin1"' - column_constraint_segment: keyword: COLLATE collation_reference: quoted_literal: '"latin1_german1_ci"' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_constraint_unique.sql000066400000000000000000000002501503426445100301150ustar00rootroot00000000000000CREATE TABLE a( a INT NOT NULL, UNIQUE (a), UNIQUE idx_c(a), UNIQUE KEY (a), UNIQUE KEY idx_a(a), UNIQUE INDEX (a), UNIQUE INDEX idx_b(a) ) sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_constraint_unique.yml000066400000000000000000000044651503426445100301330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dc9e42ca7dac29e96e7c93596f533e5ccef4a0db1fe35bb1f46c8aaff86b57f5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE index_reference: naked_identifier: idx_c bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: KEY - index_reference: naked_identifier: idx_a - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: INDEX - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: idx_b - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_datetime.sql000066400000000000000000000023771503426445100261530ustar00rootroot00000000000000CREATE TABLE `foo` ( created_date DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL, ts1 TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, dt1 DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, ts2 TIMESTAMP DEFAULT CURRENT_TIMESTAMP, dt2 DATETIME DEFAULT CURRENT_TIMESTAMP, ts3 TIMESTAMP DEFAULT 0, dt3 DATETIME DEFAULT 0, ts4 TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, dt4 DATETIME DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, ts5 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, -- default 0 ts6 TIMESTAMP NULL ON UPDATE CURRENT_TIMESTAMP, -- default NULL dt5 DATETIME ON UPDATE CURRENT_TIMESTAMP, -- default NULL dt6 DATETIME NOT NULL ON UPDATE CURRENT_TIMESTAMP, -- default 0 ts7 TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), ts8 TIMESTAMP NULL DEFAULT NULL, ts9 TIMESTAMP NULL DEFAULT 0, ts10 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP, ts11 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP(), ts12 TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00', ts13 TIMESTAMP NULL DEFAULT NOW ON UPDATE NOW, ts14 TIMESTAMP NULL DEFAULT NOW() ON UPDATE NOW(), ts15 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, ts16 TIMESTAMP NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP ) sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_datetime.yml000066400000000000000000000142151503426445100261470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 04711fd76a3d8efbe053513b9a50ce0e2aaf4af87a96e105f521c58ca30810c6 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: - naked_identifier: created_date - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ts1 - keyword: TIMESTAMP - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt1 - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts2 - keyword: TIMESTAMP - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt2 - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts3 - keyword: TIMESTAMP - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: dt3 - keyword: DATETIME - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: ts4 - keyword: TIMESTAMP - keyword: DEFAULT - numeric_literal: '0' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt4 - keyword: DATETIME - keyword: DEFAULT - numeric_literal: '0' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts5 - keyword: TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts6 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt5 - keyword: DATETIME - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt6 - keyword: DATETIME - keyword: NOT - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts7 - keyword: TIMESTAMP - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts8 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ts9 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: ts10 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts11 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts12 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - quoted_literal: "'0000-00-00 00:00:00'" - comma: ',' - column_definition: - naked_identifier: ts13 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: NOW - 
keyword: 'ON' - keyword: UPDATE - keyword: NOW - comma: ',' - column_definition: - naked_identifier: ts14 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts15 - keyword: TIMESTAMP - keyword: NOT - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts16 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_equals_optional.sql000066400000000000000000000000671503426445100275500ustar00rootroot00000000000000CREATE TABLE a ( id INT ) COLLATE utf8_general_ci; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_equals_optional.yml000066400000000000000000000014331503426445100275500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 58a605d729c2c4dda00b1d884f06b88b929780984ff660f716d2173b480119dc file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT end_bracket: ) - parameter: COLLATE - parameter: utf8_general_ci statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_generated_column.sql000066400000000000000000000005051503426445100276610ustar00rootroot00000000000000CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL, e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED, PRIMARY KEY (a) ); CREATE TABLE t1 ( a INT, b INT, c TEXT, d INT AS (a*abs(b)), e TEXT AS (substr(c,b,b+1)) STORED COMMENT 'foo', PRIMARY KEY (a) ); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_generated_column.yml000066400000000000000000000140351503426445100276660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 44f2baed4cdbb769fa54f93e5249f2d64ea838a4da668d9bc98e870bb45e0fa2 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - keyword: VIRTUAL - comma: ',' - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: keyword: AS bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - comma: ',' - column_definition: - naked_identifier: e - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'foo'" - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_index.sql000066400000000000000000000005171503426445100254600ustar00rootroot00000000000000CREATE 
TABLE foo ( id INT UNSIGNED AUTO_INCREMENT NOT NULL, a TEXT(500), b INT, c INT, PRIMARY KEY (id) COMMENT 'primary key (id)', FULLTEXT `idx_a` (a) COMMENT 'index (a)', INDEX `idx_prefix_a` (a(20)), INDEX `idx_b` (b) COMMENT 'index (b)', INDEX `idx_desc_b` (b DESC), INDEX `idx_asc_c` (c ASC) ) ENGINE=InnoDB; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_index.yml000066400000000000000000000073541503426445100254700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99426d9a1d7e06af48cd81e5c09abd4f275ecd5dec99e2f398195bd08c14f018 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT keyword: UNSIGNED - column_constraint_segment: keyword: AUTO_INCREMENT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: TEXT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '500' end_bracket: ) - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: INT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - index_option: comment_clause: keyword: COMMENT quoted_literal: "'primary key (id)'" - comma: ',' - table_constraint: keyword: FULLTEXT index_reference: quoted_identifier: '`idx_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (a)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_prefix_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (b)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_desc_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b keyword: DESC end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_asc_c`' bracketed: start_bracket: ( column_reference: naked_identifier: c keyword: ASC end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_null_position.sql000066400000000000000000000005431503426445100272460ustar00rootroot00000000000000CREATE TABLE IF NOT EXISTS db_name.table_name ( updated_at1 timestamp default CURRENT_TIMESTAMP not null on update CURRENT_TIMESTAMP, updated_at2 timestamp not null default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, updated_at3 timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP not null, updated_at4 
timestamp ); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_null_position.yml000066400000000000000000000033621503426445100272520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 15f782abb1331d16b725b8b668192875e574c8c085ef79377398499dd95efbf1 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db_name - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: - naked_identifier: updated_at1 - keyword: timestamp - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: not - keyword: 'null' - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: updated_at2 - keyword: timestamp - keyword: not - keyword: 'null' - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: updated_at3 - keyword: timestamp - keyword: default - keyword: CURRENT_TIMESTAMP - keyword: 'on' - keyword: update - keyword: CURRENT_TIMESTAMP - keyword: not - keyword: 'null' - comma: ',' - column_definition: naked_identifier: updated_at4 keyword: timestamp - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_primary_foreign_keys.sql000066400000000000000000000021461503426445100306000ustar00rootroot00000000000000CREATE TABLE parent ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE child ( id INT, parent_id INT, INDEX par_ind (parent_id), FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE CASCADE ); CREATE TABLE product ( category INT NOT NULL, id INT NOT NULL, price DECIMAL, PRIMARY KEY(category, id) ); CREATE TABLE customer ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE product_order ( product_category INT NOT NULL, product_id INT NOT NULL, customer_id INT NOT NULL, PRIMARY KEY(no), -- INDEX (product_category, product_id), -- INDEX (customer_id), FOREIGN KEY (product_category, product_id) REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT, FOREIGN KEY (customer_id) REFERENCES customer(id) ); CREATE TABLE source_tag_assoc ( source_id INT UNSIGNED NOT NULL, tag_id INT UNSIGNED NOT NULL, PRIMARY KEY (source_id, tag_id), FOREIGN KEY (source_id) REFERENCES source (id) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES source_tag (id) ON DELETE CASCADE ); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_primary_foreign_keys.yml000066400000000000000000000213131503426445100305770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fb91070986ad8b8e82dd3bd9b793200a0ed9398192aa045f0361fdcae06af34b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: parent - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: child - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: INT - comma: ',' - table_constraint: keyword: INDEX index_reference: naked_identifier: par_ind bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: parent - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: price data_type: data_type_identifier: DECIMAL - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: customer - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product_order - bracketed: - start_bracket: ( - column_definition: naked_identifier: product_category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: product_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: customer_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: 'no' end_bracket: ) - comma: ',' - table_constraint: - 
keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_category - comma: ',' - column_reference: naked_identifier: product_id - end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: customer_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: customer - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: source_tag_assoc - bracketed: - start_bracket: ( - column_definition: naked_identifier: source_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: tag_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: source_id - comma: ',' - column_reference: naked_identifier: tag_id - end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: source_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: tag_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source_tag - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_unique_key.sql000066400000000000000000000000501503426445100265170ustar00rootroot00000000000000create table a( b int unique key ); sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_table_unique_key.yml000066400000000000000000000014721503426445100265320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: af44e65430301a281d5baa8730323d7ad9c796dcb0140ca1771982c21ee78e69 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: b data_type: data_type_identifier: int column_constraint_segment: - keyword: unique - keyword: key end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_trigger.sql000066400000000000000000000032551503426445100246470ustar00rootroot00000000000000CREATE TRIGGER delete_members_after_transactions AFTER DELETE ON transactions FOR EACH ROW DELETE FROM members WHERE username NOT IN (SELECT UNIQUE(username) FROM transactions); CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW BEGIN DELETE FROM some_table; INSERT INTO some_table; END; CREATE TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER IF NOT EXISTS some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW FOLLOWS some_other_trigger DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW PRECEDES some_other_trigger DELETE FROM other_table; CREATE DEFINER=`root`@`127.0.0.1` TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE DEFINER=CURRENT_USER TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE TRIGGER tr_downloads_i_copy_new_fields BEFORE INSERT ON downloads FOR EACH ROW BEGIN SET NEW.createdate = UNIX_TIMESTAMP(NEW.createdate_m); END; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_trigger.yml000066400000000000000000000314731503426445100246540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 12dee21fcf99ef3e3f088755a81789325e2979c98c98d67269233660a7c700b5 file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: delete_members_after_transactions - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: transactions - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: members where_clause: keyword: WHERE expression: - column_reference: naked_identifier: username - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: UNIQUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: username end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: transactions end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: transaction_statement: keyword: BEGIN statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - keyword: IF - keyword: NOT - keyword: EXISTS - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - 
keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: FOLLOWS - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: PRECEDES - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`root`' - at_sign_literal: '@' - quoted_identifier: '`127.0.0.1`' - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . - naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: keyword: CURRENT_USER - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: variable: '@sum' binary_operator: + column_reference: - naked_identifier: NEW - dot: . - naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: tr_downloads_i_copy_new_fields - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: downloads - keyword: FOR - keyword: EACH - keyword: ROW - statement: transaction_statement: keyword: BEGIN statement: set_statement: - keyword: SET - keyword: NEW - dot: . - variable: createdate - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: UNIX_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: createdate_m end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_user.sql000066400000000000000000000065431503426445100241650ustar00rootroot00000000000000CREATE USER jeffrey; CREATE USER IF NOT EXISTS jeffrey; CREATE USER 'prj_svc' IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS'; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'password'; CREATE USER "jeffrey"@"localhost" IDENTIFIED BY "password"; CREATE USER `jeffrey`@`localhost` IDENTIFIED BY "password"; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'new_password' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password' PASSWORD EXPIRE INTERVAL 180 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 2; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'new_password1', 'jeanne'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password2' REQUIRE X509 WITH MAX_QUERIES_PER_HOUR 60 PASSWORD HISTORY 5 ACCOUNT LOCK; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'password'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com' AND IDENTIFIED WITH authentication_fido; CREATE USER user IDENTIFIED WITH authentication_fido INITIAL AUTHENTICATION IDENTIFIED BY RANDOM PASSWORD; CREATE USER 'joe'@'10.0.0.1' DEFAULT ROLE administrator, developer; CREATE USER 'jeffrey'@'localhost' REQUIRE NONE; CREATE USER 'jeffrey'@'localhost' REQUIRE SSL; CREATE USER 'jeffrey'@'localhost' REQUIRE X509; CREATE USER 'jeffrey'@'localhost' REQUIRE ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com' AND ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com' AND CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' WITH MAX_QUERIES_PER_HOUR 500 MAX_UPDATES_PER_HOUR 100; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE NEVER; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE INTERVAL 180 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY 6; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL 360 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT OPTIONAL; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT DEFAULT; CREATE USER 'jeffrey'@'localhost' FAILED_LOGIN_ATTEMPTS 4 PASSWORD_LOCK_TIME 2; CREATE USER 'jon'@'localhost' COMMENT 'Some information about Jon'; CREATE USER 'jim'@'localhost' ATTRIBUTE '{"fname": "James", "lname": "Scott", "phone": "123-456-7890"}'; 
sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_user.yml000066400000000000000000000340421503426445100241620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cfac964903c8d0523874124c9112fc8b2656dee2662863d82245c0c631d0b947 file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: "'prj_svc'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: AWSAuthenticationPlugin - keyword: AS - quoted_literal: "'RDS'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_literal: '"jeffrey"' - at_sign_literal: '@' - quoted_literal: '"localhost"' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: '`jeffrey`' - at_sign_literal: '@' - quoted_identifier: '`localhost`' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '3' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'new_password1'" - comma: ',' - role_reference: - quoted_identifier: "'jeanne'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password2'" - keyword: REQUIRE - keyword: X509 - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '60' - keyword: PASSWORD - 
keyword: HISTORY - numeric_literal: '5' - keyword: ACCOUNT - keyword: LOCK - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - keyword: INITIAL - keyword: AUTHENTICATION - keyword: IDENTIFIED - keyword: BY - keyword: RANDOM - keyword: PASSWORD - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'joe'" - at_sign_literal: '@' - quoted_identifier: "'10.0.0.1'" - keyword: DEFAULT - keyword: ROLE - role_reference: naked_identifier: administrator - comma: ',' - role_reference: naked_identifier: developer - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: NONE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SSL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: X509 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - 
quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - keyword: AND - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - keyword: AND - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '500' - keyword: MAX_UPDATES_PER_HOUR - numeric_literal: '100' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: NEVER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - numeric_literal: '6' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL 
- numeric_literal: '360' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: OPTIONAL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '4' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jon'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: COMMENT - quoted_literal: "'Some information about Jon'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jim'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: ATTRIBUTE - quoted_literal: "'{\"fname\": \"James\", \"lname\": \"Scott\", \"phone\": \"\ 123-456-7890\"}'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_view.sql000066400000000000000000000007251503426445100241550ustar00rootroot00000000000000CREATE VIEW v1 (c,d) AS SELECT a,b FROM t1; CREATE OR REPLACE VIEW v1 (c,d,e,f) AS SELECT a,b, a IN (SELECT a+2 FROM t1), a = all (SELECT a FROM t1) FROM t1; CREATE VIEW v2 AS SELECT a FROM t1 WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS (SELECT a FROM t1) WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS SELECT 1 UNION SELECT 2; CREATE VIEW vw_test AS WITH testing_cte as ( SELECT a , b FROM t1 ) SELECT a, b from testing_cte ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/create_view.yml000066400000000000000000000173061503426445100241620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2fb2b7b92145a270d64b62a485c50214b9a7c2db798ad5427f73a153042c91a7 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - comma: ',' - column_reference: naked_identifier: e - comma: ',' - column_reference: naked_identifier: f - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a binary_operator: + numeric_literal: '2' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: all function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - set_expression: - select_statement: select_clause: 
keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: vw_test - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: testing_cte keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testing_cte - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/deallocate_prepare.sql000066400000000000000000000000321503426445100254620ustar00rootroot00000000000000DEALLOCATE PREPARE dynam; sqlfluff-3.4.2/test/fixtures/dialects/mysql/deallocate_prepare.yml000066400000000000000000000010301503426445100254630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47c2b5531df3cb85f6eb69f699db36588afa339a5cde82a5058f928718671932 file: statement: deallocate_segment: - keyword: DEALLOCATE - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_condition.sql000066400000000000000000000000621503426445100253170ustar00rootroot00000000000000DECLARE random_condition_name CONDITION FOR 1051; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_condition.yml000066400000000000000000000011271503426445100253240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7bdce909905e143f2896cccb3ca99f0b52c2f36ae44e5cbbbf19ab71e621efc1 file: statement: declare_statement: - keyword: DECLARE - naked_identifier: random_condition_name - keyword: CONDITION - keyword: FOR - numeric_literal: '1051' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_continue_handler_sqlexception.sql000066400000000000000000000000771503426445100314560ustar00rootroot00000000000000DECLARE continue handler for sqlexception begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_continue_handler_sqlexception.yml000066400000000000000000000016311503426445100314550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0ef9e8623111d364c44fed8e8f3660ed551cf7158ff3dc87e02ab81cd98a02e0 file: - statement: declare_statement: - keyword: DECLARE - keyword: continue - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_cursor.sql000066400000000000000000000000421503426445100246440ustar00rootroot00000000000000DECLARE test CURSOR FOR SELECT 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_cursor.yml000066400000000000000000000013131503426445100246500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55c9581409fbc9d064dd492a2842e4df7d4102077d475bb9f50a7a33906204cd file: statement: declare_statement: - keyword: DECLARE - naked_identifier: test - keyword: CURSOR - keyword: FOR - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_default_numeric.sql000066400000000000000000000000331503426445100264750ustar00rootroot00000000000000DECLARE abc int default 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_default_numeric.yml000066400000000000000000000011301503426445100264760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a302920845b75b85f450540a7283e49735d86a7878f913048cc3adf4efc5adfc file: statement: declare_statement: - keyword: DECLARE - variable: abc - data_type: data_type_identifier: int - keyword: default - numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_default_quotedliteral.sql000066400000000000000000000000451503426445100277140ustar00rootroot00000000000000DECLARE abc longtext default 'test'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_default_quotedliteral.yml000066400000000000000000000011411503426445100277140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6d67b2c5474e3a36857b12ef0b488a847015db1c9b9e0f45ea154871bf736428 file: statement: declare_statement: - keyword: DECLARE - variable: abc - data_type: data_type_identifier: longtext - keyword: default - quoted_literal: "'test'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_exit_handler_sqlexception.sql000066400000000000000000000000731503426445100305770ustar00rootroot00000000000000DECLARE exit handler for sqlexception begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_exit_handler_sqlexception.yml000066400000000000000000000016251503426445100306050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fa28b2deb8b2ba8bbc00b29368ae51e09b2c142b0654a70ae093cb9e37d507b0 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_local_variable.sql000066400000000000000000000000211503426445100262630ustar00rootroot00000000000000DECLARE abc int; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_local_variable.yml000066400000000000000000000010461503426445100262750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9f8d9b37c576c621698357759133c61224aedf19208cfaa65ef8ac6ca395b35 file: statement: declare_statement: keyword: DECLARE variable: abc data_type: data_type_identifier: int statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_undo_handler_sqlexception.sql000066400000000000000000000000731503426445100305730ustar00rootroot00000000000000DECLARE undo handler for sqlexception begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/declare_undo_handler_sqlexception.yml000066400000000000000000000016251503426445100306010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9ec056f47e16db2062f9cfe150451dd9dc8bb603e0c028f404f7b7cd77a7ee3a file: - statement: declare_statement: - keyword: DECLARE - keyword: undo - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delete_multitable.sql000066400000000000000000000017721503426445100253470ustar00rootroot00000000000000DELETE a FROM a JOIN b USING (id) WHERE b.name = 'example'; DELETE FROM somelog WHERE user = 'jcole' ORDER BY timestamp_column LIMIT 1; DELETE LOW_PRIORITY QUICK IGNORE a FROM a JOIN b USING (id) WHERE b.name = 'example'; DELETE FROM a PARTITION (p) WHERE b.name = 'example'; -- Multiple-Table Syntax 1 DELETE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE LOW_PRIORITY QUICK IGNORE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; -- Multiple-Table Syntax 2 DELETE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE LOW_PRIORITY QUICK IGNORE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE a1, a2 FROM t1 AS a1 INNER JOIN t2 AS a2 WHERE a1.id=a2.id; -- .* after table name DELETE t1.*, t2.* FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE FROM t1.*, t2.* USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delete_multitable.yml000066400000000000000000000353531503426445100253530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3dea0ed0320789e1e32f39116489553f21b5f9bdd65386036c72ea39db7b7aed file: - statement: delete_statement: keyword: DELETE delete_target_table: table_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . 
- naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: somelog where_clause: keyword: WHERE expression: column_reference: naked_identifier: user comparison_operator: raw_comparison_operator: '=' quoted_literal: "'jcole'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: timestamp_column limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - delete_target_table: table_reference: naked_identifier: a - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: p end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: a1 - comma: ',' - delete_target_table: table_reference: naked_identifier: a2 - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: alias_operator: keyword: AS naked_identifier: a1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 alias_expression: alias_operator: keyword: AS naked_identifier: a2 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - delete_target_table: table_reference: naked_identifier: t1 dot: . star: '*' - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 dot: . star: '*' - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - delete_target_table: table_reference: naked_identifier: t1 dot: . star: '*' - comma: ',' - delete_target_table: table_reference: naked_identifier: t2 dot: . star: '*' - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . 
- naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_function.sql000066400000000000000000000002371503426445100255410ustar00rootroot00000000000000DELIMITER ~ CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_function.yml000066400000000000000000000032611503426445100255430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e86cfba356795fe4599cef806bd5c5c782881b14785deacab9bef35c96bef07b file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_procedure.sql000066400000000000000000000002311503426445100256760ustar00rootroot00000000000000DELIMITER ~ CREATE PROCEDURE `testprocedure`(test int) DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_procedure.yml000066400000000000000000000031571503426445100257120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f8885557a4ee5d4d90b6fc6f07983158a33d5c9eaadcf2cdcae85be72280973c file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_select.sql000066400000000000000000000000421503426445100251650ustar00rootroot00000000000000DELIMITER ~ SELECT 1~ DELIMITER ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/delimiter_select.yml000066400000000000000000000013501503426445100251720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7b5e6298ad0f76791911956fe2dcb7762a8483f6dd646d30160c4e0c39a9b2e file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_event.sql000066400000000000000000000000551503426445100240210ustar00rootroot00000000000000DROP EVENT test; DROP EVENT IF EXISTS test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_event.yml000066400000000000000000000013641503426445100240270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c51ea105f48aeac608b37754da7a991b2353072011554a12d4dcc01e2b4a04c4 file: - statement: drop_event_statement: - keyword: DROP - keyword: EVENT - object_reference: naked_identifier: test - statement_terminator: ; - statement: drop_event_statement: - keyword: DROP - keyword: EVENT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: test - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_function.sql000066400000000000000000000000501503426445100245200ustar00rootroot00000000000000DROP FUNCTION IF EXISTS `testfunction`; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_function.yml000066400000000000000000000011431503426445100245260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36f8ff2a93f64a4996cc8a997d1048029f86ffcd5114b0db81ca5b4c984860bf file: statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: quoted_identifier: '`testfunction`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index.sql000066400000000000000000000000451503426445100240060ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo`; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index.yml000066400000000000000000000012621503426445100240120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55a1c03846735c33fdff186577506b3f00b63f73d930c2bf8974f73d4d0f7cea file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index_with_algorithm.sql000066400000000000000000000005121503426445100271060ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo` ALGORITHM = DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = COPY; DROP INDEX `test` ON `table1`.`foo` ALGORITHM DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM COPY; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index_with_algorithm.yml000066400000000000000000000051241503426445100271140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2f5fbf257ef1ab0490d88f2c0a9840283f4ea5595471e662572591e7fb25dd6b file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: COPY - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . 
- quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: COPY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index_with_lock.sql000066400000000000000000000005441503426445100260550ustar00rootroot00000000000000DROP INDEX `test` ON `table1` LOCK = DEFAULT; DROP INDEX `test` ON `table1` LOCK = NONE; DROP INDEX `test` ON `table1` LOCK = SHARED; DROP INDEX `test` ON `table1` LOCK = EXCLUSIVE; DROP INDEX `test` ON `table1` LOCK DEFAULT; DROP INDEX `test` ON `table1` LOCK NONE; DROP INDEX `test` ON `table1` LOCK SHARED; DROP INDEX `test` ON `table1` LOCK EXCLUSIVE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_index_with_lock.yml000066400000000000000000000056161503426445100260640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c91a2276d5d611e727906a9c2b014e808dc5fb97fe6f238268b7fed429adb32 file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: EXCLUSIVE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK 
- keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: EXCLUSIVE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_prepare.sql000066400000000000000000000000241503426445100243320ustar00rootroot00000000000000DROP PREPARE dynam; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_prepare.yml000066400000000000000000000010221503426445100243330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6fb0ffbafdc168dc91f61b1dc45a6ae2bb23726da0391e6a2bb85fb0c72e481d file: statement: deallocate_segment: - keyword: DROP - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_procedure.sql000066400000000000000000000000521503426445100246650ustar00rootroot00000000000000DROP PROCEDURE IF EXISTS `testprocedure`; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_procedure.yml000066400000000000000000000011511503426445100246700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6226dad29f08fa6bc945f7ffbd3ee824cfb9513fec02f752efccc635d8bfc0e6 file: statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - object_reference: quoted_identifier: '`testprocedure`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_table.sql000066400000000000000000000002141503426445100237640ustar00rootroot00000000000000DROP TEMPORARY TABLE IF EXISTS t; DROP TEMPORARY TABLE IF EXISTS t, t2; DROP TABLE IF EXISTS t RESTRICT; DROP TABLE IF EXISTS t CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_table.yml000066400000000000000000000025041503426445100237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 079d162da6241fd53a7106c843d2c5b885c9fa93adf3e4e1fedc73f706bc00c4 file: - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: RESTRICT - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_trigger.sql000066400000000000000000000001511503426445100243400ustar00rootroot00000000000000DROP TRIGGER trigger_name; DROP TRIGGER schema_name.trigger_name; DROP TRIGGER IF EXISTS trigger_name; sqlfluff-3.4.2/test/fixtures/dialects/mysql/drop_trigger.yml000066400000000000000000000017231503426445100243500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ea2814154a2f377dfa41e981612ca02ec575bbb9384d4356a6f87d69d8367df file: - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: - naked_identifier: schema_name - dot: . - naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt.sql000066400000000000000000000000161503426445100262440ustar00rootroot00000000000000execute test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt.yml000066400000000000000000000007721503426445100262570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 645d3d2f6b6a99f0b56f9e61558eddbd807982b88dd4b5a85926d08ad6fde973 file: statement: execute_segment: keyword: execute naked_identifier: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt_using.sql000066400000000000000000000000321503426445100274470ustar00rootroot00000000000000execute test using @test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt_using.yml000066400000000000000000000010471503426445100274600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c84c2b918a06a90ade5b2628eaf69ac7ee1220e235e947be52d9663ea1717e60 file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt_using_multiple_variable.sql000066400000000000000000000000421503426445100332300ustar00rootroot00000000000000execute test using @test, @test1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/execute_prepared_stmt_using_multiple_variable.yml000066400000000000000000000011211503426445100332310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e883899cefef9dcaa1775b3ebae0ba07aa11a5b9fb3c10843c74b452f7e9921c file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' - comma: ',' - variable: '@test1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/explain.sql000066400000000000000000000002271503426445100233150ustar00rootroot00000000000000explain select 1; explain update tbl set foo = 1 where bar = 2; explain delete from tbl where foo = 1; explain insert into tbl (col1) values (123); sqlfluff-3.4.2/test/fixtures/dialects/mysql/explain.yml000066400000000000000000000045331503426445100233230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 77b5d16c2cddca62f5fb8d333728260a2f33be4f0e46cecfdc10d68bf5e4d542 file: - statement: explain_statement: keyword: explain select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain update_statement: keyword: update table_reference: naked_identifier: tbl set_clause_list: keyword: set set_clause: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' where_clause: keyword: where expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: explain_statement: keyword: explain delete_statement: keyword: delete from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch.sql000066400000000000000000000000331503426445100227410ustar00rootroot00000000000000fetch curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch.yml000066400000000000000000000010531503426445100227460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ffb648d6082513210891e9d343ab35d75b56ddc97fee1356ec805b821eb52fbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_from.sql000066400000000000000000000000401503426445100237620ustar00rootroot00000000000000fetch from curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_from.yml000066400000000000000000000010771503426445100237770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac2545f40f18314609db52b54bcf5af85eeb4a464c012915e157261c76d0acdb file: statement: cursor_fetch_segment: - keyword: fetch - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_multiple.sql000066400000000000000000000000421503426445100246540ustar00rootroot00000000000000fetch curcursor into test, test2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_multiple.yml000066400000000000000000000011221503426445100246560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8f2e64f6c4bfeb9e324e9f8fbf9ab2024c805ba70341e9763ca33556c367cbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test - comma: ',' - variable: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_next_from.sql000066400000000000000000000000451503426445100250250ustar00rootroot00000000000000fetch next from curcursor into test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_next_from.yml000066400000000000000000000011231503426445100250250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5799de98d4fbb26b52d336da9b39a4a58ecbce7623c1a8aefa4ccf02ba3ec0bc file: statement: cursor_fetch_segment: - keyword: fetch - keyword: next - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_session.sql000066400000000000000000000000341503426445100245050ustar00rootroot00000000000000fetch curcursor into @test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_session.yml000066400000000000000000000010561503426445100245140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0282fb848f6361a6299ff7b64ab47fb43966975f758c1e30e5ed1f55e66ee7ff file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_session_multiple.sql000066400000000000000000000000441503426445100264210ustar00rootroot00000000000000fetch curcursor into @test, @test2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/fetch_session_multiple.yml000066400000000000000000000011301503426445100264200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b21f217493fdc862ebf564013c0b16d8d4139163bc5a9910cb3f586f45c04114 file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' - comma: ',' - variable: '@test2' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/flush.sql000066400000000000000000000006411503426445100227760ustar00rootroot00000000000000FLUSH LOGS; FLUSH NO_WRITE_TO_BINLOG BINARY LOGS, ENGINE LOGS, ERROR LOGS, GENERAL LOGS, HOSTS; FLUSH LOCAL PRIVILEGES, OPTIMIZER_COSTS, RELAY LOGS, SLOW LOGS, STATUS, USER_RESOURCES; FLUSH RELAY LOGS FOR CHANNEL my_channel; FLUSH TABLES; FLUSH TABLES WITH READ LOCK; FLUSH TABLES table1; FLUSH TABLES table1, `foo`.`bar`; FLUSH TABLES table1, `foo`.`bar` WITH READ LOCK; FLUSH TABLES table1, `foo`.`bar` FOR EXPORT; sqlfluff-3.4.2/test/fixtures/dialects/mysql/flush.yml000066400000000000000000000054371503426445100230100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 19d48e39bc87b05358abcf06a2d2edd289d9e249da43532890fd301fe03d83df file: - statement: flush_statement: - keyword: FLUSH - keyword: LOGS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: NO_WRITE_TO_BINLOG - keyword: BINARY - keyword: LOGS - comma: ',' - keyword: ENGINE - keyword: LOGS - comma: ',' - keyword: ERROR - keyword: LOGS - comma: ',' - keyword: GENERAL - keyword: LOGS - comma: ',' - keyword: HOSTS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: LOCAL - keyword: PRIVILEGES - comma: ',' - keyword: OPTIMIZER_COSTS - comma: ',' - keyword: RELAY - keyword: LOGS - comma: ',' - keyword: SLOW - keyword: LOGS - comma: ',' - keyword: STATUS - comma: ',' - keyword: USER_RESOURCES - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: RELAY - keyword: LOGS - keyword: FOR - keyword: CHANNEL - object_reference: naked_identifier: my_channel - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: FOR - keyword: EXPORT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index.sql000066400000000000000000000000601503426445100241350ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX (idx_index); sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index.yml000066400000000000000000000020121503426445100241360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 76f81441b5f5b820ccbdc7248648433a05eac5d802017015e52e7d2adbfe9444 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_group_by.sql000066400000000000000000000000661503426445100267170ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_group_by.yml000066400000000000000000000021031503426445100267130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 87773b30b37f699859714e573179e58c6f5dafb6191d5538e681e5831418af67 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_join.sql000066400000000000000000000000621503426445100260240ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_join.yml000066400000000000000000000020561503426445100260330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b6b94cfaf91b378bf96312ae5b473616021fb82e21522046a4e9543d1191e027 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_order_by.sql000066400000000000000000000000661503426445100266760ustar00rootroot00000000000000SELECT * FROM onetable FORCE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_for_order_by.yml000066400000000000000000000021031503426445100266720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d0c09a9cf14085e00ca694807253593f891828a1f101948491a5ad7083d4546e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_left_join.sql000066400000000000000000000001671503426445100261760ustar00rootroot00000000000000SELECT onetable.f1, twotable.f1 FROM onetable left join twotable FORCE INDEX (idx_index) on onetable.f1 = twotable.f1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_index_left_join.yml000066400000000000000000000035031503426445100261750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7156f479a87ebed7b8e34b4cd7016aadd3dab7e0bd6b0ad14bc7d4795fd2de90 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: onetable - dot: . - naked_identifier: f1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: twotable - dot: . - naked_identifier: f1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable join_clause: - keyword: left - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: twotable index_hint_clause: - keyword: FORCE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: onetable - dot: . - naked_identifier: f1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: twotable - dot: . 
- naked_identifier: f1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_key.sql000066400000000000000000000000561503426445100236230ustar00rootroot00000000000000SELECT * FROM onetable FORCE KEY (idx_index); sqlfluff-3.4.2/test/fixtures/dialects/mysql/force_key.yml000066400000000000000000000020101503426445100236150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4141b5f48dff30ccb10e63d5fffe11c7ba21cecf1f39f273af9cd2de9e2ef10f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: FORCE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: idx_index end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_comment.sql000066400000000000000000000001571503426445100252260ustar00rootroot00000000000000CREATE FUNCTION `testfunction`(var1 int) RETURNS longtext COMMENT 'this is a comment' DETERMINISTIC BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_comment.yml000066400000000000000000000022151503426445100252250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f07d63ae8143264331610358ea3a6e91b3340a65e311f82bffdc8d05dc5f1438 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( parameter: var1 data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - comment_clause: keyword: COMMENT quoted_literal: "'this is a comment'" - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_definer.sql000066400000000000000000000001501503426445100251710ustar00rootroot00000000000000CREATE DEFINER=`test`@`%` FUNCTION `testfunction`() RETURNS longtext DETERMINISTIC BEGIN SELECT 1; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_definer.yml000066400000000000000000000026171503426445100252050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 63402a8d737600b4ceec102c8982aca0423da4d2e24e7cd7893a8d394c15091c file: - statement: create_function_statement: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`test`' - at_sign_literal: '@' - quoted_identifier: '`%`' - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_modifies_sql.sql000066400000000000000000000001521503426445100262350ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC MODIFIES SQL DATA BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_modifies_sql.yml000066400000000000000000000026341503426445100262460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c950f8fdbb6c66257b3ecd2a33e1db18ef6b958973246fbc0e17486c6e30758e file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: MODIFIES - keyword: SQL - keyword: DATA - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_no_sql.sql000066400000000000000000000001371503426445100250550ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC NO SQL BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_no_sql.yml000066400000000000000000000026021503426445100250560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7cf6b2f6629057890ce653e7f421bdab2d9b541afed3344da33927c25e3f2d4e file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: 'NO' - keyword: SQL - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_notdeterministic.sql000066400000000000000000000001341503426445100271430ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext NOT DETERMINISTIC BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_notdeterministic.yml000066400000000000000000000025541503426445100271550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a419b4154858e8c36166a450bc605ade73c8c8bd9cf192c2581831c5ba5213d file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: NOT - keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_reads_sql.sql000066400000000000000000000001471503426445100255400ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC READS SQL DATA BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_reads_sql.yml000066400000000000000000000026311503426445100255420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4f21236c7ba039e1347af4a274638f535a951ab5775f1bd9ced459f5a19dd5d8 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: READS - keyword: SQL - keyword: DATA - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_return.sql000066400000000000000000000001411503426445100250740ustar00rootroot00000000000000CREATE FUNCTION `testfunction`(var1 int) RETURNS int DETERMINISTIC BEGIN RETURN (var1 + 1); END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_return.yml000066400000000000000000000026701503426445100251070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f900c91d9372a026ebd6e47b33e72d76158191583040ff73d52264c797df7317 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`testfunction`' - function_parameter_list: bracketed: start_bracket: ( parameter: var1 data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: int - characteristic_statement: keyword: DETERMINISTIC - function_definition: transaction_statement: keyword: BEGIN statement: return_statement: keyword: RETURN expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: var1 binary_operator: + numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_sql_security_definer.sql000066400000000000000000000001551503426445100300040ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_sql_security_definer.yml000066400000000000000000000026371503426445100300150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 74abdb518948c19065ad637d90fbcfff31c9b6cfde82cf79d4586f4605f9a2a6 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_sql_security_invoker.sql000066400000000000000000000001551503426445100300450ustar00rootroot00000000000000CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY INVOKER BEGIN SELECT 1 + 2; END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/function_sql_security_invoker.yml000066400000000000000000000026371503426445100300560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c69f593c8af3fc5de0f6d04a417a91f5c128e29bb2d4cde483edd95f1799b39b file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: SQL - keyword: SECURITY - keyword: INVOKER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_local_variable.sql000066400000000000000000000000621503426445100331200ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_local_variable.yml000066400000000000000000000012331503426445100331230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f1f183ac1cbee5d2382d84afc9c8151c8037a58872cbf2315d25442242ae7a3e file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: _test - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_multiple_variable.sql000066400000000000000000000001061503426445100336600ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN, @test = TABLE_NAME; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_multiple_variable.yml000066400000000000000000000014361503426445100336710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a774db06cd2a028f74beaf8e4fbc800bf95454357357782ca3dad888a9580e8b file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: _test - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN - comma: ',' - variable: '@test' - comparison_operator: raw_comparison_operator: '=' - keyword: TABLE_NAME statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_session_variable.sql000066400000000000000000000000621503426445100335110ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1 @test = CLASS_ORIGIN; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_info_session_variable.yml000066400000000000000000000012351503426445100335160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 57cdf831e601b7330259c2fb2a9896682094508706204a3dfc889ab5ee7de24d file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' - variable: '@test' - comparison_operator: raw_comparison_operator: '=' - keyword: CLASS_ORIGIN statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_local_variable.sql000066400000000000000000000000411503426445100321020ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION _test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_local_variable.yml000066400000000000000000000010531503426445100321100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 42bce6b496e8e1802b7fb8dd2b7d3869ae2de167e39670d4b0d24007814e9441 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: _test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_numeric.sql000066400000000000000000000000351503426445100306100ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_numeric.yml000066400000000000000000000010511503426445100306110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7dd3117703135613c7197d4be59c7a2de1398f0d20291fe13aaa129cb5df786 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_session_variable.sql000066400000000000000000000000411503426445100324730ustar00rootroot00000000000000GET DIAGNOSTICS CONDITION @test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_condition_session_variable.yml000066400000000000000000000010551503426445100325030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac7218df685e13ae13347608b37dce89733405707037b02020f696a4b448ed6f file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - keyword: CONDITION - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_number.sql000066400000000000000000000000511503426445100263660ustar00rootroot00000000000000GET DIAGNOSTICS @a = NUMBER CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_number.yml000066400000000000000000000012241503426445100263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 796518d65a6e3a9acb2574ac1162af0a4b26e7801d0169c1912a973abf831243 file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - variable: '@a' - comparison_operator: raw_comparison_operator: '=' - keyword: NUMBER - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_row_count.sql000066400000000000000000000000541503426445100271200ustar00rootroot00000000000000GET DIAGNOSTICS @a = ROW_COUNT CONDITION 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/get_diagnostics_row_count.yml000066400000000000000000000012271503426445100271250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2e7f5ba8bf71309f0b86e1e2945924f9cc6523a1f063b04a44cd734b1213f7c file: statement: get_diagnostics_segment: - keyword: GET - keyword: DIAGNOSTICS - variable: '@a' - comparison_operator: raw_comparison_operator: '=' - keyword: ROW_COUNT - keyword: CONDITION - variable: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/grant.sql000066400000000000000000000013721503426445100227720ustar00rootroot00000000000000GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO prj_svc; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc'; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc"; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc'@'%'; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc"@"%"; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`@`%`; GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc` @`%`; GRANT ALL ON db1.* TO 'prj_svc'@'%'; GRANT ALL PRIVILEGES ON db1.* TO 'prj_svc'@'localhost'; GRANT ALL PRIVILEGES ON *.* TO 'prj_svc'@'%'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/grant.yml000066400000000000000000000117741503426445100230030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df855ae682e4d1b57088f24c78ffb933010ac6eb4fd31fa7a6fc6f6f6466ee06 file: - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: naked_identifier: prj_svc - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_identifier: "'prj_svc'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_literal: '"prj_svc"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: quoted_identifier: '`prj_svc`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - 
at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_literal: '"prj_svc"' - at_sign_literal: '@' - quoted_literal: '"%"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: '`prj_svc`' - at_sign_literal: '@' - quoted_identifier: '`%`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - comma: ',' - keyword: REFERENCES - keyword: 'ON' - object_reference: naked_identifier: prj_table - keyword: TO - role_reference: - quoted_identifier: '`prj_svc`' - at_sign_literal: '@' - quoted_identifier: '`%`' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - wildcard_identifier: naked_identifier: db1 dot: . star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - wildcard_identifier: naked_identifier: db1 dot: . star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - wildcard_identifier: - star: '*' - dot: . - star: '*' - keyword: TO - role_reference: - quoted_identifier: "'prj_svc'" - at_sign_literal: '@' - quoted_identifier: "'%'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_condition_name.sql000066400000000000000000000000741503426445100263400ustar00rootroot00000000000000DECLARE exit handler for conditionName begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_condition_name.yml000066400000000000000000000016371503426445100263500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
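grant.yml shows how the dialect decomposes a MySQL account name such as 'prj_svc'@'%': the role_reference holds a quoted (or naked) identifier for the user, an at_sign_literal for the @, and a second quoted part for the host. Double-quoted "prj_svc" comes out as a quoted_literal rather than a quoted_identifier because MySQL treats double quotes as strings unless ANSI_QUOTES is enabled. A sketch of a structural spot check, assuming sqlfluff is installed and that `sqlfluff parse` reads SQL from stdin when given '-':

import subprocess

# Parse one GRANT from grant.sql with the MySQL dialect and confirm
# the account name produced an at_sign_literal node in the tree.
sql = "GRANT ALL ON db1.* TO 'prj_svc'@'%';"
proc = subprocess.run(
    ["sqlfluff", "parse", "--dialect", "mysql", "-"],
    input=sql,
    capture_output=True,
    text=True,
    check=True,
)
assert "at_sign_literal" in proc.stdout
print(proc.stdout)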
_hash: 414ccaf762a7703b813181a3ad0d367889e5491629f899021b28dca8767b320d file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - naked_identifier: conditionName - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_continue_sqlexception.yml000066400000000000000000000010151503426445100277720ustar00rootroot00000000000000file: - statement: declare_statement: - keyword: DECLARE - keyword: continue - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_error_code.sql000066400000000000000000000000631503426445100254730ustar00rootroot00000000000000DECLARE exit handler for 1051 begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_error_code.yml000066400000000000000000000016271503426445100255040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d34172436001fa633eaf25d34177ba7a6c903cbd232b5a83f7146fca0964287 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - numeric_literal: '1051' - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_exit_sqlexception.yml000066400000000000000000000010111503426445100271130ustar00rootroot00000000000000file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_not_found.sql000066400000000000000000000000701503426445100253410ustar00rootroot00000000000000DECLARE exit handler for not found begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_not_found.yml000066400000000000000000000016411503426445100253500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c939949a50ffc6352dc498e1144348bcb1ae7813f5e690feaa32fffe9abf39b0 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: not - keyword: found - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlstate.sql000066400000000000000000000000731503426445100252110ustar00rootroot00000000000000DECLARE exit handler for SQLSTATE '1' begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlstate.yml000066400000000000000000000016551503426445100252220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efc0bd7a7a41d0f364d01e8bd2477733f99322c5f713df571ae464635cc55893 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: SQLSTATE - quoted_literal: "'1'" - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlstate_value.sql000066400000000000000000000001011503426445100263750ustar00rootroot00000000000000DECLARE exit handler for SQLSTATE VALUE '1' begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlstate_value.yml000066400000000000000000000017021503426445100264070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ada5a017cc63b573515dfe48d409c682a6e71ecca29d0cce9cea35e446085a83 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: SQLSTATE - keyword: VALUE - quoted_literal: "'1'" - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlwarning.sql000066400000000000000000000000711503426445100255340ustar00rootroot00000000000000DECLARE exit handler for sqlwarning begin select 1; end; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_sqlwarning.yml000066400000000000000000000016231503426445100255420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
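Taken together, the handler_* fixtures enumerate the condition values DECLARE ... HANDLER accepts: a named condition, a numeric error code, NOT FOUND, SQLSTATE [VALUE] 'value', SQLWARNING, and SQLEXCEPTION. The same coverage can be expressed as one parametrised loop rather than a file per variant; a sketch under the same stdin assumption as above:

import subprocess

# Every condition_value form exercised by the handler fixtures.
conditions = [
    "conditionName",       # named condition
    "1051",                # numeric error code
    "not found",
    "SQLSTATE '1'",
    "SQLSTATE VALUE '1'",
    "sqlwarning",
    "sqlexception",
]
template = "DECLARE exit handler for {} begin select 1; end;"
for cond in conditions:
    proc = subprocess.run(
        ["sqlfluff", "parse", "--dialect", "mysql", "-"],
        input=template.format(cond),
        capture_output=True,
        text=True,
    )
    # sqlfluff marks grammar misses as "unparsable" in the tree output.
    print(f"{cond!r}:", "ok" if "unparsable" not in proc.stdout else "FAILED")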
_hash: ec2fecbc723f160082a61a9b6f8a2bca85e98f87c99cfc923e06a591905870c7 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: sqlwarning - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/handler_undo_sqlexception.yml000066400000000000000000000010111503426445100271070ustar00rootroot00000000000000file: - statement: declare_statement: - keyword: DECLARE - keyword: undo - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/help_statement.sql000066400000000000000000000001501503426445100246640ustar00rootroot00000000000000HELP 'contents'; HELP 'data types'; HELP 'ascii'; HELP 'create table'; HELP 'status'; HELP 'functions'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/help_statement.yml000066400000000000000000000020701503426445100246710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 975fb491721232f31c36d1c6f435d0a640fd60e56b4014ae471e397267475c3b file: - statement: help_statement: keyword: HELP quoted_literal: "'contents'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'data types'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'ascii'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'create table'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'status'" - statement_terminator: ; - statement: help_statement: keyword: HELP quoted_literal: "'functions'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/hexadecimal_literal.sql000066400000000000000000000001361503426445100256340ustar00rootroot00000000000000SELECT X'01AF'; SELECT X'01af'; SELECT x'01AF'; SELECT x'01af'; SELECT 0x01AF; SELECT 0x01af; sqlfluff-3.4.2/test/fixtures/dialects/mysql/hexadecimal_literal.yml000066400000000000000000000026341503426445100256430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
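hexadecimal_literal.sql covers both spellings MySQL accepts for hex values, the string style X'01AF' (prefix and digits are case-insensitive) and the C style 0x01AF, and the generated tree records every one as a single numeric_literal token. For orientation, the value repeated throughout is 0x01AF = 1*256 + 10*16 + 15 = 431 decimal:

# Decimal value of the hex literal the fixture repeats in each form.
assert int("01AF", 16) == 0x01AF == 431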
_hash: ef38518cd78f06a19aff28ba8e786d3ce515fb506666dddfb9614d1ec9a5b52a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "X'01AF'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "X'01af'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "x'01AF'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "x'01af'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0x01AF' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0x01af' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if.sql000066400000000000000000000000641503426445100222520ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if.yml000066400000000000000000000023311503426445100222530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da9fd2e77d3348ffe9c07370381c002cd0968f76c1863adb19a260b7f9aa396a file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_else.sql000066400000000000000000000000711503426445100232600ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; else end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_else.yml000066400000000000000000000024411503426445100232650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 69efae2cc2e5a1387423945d404086dba866559c76178f3462f598157ce7bb77 file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: keyword: else statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_elseif.sql000066400000000000000000000001461503426445100236020ustar00rootroot00000000000000if (x = 0) then set @errmsg = ''; select 1; elseif (x = 1) then set _test = 1; else select 2; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_elseif.yml000066400000000000000000000040251503426445100236040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b03d179afec3edaf60243ad42bbd2ff4372a446e28fd033ec04c0b17e74adf7e file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: elseif - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: _test comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: keyword: else statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_multiple_expression.sql000066400000000000000000000001321503426445100264400ustar00rootroot00000000000000if ((select count(*) from table1) = 0 and x = 1) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_multiple_expression.yml000066400000000000000000000043241503426445100264510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
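Note how the dialect flattens flow control: if.yml records `if ... then`, the body statements, and `end if` as sibling statements at the top level rather than one nested block, and if_elseif.yml does the same for each elseif/else arm. A quick way to count those arms, assuming PyYAML and the fixture path used above:

from pathlib import Path

import yaml

# Count sibling if_then_statement segments in the generated fixture
# for a single IF ... ELSEIF ... ELSE ... END IF construct.
data = yaml.safe_load(
    Path("test/fixtures/dialects/mysql/if_elseif.yml").read_text()
)
arms = [
    seg for seg in data["file"]
    if isinstance(seg, dict)
    and "if_then_statement" in seg.get("statement", {})
]
print("if_then_statement arms:", len(arms))  # if / elseif / else / end if -> 4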
_hash: e29aea27c8f69eaf0019d88861f60a0204978037b4d672f5d74daa3054b5db49 file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - binary_operator: and - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_nested.sql000066400000000000000000000001261503426445100236130ustar00rootroot00000000000000if (x = 0) then select 0; if (y = 1) then set @errmsg = ''; select 1; end if; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_nested.yml000066400000000000000000000035421503426445100236220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b374d03f02f5c7eb800e9262fd5075764b7775db085d5eb2fe21e78c8d5ab96d file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '0' - statement_terminator: ; - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_session_variable.sql000066400000000000000000000000601503426445100256560ustar00rootroot00000000000000if (@x = 0) then set @b = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_session_variable.yml000066400000000000000000000022571503426445100256720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdd4b0c9117b6b228762f9b46873ba13b9207f58d2cdeb6a2ebadc2068dea99f file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: variable: '@x' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@b' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_subquery_expression.sql000066400000000000000000000001201503426445100264610ustar00rootroot00000000000000if ((select count(*) from table1) = 0) then set @errmsg = ''; select 1; end if; sqlfluff-3.4.2/test/fixtures/dialects/mysql/if_subquery_expression.yml000066400000000000000000000040061503426445100264720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5626b33570f2e08309801628d9fd5dc612419586627f73721e2abb1adc1028cc file: - statement: if_then_statement: - keyword: if - expression: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - keyword: then - statement: set_statement: keyword: set variable: '@errmsg' comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - keyword: end - keyword: if - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index.sql000066400000000000000000000000521503426445100243230ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index.yml000066400000000000000000000020041503426445100243240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2e1f60338262df41c57f37eece235e168d74a57176116c55c9c443126c8373a5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_group_by.sql000066400000000000000000000000671503426445100271050ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_group_by.yml000066400000000000000000000021041503426445100271010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 26e55b2b5d567db8e63bd7802f436aeec190ab9f4c94603820231c9c8026ae89 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_join.sql000066400000000000000000000000631503426445100262120ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_join.yml000066400000000000000000000020571503426445100262210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 60cc4d293f24f927ac5fc8ed7e486d7604260516c11b3fb241c6d9d7a2362f05 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_order_by.sql000066400000000000000000000000671503426445100270640ustar00rootroot00000000000000SELECT * FROM onetable IGNORE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_index_for_order_by.yml000066400000000000000000000021041503426445100270600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e4537df9e55c6973d4495dc1a329920562069731c0838927483119d8588b25e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_key.sql000066400000000000000000000000501503426445100240020ustar00rootroot00000000000000SELECT * FROM onetable IGNORE KEY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/ignore_key.yml000066400000000000000000000020021503426445100240030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3033d4c9bc24004b84b6b337e6a280aa6d576857c8b2df2e6bac02a4392ac050 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/insert.sql000066400000000000000000000015021503426445100231560ustar00rootroot00000000000000INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6); INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE a = VALUES(a), b = VALUES(b); INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 (a,b,c) TABLE t2 as t3(m,n,p) ON DUPLICATE KEY UPDATE b = n+p; INSERT INTO t1 SET a=1,b=2,c=3 AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 SET a=1,b=2,c=3 AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n, b = n+p; sqlfluff-3.4.2/test/fixtures/dialects/mysql/insert.yml000066400000000000000000000407061503426445100231710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
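Once the comment block is stripped, each generated .yml has the same two top-level keys: _hash, which the test suite recomputes to detect hand edits (the hashing itself lives in SQLFluff's test helpers, not in this listing), and file, holding the serialised parse record. As the insert.yml record that follows shows, a multi-statement file serialises file as a list of segment dicts, while single-statement fixtures use a plain mapping. A minimal sketch for reading one back, assuming PyYAML:

from pathlib import Path

import yaml

# Load one generated fixture and report its top-level shape.
data = yaml.safe_load(
    Path("test/fixtures/dialects/mysql/insert.yml").read_text()
)
print("hash:", data["_hash"])
body = data["file"]
count = len(body) if isinstance(body, list) else 1
print("top-level segments:", count)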
_hash: aa6ea67f4ca812ba82027e7ee132d14c0ec5bcea07abe34c915567df4aa93868 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: 
raw_comparison_operator: '=' - column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: t2 - insert_row_alias: keyword: as naked_identifier: t3 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/interval.sql000066400000000000000000000007471503426445100235100ustar00rootroot00000000000000SELECT DATE_ADD(CURDATE(), INTERVAL -30 DAY); SELECT SUBDATE('2008-01-02', INTERVAL 31 DAY); SELECT ADDDATE(CURDATE(), INTERVAL -30 DAY); SELECT DATE_SUB('1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND); SELECT DATE_ADD('2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND); SELECT DATE_ADD(CURDATE(), INTERVAL 7 * 4 DAY); SELECT ADDDATE(CURDATE(), INTERVAL col1 DAY) FROM tbl1 ; SELECT SUBDATE(CURDATE(), INTERVAL col1 + col2 DAY) FROM tbl1 ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/interval.yml000066400000000000000000000172061503426445100235100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 73a7c191d4f98f8820605af6e84e8c2afa8b19957574d4573dcaba5e9e580e47 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'2008-01-02'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '31' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_SUB function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'1992-12-31 23:59:59.000002'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1.999999'" date_part: SECOND_MICROSECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'2100-12-31 23:59:59'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1:1'" date_part: MINUTE_SECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - numeric_literal: '7' - binary_operator: '*' - numeric_literal: '4' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: column_reference: naked_identifier: col1 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/json.sql000066400000000000000000000006661503426445100226350ustar00rootroot00000000000000CREATE TABLE facts (sentence JSON); INSERT INTO facts VALUES (JSON_OBJECT("mascot", "Our mascot is a dolphin named \"Sakila\".")); SELECT sentence->"$.mascot" FROM facts; SELECT sentence->'$.mascot' FROM facts; SELECT sentence->>"$.mascot" FROM facts; SELECT sentence->>'$.mascot' FROM facts; SELECT sentence FROM facts WHERE JSON_TYPE(sentence->"$.mascot") = "NULL"; SELECT sentence FROM facts WHERE sentence->"$.mascot" IS NULL; sqlfluff-3.4.2/test/fixtures/dialects/mysql/json.yml000066400000000000000000000121121503426445100226240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 64f1b276000ceac9ed592190bf0f896fe2a725875d55f487b8eba62b597cbc33 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: facts - bracketed: start_bracket: ( column_definition: naked_identifier: sentence data_type: data_type_identifier: JSON end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: facts - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"mascot"' - comma: ',' - expression: quoted_literal: '"Our mascot is a dolphin named \"Sakila\"."' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: ->> quoted_literal: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: sentence column_path_operator: ->> quoted_literal: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: JSON_TYPE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' end_bracket: ) comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NULL"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: column_reference: naked_identifier: sentence column_path_operator: -> quoted_literal: '"$.mascot"' keyword: IS null_literal: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/line_comment.sql000066400000000000000000000000511503426445100243210ustar00rootroot00000000000000-- Hello --Hello --From Curaçao USE db; sqlfluff-3.4.2/test/fixtures/dialects/mysql/line_comment.yml000066400000000000000000000010161503426445100243250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/load_data.sql000066400000000000000000000024401503426445100235640ustar00rootroot00000000000000LOAD DATA INFILE '/var/lib/mysql-files/libaccess.csv' INTO TABLE libaccess FIELDS TERMINATED BY '\t' OPTIONALLY ENCLOSED BY '"' IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table PARTITION (partition_name); LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test FIELDS TERMINATED BY ',' LINES STARTING BY 'xxx'; LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY ','; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY '\t'; LOAD DATA INFILE 'data.txt' INTO TABLE tbl_name FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\r\n' IGNORE 1 LINES; LOAD DATA INFILE '/tmp/jokes.txt' INTO TABLE jokes FIELDS TERMINATED BY '' LINES TERMINATED BY '\n%%\n' (joke); LOAD DATA INFILE 'persondata.txt' INTO TABLE persondata; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @var1) SET column2 = @var1/100; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, column2) SET column3 = CURRENT_TIMESTAMP; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @dummy, column2, @dummy, column3); LOAD DATA INFILE '/local/access_log' INTO TABLE tbl_name FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\' sqlfluff-3.4.2/test/fixtures/dialects/mysql/load_data.yml000066400000000000000000000160231503426445100235700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f068e4f34b1a5b03afb39dd77f1cf1dfdaa97fefef25d35dbd6c3e7288ed55b file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/var/lib/mysql-files/libaccess.csv'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: libaccess - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . 
- naked_identifier: my_table - partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: STARTING - keyword: BY - quoted_literal: "'xxx'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\r\\n'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/jokes.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: jokes - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "''" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n%%\\n'" - bracketed: start_bracket: ( column_reference: naked_identifier: joke end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'persondata.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: persondata - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@var1' - end_bracket: ) - keyword: SET - column_reference: naked_identifier: column2 - comparison_operator: raw_comparison_operator: '=' - variable: '@var1' - binary_operator: / - numeric_literal: '100' - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: SET - column_reference: 
naked_identifier: column3 - comparison_operator: raw_comparison_operator: '=' - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/local/access_log'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_label.sql000066400000000000000000000001211503426445100237560ustar00rootroot00000000000000iteration:loop select 1; iterate iteration; leave iteration; end loop iteration; sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_label.yml000066400000000000000000000020461503426445100237700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ee6c612bc68b46b0637885e6c2d45996d0657a723d4c8aaf116166d7483357b file: - statement: loop_statement: naked_identifier: iteration colon: ':' keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: iterate_statement: keyword: iterate naked_identifier: iteration - statement_terminator: ; - statement: transaction_statement: keyword: leave naked_identifier: iteration - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_multiple_statements.sql000066400000000000000000000000611503426445100270040ustar00rootroot00000000000000loop select 1; select * from onetable; end loop; sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_multiple_statements.yml000066400000000000000000000022171503426445100270130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1f4385380b2be8c5e17f459866e0ad060d6cdb7b1a3aabcac090d59502b9d0a8 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_no_label.sql000066400000000000000000000000311503426445100244520ustar00rootroot00000000000000loop select 1; end loop; sqlfluff-3.4.2/test/fixtures/dialects/mysql/loop_no_label.yml000066400000000000000000000013411503426445100244610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5257dc626087e74ee817942fa42136a8f5b1fa385f436cb278a903375ababa10 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/nested_begin.sql000066400000000000000000000000741503426445100243030ustar00rootroot00000000000000blocks:BEGIN nest:begin set @abc = 1; end nest; END blocks~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/nested_begin.yml000066400000000000000000000021251503426445100243040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4c8251d83f6e7828cccb5ba0c8f5e3670ba41d4a06660193eed35d37ba6d0f8b file: - statement: transaction_statement: naked_identifier: blocks colon: ':' keyword: BEGIN statement: transaction_statement: naked_identifier: nest colon: ':' keyword: begin statement: set_statement: keyword: set variable: '@abc' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end naked_identifier: nest - statement_terminator: ; - statement: transaction_statement: keyword: END naked_identifier: blocks - statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/null_safe_equal.sql000066400000000000000000000001071503426445100250110ustar00rootroot00000000000000SELECT 1 <=> 1, NULL <=> NULL, 1 <=> NULL; SELECT 1 WHERE NULL <=> 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/null_safe_equal.yml000066400000000000000000000034341503426445100250210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0a7443feb698ba6dd2813824d70b0cdd3a879683434b1177f4047193b1b6084a file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - raw_comparison_operator: '>' - numeric_literal: '1' - comma: ',' - select_clause_element: expression: - null_literal: 'NULL' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - raw_comparison_operator: '>' - null_literal: 'NULL' - comma: ',' - select_clause_element: expression: numeric_literal: '1' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - raw_comparison_operator: '>' null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' where_clause: keyword: WHERE expression: null_literal: 'NULL' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - raw_comparison_operator: '>' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/open.sql000066400000000000000000000000201503426445100226050ustar00rootroot00000000000000open curcursor; sqlfluff-3.4.2/test/fixtures/dialects/mysql/open.yml000066400000000000000000000010061503426445100226140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e1aebd2df56d3ec79fea9159ce9c4448a2f2f46696461f7886d3b27f321ed502 file: statement: cursor_open_close_segment: keyword: open naked_identifier: curcursor statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/open_qualified.sql000066400000000000000000000000221503426445100246320ustar00rootroot00000000000000open `curcursor`; sqlfluff-3.4.2/test/fixtures/dialects/mysql/open_qualified.yml000066400000000000000000000010131503426445100246350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3adadcefab1078bb37b5f960ec6344ab8391a808cb512db12cedeeaf6bc8612d file: statement: cursor_open_close_segment: keyword: open quoted_identifier: '`curcursor`' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/optimize_table.sql000066400000000000000000000004031503426445100246600ustar00rootroot00000000000000OPTIMIZE TABLE some_table; OPTIMIZE TABLE some_table1, some_table2; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; OPTIMIZE LOCAL TABLE some_table; OPTIMIZE LOCAL TABLE some_table1, some_table2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/optimize_table.yml000066400000000000000000000033711503426445100246710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: feffb21d789f331a3e487e1b8d5d71deb4fbfc668e677fd34995e77e3cd87c4d file: - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_local_variable.sql000066400000000000000000000000311503426445100263230ustar00rootroot00000000000000PREPARE test FROM _test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_local_variable.yml000066400000000000000000000010441503426445100263320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 25b6e93e2262f01eaf66c26d2f5644717653ed3e01b5bd341134513ded5c1b38 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: _test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_session_variable.sql000066400000000000000000000000311503426445100267140ustar00rootroot00000000000000PREPARE test FROM @test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_session_variable.yml000066400000000000000000000010461503426445100267250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 465db8acb36a650a88c7dd35993d5321557e4b9b2241b2ee99a97b8ef7f39058 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: '@test' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_statement.sql000066400000000000000000000000371503426445100253760ustar00rootroot00000000000000PREPARE test FROM 'select 1;'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/prepare_statement.yml000066400000000000000000000010621503426445100253770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6a27f8f2efa35c42ccfda413a81d24920035e09430118145ee749b06f2d61285 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - quoted_literal: "'select 1;'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_definer.sql000066400000000000000000000001011503426445100253300ustar00rootroot00000000000000CREATE DEFINER=`test`@`%` PROCEDURE `testprocedure`() BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_definer.yml000066400000000000000000000021051503426445100253400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4a9679dce024353f3845d4f1b920ea507add980899b88e70c45c5864f274f9ce file: statement: create_procedure_statement: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`test`' - at_sign_literal: '@' - quoted_identifier: '`%`' - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_in_param.sql000066400000000000000000000000711503426445100255100ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(in test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_in_param.yml000066400000000000000000000017101503426445100255130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2d6aefd94f35d5599b705fa4c4dfe7a3a8a0da4c29696e30d4d648c15bc5042 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: in parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_inout_param.sql000066400000000000000000000000741503426445100262430ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(inout test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_inout_param.yml000066400000000000000000000017131503426445100262460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fbe00aeccc4e8664fcef735462688e8f15ee4edc02e12cd9e6e372c85a2514d3 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: inout parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_out_param.sql000066400000000000000000000000721503426445100257120ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(out test int) BEGIN END~ sqlfluff-3.4.2/test/fixtures/dialects/mysql/procedure_out_param.yml000066400000000000000000000017111503426445100257150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b743b1d3c50497e605139814614c4b3b16413ada9a280ad66415562fa6b89121 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: out parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-3.4.2/test/fixtures/dialects/mysql/purge_binary_logs.sql000066400000000000000000000003561503426445100253720ustar00rootroot00000000000000PURGE BINARY LOGS TO 'mysql-bin.010'; PURGE BINARY LOGS BEFORE '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE TIMESTAMP '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE 19830905132800; PURGE BINARY LOGS BEFORE TIMESTAMP 19830905132800; sqlfluff-3.4.2/test/fixtures/dialects/mysql/purge_binary_logs.yml000066400000000000000000000027731503426445100254010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 539e0add6ce7c2a4e4e22378a706fc247a3c09b0fe8fd0d36ab241fe4220ca07 file: - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: TO - quoted_literal: "'mysql-bin.010'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: quoted_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP date_constructor_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: numeric_literal: '19830905132800' - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP numeric_literal: '19830905132800' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/quoted_literal.sql000066400000000000000000000014361503426445100246750ustar00rootroot00000000000000SELECT ''; SELECT ""; SELECT ''''; SELECT """"; SELECT ' '; SELECT " "; SELECT '''aaa'''; SELECT """aaa"""; SELECT ' '' '; SELECT " "" "; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' -- some comment 'bar'; SELECT "foo" -- some comment "bar"; SELECT 'foo' /* some comment */ 'bar'; SELECT "foo" /* some comment */ "bar"; UPDATE table1 SET column1 = 'baz\'s'; UPDATE table1 SET column1 = "baz\"s"; SELECT 'terminating MySQL-y escaped single-quote bazs\''; SELECT "terminating MySQL-y escaped double-quote bazs\""; SELECT 'terminating ANSI-ish escaped single-quote '''; SELECT "terminating ANSI-ish escaped double-quote """; SELECT '\\'; SELECT "\\"; sqlfluff-3.4.2/test/fixtures/dialects/mysql/quoted_literal.yml000066400000000000000000000142671503426445100247050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b60a070dbc152e87be49e05488538fa0d1bf418b18f296b7ba01eb24b4cb8937 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"""aaa"""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\"\"\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'baz\\'s'" - statement_terminator: ; - statement: update_statement: keyword: 
UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: '"baz\"s"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating MySQL-y escaped single-quote bazs\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating MySQL-y escaped double-quote bazs\""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating ANSI-ish escaped single-quote '''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating ANSI-ish escaped double-quote """' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"\\"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/rename_table.sql000066400000000000000000000001461503426445100242730ustar00rootroot00000000000000RENAME TABLE old_table TO new_table; RENAME TABLE old_table1 TO new_table1, old_table2 TO new_table2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/rename_table.yml000066400000000000000000000020231503426445100242710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c90b566ed72ba3be4bcb5183c0ff3089ab6ea7ce8b37783931b52e0aa1d855c6 file: - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table - keyword: TO - table_reference: naked_identifier: new_table - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table1 - keyword: TO - table_reference: naked_identifier: new_table1 - comma: ',' - table_reference: naked_identifier: old_table2 - keyword: TO - table_reference: naked_identifier: new_table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repair_table.sql000066400000000000000000000006151503426445100243070ustar00rootroot00000000000000REPAIR TABLE some_table; REPAIR TABLE some_table1, some_table2; REPAIR NO_WRITE_TO_BINLOG TABLE some_table; REPAIR NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; REPAIR LOCAL TABLE some_table; REPAIR LOCAL TABLE some_table1, some_table2; REPAIR TABLE some_table QUICK; REPAIR TABLE some_table EXTENDED; REPAIR TABLE some_table USE_FRM; REPAIR TABLE some_table QUICK EXTENDED USE_FRM; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repair_table.yml000066400000000000000000000050211503426445100243050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1faf15f84e873b3e2be5b00bf51587fe7e641b27ecc10fdfd77ea0c3bc5e56df file: - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: EXTENDED - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: USE_FRM - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - keyword: EXTENDED - keyword: USE_FRM - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_label.sql000066400000000000000000000001051503426445100242670ustar00rootroot00000000000000iteration:repeat set @a = @a + 1; until @a > 5 end repeat iteration; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_label.yml000066400000000000000000000021101503426445100242670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6202885f33bcd0a346d2436056aa723837e6127704fddff948205986aad344dc file: - statement: repeat_statement: naked_identifier: iteration colon: ':' keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_multiple_statements.sql000066400000000000000000000001051503426445100273120ustar00rootroot00000000000000repeat set @a = @a + 1; select 1; until @a > 5 and x = 1 end repeat; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_multiple_statements.yml000066400000000000000000000025211503426445100273200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d51657630a73b8ae3d1fb36fe5494b43d57b3da9ba2fa3be71f3e3ddae4bea06 file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: - variable: '@a' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: and - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_no_label.sql000066400000000000000000000000611503426445100247640ustar00rootroot00000000000000repeat set @a = @a + 1; until @a > 5 end repeat; sqlfluff-3.4.2/test/fixtures/dialects/mysql/repeat_no_label.yml000066400000000000000000000017631503426445100250000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a907fbb417b57c4bcd31c689236a595b55b81c6367bf4c1bead609bf102f5568 file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/replace.sql000066400000000000000000000020731503426445100232710ustar00rootroot00000000000000REPLACE tbl_name VALUES (1, 2); REPLACE tbl_name VALUES (DEFAULT, DEFAULT); REPLACE tbl_name VALUES (1, 2), (11, 22); REPLACE tbl_name VALUE (1, 2), (11, 22); REPLACE tbl_name (col1, col2) VALUES (1, 2); REPLACE tbl_name (col1, col2) VALUES ROW(1, 2), ROW(11, 22); REPLACE LOW_PRIORITY tbl_name VALUES (1, 2); REPLACE DELAYED tbl_name VALUES (1, 2); REPLACE LOW_PRIORITY INTO tbl_name VALUES (1, 2); REPLACE tbl_name PARTITION (partition_name) VALUES (1, 2); REPLACE tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY tbl_name SET col1 = 1, col2 = 2; REPLACE DELAYED tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY INTO tbl_name SET col1 = 1, col2 = 2; REPLACE tbl_name PARTITION (partition_name) SET col1 = 1, col2 = 2; REPLACE tbl_name SELECT * FROM table_name; REPLACE tbl_name TABLE table_name; REPLACE LOW_PRIORITY tbl_name TABLE table_name; REPLACE DELAYED tbl_name TABLE table_name; REPLACE LOW_PRIORITY INTO tbl_name TABLE table_name; REPLACE tbl_name (col1, col2) TABLE table_name; REPLACE tbl_name PARTITION (partition_name) TABLE table_name; sqlfluff-3.4.2/test/fixtures/dialects/mysql/replace.yml000066400000000000000000000246421503426445100233010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5e56839dde36d79ac2153e62446f262002bffb704a7185d5e155ef88f1e1b528 file: - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUE - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - 
statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - 
statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/reset_master.sql000066400000000000000000000000441503426445100243470ustar00rootroot00000000000000RESET MASTER; RESET MASTER TO 1234; sqlfluff-3.4.2/test/fixtures/dialects/mysql/reset_master.yml000066400000000000000000000012261503426445100243540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c1e5dac0655f6e9471d3cab9d0b711be754e37c946b8099192ccd30e6ed4608 file: - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - statement_terminator: ; - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - keyword: TO - numeric_literal: '1234' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal.sql000066400000000000000000000000121503426445100234510ustar00rootroot00000000000000resignal; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal.yml000066400000000000000000000007371503426445100234710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_name.sql000066400000000000000000000000301503426445100265170ustar00rootroot00000000000000resignal testcondition; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_name.yml000066400000000000000000000010051503426445100265240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_sqlstate.sql000066400000000000000000000000331503426445100274420ustar00rootroot00000000000000resignal sqlstate '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_sqlstate.yml000066400000000000000000000010271503426445100274500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9e5f8fee59dc88d1372330944c9d54873fa0887ccc3c70659e4f52157a87aff file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_sqlstate_value.sql000066400000000000000000000000411503426445100306350ustar00rootroot00000000000000resignal sqlstate value '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_condition_sqlstate_value.yml000066400000000000000000000010541503426445100306440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b6936c92a4757c398e5999d01c2144a086621ab6f1cb6003576f46ab6bacf3d file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_set_signal_info.sql000066400000000000000000000000541503426445100267020ustar00rootroot00000000000000resignal set message_text = 'test message'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_set_signal_info.yml000066400000000000000000000011651503426445100267100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42689aa92182e2de491abf166e1e269b06ae2dbf027126ac7bdf35f04481ed44 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_set_signal_info_multiple.sql000066400000000000000000000001041503426445100306110ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/resignal_set_signal_info_multiple.yml000066400000000000000000000014021503426445100306150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e0e7a0de0aa07cfecf1c1dd4411eef615077c791427559e4c0dfb6095cc0b53a file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_distinctrow.sql000066400000000000000000000000421503426445100255600ustar00rootroot00000000000000select distinctrow * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_distinctrow.yml000066400000000000000000000015651503426445100255750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9871a45f50a15605c76b50132724c5f346aaa8192ccb8d8808c5a46f5fd4d8b8 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinctrow select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_share.sql000066400000000000000000000000401503426445100251550ustar00rootroot00000000000000SELECT 1 FROM table1 FOR SHARE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_share.yml000066400000000000000000000014731503426445100251720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 02225e92a0b90b53bdd96e50dc68defd5f0a350525db6500100b390bb2403fa3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: SHARE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update.sql000066400000000000000000000000411503426445100253360ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update.yml000066400000000000000000000014741503426445100253530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c93bb773f206085006bb91a60b1174791819d70a67b1d49f89d67f985871b092 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_lock_in_share_mode.sql000066400000000000000000000000511503426445100312230ustar00rootroot00000000000000SELECT 1 FROM table1 LOCK IN SHARE MODE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_lock_in_share_mode.yml000066400000000000000000000015461503426445100312370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d370a7c336d97129568cc88f70c4b8d79cce6e3d13c34fcfba66e315f4bfd27f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: LOCK - keyword: IN - keyword: SHARE - keyword: MODE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_nowait.sql000066400000000000000000000000501503426445100267170ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE NOWAIT; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_nowait.yml000066400000000000000000000015241503426445100267300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d507810a93ebf607747effb7b223e1813effc969a3deba66c9bb62ea9c82be5e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: NOWAIT statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_of.sql000066400000000000000000000000511503426445100260230ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_of.yml000066400000000000000000000015571503426445100260410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: efebbedd435ef3a626ed5e4556668b386152d98ece84affadbd1dab651a77223 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_of_multiple.sql000066400000000000000000000000611503426445100277370ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test1, test2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_of_multiple.yml000066400000000000000000000016431503426445100277500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 495a190960fe7a8c4ec218cd448be5b7af1c4f54cb09c25f058bc29a18356f48 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test1 - comma: ',' - naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_skip_locked.sql000066400000000000000000000000551503426445100277120ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE SKIP LOCKED; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_for_update_skip_locked.yml000066400000000000000000000015521503426445100277170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54602cc7d83eb9cbaa73bb881f60b100d97193a89f31f284943f9b8b9da2dc5a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: SKIP - keyword: LOCKED statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_high_priority.sql000066400000000000000000000000441503426445100260710ustar00rootroot00000000000000select HIGH_PRIORITY * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_high_priority.yml000066400000000000000000000015671503426445100261060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3006519272c9151e4e617e0fbaf1af3e6f16b3e2880d360c38b8800a6cb0ca24 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: HIGH_PRIORITY select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_dumpfile.sql000066400000000000000000000000471503426445100260520ustar00rootroot00000000000000select * into dumpfile '' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_dumpfile.yml000066400000000000000000000016261503426445100260600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bcd9484202e1bc4f4c1f2d4680f63be211d5619a1db600890359fc5ecf5784ce file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: dumpfile - quoted_literal: "''" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_multiple_variable.sql000066400000000000000000000001501503426445100277400ustar00rootroot00000000000000select 1, @test2, _test3, 'test4', func(test5) into @test1, @test2, _test3, @test4, @test5 from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_multiple_variable.yml000066400000000000000000000032371503426445100277530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: baea11442609720c73b4ed747cd141b03b536e9cd40ce23820dbd2a07c910f5c file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: column_reference: variable: '@test2' - comma: ',' - select_clause_element: column_reference: naked_identifier: _test3 - comma: ',' - select_clause_element: quoted_literal: "'test4'" - comma: ',' - select_clause_element: function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: test5 end_bracket: ) into_clause: - keyword: into - variable: '@test1' - comma: ',' - variable: '@test2' - comma: ',' - variable: _test3 - comma: ',' - variable: '@test4' - comma: ',' - variable: '@test5' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile.sql000066400000000000000000000000471503426445100257140ustar00rootroot00000000000000select * into outfile 'a' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile.yml000066400000000000000000000016261503426445100257220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 505911514778724197f4931c1d669f0b50165d64f5621b820845c34104536d09 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_enclosed.sql000066400000000000000000000000761503426445100311200ustar00rootroot00000000000000select * into outfile 'a' fields enclosed by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_enclosed.yml000066400000000000000000000017731503426445100311270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 99ffe72d2f7edba62a6d43380981f86080c389bee2cda980e8c2fb8805cbc7d1 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_escaped.sql000066400000000000000000000000751503426445100307270ustar00rootroot00000000000000select * into outfile 'a' fields escaped by '-' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_escaped.yml000066400000000000000000000017711503426445100307350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f7c9103219a5b85415c22c586b419fd91382c37367dfe08abb3e461ee2efa3d2 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: escaped - keyword: by - quoted_literal: "'-'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_optionally_enclosed.sql000066400000000000000000000001111503426445100333600ustar00rootroot00000000000000select * into outfile 'a' fields optionally enclosed by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_optionally_enclosed.yml000066400000000000000000000020271503426445100333720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6bd9d4b9b538b8ebebfe52323b66faed25fc48881d0183faa98539853d10503f file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: optionally - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_terminated.sql000066400000000000000000000001001503426445100314440ustar00rootroot00000000000000select * into outfile 'a' fields terminated by '"' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_fields_terminated.yml000066400000000000000000000017751503426445100314710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d5dfacac0b7fd70cddb0b5cf230f3682c824fb39d1025f11323848005c8ada5d file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: terminated - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_lines_starting.sql000066400000000000000000000000761503426445100310230ustar00rootroot00000000000000select * into outfile 'a' lines starting by '\n' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_lines_starting.yml000066400000000000000000000017731503426445100310320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a286d11eea201a33aee3a9b77876dbc4407ee4d786d5716796e77fcadb84be95 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: starting - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_lines_terminated.sql000066400000000000000000000001001503426445100313100ustar00rootroot00000000000000select * into outfile 'a' lines terminated by '\n' from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_outfile_lines_terminated.yml000066400000000000000000000017751503426445100313350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0307c93cec46c6a3506a21fbb3f8c528694bfa2007f1d7a1ead818264363f29d file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: terminated - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_session_variable.sql000066400000000000000000000001771503426445100276010ustar00rootroot00000000000000select 1 into @dumpfile from table1; SELECT name INTO @name FROM t WHERE id = 1; SELECT name FROM t WHERE id = 1 INTO @name; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_into_session_variable.yml000066400000000000000000000042541503426445100276030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d90396fce3fe718549e0cb44f0a32d13f527711a1a3b6edddfb6f9f76d674dc file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' into_clause: keyword: into variable: '@dumpfile' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name into_clause: keyword: INTO variable: '@name' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' into_clause: keyword: INTO variable: '@name' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_local_variable.sql000066400000000000000000000000161503426445100261470ustar00rootroot00000000000000select test2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_local_variable.yml000066400000000000000000000011231503426445100261510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9af97739ef6ad17f592f5387bb4522bd1bf97e6b52ae98b460f76bf6ed3ffcdd file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_lock_in_share_mode.sql000066400000000000000000000000511503426445100270130ustar00rootroot00000000000000select 1 from table1 lock in share mode; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_lock_in_share_mode.yml000066400000000000000000000015461503426445100270270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9fbe45c2085ef72cbde8c14405f345140dc661d053add571a454c18ae3e79500 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: lock - keyword: in - keyword: share - keyword: mode statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_multiple_partition.sql000066400000000000000000000000561503426445100271400ustar00rootroot00000000000000select * from table1 PARTITION(part1, part2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_multiple_partition.yml000066400000000000000000000021051503426445100271370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fc300a18d46f88268099f98290a1499b9c66fa35604ee9cd2cdf6d278b6af14 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: - start_bracket: ( - object_reference: naked_identifier: part1 - comma: ',' - object_reference: naked_identifier: part2 - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_operators.sql000066400000000000000000000002341503426445100252300ustar00rootroot00000000000000SELECT !1; SELECT 1 && 1; SELECT 1 && 0; SELECT 1 XOR 1; SELECT 1 || 1; SELECT col_1 && 1; SELECT (col_1 = col_2) || col_3; SELECT 5 DIV 2; SELECT 5 MOD 2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_operators.yml000066400000000000000000000057731503426445100252470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1cedf78417964649de92fb66c5035c6d7b5cf09c22b3f5b5232e2e233083c132 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: not_operator: '!' 
numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: XOR - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '||' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: col_1 binary_operator: '&&' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col_1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col_2 end_bracket: ) binary_operator: '||' column_reference: naked_identifier: col_3 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '5' - binary_operator: DIV - numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '5' - binary_operator: MOD - numeric_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_partition.sql000066400000000000000000000000471503426445100252250ustar00rootroot00000000000000select * from table1 PARTITION(part1); sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_partition.yml000066400000000000000000000017601503426445100252320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c508e3cccfffae3b003ffd16e156f90f1ebed5bf0524be2a458da7a218c202c1 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: part1 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_session_variable.sql000066400000000000000000000000171503426445100265410ustar00rootroot00000000000000select @test2; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_session_variable.yml000066400000000000000000000011161503426445100265440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f9c4401b3123d25feb2c718e2b77f7f7af27bef874d71b33357b354614b80daa file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: variable: '@test2' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_big_result.sql000066400000000000000000000000451503426445100262300ustar00rootroot00000000000000select SQL_BIG_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_big_result.yml000066400000000000000000000015701503426445100262360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f4d35b2a7b796d378d6888b3b66a816977025435ad2330584bc4e0d0192c9bf file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BIG_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_buffer_result.sql000066400000000000000000000000501503426445100267340ustar00rootroot00000000000000select SQL_BUFFER_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_buffer_result.yml000066400000000000000000000015731503426445100267510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bebb36790002ef31a1e293fe5c53ae91060da9a684608734fd9ae851ff032493 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BUFFER_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_cache.sql000066400000000000000000000000401503426445100251270ustar00rootroot00000000000000select SQL_CACHE * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_cache.yml000066400000000000000000000015631503426445100251440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1ad574866e07fcfc72d05757ea620ff73184427caa3756357a5d8b13259ea4c7 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_calc_found_rows.sql000066400000000000000000000000521503426445100272360ustar00rootroot00000000000000select SQL_CALC_FOUND_ROWS * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_calc_found_rows.yml000066400000000000000000000015751503426445100272530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f50fd122c842bf90fce3cd99e66ed64121af15acd587b423c38251d0dfcaad51 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CALC_FOUND_ROWS select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_no_cache.sql000066400000000000000000000000431503426445100256260ustar00rootroot00000000000000select SQL_NO_CACHE * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_no_cache.yml000066400000000000000000000015661503426445100256430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6de0523b89aa7d305d9217f843e0fdd35e1e092140f7bb1d5b4d692c6247e4c5 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_NO_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_small_result.sql000066400000000000000000000000471503426445100266010ustar00rootroot00000000000000select SQL_SMALL_RESULT * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_sql_small_result.yml000066400000000000000000000015721503426445100266070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e5aec50a0e01748aaba96d940e14c71dd2f03e7d27d777d58c5392e2ed3dac7e file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_SMALL_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_straight_join.sql000066400000000000000000000000441503426445100260550ustar00rootroot00000000000000select STRAIGHT_JOIN * from table1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_straight_join.yml000066400000000000000000000015671503426445100260720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6b9d5f425cf93327107ccb3168505d5544de5d89cdf9b82ce91cc9eaa06831fc file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: STRAIGHT_JOIN select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_with_date_part_function.sql000066400000000000000000000004631503426445100301210ustar00rootroot00000000000000SELECT EXTRACT(YEAR FROM '2019-07-02'); SELECT TIMESTAMPADD(MINUTE, 1, '2003-01-02'); SELECT TIMESTAMPDIFF(MONTH, '2003-02-01', '2003-05-01'); SELECT TIMESTAMPDIFF(HOUR, x.time_created, x.time_updated) FROM example_table AS x; SELECT TIMESTAMPADD(SECOND, 1, x.some_timestamp_field) FROM example_table AS x; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_with_date_part_function.yml000066400000000000000000000102271503426445100301220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b735d311acc9663a75452e7a80819238eb18a19f99b67cd14c8c5dfa940c13ba file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: YEAR keyword: FROM expression: quoted_literal: "'2019-07-02'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMPADD function_contents: bracketed: - start_bracket: ( - date_part: MINUTE - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'2003-01-02'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMPDIFF function_contents: bracketed: - start_bracket: ( - date_part: MONTH - comma: ',' - expression: quoted_literal: "'2003-02-01'" - comma: ',' - expression: quoted_literal: "'2003-05-01'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMPDIFF function_contents: bracketed: - start_bracket: ( - date_part: HOUR - comma: ',' - expression: column_reference: - naked_identifier: x - dot: . - naked_identifier: time_created - comma: ',' - expression: column_reference: - naked_identifier: x - dot: . - naked_identifier: time_updated - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_table alias_expression: alias_operator: keyword: AS naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMPADD function_contents: bracketed: - start_bracket: ( - date_part: SECOND - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: column_reference: - naked_identifier: x - dot: . - naked_identifier: some_timestamp_field - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_table alias_expression: alias_operator: keyword: AS naked_identifier: x - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_with_regexp.sql000066400000000000000000000000631503426445100255370ustar00rootroot00000000000000SELECT * FROM `db`.tbl WHERE col REGEXP '^[0-9]*$' sqlfluff-3.4.2/test/fixtures/dialects/mysql/select_with_regexp.yml000066400000000000000000000020301503426445100255350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4ac85c17fee6271dc7e9de20ce2d8943e318d543a49d9179712bce0ff30e346 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`db`' dot: . 
naked_identifier: tbl where_clause: keyword: WHERE expression: column_reference: naked_identifier: col keyword: REGEXP quoted_literal: "'^[0-9]*$'" sqlfluff-3.4.2/test/fixtures/dialects/mysql/set.sql000066400000000000000000000031311503426445100224450ustar00rootroot00000000000000SET @name = 43; SET @total_tax = (SELECT SUM(tax) FROM taxable_transactions); SET counter = counter + increment; SET GLOBAL max_connections = 1000; SET @@GLOBAL.max_connections = 1000; SET SESSION sql_mode = 'TRADITIONAL'; SET LOCAL sql_mode = 'TRADITIONAL'; SET @@SESSION.sql_mode = 'TRADITIONAL'; SET @@LOCAL.sql_mode = 'TRADITIONAL'; SET @@sql_mode = 'TRADITIONAL'; SET sql_mode = 'TRADITIONAL'; SET PERSIST max_connections = 1000; SET @@PERSIST.max_connections = 1000; SET PERSIST_ONLY back_log = 100; SET @@PERSIST_ONLY.back_log = 100; SET @@SESSION.max_join_size = DEFAULT; SET @@SESSION.max_join_size = @@GLOBAL.max_join_size; SET @x = 1, SESSION sql_mode = ''; SET GLOBAL sort_buffer_size = 1000000, SESSION sort_buffer_size = 1000000; SET @@GLOBAL.sort_buffer_size = 1000000, @@LOCAL.sort_buffer_size = 1000000; SET GLOBAL max_connections = 1000, sort_buffer_size = 1000000; SET @@GLOBAL.sort_buffer_size = 50000, sort_buffer_size = 1000000; SET @abc = 1 + 2; SET @abc = (SELECT 1); SET @id = (SELECT id FROM table1 WHERE field = TRUE LIMIT 1); SET @abc = 1; SET @my_var = 1; SET @my$currency = 1; SET @sha256enabled = 1; SET some_bool_param = ON; SET some_bool_param = OFF; SET some_bool_param = TRUE; SET some_bool_param = FALSE; SET some_bool_param = 0; SET some_bool_param = 1; SET sql_log_bin = ON; SET sql_log_bin = OFF; SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0; SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0; SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/set.yml000066400000000000000000000301041503426445100224470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 24b9e8109ddc85b6382e1611ab4cbf65746f05148167b2548d20f4cab82dec35 file: - statement: set_statement: keyword: SET variable: '@name' comparison_operator: raw_comparison_operator: '=' numeric_literal: '43' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@total_tax' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: tax end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: taxable_transactions end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: counter comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: counter - binary_operator: + - column_reference: naked_identifier: increment - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: GLOBAL - variable: max_connections - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000' - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@GLOBAL.max_connections' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1000' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - variable: sql_mode - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: LOCAL - variable: sql_mode - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@SESSION.sql_mode' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@LOCAL.sql_mode' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@sql_mode' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: keyword: SET variable: sql_mode comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TRADITIONAL'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: PERSIST - variable: max_connections - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000' - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@PERSIST.max_connections' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1000' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: PERSIST_ONLY - variable: back_log - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@PERSIST_ONLY.back_log' comparison_operator: raw_comparison_operator: '=' numeric_literal: '100' - statement_terminator: ; - statement: set_statement: keyword: SET system_variable: '@@SESSION.max_join_size' comparison_operator: raw_comparison_operator: '=' variable: DEFAULT - statement_terminator: ; - statement: set_statement: - keyword: SET - 
system_variable: '@@SESSION.max_join_size' - comparison_operator: raw_comparison_operator: '=' - system_variable: '@@GLOBAL.max_join_size' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: '@x' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - keyword: SESSION - variable: sql_mode - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: GLOBAL - variable: sort_buffer_size - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - comma: ',' - keyword: SESSION - variable: sort_buffer_size - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - statement_terminator: ; - statement: set_statement: - keyword: SET - system_variable: '@@GLOBAL.sort_buffer_size' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - comma: ',' - system_variable: '@@LOCAL.sort_buffer_size' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: GLOBAL - variable: max_connections - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000' - comma: ',' - variable: sort_buffer_size - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - statement_terminator: ; - statement: set_statement: - keyword: SET - system_variable: '@@GLOBAL.sort_buffer_size' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50000' - comma: ',' - variable: sort_buffer_size - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@id' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: field comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@abc' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@my_var' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@my$currency' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@sha256enabled' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - 
statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'TRUE' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'FALSE' - statement_terminator: ; - statement: set_statement: keyword: SET variable: some_bool_param comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: set_statement: keyword: SET variable: some_bool_param comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: '@OLD_UNIQUE_CHECKS' - comparison_operator: raw_comparison_operator: '=' - system_variable: '@@UNIQUE_CHECKS' - comma: ',' - variable: UNIQUE_CHECKS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: '@OLD_FOREIGN_KEY_CHECKS' - comparison_operator: raw_comparison_operator: '=' - system_variable: '@@FOREIGN_KEY_CHECKS' - comma: ',' - variable: FOREIGN_KEY_CHECKS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: '@OLD_SQL_MODE' - comparison_operator: raw_comparison_operator: '=' - system_variable: '@@SQL_MODE' - comma: ',' - variable: SQL_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/set_names.sql000066400000000000000000000002541503426445100236330ustar00rootroot00000000000000SET NAMES utf8mb4; SET NAMES 'utf8'; SET NAMES DEFAULT; SET NAMES ascii COLLATE ascii_bin; SET NAMES 'ascii' COLLATE 'ascii_bin'; SET NAMES ascii COLLATE 'ascii_bin'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/set_names.yml000066400000000000000000000026621503426445100236420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2ef00c1db7596009c1fc590674dde894e304d836a5f124b3f03d818ec82ca666 file: - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: utf8mb4 - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - quoted_literal: "'utf8'" - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - keyword: DEFAULT - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: ascii - keyword: COLLATE - collation_reference: naked_identifier: ascii_bin - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - quoted_literal: "'ascii'" - keyword: COLLATE - collation_reference: quoted_literal: "'ascii_bin'" - statement_terminator: ; - statement: set_names_statement: - keyword: SET - keyword: NAMES - naked_identifier: ascii - keyword: COLLATE - collation_reference: quoted_literal: "'ascii_bin'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/set_transaction.sql000066400000000000000000000011631503426445100250550ustar00rootroot00000000000000SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; SET TRANSACTION READ WRITE; SET GLOBAL TRANSACTION READ ONLY; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY; SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ WRITE; SET SESSION TRANSACTION READ WRITE, ISOLATION LEVEL READ UNCOMMITTED; SET TRANSACTION READ ONLY, ISOLATION LEVEL SERIALIZABLE; sqlfluff-3.4.2/test/fixtures/dialects/mysql/set_transaction.yml000066400000000000000000000060321503426445100250570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2b92c42c17bfd5ec314ef478d91a9ef359dc6ff30578dc60aeef9c89a43f25e8 file: - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: REPEATABLE - keyword: READ - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: COMMITTED - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: UNCOMMITTED - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: SERIALIZABLE - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: GLOBAL - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: REPEATABLE - keyword: READ - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: SESSION - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: COMMITTED - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: READ - keyword: WRITE - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: GLOBAL - keyword: TRANSACTION - keyword: READ - keyword: ONLY - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: SESSION - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: REPEATABLE - keyword: READ - comma: ',' - keyword: READ - keyword: ONLY - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: GLOBAL - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: SERIALIZABLE - comma: ',' - keyword: READ - keyword: WRITE - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: SESSION - keyword: TRANSACTION - keyword: READ - keyword: WRITE - comma: ',' - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: UNCOMMITTED - statement_terminator: ; - statement: set_transaction_statement: - keyword: SET - keyword: TRANSACTION - keyword: READ - keyword: ONLY - comma: ',' - keyword: ISOLATION - keyword: LEVEL - keyword: SERIALIZABLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal.sql000066400000000000000000000000121503426445100231220ustar00rootroot00000000000000resignal; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal.yml000066400000000000000000000007371503426445100231420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_name.sql000066400000000000000000000000301503426445100261700ustar00rootroot00000000000000resignal testcondition; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_name.yml000066400000000000000000000010051503426445100261750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_sqlstate.sql000066400000000000000000000000331503426445100271130ustar00rootroot00000000000000resignal sqlstate '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_sqlstate.yml000066400000000000000000000010271503426445100271210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9e5f8fee59dc88d1372330944c9d54873fa0887ccc3c70659e4f52157a87aff file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_sqlstate_value.sql000066400000000000000000000000411503426445100303060ustar00rootroot00000000000000resignal sqlstate value '42S02'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_condition_sqlstate_value.yml000066400000000000000000000010541503426445100303150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b6936c92a4757c398e5999d01c2144a086621ab6f1cb6003576f46ab6bacf3d file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_set_signal_info.sql000066400000000000000000000000541503426445100263530ustar00rootroot00000000000000resignal set message_text = 'test message'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_set_signal_info.yml000066400000000000000000000011651503426445100263610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42689aa92182e2de491abf166e1e269b06ae2dbf027126ac7bdf35f04481ed44 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_set_signal_info_multiple.sql000066400000000000000000000001041503426445100302620ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500'; sqlfluff-3.4.2/test/fixtures/dialects/mysql/signal_set_signal_info_multiple.yml000066400000000000000000000014021503426445100302660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e0e7a0de0aa07cfecf1c1dd4411eef615077c791427559e4c0dfb6095cc0b53a file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/system_variables.sql000066400000000000000000000002441503426445100252300ustar00rootroot00000000000000SELECT @@global.time_zone; SELECT @@session.time_zone; SELECT @@global.version; SELECT @@session.rand_seed1; SELECT CONVERT_TZ(NOW(), @@global.time_zone, '+00:00') sqlfluff-3.4.2/test/fixtures/dialects/mysql/system_variables.yml000066400000000000000000000036621503426445100252410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c58b17a9c3d4b1d3cf78b6a2333131c7a700e969cf454690e1336c7defc2c91a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.version' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.rand_seed1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONVERT_TZ function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: system_variable: '@@global.time_zone' - comma: ',' - expression: quoted_literal: "'+00:00'" - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/mysql/update.sql000066400000000000000000000017471503426445100231470ustar00rootroot00000000000000UPDATE t1 SET col1 = col1 + 1; UPDATE t1 SET col1 = col1 + 1, col2 = col1; UPDATE items,month SET items.price=month.price WHERE items.id=month.id; UPDATE t SET id = id + 1 ORDER BY id DESC; UPDATE items SET retail = retail * 0.9 WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity > 100); UPDATE items, (SELECT id FROM items WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity < 100)) AS discounted SET items.retail = items.retail * 0.9 WHERE items.id = discounted.id; UPDATE items, (SELECT id, retail / wholesale AS markup, quantity FROM items) AS discounted SET items.retail = items.retail * 0.9 WHERE discounted.markup >= 1.3 AND discounted.quantity < 100 AND items.id = discounted.id; UPDATE LOW_PRIORITY foo SET bar = 7 LIMIT 4; UPDATE a, b SET a.name = b.name WHERE a.id = b.id; UPDATE a join b on a.id = b.id set a.type = b.type 
where a.type is null; sqlfluff-3.4.2/test/fixtures/dialects/mysql/update.yml000066400000000000000000000341771503426445100231540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5250205eaf35f3295d87fa6bdb962764284a844456ed7a11726746db6f2dd411 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: items - comma: ',' - table_reference: naked_identifier: month - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: items - dot: . - naked_identifier: price - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . - naked_identifier: price - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: id binary_operator: + numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: DESC - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale alias_expression: alias_operator: keyword: AS naked_identifier: markup - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: markup - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' - binary_operator: AND - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: LOW_PRIORITY - table_reference: naked_identifier: foo - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '7' - limit_clause: keyword: LIMIT numeric_literal: '4' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: a - comma: ',' - table_reference: naked_identifier: b - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: a - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: name - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: b join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id set_clause_list: keyword: set set_clause: - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: type where_clause: keyword: where expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: type keyword: is null_literal: 'null' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_database.sql000066400000000000000000000000131503426445100242660ustar00rootroot00000000000000use my_db; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_database.yml000066400000000000000000000010211503426445100242700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3aaf2dd630ae2dbc3dde9cead50589cf7b516507c6ed959abf06d7a0ce7ba46b file: statement: use_statement: keyword: use database_reference: naked_identifier: my_db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index.sql000066400000000000000000000000461503426445100236370ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index.yml000066400000000000000000000020761503426445100236460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1cac348257a408e8fd1127849665a5ee7ed563c0a2aa93770b2fc29857958514 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_group_by.sql000066400000000000000000000000631503426445100264120ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR GROUP BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_group_by.yml000066400000000000000000000021761503426445100264230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d4a1185294cbe267fc87d4cd099f2f22f3c83e590e106f237d37715f4f284107 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_join.sql000066400000000000000000000000571503426445100255260ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR JOIN (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_join.yml000066400000000000000000000021511503426445100255250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9a6441958ff22d6693ae703886de81600e7bb086f08a61a24ea841a6792b7d8c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_order_by.sql000066400000000000000000000000631503426445100263710ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR ORDER BY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_index_for_order_by.yml000066400000000000000000000021761503426445100264020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a62cf5389e97666d68013628c8769ee03126c1ef12d7f64c506d8f5ef89a464a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_key.sql000066400000000000000000000000441503426445100233160ustar00rootroot00000000000000SELECT * FROM t1 test USE KEY (i2); sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_key.yml000066400000000000000000000020741503426445100233250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0506aa4eb10450054ebb9990ae570d8cdb46ae6b2e7292aa2dd0e96f0ec7cd29 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_statement.sql000066400000000000000000000000101503426445100245230ustar00rootroot00000000000000USE db; sqlfluff-3.4.2/test/fixtures/dialects/mysql/use_statement.yml000066400000000000000000000010161503426445100245340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/values_statement.sql000066400000000000000000000002141503426445100252340ustar00rootroot00000000000000VALUES ROW ('a', 1), ROW ('b', 2); VALUES ROW ('a', 1), ROW (upper('b'), 2+1); VALUES ROW (CURRENT_DATE, '2020-06-04' + interval -5 day); sqlfluff-3.4.2/test/fixtures/dialects/mysql/values_statement.yml000066400000000000000000000041301503426445100252370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 51d7d52e8904436e491ebc07912e37a2a3803e329c3f1df3f9c3e2ba9209651a file: - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'b'" comma: ',' numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: upper function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: - numeric_literal: '2' - binary_operator: + - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - expression: bare_function: CURRENT_DATE - comma: ',' - expression: quoted_literal: "'2020-06-04'" binary_operator: + interval_expression: keyword: interval expression: numeric_literal: sign_indicator: '-' numeric_literal: '5' date_part: day - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/variable_assignment.sql000066400000000000000000000001611503426445100256670ustar00rootroot00000000000000SELECT @var1:=COUNT(*) FROM t1; SET @var1:=0; SET @var1:=@var2:=0; UPDATE t1 SET c1 = 2 WHERE c1 = @var1:= 1; sqlfluff-3.4.2/test/fixtures/dialects/mysql/variable_assignment.yml000066400000000000000000000041071503426445100256750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 880ce0a8a6ed5735d5b6287673a5379103f16782534cac1816dccb8a05c8e30b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: variable: '@var1' assignment_operator: := function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@var1' assignment_operator: := numeric_literal: '0' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@var1' assignment_operator: := expression: variable: '@var2' assignment_operator: := numeric_literal: '0' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' where_clause: keyword: WHERE expression: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' variable: '@var1' assignment_operator: := numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/while_label.sql000066400000000000000000000001161503426445100241210ustar00rootroot00000000000000iteration:while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while iteration; sqlfluff-3.4.2/test/fixtures/dialects/mysql/while_label.yml000066400000000000000000000023241503426445100241260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb5d12b359790327f36cb23a2ea83afc299ff4e2662c3277729a7cfd4bc91795 file: - statement: while_statement: - naked_identifier: iteration - colon: ':' - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - naked_identifier: iteration - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/mysql/while_no_label.sql000066400000000000000000000000721503426445100246160ustar00rootroot00000000000000while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while; sqlfluff-3.4.2/test/fixtures/dialects/mysql/while_no_label.yml000066400000000000000000000021771503426445100246300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8a5b1995fc531a7204a5cefdb23104e3ac19d6cc33e85f9b7036d80e950a8887 file: - statement: while_statement: - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/000077500000000000000000000000001503426445100212335ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/oracle/.sqlfluff000066400000000000000000000000341503426445100230530ustar00rootroot00000000000000[sqlfluff] dialect = oracle sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_function.sql000066400000000000000000000001251503426445100247660ustar00rootroot00000000000000ALTER FUNCTION oe.get_bal COMPILE; ALTER PROCEDURE IF EXISTS hr.remove_emp COMPILE; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_function.yml000066400000000000000000000016361503426445100250000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 35d74f03abcec25009bdcbeddff50fa383676308f2d496ad49df2e789c456b4b file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: oe dot: . function_name_identifier: get_bal - keyword: COMPILE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: naked_identifier: hr dot: . function_name_identifier: remove_emp - keyword: COMPILE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_package.sql000066400000000000000000000001211503426445100245300ustar00rootroot00000000000000ALTER PACKAGE emp_mgmt COMPILE PACKAGE; ALTER PACKAGE hr.emp_mgmt COMPILE BODY; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_package.yml000066400000000000000000000015471503426445100245470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b93f12aba96dc9e5ca0ee6f9da3100f3a126e4ae0f7e88d8e3d8d7fb410483a file: - statement: alter_package_statement: - keyword: ALTER - keyword: PACKAGE - package_reference: naked_identifier: emp_mgmt - keyword: COMPILE - keyword: PACKAGE - statement_terminator: ; - statement: alter_package_statement: - keyword: ALTER - keyword: PACKAGE - package_reference: - naked_identifier: hr - dot: . 
- naked_identifier: emp_mgmt - keyword: COMPILE - keyword: BODY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_table.sql000066400000000000000000000016111503426445100242310ustar00rootroot00000000000000-- AlterTableColumnClausesSegment ALTER TABLE table_name RENAME COLUMN old_column_name TO new_column_name; -- add_column_clause ALTER TABLE table_name ADD (column_name NUMBER(18)); -- modify_column_clauses ALTER TABLE table_name MODIFY column_name NUMBER(18); -- drop_column_clause ALTER TABLE table_name DROP COLUMN column_name; ALTER TABLE table_name DROP (column_name_one, column_name_two); -- AlterTableConstraintClauses ALTER TABLE table_name ADD CONSTRAINT constraint_name FOREIGN KEY (column_name) REFERENCES other_table_name (other_column_name); -- drop_constraint_clause ALTER TABLE table_name DROP CONSTRAINT constraint_name; ALTER TABLE table_name MODIFY (column_name NOT NULL ENABLE); ALTER TABLE table_name MODIFY (column_name DEFAULT 10); ALTER TABLE table_name MODIFY (column_name DEFAULT 10 NOT NULL ENABLE); ALTER TABLE employees ADD CONSTRAINT salary_check CHECK (salary > 0); sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_table.yml000066400000000000000000000136721503426445100242450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9eec6cfe5e73ecdcee8624fae8b6172f082deb550dcb83cab053fe1e6500bf6e file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: ADD bracketed: start_bracket: ( column_definition: naked_identifier: column_name data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '18' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY column_definition: naked_identifier: column_name data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: DROP bracketed: - start_bracket: ( - column_reference: naked_identifier: column_name_one - comma: ',' - column_reference: naked_identifier: column_name_two - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_constraint_clauses: keyword: ADD 
table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_name - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: column_name end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: other_table_name - bracketed: start_bracket: ( column_reference: naked_identifier: other_column_name end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_constraint_clauses: - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( column_definition: naked_identifier: column_name column_constraint_segment: - keyword: NOT - keyword: 'NULL' keyword: ENABLE end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( column_definition: naked_identifier: column_name column_constraint_segment: keyword: DEFAULT numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( column_definition: - naked_identifier: column_name - column_constraint_segment: keyword: DEFAULT numeric_literal: '10' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: ENABLE end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: employees - alter_table_constraint_clauses: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: salary_check - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_trigger.sql000066400000000000000000000001241503426445100246030ustar00rootroot00000000000000ALTER TRIGGER update_job_history DISABLE; ALTER TRIGGER update_job_history ENABLE; sqlfluff-3.4.2/test/fixtures/dialects/oracle/alter_trigger.yml000066400000000000000000000014531503426445100246130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: addf6d049dbc44b7783a567ed0e5debfff0bd5d500946a152fc8d4f331dad0b5 file: - statement: alter_trigger_statement: - keyword: ALTER - keyword: TRIGGER - function_name: function_name_identifier: update_job_history - keyword: DISABLE - statement_terminator: ; - statement: alter_trigger_statement: - keyword: ALTER - keyword: TRIGGER - function_name: function_name_identifier: update_job_history - keyword: ENABLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/assignment.sql000066400000000000000000000025531503426445100241310ustar00rootroot00000000000000DECLARE -- You can assign initial values here wages NUMBER; hours_worked NUMBER := 40; hourly_salary NUMBER := 22.50; bonus NUMBER := 150; country VARCHAR2(128); counter NUMBER := 0; done BOOLEAN; valid_id BOOLEAN; emp_rec1 employees%ROWTYPE; emp_rec2 employees%ROWTYPE; TYPE commissions IS TABLE OF NUMBER INDEX BY PLS_INTEGER; comm_tab commissions; BEGIN -- You can assign values here too wages := (hours_worked * hourly_salary) + bonus; country := 'France'; country := UPPER('Canada'); done := (counter > 100); valid_id := TRUE; emp_rec1.first_name := 'Antonio'; emp_rec1.last_name := 'Ortiz'; emp_rec1 := emp_rec2; comm_tab(5) := 20000 * 0.15; END; / DECLARE done BOOLEAN; -- Initial value is NULL by default counter NUMBER := 0; BEGIN done := FALSE; -- Assign literal value WHILE done != TRUE -- Compare to literal value LOOP counter := counter + 1; done := (counter > 500); -- Assign value of BOOLEAN expression END LOOP; END; / DECLARE TYPE triplet IS VARRAY(3) OF VARCHAR2(15); TYPE trio IS VARRAY(3) OF VARCHAR2(15); group1 triplet := triplet('Jones', 'Wong', 'Marceau'); group2 triplet; group3 trio; BEGIN group2 := group1; -- succeeds group3 := group1; -- fails END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/assignment.yml000066400000000000000000000307521503426445100241350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 52f1762266efefdd2dd4ba4dbcbaaee3fd114f7918096ef4167a5bfd5b77639a file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: wages - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: hours_worked - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '40' - statement_terminator: ; - naked_identifier: hourly_salary - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '22.50' - statement_terminator: ; - naked_identifier: bonus - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '150' - statement_terminator: ; - naked_identifier: country - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '128' end_bracket: ) - statement_terminator: ; - naked_identifier: counter - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - statement_terminator: ; - naked_identifier: done - data_type: data_type_identifier: BOOLEAN - statement_terminator: ; - naked_identifier: valid_id - data_type: data_type_identifier: BOOLEAN - statement_terminator: ; - naked_identifier: emp_rec1 - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: emp_rec2 - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: commissions - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: NUMBER - keyword: INDEX - keyword: BY - data_type: data_type_identifier: PLS_INTEGER - statement_terminator: ; - naked_identifier: comm_tab - data_type: data_type_identifier: commissions - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: wages colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: hours_worked - binary_operator: '*' - column_reference: naked_identifier: hourly_salary end_bracket: ) binary_operator: + column_reference: naked_identifier: bonus - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: country colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'France'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: country colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Canada'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: done colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: counter comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: valid_id colon: ':' 
comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'TRUE' - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: emp_rec1 - dot: . - naked_identifier: first_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Antonio'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: emp_rec1 - dot: . - naked_identifier: last_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Ortiz'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: emp_rec1 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: emp_rec2 - statement_terminator: ; - statement: function: function_name: function_name_identifier: comm_tab function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: - numeric_literal: '20000' - binary_operator: '*' - numeric_literal: '0.15' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: done - data_type: data_type_identifier: BOOLEAN - statement_terminator: ; - naked_identifier: counter - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: done colon: ':' comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'FALSE' - statement_terminator: ; - statement: while_loop_statement: keyword: WHILE expression: column_reference: naked_identifier: done comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' boolean_literal: 'TRUE' loop_statement: - keyword: LOOP - statement: assignment_segment_statement: object_reference: naked_identifier: counter colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: counter binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: done colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: counter comparison_operator: raw_comparison_operator: '>' numeric_literal: '500' end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: triplet - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - keyword: OF - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: trio - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - keyword: OF - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - statement_terminator: ; - naked_identifier: group1 - data_type: data_type_identifier: triplet - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: triplet function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Jones'" - comma: ',' - expression: quoted_literal: "'Wong'" - comma: ',' - expression: quoted_literal: "'Marceau'" - end_bracket: ) - statement_terminator: ; - naked_identifier: group2 - data_type: data_type_identifier: triplet - statement_terminator: ; - naked_identifier: group3 - data_type: data_type_identifier: trio - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: group2 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: group1 - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: group3 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: group1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/at_signs.sql000066400000000000000000000002541503426445100235640ustar00rootroot00000000000000@@some_other_sql_file.sql @@some_other_sql_file_with_args.sql foo bar baz @some_other_sql_file.sql @some_other_sql_file_with_args.sql foo bar baz SELECT * from some_table; sqlfluff-3.4.2/test/fixtures/dialects/oracle/at_signs.yml000066400000000000000000000027641503426445100235760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d687c62efbf650d4457868fae33d95c534fc475817ebf2075348fd2b89f86bcf file: - execute_file_statement: - at_sign: '@' - at_sign: '@' - naked_identifier: some_other_sql_file - dot: . - naked_identifier: sql - execute_file_statement: - at_sign: '@' - at_sign: '@' - naked_identifier: some_other_sql_file_with_args - dot: . - naked_identifier: sql - naked_identifier: foo - naked_identifier: bar - naked_identifier: baz - execute_file_statement: - at_sign: '@' - naked_identifier: some_other_sql_file - dot: . - naked_identifier: sql - execute_file_statement: - at_sign: '@' - naked_identifier: some_other_sql_file_with_args - dot: . - naked_identifier: sql - naked_identifier: foo - naked_identifier: bar - naked_identifier: baz - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/bare_functions.sql000066400000000000000000000003141503426445100247530ustar00rootroot00000000000000SELECT a.foo, b.bar, current_date, current_timestamp, dbtimezone, localtimestamp, sessiontimestamp, sysdate, systimestamp FROM first_table a INNER JOIN second_table b ON a.baz = b.baz ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/bare_functions.yml000066400000000000000000000046051503426445100247640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e263a00681db97d124e7a639374ee40f65642e13597c704ac5a150b1e308206 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: foo - comma: ',' - select_clause_element: column_reference: - naked_identifier: b - dot: . - naked_identifier: bar - comma: ',' - select_clause_element: bare_function: current_date - comma: ',' - select_clause_element: bare_function: current_timestamp - comma: ',' - select_clause_element: bare_function: dbtimezone - comma: ',' - select_clause_element: bare_function: localtimestamp - comma: ',' - select_clause_element: bare_function: sessiontimestamp - comma: ',' - select_clause_element: bare_function: sysdate - comma: ',' - select_clause_element: bare_function: systimestamp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: first_table alias_expression: naked_identifier: a join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: second_table alias_expression: naked_identifier: b - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . 
- naked_identifier: baz statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/bind_variables.sql000066400000000000000000000002171503426445100247200ustar00rootroot00000000000000select :abc from dual; insert into mytab values(:abc,:xyz); select column_name from table_name where column_name = :column_name_filter; sqlfluff-3.4.2/test/fixtures/dialects/oracle/bind_variables.yml000066400000000000000000000036651503426445100247340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dd712e00e905964fff249bd96878cc3008d776d7a6192bbec01a12b353109633 file: - statement: select_statement: select_clause: keyword: select select_clause_element: sqlplus_variable: colon: ':' parameter: abc from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: mytab - values_clause: keyword: values bracketed: - start_bracket: ( - sqlplus_variable: colon: ':' parameter: abc - comma: ',' - sqlplus_variable: colon: ':' parameter: xyz - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: column_name comparison_operator: raw_comparison_operator: '=' sqlplus_variable: colon: ':' parameter: column_name_filter - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/collections.sql000066400000000000000000000056571503426445100243070ustar00rootroot00000000000000DECLARE -- Associative array indexed by string: TYPE population IS TABLE OF NUMBER -- Associative array type INDEX BY VARCHAR2(64); -- indexed by string city_population population; -- Associative array variable i VARCHAR2(64); -- Scalar variable BEGIN -- Add elements (key-value pairs) to associative array: city_population('Smallville') := 2000; city_population('Midland') := 750000; city_population('Megalopolis') := 1000000; -- Change value associated with key 'Smallville': city_population('Smallville') := 2001; -- Print associative array: i := city_population.FIRST; -- Get first element of array WHILE i IS NOT NULL LOOP DBMS_Output.PUT_LINE ('Population of ' || i || ' is ' || city_population(i)); i := city_population.NEXT(i); -- Get next element of array END LOOP; END; / DECLARE TYPE sum_multiples IS TABLE OF PLS_INTEGER INDEX BY PLS_INTEGER; n PLS_INTEGER := 5; -- number of multiples to sum for display sn PLS_INTEGER := 10; -- number of multiples to sum m PLS_INTEGER := 3; -- multiple FUNCTION get_sum_multiples ( multiple IN PLS_INTEGER, num IN PLS_INTEGER ) RETURN sum_multiples IS s sum_multiples; BEGIN FOR i IN 1..num LOOP s(i) := multiple * ((i * (i + 1)) / 2); -- sum of multiples END LOOP; RETURN s; END get_sum_multiples; BEGIN DBMS_OUTPUT.PUT_LINE ( 'Sum of the first ' || TO_CHAR(n) || ' multiples of ' || TO_CHAR(m) || ' is ' || TO_CHAR(get_sum_multiples (m, sn)(n)) ); END; / DECLARE TYPE Foursome IS VARRAY(4) OF VARCHAR2(15); -- 
VARRAY type -- varray variable initialized with constructor: team Foursome := Foursome('John', 'Mary', 'Alberto', 'Juanita'); PROCEDURE print_team (heading VARCHAR2) IS BEGIN DBMS_OUTPUT.PUT_LINE(heading); FOR i IN 1..4 LOOP DBMS_OUTPUT.PUT_LINE(i || '.' || team(i)); END LOOP; DBMS_OUTPUT.PUT_LINE('---'); END; BEGIN print_team('2001 Team:'); team(3) := 'Pierre'; -- Change values of two elements team(4) := 'Yvonne'; print_team('2005 Team:'); -- Invoke constructor to assign new values to varray variable: team := Foursome('Arun', 'Amitha', 'Allan', 'Mae'); print_team('2009 Team:'); END; / DECLARE TYPE Roster IS TABLE OF VARCHAR2(15); -- nested table type -- nested table variable initialized with constructor: names Roster := Roster('D Caruso', 'J Hamil', 'D Piro', 'R Singh'); PROCEDURE print_names (heading VARCHAR2) IS BEGIN DBMS_OUTPUT.PUT_LINE(heading); FOR i IN names.FIRST .. names.LAST LOOP -- For first to last element DBMS_OUTPUT.PUT_LINE(names(i)); END LOOP; DBMS_OUTPUT.PUT_LINE('---'); END; BEGIN print_names('Initial Values:'); names(3) := 'P Perez'; -- Change value of one element print_names('Current Values:'); names := Roster('A Jansen', 'B Gupta'); -- Change entire table print_names('Current Values:'); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/collections.yml000066400000000000000000000675711503426445100243140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83f3a149fbf3a452468d9d7a0835e12ba4cabb0b3fab78fbf4ca5cc5b9b9acca file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: population - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: NUMBER - keyword: INDEX - keyword: BY - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '64' end_bracket: ) - statement_terminator: ; - naked_identifier: city_population - data_type: data_type_identifier: population - statement_terminator: ; - naked_identifier: i - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '64' end_bracket: ) - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: city_population function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Smallville'" end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '2000' - statement_terminator: ; - statement: function: function_name: function_name_identifier: city_population function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Midland'" end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '750000' - statement_terminator: ; - statement: function: function_name: function_name_identifier: city_population function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Megalopolis'" end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1000000' - statement_terminator: ; - statement: 
function: function_name: function_name_identifier: city_population function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Smallville'" end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '2001' - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: i colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: city_population - dot: . - naked_identifier: FIRST - statement_terminator: ; - statement: while_loop_statement: keyword: WHILE expression: - column_reference: naked_identifier: i - keyword: IS - keyword: NOT - null_literal: 'NULL' loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_Output dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Population of '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: i - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' is '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: city_population function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: i colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: naked_identifier: city_population dot: . function_name_identifier: NEXT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: sum_multiples - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: PLS_INTEGER - keyword: INDEX - keyword: BY - data_type: data_type_identifier: PLS_INTEGER - statement_terminator: ; - naked_identifier: n - data_type: data_type_identifier: PLS_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '5' - statement_terminator: ; - naked_identifier: sn - data_type: data_type_identifier: PLS_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '10' - statement_terminator: ; - naked_identifier: m - data_type: data_type_identifier: PLS_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '3' - statement_terminator: ; - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: get_sum_multiples - function_parameter_list: bracketed: - start_bracket: ( - parameter: multiple - keyword: IN - data_type: data_type_identifier: PLS_INTEGER - comma: ',' - parameter: num - keyword: IN - data_type: data_type_identifier: PLS_INTEGER - end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: sum_multiples - keyword: IS - declare_segment: naked_identifier: s data_type: data_type_identifier: sum_multiples statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . 
- dot: . - naked_identifier: num - loop_statement: - keyword: LOOP - statement: assignment_segment_statement: object_reference: naked_identifier: s bracketed: start_bracket: ( object_reference: naked_identifier: i end_bracket: ) colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: multiple binary_operator: '*' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i binary_operator: '*' bracketed: start_bracket: ( expression: column_reference: naked_identifier: i binary_operator: + numeric_literal: '1' end_bracket: ) end_bracket: ) binary_operator: / numeric_literal: '2' end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: s - statement_terminator: ; - keyword: END - object_reference: naked_identifier: get_sum_multiples - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Sum of the first '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: n end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' multiples of '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: m end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' is '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: get_sum_multiples function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: m - comma: ',' - expression: column_reference: naked_identifier: sn - end_bracket: ) - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: n end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: Foursome - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: OF - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - statement_terminator: ; - naked_identifier: team - data_type: data_type_identifier: Foursome - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: Foursome function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: quoted_literal: "'Mary'" - comma: ',' - expression: quoted_literal: "'Alberto'" - comma: ',' - expression: quoted_literal: "'Juanita'" - end_bracket: ) - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_team - function_parameter_list: 
bracketed: start_bracket: ( parameter: heading data_type: data_type_identifier: VARCHAR2 end_bracket: ) - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: heading end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '4' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: i - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: team function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'---'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: print_team function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2001 Team:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: team function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Pierre'" - statement_terminator: ; - statement: function: function_name: function_name_identifier: team function_contents: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Yvonne'" - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_team function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2005 Team:'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: team colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: Foursome function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Arun'" - comma: ',' - expression: quoted_literal: "'Amitha'" - comma: ',' - expression: quoted_literal: "'Allan'" - comma: ',' - expression: quoted_literal: "'Mae'" - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_team function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2009 Team:'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: Roster - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: VARCHAR2 
bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - statement_terminator: ; - naked_identifier: names - data_type: data_type_identifier: Roster - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: Roster function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'D Caruso'" - comma: ',' - expression: quoted_literal: "'J Hamil'" - comma: ',' - expression: quoted_literal: "'D Piro'" - comma: ',' - expression: quoted_literal: "'R Singh'" - end_bracket: ) - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_names - function_parameter_list: bracketed: start_bracket: ( parameter: heading data_type: data_type_identifier: VARCHAR2 end_bracket: ) - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: heading end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: names - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: names - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'---'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: print_names function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Initial Values:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement: assignment_segment_statement: colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'P Perez'" - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_names function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Current Values:'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: names colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: Roster function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'A Jansen'" - comma: ',' - expression: quoted_literal: "'B Gupta'" - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_names function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Current Values:'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/column_type.sql000066400000000000000000000004441503426445100243140ustar00rootroot00000000000000DECLARE surname employees.last_name%TYPE; BEGIN DBMS_OUTPUT.PUT_LINE('surname=' || surname); END; / DECLARE name VARCHAR(25) NOT NULL := 'Smith'; surname name%TYPE := 'Jones'; BEGIN DBMS_OUTPUT.PUT_LINE('name=' || name); DBMS_OUTPUT.PUT_LINE('surname=' || surname); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/column_type.yml000066400000000000000000000066251503426445100243250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5bbb8d34a75517d3f819b9ba145940ed9e16cf651dce83788eefc6928a8f7786 file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: surname column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'surname='" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: surname end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: name - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '25' end_bracket: ) - keyword: NOT - keyword: 'NULL' - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Smith'" - statement_terminator: ; - naked_identifier: surname - column_type_reference: column_reference: naked_identifier: name binary_operator: '%' keyword: TYPE - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Jones'" - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'name='" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: name end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'surname='" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: surname end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/comment.sql000066400000000000000000000005161503426445100234200ustar00rootroot00000000000000COMMENT ON COLUMN employees.job_id IS 'abbreviated job title'; COMMENT ON TABLE employees IS 'employees table'; COMMENT ON INDEXTYPE employees_indextype IS 'employees indextype'; COMMENT ON OPERATOR employees_operator IS 'employees operator'; COMMENT ON MATERIALIZED VIEW employees_mv IS 'employees materialized view'; sqlfluff-3.4.2/test/fixtures/dialects/oracle/comment.yml000066400000000000000000000033331503426445100234220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 60c4002a0a46717b7f7585fd94f08af0d21c73b1481783df59889ef0cdda8c50 file: - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: job_id - keyword: IS - quoted_literal: "'abbreviated job title'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: employees - keyword: IS - quoted_literal: "'employees table'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: INDEXTYPE - indextype_reference: naked_identifier: employees_indextype - keyword: IS - quoted_literal: "'employees indextype'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: OPERATOR - object_reference: naked_identifier: employees_operator - keyword: IS - quoted_literal: "'employees operator'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: employees_mv - keyword: IS - quoted_literal: "'employees materialized view'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/comparison_operators_with_space.sql000066400000000000000000000001651503426445100304340ustar00rootroot00000000000000select 1 from dual where 3 < = 5; select 1 from dual where 4 > = 2; select 1 from dual where 1 ! = 3; sqlfluff-3.4.2/test/fixtures/dialects/oracle/comparison_operators_with_space.yml000066400000000000000000000042451503426445100304410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 70bbe6d19831c69c75359a6098c6a2cff155fcbddf2ebfbb444909ff2e756898 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '3' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '4' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' - numeric_literal: '3' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/continue.sql000066400000000000000000000014131503426445100235770ustar00rootroot00000000000000DECLARE x NUMBER := 0; BEGIN LOOP -- After CONTINUE statement, control resumes here DBMS_OUTPUT.PUT_LINE ('Inside loop: x = ' || TO_CHAR(x)); x := x + 1; IF x < 3 THEN CONTINUE; END IF; DBMS_OUTPUT.PUT_LINE ('Inside loop, after CONTINUE: x = ' || TO_CHAR(x)); EXIT WHEN x = 5; END LOOP; DBMS_OUTPUT.PUT_LINE (' After loop: x = ' || TO_CHAR(x)); END; / DECLARE x NUMBER := 0; BEGIN LOOP -- After CONTINUE statement, control resumes here DBMS_OUTPUT.PUT_LINE ('Inside loop: x = ' || TO_CHAR(x)); x := x + 1; CONTINUE WHEN x < 3; DBMS_OUTPUT.PUT_LINE ('Inside loop, after CONTINUE: x = ' || TO_CHAR(x)); EXIT WHEN x = 5; END LOOP; DBMS_OUTPUT.PUT_LINE (' After loop: x = ' || TO_CHAR(x)); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/continue.yml000066400000000000000000000227261503426445100236130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ab9cc8857dfe14b2334003892a1a992f12e45591d40b4d0b92a0be7d19905df8 file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: < numeric_literal: '3' - keyword: THEN - statement: continue_statement: keyword: CONTINUE - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop, after CONTINUE: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "' After loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: continue_statement: - keyword: CONTINUE - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: < numeric_literal: '3' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop, after CONTINUE: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "' After loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_function.sql000066400000000000000000000030671503426445100251320ustar00rootroot00000000000000CREATE FUNCTION IF NOT EXISTS get_bal(acc_no IN NUMBER) RETURN NUMBER IS acc_bal NUMBER(11,2); BEGIN SELECT order_total INTO acc_bal FROM orders WHERE customer_id = acc_no; RETURN(acc_bal); END; / CREATE OR REPLACE FUNCTION text_length(a CLOB) RETURN NUMBER DETERMINISTIC IS BEGIN RETURN DBMS_LOB.GETLENGTH(a); END; / DECLARE -- Declare and define function FUNCTION square (original NUMBER) -- parameter list RETURN NUMBER -- RETURN clause AS -- Declarative part begins original_squared NUMBER; BEGIN -- Executable part begins original_squared := original * original; RETURN original_squared; -- RETURN statement END; BEGIN DBMS_OUTPUT.PUT_LINE(square(100)); -- invocation END; / DECLARE x INTEGER; FUNCTION f (n INTEGER) RETURN INTEGER IS BEGIN RETURN (n*n); END; BEGIN DBMS_OUTPUT.PUT_LINE ( 'f returns ' || f(2) || '. Execution returns here (1).' ); x := f(2); DBMS_OUTPUT.PUT_LINE('Execution returns here (2).'); END; / CREATE OR REPLACE FUNCTION f (n INTEGER) RETURN INTEGER AUTHID DEFINER IS BEGIN IF n = 0 THEN RETURN 1; ELSIF n = 1 THEN RETURN n; END IF; END; / CREATE OR REPLACE FUNCTION f (n INTEGER) RETURN INTEGER AUTHID DEFINER IS BEGIN IF n = 0 THEN RETURN 1; ELSIF n = 1 THEN RETURN n; ELSE RETURN n*n; END IF; END; / BEGIN FOR i IN 0 .. 3 LOOP DBMS_OUTPUT.PUT_LINE('f(' || i || ') = ' || f(i)); END LOOP; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_function.yml000066400000000000000000000366321503426445100251400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 478030d9da0ca0ed130a6ba7fea2af199c47230c95610dea8968163f39066ca3 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name: function_name_identifier: get_bal - function_parameter_list: bracketed: start_bracket: ( parameter: acc_no keyword: IN data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: IS - declare_segment: naked_identifier: acc_bal data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '2' - end_bracket: ) statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: order_total into_clause: keyword: INTO naked_identifier: acc_bal from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: acc_no - statement_terminator: ; - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: acc_bal end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: text_length - function_parameter_list: bracketed: start_bracket: ( parameter: a data_type: data_type_identifier: CLOB end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: DETERMINISTIC - keyword: IS - begin_end_block: - keyword: BEGIN - statement: return_statement: keyword: RETURN expression: function: function_name: naked_identifier: DBMS_LOB dot: . function_name_identifier: GETLENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: square - function_parameter_list: bracketed: start_bracket: ( parameter: original data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: AS - declare_segment: naked_identifier: original_squared data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: original_squared colon: ':' comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: original - binary_operator: '*' - column_reference: naked_identifier: original - statement_terminator: ; - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: original_squared - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: square function_contents: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: INTEGER statement_terminator: ; create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: n - binary_operator: '*' - column_reference: naked_identifier: n end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'f returns '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'. Execution returns here (1).'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Execution returns here (2).'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: AUTHID - keyword: DEFINER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '1' - statement_terminator: ; - keyword: ELSIF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: n - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: AUTHID - keyword: DEFINER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '1' - statement_terminator: ; - keyword: ELSIF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: n - statement_terminator: ; - keyword: ELSE - statement: return_statement: keyword: RETURN expression: - column_reference: naked_identifier: n - binary_operator: '*' - column_reference: naked_identifier: n - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '0' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'f('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: i - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "') = '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_package.sql000066400000000000000000000053741503426445100247030ustar00rootroot00000000000000CREATE PACKAGE IF NOT EXISTS emp_mgmt AS FUNCTION hire (last_name VARCHAR2, job_id VARCHAR2, manager_id NUMBER, salary NUMBER, commission_pct NUMBER, department_id NUMBER) RETURN NUMBER; FUNCTION create_dept(department_id NUMBER, location_id NUMBER) RETURN NUMBER; PROCEDURE remove_emp(employee_id NUMBER); PROCEDURE remove_dept(department_id NUMBER); PROCEDURE increase_sal(employee_id NUMBER, salary_incr NUMBER); PROCEDURE increase_comm(employee_id NUMBER, comm_incr NUMBER); no_comm EXCEPTION; no_sal EXCEPTION; END emp_mgmt; / CREATE OR REPLACE PACKAGE BODY emp_mgmt AS tot_emps NUMBER; tot_depts NUMBER; FUNCTION hire (last_name VARCHAR2, job_id VARCHAR2, manager_id NUMBER, salary NUMBER, commission_pct NUMBER, department_id NUMBER) RETURN NUMBER IS new_empno NUMBER; BEGIN SELECT employees_seq.NEXTVAL INTO new_empno FROM DUAL; INSERT INTO employees VALUES (new_empno, 'First', 'Last','first.example@example.com', '(415)555-0100', TO_DATE('18-JUN-2002','DD-MON-YYYY'), 'IT_PROG',90000000,00, 100,110); tot_emps := tot_emps + 1; RETURN(new_empno); END; FUNCTION create_dept(department_id NUMBER, location_id NUMBER) RETURN NUMBER IS new_deptno NUMBER; BEGIN SELECT departments_seq.NEXTVAL INTO new_deptno FROM dual; INSERT INTO departments VALUES (new_deptno, 'department name', 100, 1700); tot_depts := tot_depts + 1; RETURN(new_deptno); END; PROCEDURE remove_emp (employee_id NUMBER) IS BEGIN DELETE FROM employees WHERE employees.employee_id = remove_emp.employee_id; tot_emps := tot_emps - 1; END; PROCEDURE remove_dept(department_id NUMBER) IS BEGIN DELETE FROM departments WHERE departments.department_id = remove_dept.department_id; tot_depts := tot_depts - 1; SELECT COUNT(*) INTO tot_emps FROM employees; END; PROCEDURE increase_sal(employee_id NUMBER, salary_incr NUMBER) IS curr_sal NUMBER; BEGIN SELECT salary INTO curr_sal FROM employees WHERE employees.employee_id = increase_sal.employee_id; IF curr_sal IS NULL THEN RAISE no_sal; ELSE UPDATE employees SET salary = salary + salary_incr WHERE employee_id = employee_id; END IF; END; PROCEDURE increase_comm(employee_id NUMBER, comm_incr NUMBER) IS curr_comm NUMBER; BEGIN SELECT commission_pct INTO curr_comm FROM employees WHERE employees.employee_id = increase_comm.employee_id; IF curr_comm IS NULL THEN RAISE no_comm; ELSE UPDATE employees SET commission_pct = commission_pct + comm_incr; END IF; END; END emp_mgmt; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_package.yml000066400000000000000000000616241503426445100247050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5cfe095e6f25f4428d881e2a8cc8946c2ef6919dd6505804987cfcbc2d4dfba file: - statement: create_package_statement: - keyword: CREATE - keyword: PACKAGE - keyword: IF - keyword: NOT - keyword: EXISTS - package_reference: naked_identifier: emp_mgmt - keyword: AS - declare_segment: - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: hire - function_parameter_list: bracketed: - start_bracket: ( - parameter: last_name - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: job_id - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: manager_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: salary - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: commission_pct - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: department_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - statement_terminator: ; - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: create_dept - function_parameter_list: bracketed: - start_bracket: ( - parameter: department_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: location_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - statement_terminator: ; - create_procedure_statement: keyword: PROCEDURE function_name: function_name_identifier: remove_emp function_parameter_list: bracketed: start_bracket: ( parameter: employee_id data_type: data_type_identifier: NUMBER end_bracket: ) statement_terminator: ; - create_procedure_statement: keyword: PROCEDURE function_name: function_name_identifier: remove_dept function_parameter_list: bracketed: start_bracket: ( parameter: department_id data_type: data_type_identifier: NUMBER end_bracket: ) statement_terminator: ; - create_procedure_statement: keyword: PROCEDURE function_name: function_name_identifier: increase_sal function_parameter_list: bracketed: - start_bracket: ( - parameter: employee_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: salary_incr - data_type: data_type_identifier: NUMBER - end_bracket: ) statement_terminator: ; - create_procedure_statement: keyword: PROCEDURE function_name: function_name_identifier: increase_comm function_parameter_list: bracketed: - start_bracket: ( - parameter: employee_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: comm_incr - data_type: data_type_identifier: NUMBER - end_bracket: ) statement_terminator: ; - naked_identifier: no_comm - data_type: data_type_identifier: EXCEPTION - statement_terminator: ; - naked_identifier: no_sal - data_type: data_type_identifier: EXCEPTION - statement_terminator: ; - keyword: END - package_reference: naked_identifier: emp_mgmt - statement_terminator: ; - statement_terminator: / - statement: create_package_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PACKAGE - keyword: BODY - package_reference: naked_identifier: emp_mgmt - keyword: AS - declare_segment: - naked_identifier: tot_emps - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: tot_depts - data_type: data_type_identifier: NUMBER - statement_terminator: ; - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: hire - 
function_parameter_list: bracketed: - start_bracket: ( - parameter: last_name - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: job_id - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: manager_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: salary - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: commission_pct - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: department_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: IS - declare_segment: naked_identifier: new_empno data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employees_seq - dot: . - naked_identifier: NEXTVAL into_clause: keyword: INTO naked_identifier: new_empno from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DUAL - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: employees - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: new_empno - comma: ',' - quoted_literal: "'First'" - comma: ',' - quoted_literal: "'Last'" - comma: ',' - quoted_literal: "'first.example@example.com'" - comma: ',' - quoted_literal: "'(415)555-0100'" - comma: ',' - expression: function: function_name: function_name_identifier: TO_DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'18-JUN-2002'" - comma: ',' - expression: quoted_literal: "'DD-MON-YYYY'" - end_bracket: ) - comma: ',' - quoted_literal: "'IT_PROG'" - comma: ',' - numeric_literal: '90000000' - comma: ',' - numeric_literal: '00' - comma: ',' - numeric_literal: '100' - comma: ',' - numeric_literal: '110' - end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: tot_emps colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: tot_emps binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: new_empno end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: create_dept - function_parameter_list: bracketed: - start_bracket: ( - parameter: department_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: location_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: IS - declare_segment: naked_identifier: new_deptno data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: departments_seq - dot: . 
- naked_identifier: NEXTVAL into_clause: keyword: INTO naked_identifier: new_deptno from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: departments - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: new_deptno - comma: ',' - quoted_literal: "'department name'" - comma: ',' - numeric_literal: '100' - comma: ',' - numeric_literal: '1700' - end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: tot_depts colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: tot_depts binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: new_deptno end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: remove_emp - function_parameter_list: bracketed: start_bracket: ( parameter: employee_id data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: IS - begin_end_block: - keyword: BEGIN - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: remove_emp - dot: . - naked_identifier: employee_id - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: tot_emps colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: tot_emps binary_operator: '-' numeric_literal: '1' - statement_terminator: ; - keyword: END - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: remove_dept - function_parameter_list: bracketed: start_bracket: ( parameter: department_id data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: IS - begin_end_block: - keyword: BEGIN - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: departments - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: remove_dept - dot: . 
- naked_identifier: department_id - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: tot_depts colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: tot_depts binary_operator: '-' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) into_clause: keyword: INTO naked_identifier: tot_emps from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - keyword: END - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: increase_sal - function_parameter_list: bracketed: - start_bracket: ( - parameter: employee_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: salary_incr - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: curr_sal data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: salary into_clause: keyword: INTO naked_identifier: curr_sal from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: increase_sal - dot: . 
- naked_identifier: employee_id - statement_terminator: ; - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: curr_sal keyword: IS null_literal: 'NULL' - keyword: THEN - statement: raise_statement: keyword: RAISE naked_identifier: no_sal - statement_terminator: ; - keyword: ELSE - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: salary_incr where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: employee_id - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: increase_comm - function_parameter_list: bracketed: - start_bracket: ( - parameter: employee_id - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: comm_incr - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: curr_comm data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: commission_pct into_clause: keyword: INTO naked_identifier: curr_comm from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: increase_comm - dot: . 
- naked_identifier: employee_id - statement_terminator: ; - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: curr_comm keyword: IS null_literal: 'NULL' - keyword: THEN - statement: raise_statement: keyword: RAISE naked_identifier: no_comm - statement_terminator: ; - keyword: ELSE - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: commission_pct comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: commission_pct - binary_operator: + - column_reference: naked_identifier: comm_incr - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - package_reference: naked_identifier: emp_mgmt - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_procedure.sql000066400000000000000000000035521503426445100252740ustar00rootroot00000000000000CREATE PROCEDURE IF NOT EXISTS remove_emp (employee_id NUMBER) AS tot_emps NUMBER; BEGIN DELETE FROM employees WHERE employees.employee_id = remove_emp.employee_id; tot_emps := tot_emps - 1; END; / CREATE PROCEDURE top_protected_proc ACCESSIBLE BY (PROCEDURE top_trusted_proc) AS BEGIN DBMS_OUTPUT.PUT_LINE('Processed top_protected_proc.'); END; / CREATE OR REPLACE PROCEDURE p (x BOOLEAN) AUTHID DEFINER AS BEGIN IF x THEN DBMS_OUTPUT.PUT_LINE('x is true'); END IF; END; / CREATE PROCEDURE p ( sales NUMBER, quota NUMBER, emp_id NUMBER ) IS bonus NUMBER := 0; updated VARCHAR2(3) := 'No'; BEGIN IF sales > (quota + 200) THEN bonus := (sales - quota)/4; UPDATE employees SET salary = salary + bonus WHERE employee_id = emp_id; updated := 'Yes'; END IF; DBMS_OUTPUT.PUT_LINE ( 'Table updated? ' || updated || ', ' || 'bonus = ' || bonus || '.' ); END; / CREATE PROCEDURE p ( sales NUMBER, quota NUMBER, emp_id NUMBER ) IS bonus NUMBER := 0; BEGIN IF sales > (quota + 200) THEN bonus := (sales - quota)/4; ELSE IF sales > quota THEN bonus := 50; ELSE bonus := 0; END IF; END IF; DBMS_OUTPUT.PUT_LINE('bonus = ' || bonus); UPDATE employees SET salary = salary + bonus WHERE employee_id = emp_id; END; / CREATE PROCEDURE create_email ( name1 VARCHAR2, name2 VARCHAR2, company VARCHAR2 ) IS error_message VARCHAR2(30) := 'Email address is too long.'; BEGIN email := name1 || '.' || name2 || '@' || company; EXCEPTION WHEN VALUE_ERROR THEN DBMS_OUTPUT.PUT_LINE(error_message); END; / CREATE PROCEDURE p IS BEGIN DBMS_OUTPUT.PUT_LINE('Inside p'); RETURN; DBMS_OUTPUT.PUT_LINE('Unreachable statement.'); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_procedure.yml000066400000000000000000000456041503426445100253020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
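# A sketch of the regeneration workflow described above (assumptions: commands
# are run from the repository root of a development checkout with test
# dependencies installed; the bare invocation is the one documented in this
# header, and any per-file or per-dialect filtering arguments are not
# confirmed here):
#
#   1. Add or edit a SQL fixture, e.g.
#      test/fixtures/dialects/oracle/create_procedure.sql
#   2. Rebuild the YML parse trees, which also recomputes the "_hash" field:
#      python test/generate_parse_fixture_yml.py
#   3. Re-run the dialect tests so the hash check accepts the regenerated file.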
_hash: 6fe577f21a538a984752889d5c57e135c42f82ea9954f5a82e1d7024932ee3a1 file: - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - keyword: IF - keyword: NOT - keyword: EXISTS - function_name: function_name_identifier: remove_emp - function_parameter_list: bracketed: start_bracket: ( parameter: employee_id data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: AS - declare_segment: naked_identifier: tot_emps data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: remove_emp - dot: . - naked_identifier: employee_id - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: tot_emps colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: tot_emps binary_operator: '-' numeric_literal: '1' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: top_protected_proc - keyword: ACCESSIBLE - keyword: BY - bracketed: start_bracket: ( keyword: PROCEDURE function_name: function_name_identifier: top_trusted_proc end_bracket: ) - keyword: AS - begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Processed top_protected_proc.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: BOOLEAN end_bracket: ) - keyword: AUTHID - keyword: DEFINER - keyword: AS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: x - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'x is true'" end_bracket: ) - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: - start_bracket: ( - parameter: sales - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: quota - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: emp_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: - naked_identifier: bonus - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - statement_terminator: ; - naked_identifier: updated - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'No'" - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: column_reference: naked_identifier: quota binary_operator: + numeric_literal: '200' end_bracket: ) - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales - binary_operator: '-' - column_reference: naked_identifier: quota end_bracket: ) binary_operator: / numeric_literal: '4' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: bonus where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: updated colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Yes'" - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Table updated? 
'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: updated - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'bonus = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: bonus - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: - start_bracket: ( - parameter: sales - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: quota - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: emp_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: bonus data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: column_reference: naked_identifier: quota binary_operator: + numeric_literal: '200' end_bracket: ) - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales - binary_operator: '-' - column_reference: naked_identifier: quota end_bracket: ) binary_operator: / numeric_literal: '4' - statement_terminator: ; - keyword: ELSE - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: sales - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: quota - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '50' - statement_terminator: ; - keyword: ELSE - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'bonus = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: bonus end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: bonus where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: create_email - function_parameter_list: bracketed: - start_bracket: ( - parameter: name1 - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: name2 - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: company - data_type: data_type_identifier: VARCHAR2 - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: error_message data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Email address is too long.'" statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: email colon: ':' comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: name1 - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: name2 - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'@'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: company - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: VALUE_ERROR - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: error_message end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: p - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside p'" end_bracket: ) - statement_terminator: ; - statement: return_statement: keyword: RETURN - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Unreachable statement.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_sequence.sql000066400000000000000000000002641503426445100251110ustar00rootroot00000000000000CREATE SEQUENCE TBL_SERVICEALERTMESSAGE_S INCREMENT BY 1 START WITH 1 NOMAXVALUE NOCYCLE; CREATE SEQUENCE TBL_SERVICEALERTMESSAGE_S2 INCREMENT BY 1 NOMINVALUE NOMAXVALUE NOCYCLE; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_sequence.yml000066400000000000000000000026571503426445100251230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0a40d9159b67e8652d7b8b7e98cdc9607afa6d3abac9f020f7ca8690da54b2e2 file: - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: TBL_SERVICEALERTMESSAGE_S - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: keyword: NOMAXVALUE - create_sequence_options_segment: keyword: NOCYCLE - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: TBL_SERVICEALERTMESSAGE_S2 - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - create_sequence_options_segment: keyword: NOMINVALUE - create_sequence_options_segment: keyword: NOMAXVALUE - create_sequence_options_segment: keyword: NOCYCLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_table.sql000066400000000000000000000007611503426445100243720ustar00rootroot00000000000000-- create table with # in table name create table tabl#e1 (c1 SMALLINT, c2 DATE); -- create table with $ in table name create table table1$ (c1 SMALLINT, c2 DATE); -- create table with both $ & # in table name create table tab#le1$ (c1 SMALLINT, c2 DATE); -- create table with $ & # in column name create table tab#le1$ (c#1 SMALLINT, c$2 DATE); CREATE TABLE t1 (id NUMBER GENERATED AS IDENTITY); CREATE TABLE t2 (id NUMBER GENERATED BY DEFAULT AS IDENTITY (START WITH 100 INCREMENT BY 10)); sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_table.yml000066400000000000000000000066751503426445100244060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: da6ee42f9933d5f09ad9c2c749bac9574d04873451b82b48689f7b171d20022f file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tabl#e1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tab#le1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tab#le1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c#1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c$2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: id - data_type: data_type_identifier: NUMBER - keyword: GENERATED - keyword: AS - keyword: IDENTITY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( column_definition: - naked_identifier: id - data_type: data_type_identifier: NUMBER - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - keyword: START - keyword: WITH - numeric_literal: '100' - keyword: INCREMENT - keyword: BY - numeric_literal: '10' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_trigger.sql000066400000000000000000000063041503426445100247450ustar00rootroot00000000000000CREATE OR REPLACE TRIGGER t BEFORE INSERT OR UPDATE OF salary, department_id OR DELETE ON employees BEGIN CASE WHEN INSERTING THEN DBMS_OUTPUT.PUT_LINE('Inserting'); WHEN UPDATING('salary') THEN DBMS_OUTPUT.PUT_LINE('Updating salary'); WHEN UPDATING('department_id') THEN DBMS_OUTPUT.PUT_LINE('Updating department ID'); WHEN DELETING THEN DBMS_OUTPUT.PUT_LINE('Deleting'); END CASE; END; / CREATE OR REPLACE TRIGGER order_info_insert INSTEAD OF INSERT ON order_info DECLARE duplicate_info EXCEPTION; PRAGMA EXCEPTION_INIT (duplicate_info, -00001); BEGIN INSERT INTO customers (customer_id, cust_last_name, cust_first_name) VALUES ( :new.customer_id, :new.cust_last_name, :new.cust_first_name); INSERT INTO orders (order_id, order_date, customer_id) VALUES ( :new.order_id, :new.order_date, :new.customer_id); EXCEPTION WHEN duplicate_info THEN RAISE_APPLICATION_ERROR ( num=> -20107, msg=> 'Duplicate customer or order ID'); END order_info_insert; / CREATE OR REPLACE TRIGGER dept_emplist_tr INSTEAD OF INSERT 
ON NESTED TABLE emplist OF dept_view REFERENCING NEW AS Employee PARENT AS Department FOR EACH ROW BEGIN -- Insert on nested table translates to insert on base table: INSERT INTO employees ( employee_id, last_name, email, hire_date, job_id, salary, department_id ) VALUES ( :Employee.emp_id, -- employee_id :Employee.lastname, -- last_name :Employee.lastname || '@example.com', -- email SYSDATE, -- hire_date :Employee.job, -- job_id :Employee.sal, -- salary :Department.department_id -- department_id ); END; / CREATE OR REPLACE TRIGGER maintain_employee_salaries FOR UPDATE OF salary ON employees COMPOUND TRIGGER -- Declarative Part: -- Choose small threshhold value to show how example works: threshhold CONSTANT SIMPLE_INTEGER := 7; TYPE salaries_t IS TABLE OF employee_salaries%ROWTYPE INDEX BY SIMPLE_INTEGER; salaries salaries_t; idx SIMPLE_INTEGER := 0; PROCEDURE flush_array IS n CONSTANT SIMPLE_INTEGER := salaries.count(); BEGIN FORALL j IN 1..n INSERT INTO employee_salaries VALUES salaries(j); salaries.delete(); idx := 0; DBMS_OUTPUT.PUT_LINE('Flushed ' || n || ' rows'); END flush_array; -- AFTER EACH ROW Section: AFTER EACH ROW IS BEGIN idx := idx + 1; salaries(idx).employee_id := :NEW.employee_id; salaries(idx).change_date := SYSTIMESTAMP; salaries(idx).salary := :NEW.salary; IF idx >= threshhold THEN flush_array(); END IF; END AFTER EACH ROW; -- AFTER STATEMENT Section: AFTER STATEMENT IS BEGIN flush_array(); END AFTER STATEMENT; END maintain_employee_salaries; / CREATE OR REPLACE TRIGGER insert_or_update_trigger BEFORE INSERT OR UPDATE ON your_table_name FOR EACH ROW BEGIN IF INSERTING THEN :new.created_at := CURRENT_TIMESTAMP; ELSIF UPDATING THEN :new.updated_at := CURRENT_TIMESTAMP; END IF; END; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_trigger.yml000066400000000000000000000732171503426445100247560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0d8e851e9e3b7d2d99dbfc90f19362d1e67daa6df553f6f01dd1881c3679d31f file: statement: create_trigger_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: t - keyword: BEFORE - dml_event_clause: - keyword: INSERT - keyword: OR - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: salary - comma: ',' - column_reference: naked_identifier: department_id - keyword: OR - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: employees - statement: begin_end_block: - keyword: BEGIN - statement: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - keyword: INSERTING - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inserting'" end_bracket: ) - statement_terminator: ; - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: UPDATING function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'salary'" end_bracket: ) - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Updating salary'" end_bracket: ) - statement_terminator: ; - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: UPDATING function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'department_id'" end_bracket: ) - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Updating department ID'" end_bracket: ) - statement_terminator: ; - when_clause: - keyword: WHEN - keyword: DELETING - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Deleting'" end_bracket: ) - statement_terminator: ; - keyword: END - keyword: CASE - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_trigger_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: order_info_insert - keyword: INSTEAD - keyword: OF - dml_event_clause: - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: order_info - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: duplicate_info - data_type: data_type_identifier: EXCEPTION - statement_terminator: ; - keyword: PRAGMA - function: function_name: function_name_identifier: EXCEPTION_INIT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: duplicate_info - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '00001' - end_bracket: ) - statement_terminator: ; - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: customers - bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: cust_last_name - comma: ',' - column_reference: naked_identifier: cust_first_name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: customer_id - comma: ',' - sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: cust_last_name - comma: ',' - sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: cust_first_name - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_id - comma: ',' - column_reference: naked_identifier: order_date - comma: ',' - column_reference: naked_identifier: customer_id - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: order_id - comma: ',' - sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: order_date - comma: ',' - sqlplus_variable: - colon: ':' - parameter: new - dot: . 
- parameter: customer_id - end_bracket: ) - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: duplicate_info - keyword: THEN - statement: function: function_name: function_name_identifier: RAISE_APPLICATION_ERROR function_contents: bracketed: - start_bracket: ( - named_argument: naked_identifier: num right_arrow: => expression: numeric_literal: sign_indicator: '-' numeric_literal: '20107' - comma: ',' - named_argument: naked_identifier: msg right_arrow: => expression: quoted_literal: "'Duplicate customer or order ID'" - end_bracket: ) - statement_terminator: ; - keyword: END - object_reference: naked_identifier: order_info_insert - statement_terminator: ; - statement_terminator: / - statement: create_trigger_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: dept_emplist_tr - keyword: INSTEAD - keyword: OF - dml_event_clause: - keyword: INSERT - keyword: 'ON' - keyword: NESTED - keyword: TABLE - column_reference: naked_identifier: emplist - keyword: OF - table_reference: naked_identifier: dept_view - referencing_clause: - keyword: REFERENCING - keyword: NEW - keyword: AS - naked_identifier: Employee - keyword: PARENT - keyword: AS - naked_identifier: Department - keyword: FOR - keyword: EACH - keyword: ROW - statement: begin_end_block: - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: employees - bracketed: - start_bracket: ( - column_reference: naked_identifier: employee_id - comma: ',' - column_reference: naked_identifier: last_name - comma: ',' - column_reference: naked_identifier: email - comma: ',' - column_reference: naked_identifier: hire_date - comma: ',' - column_reference: naked_identifier: job_id - comma: ',' - column_reference: naked_identifier: salary - comma: ',' - column_reference: naked_identifier: department_id - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - sqlplus_variable: - colon: ':' - parameter: Employee - dot: . - parameter: emp_id - comma: ',' - sqlplus_variable: - colon: ':' - parameter: Employee - dot: . - parameter: lastname - comma: ',' - expression: sqlplus_variable: - colon: ':' - parameter: Employee - dot: . - parameter: lastname binary_operator: - pipe: '|' - pipe: '|' quoted_literal: "'@example.com'" - comma: ',' - expression: bare_function: SYSDATE - comma: ',' - sqlplus_variable: - colon: ':' - parameter: Employee - dot: . - parameter: job - comma: ',' - sqlplus_variable: - colon: ':' - parameter: Employee - dot: . - parameter: sal - comma: ',' - sqlplus_variable: - colon: ':' - parameter: Department - dot: . 
- parameter: department_id - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_trigger_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: maintain_employee_salaries - keyword: FOR - dml_event_clause: - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: salary - keyword: 'ON' - table_reference: naked_identifier: employees - compound_trigger_statement: - keyword: COMPOUND - keyword: TRIGGER - declare_segment: - naked_identifier: threshhold - keyword: CONSTANT - data_type: data_type_identifier: SIMPLE_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '7' - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: salaries_t - keyword: IS - keyword: TABLE - keyword: OF - row_type_reference: table_reference: naked_identifier: employee_salaries binary_operator: '%' keyword: ROWTYPE - keyword: INDEX - keyword: BY - data_type: data_type_identifier: SIMPLE_INTEGER - statement_terminator: ; - naked_identifier: salaries - data_type: data_type_identifier: salaries_t - statement_terminator: ; - naked_identifier: idx - data_type: data_type_identifier: SIMPLE_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: flush_array - keyword: IS - declare_segment: naked_identifier: n keyword: CONSTANT data_type: data_type_identifier: SIMPLE_INTEGER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: naked_identifier: salaries dot: . function_name_identifier: count function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: forall_statement: - keyword: FORALL - naked_identifier: j - keyword: IN - numeric_literal: '1' - dot: . - dot: . - naked_identifier: n - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: employee_salaries - keyword: VALUES - naked_identifier: salaries - bracketed: start_bracket: ( naked_identifier: j end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: salaries dot: . function_name_identifier: delete function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: idx colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Flushed '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: n - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' rows'" end_bracket: ) - statement_terminator: ; - keyword: END - object_reference: naked_identifier: flush_array - statement_terminator: ; - timing_point_section: - keyword: AFTER - keyword: EACH - keyword: ROW - keyword: IS - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: idx colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: idx binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: assignment_segment_statement: - object_reference: naked_identifier: salaries - bracketed: start_bracket: ( object_reference: naked_identifier: idx end_bracket: ) - dot: . - object_reference: naked_identifier: employee_id - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: sqlplus_variable: - colon: ':' - parameter: NEW - dot: . - parameter: employee_id - statement_terminator: ; - statement: assignment_segment_statement: - object_reference: naked_identifier: salaries - bracketed: start_bracket: ( object_reference: naked_identifier: idx end_bracket: ) - dot: . - object_reference: naked_identifier: change_date - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: bare_function: SYSTIMESTAMP - statement_terminator: ; - statement: assignment_segment_statement: - object_reference: naked_identifier: salaries - bracketed: start_bracket: ( object_reference: naked_identifier: idx end_bracket: ) - dot: . - object_reference: naked_identifier: salary - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: sqlplus_variable: - colon: ':' - parameter: NEW - dot: . 
- parameter: salary - statement_terminator: ; - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: idx - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: threshhold - keyword: THEN - statement: function: function_name: function_name_identifier: flush_array function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: AFTER - keyword: EACH - keyword: ROW - statement_terminator: ; - timing_point_section: - keyword: AFTER - keyword: STATEMENT - keyword: IS - keyword: BEGIN - statement: function: function_name: function_name_identifier: flush_array function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - keyword: END - keyword: AFTER - keyword: STATEMENT - statement_terminator: ; - keyword: END - trigger_reference: naked_identifier: maintain_employee_salaries - statement_terminator: ; - statement_terminator: / - statement: create_trigger_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: insert_or_update_trigger - keyword: BEFORE - dml_event_clause: - keyword: INSERT - keyword: OR - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: your_table_name - keyword: FOR - keyword: EACH - keyword: ROW - statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - keyword: INSERTING - keyword: THEN - statement: assignment_segment_statement: sqlplus_variable: - colon: ':' - parameter: new - dot: . - parameter: created_at colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - keyword: ELSIF - keyword: UPDATING - keyword: THEN - statement: assignment_segment_statement: sqlplus_variable: - colon: ':' - parameter: new - dot: . 
- parameter: updated_at colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_type.sql000066400000000000000000000036421503426445100242650ustar00rootroot00000000000000CREATE TYPE customer_typ_demo AS OBJECT ( customer_id NUMBER(6) , cust_first_name VARCHAR2(20) , cust_last_name VARCHAR2(20) , cust_address CUST_ADDRESS_TYP , phone_numbers PHONE_LIST_TYP , nls_language VARCHAR2(3) , nls_territory VARCHAR2(30) , credit_limit NUMBER(9,2) , cust_email VARCHAR2(30) , cust_orders ORDER_LIST_TYP ) ; / CREATE TYPE data_typ1 AS OBJECT ( year NUMBER, MEMBER FUNCTION prod(invent NUMBER) RETURN NUMBER ); / CREATE TYPE corporate_customer_typ_demo UNDER customer_typ ( account_mgr_id NUMBER(6) ); / CREATE TYPE person_t AS OBJECT (name VARCHAR2(100), ssn NUMBER) NOT FINAL; / CREATE TYPE employee_t UNDER person_t (department_id NUMBER, salary NUMBER) NOT FINAL; / CREATE TYPE part_time_emp_t UNDER employee_t (num_hrs NUMBER); / CREATE TYPE phone_list_typ_demo AS VARRAY(5) OF VARCHAR2(25); / CREATE TYPE IF NOT EXISTS varr_int AS VARRAY(10) OF (PLS_INTEGER) NOT PERSISTABLE; / CREATE TYPE plsint AS OBJECT (I PLS_INTEGER) NOT PERSISTABLE; / CREATE TYPE tab_plsint AS TABLE OF (PLS_INTEGER) NOT PERSISTABLE; / CREATE TYPE textdoc_typ AS OBJECT ( document_typ VARCHAR2(32) , formatted_doc BLOB ) ; / CREATE TYPE textdoc_tab AS TABLE OF textdoc_typ; / CREATE TYPE cust_address_typ2 AS OBJECT ( street_address VARCHAR2(40) , postal_code VARCHAR2(10) , city VARCHAR2(30) , state_province VARCHAR2(10) , country_id CHAR(2) , phone phone_list_typ_demo ); / CREATE TYPE cust_nt_address_typ AS TABLE OF cust_address_typ2; / CREATE TYPE demo_typ1 AS OBJECT (a1 NUMBER, a2 NUMBER); / CREATE TYPE demo_typ2 AS OBJECT (a1 NUMBER, MEMBER FUNCTION get_square RETURN NUMBER); / CREATE OR REPLACE TYPE department_t AS OBJECT ( deptno number(10), dname CHAR(30)); / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_type.yml000066400000000000000000000315551503426445100242730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
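# How to read these auto-generated trees (an observation from the fixtures in
# this directory, not a formal specification): each parsed statement is a
# nested mapping of segment names to their children, and siblings under the
# same parent are rendered as a YAML list in source order. Abridged example,
# copied from the VARRAY fixture below:
#
#   - statement:
#       create_type_statement:
#         - keyword: CREATE
#         - keyword: TYPE
#         - type_reference:
#             naked_identifier: phone_list_typ_demo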
_hash: 077f26cb3da5612a2e421697b9c7a968da02430f65c4908a0d9b8350f61b1234 file: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: customer_typ_demo - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: customer_id - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - comma: ',' - naked_identifier: cust_first_name - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - comma: ',' - naked_identifier: cust_last_name - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - comma: ',' - naked_identifier: cust_address - data_type: data_type_identifier: CUST_ADDRESS_TYP - comma: ',' - naked_identifier: phone_numbers - data_type: data_type_identifier: PHONE_LIST_TYP - comma: ',' - naked_identifier: nls_language - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - naked_identifier: nls_territory - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - comma: ',' - naked_identifier: credit_limit - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - naked_identifier: cust_email - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - comma: ',' - naked_identifier: cust_orders - data_type: data_type_identifier: ORDER_LIST_TYP - end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: data_typ1 - keyword: AS - keyword: OBJECT - bracketed: start_bracket: ( naked_identifier: year data_type: data_type_identifier: NUMBER comma: ',' keyword: MEMBER create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: prod - function_parameter_list: bracketed: start_bracket: ( parameter: invent data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: corporate_customer_typ_demo - keyword: UNDER - object_reference: naked_identifier: customer_typ - bracketed: start_bracket: ( naked_identifier: account_mgr_id data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: person_t - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: name - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - comma: ',' - naked_identifier: ssn - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: NOT - keyword: FINAL - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - 
type_reference: naked_identifier: employee_t - keyword: UNDER - object_reference: naked_identifier: person_t - bracketed: - start_bracket: ( - naked_identifier: department_id - data_type: data_type_identifier: NUMBER - comma: ',' - naked_identifier: salary - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: NOT - keyword: FINAL - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: part_time_emp_t - keyword: UNDER - object_reference: naked_identifier: employee_t - bracketed: start_bracket: ( naked_identifier: num_hrs data_type: data_type_identifier: NUMBER end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: phone_list_typ_demo - keyword: AS - keyword: VARRAY - bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: OF - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '25' end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - keyword: IF - keyword: NOT - keyword: EXISTS - type_reference: naked_identifier: varr_int - keyword: AS - keyword: VARRAY - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: OF - bracketed: start_bracket: ( data_type: data_type_identifier: PLS_INTEGER end_bracket: ) - keyword: NOT - keyword: PERSISTABLE - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: plsint - keyword: AS - keyword: OBJECT - bracketed: start_bracket: ( naked_identifier: I data_type: data_type_identifier: PLS_INTEGER end_bracket: ) - keyword: NOT - keyword: PERSISTABLE - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: tab_plsint - keyword: AS - keyword: TABLE - keyword: OF - bracketed: start_bracket: ( data_type: data_type_identifier: PLS_INTEGER end_bracket: ) - keyword: NOT - keyword: PERSISTABLE - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: textdoc_typ - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: document_typ - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) - comma: ',' - naked_identifier: formatted_doc - data_type: data_type_identifier: BLOB - end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: textdoc_tab - keyword: AS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: textdoc_typ - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: cust_address_typ2 - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: street_address - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - naked_identifier: postal_code - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: 
start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: city - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - comma: ',' - naked_identifier: state_province - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: country_id - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - comma: ',' - naked_identifier: phone - data_type: data_type_identifier: phone_list_typ_demo - end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: cust_nt_address_typ - keyword: AS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: cust_address_typ2 - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: demo_typ1 - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: a1 - data_type: data_type_identifier: NUMBER - comma: ',' - naked_identifier: a2 - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - type_reference: naked_identifier: demo_typ2 - keyword: AS - keyword: OBJECT - bracketed: start_bracket: ( naked_identifier: a1 data_type: data_type_identifier: NUMBER comma: ',' keyword: MEMBER create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: get_square - keyword: RETURN - data_type: data_type_identifier: NUMBER end_bracket: ) - statement_terminator: ; - statement_terminator: / - statement: create_type_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TYPE - type_reference: naked_identifier: department_t - keyword: AS - keyword: OBJECT - bracketed: - start_bracket: ( - naked_identifier: deptno - data_type: data_type_identifier: number bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: dname - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_type_body.sql000066400000000000000000000005571503426445100253040ustar00rootroot00000000000000CREATE TYPE BODY data_typ1 IS MEMBER FUNCTION prod (invent NUMBER) RETURN NUMBER IS BEGIN RETURN (year + invent); END; END; / CREATE TYPE BODY demo_typ2 IS MEMBER FUNCTION get_square RETURN NUMBER IS x NUMBER; BEGIN SELECT c.col.a1*c.col.a1 INTO x FROM demo_tab2 c; RETURN (x); END; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_type_body.yml000066400000000000000000000076671503426445100253170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a4ef227d2f94f5b5a3d4eff6bed3d34a22fd7fb0582b725c59676e89c534efca file: - statement: create_type_body_statement: - keyword: CREATE - keyword: TYPE - keyword: BODY - type_reference: naked_identifier: data_typ1 - keyword: IS - keyword: MEMBER - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: prod - function_parameter_list: bracketed: start_bracket: ( parameter: invent data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: year - binary_operator: + - column_reference: naked_identifier: invent end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_type_body_statement: - keyword: CREATE - keyword: TYPE - keyword: BODY - type_reference: naked_identifier: demo_typ2 - keyword: IS - keyword: MEMBER - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: get_square - keyword: RETURN - data_type: data_type_identifier: NUMBER - keyword: IS - declare_segment: naked_identifier: x data_type: data_type_identifier: NUMBER statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: col - dot: . - naked_identifier: a1 - binary_operator: '*' - column_reference: - naked_identifier: c - dot: . - naked_identifier: col - dot: . - naked_identifier: a1 into_clause: keyword: INTO naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo_tab2 alias_expression: naked_identifier: c - statement_terminator: ; - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_user.sql000066400000000000000000000021231503426445100242530ustar00rootroot00000000000000CREATE USER jsmith IDENTIFIED EXTERNALLY AS "CN=foo,DNQ=123,SERIAL=234"; CREATE USER tjones IDENTIFIED EXTERNALLY AS "CN=foo,dnQualifier=123,SERIALNUMER=234"; CREATE USER peter_fitch IDENTIFIED GLOBALLY AS 'AZURE_USER=peter.fitch@example.com'; CREATE USER dba_azure IDENTIFIED GLOBALLY AS 'AZURE_ROLE=AZURE_DBA'; CREATE USER u1 IDENTIFIED BY p1 PROFILE prof1; CREATE USER sidney IDENTIFIED BY out_standing1 DEFAULT TABLESPACE example QUOTA 10M ON example TEMPORARY TABLESPACE temp QUOTA 5M ON system PROFILE app_user PASSWORD EXPIRE; CREATE USER app_user1 IDENTIFIED EXTERNALLY DEFAULT TABLESPACE example QUOTA 5M ON example PROFILE app_user; CREATE USER ops$external_user IDENTIFIED EXTERNALLY DEFAULT TABLESPACE example QUOTA 5M ON example PROFILE app_user; CREATE USER global_user IDENTIFIED GLOBALLY AS 'CN=analyst, OU=division1, O=oracle, C=US' DEFAULT TABLESPACE example QUOTA 5M ON example; CREATE USER c##comm_user IDENTIFIED BY comm_pwd DEFAULT TABLESPACE example QUOTA 20M ON example TEMPORARY TABLESPACE temp_tbs; 
sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_user.yml000066400000000000000000000116631503426445100242660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e21710c455a5d1e432b5e7bdbd8a81f8e133fd2117cea4dd4e9878fefd8af294 file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: jsmith - keyword: IDENTIFIED - keyword: EXTERNALLY - keyword: AS - quoted_identifier: '"CN=foo,DNQ=123,SERIAL=234"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: tjones - keyword: IDENTIFIED - keyword: EXTERNALLY - keyword: AS - quoted_identifier: '"CN=foo,dnQualifier=123,SERIALNUMER=234"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: peter_fitch - keyword: IDENTIFIED - keyword: GLOBALLY - keyword: AS - quoted_identifier: "'AZURE_USER=peter.fitch@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: dba_azure - keyword: IDENTIFIED - keyword: GLOBALLY - keyword: AS - quoted_identifier: "'AZURE_ROLE=AZURE_DBA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: u1 - keyword: IDENTIFIED - keyword: BY - naked_identifier: p1 - keyword: PROFILE - object_reference: naked_identifier: prof1 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: sidney - keyword: IDENTIFIED - keyword: BY - naked_identifier: out_standing1 - keyword: DEFAULT - keyword: TABLESPACE - object_reference: naked_identifier: example - keyword: QUOTA - numeric_literal: '10' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: example - keyword: TEMPORARY - keyword: TABLESPACE - object_reference: naked_identifier: temp - keyword: QUOTA - numeric_literal: '5' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: system - keyword: PROFILE - object_reference: naked_identifier: app_user - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: app_user1 - keyword: IDENTIFIED - keyword: EXTERNALLY - keyword: DEFAULT - keyword: TABLESPACE - object_reference: naked_identifier: example - keyword: QUOTA - numeric_literal: '5' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: example - keyword: PROFILE - object_reference: naked_identifier: app_user - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: ops$external_user - keyword: IDENTIFIED - keyword: EXTERNALLY - keyword: DEFAULT - keyword: TABLESPACE - object_reference: naked_identifier: example - keyword: QUOTA - numeric_literal: '5' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: example - keyword: PROFILE - object_reference: naked_identifier: app_user - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: 
naked_identifier: global_user - keyword: IDENTIFIED - keyword: GLOBALLY - keyword: AS - quoted_identifier: "'CN=analyst, OU=division1, O=oracle, C=US'" - keyword: DEFAULT - keyword: TABLESPACE - object_reference: naked_identifier: example - keyword: QUOTA - numeric_literal: '5' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: example - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: c##comm_user - keyword: IDENTIFIED - keyword: BY - naked_identifier: comm_pwd - keyword: DEFAULT - keyword: TABLESPACE - object_reference: naked_identifier: example - keyword: QUOTA - numeric_literal: '20' - size_prefix: M - keyword: 'ON' - object_reference: naked_identifier: example - keyword: TEMPORARY - keyword: TABLESPACE - object_reference: naked_identifier: temp_tbs - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_view.sql000066400000000000000000000023201503426445100242460ustar00rootroot00000000000000-- BASIC CREATE VIEW CREATE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE OR REPLACE CREATE OR REPLACE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH FORCE CREATE FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH NO FORCE CREATE NO FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); CREATE OR REPLACE NO FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONING CREATE EDITIONING VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONABLE CREATE EDITIONABLE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH NONEDITIONABLE CREATE NONEDITIONABLE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONABLE EDITIONING CREATE EDITIONABLE EDITIONING VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE OR REPLACE VIEW WITH EDITIONING CREATE OR REPLACE EDITIONING VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE MATERIALIZED VIEW CREATE MATERIALIZED VIEW NEW_VIEW AS( select PERSON_ID from PERSONS p); -- CREATE OR REPLACE MATERIALIZED VIEW CREATE OR REPLACE MATERIALIZED VIEW NEW_VIEW AS( select PERSON_ID from PERSONS p); sqlfluff-3.4.2/test/fixtures/dialects/oracle/create_view.yml000066400000000000000000000231031503426445100242520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f00daf451a52a782296944b6de0c981ed0b2d02a3a1c265b1cf9ed09972247a7 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: 'NO' - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: 'NO' - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONABLE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: NONEDITIONABLE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONABLE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/cursor.sql000066400000000000000000000247201503426445100232760ustar00rootroot00000000000000DECLARE TYPE empcurtyp IS REF CURSOR RETURN employees%ROWTYPE; -- strong type TYPE genericcurtyp IS REF CURSOR; -- weak type cursor1 empcurtyp; -- strong cursor variable cursor2 genericcurtyp; -- weak cursor variable my_cursor SYS_REFCURSOR; -- weak cursor variable TYPE deptcurtyp IS REF CURSOR RETURN departments%ROWTYPE; -- strong type dept_cv deptcurtyp; -- strong cursor variable BEGIN NULL; END; / DECLARE TYPE EmpRecTyp IS RECORD ( employee_id NUMBER, last_name VARCHAR2(25), salary NUMBER(8,2)); TYPE EmpCurTyp IS REF CURSOR RETURN EmpRecTyp; emp_cv EmpCurTyp; BEGIN NULL; END; / DECLARE sal employees.salary%TYPE; sal_multiple 
employees.salary%TYPE; factor INTEGER := 2; cv SYS_REFCURSOR; BEGIN OPEN cv FOR SELECT salary, salary*factor FROM employees WHERE job_id LIKE 'AD_%'; -- PL/SQL evaluates factor LOOP FETCH cv INTO sal, sal_multiple; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE('factor = ' || factor); DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); factor := factor + 1; -- Does not affect sal_multiple END LOOP; CLOSE cv; END; / DECLARE sal employees.salary%TYPE; sal_multiple employees.salary%TYPE; factor INTEGER := 2; cv SYS_REFCURSOR; BEGIN DBMS_OUTPUT.PUT_LINE('factor = ' || factor); OPEN cv FOR SELECT salary, salary*factor FROM employees WHERE job_id LIKE 'AD_%'; -- PL/SQL evaluates factor LOOP FETCH cv INTO sal, sal_multiple; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); END LOOP; factor := factor + 1; DBMS_OUTPUT.PUT_LINE('factor = ' || factor); OPEN cv FOR SELECT salary, salary*factor FROM employees WHERE job_id LIKE 'AD_%'; -- PL/SQL evaluates factor LOOP FETCH cv INTO sal, sal_multiple; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); END LOOP; CLOSE cv; END; / DECLARE v1 pkg.mytab; -- collection of records v2 pkg.rec; c1 SYS_REFCURSOR; BEGIN v1(1).f1 := 1; v1(1).f2 := 'one'; OPEN c1 FOR SELECT * FROM TABLE(v1); FETCH c1 INTO v2; CLOSE c1; DBMS_OUTPUT.PUT_LINE('Values in record are ' || v2.f1 || ' and ' || v2.f2); END; / DECLARE CURSOR c1 RETURN departments%ROWTYPE; -- Declare c1 CURSOR c2 IS -- Declare and define c2 SELECT employee_id, job_id, salary FROM employees WHERE salary > 2000; CURSOR c1 RETURN departments%ROWTYPE IS -- Define c1, SELECT * FROM departments -- repeating return type WHERE department_id = 110; CURSOR c3 RETURN locations%ROWTYPE; -- Declare c3 CURSOR c3 IS -- Define c3, SELECT * FROM locations -- omitting return type WHERE country_id = 'JP'; BEGIN NULL; END; / DECLARE sal employees.salary%TYPE; sal_multiple employees.salary%TYPE; factor INTEGER := 2; CURSOR c1 IS SELECT salary, salary*factor FROM employees WHERE job_id LIKE 'AD_%'; BEGIN OPEN c1; -- PL/SQL evaluates factor LOOP FETCH c1 INTO sal, sal_multiple; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE('factor = ' || factor); DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); factor := factor + 1; -- Does not affect sal_multiple END LOOP; CLOSE c1; END; / DECLARE sal employees.salary%TYPE; sal_multiple employees.salary%TYPE; factor INTEGER := 2; CURSOR c1 IS SELECT salary, salary*factor FROM employees WHERE job_id LIKE 'AD_%'; BEGIN DBMS_OUTPUT.PUT_LINE('factor = ' || factor); OPEN c1; -- PL/SQL evaluates factor LOOP FETCH c1 INTO sal, sal_multiple; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); END LOOP; CLOSE c1; factor := factor + 1; DBMS_OUTPUT.PUT_LINE('factor = ' || factor); OPEN c1; -- PL/SQL evaluates factor LOOP FETCH c1 INTO sal, sal_multiple; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE('sal = ' || sal); DBMS_OUTPUT.PUT_LINE('sal_multiple = ' || sal_multiple); END LOOP; CLOSE c1; END; / DECLARE CURSOR c1 IS SELECT employee_id, (salary * 0.05) raisee FROM employees WHERE job_id LIKE '%_MAN' ORDER BY employee_id; emp_rec c1%ROWTYPE; BEGIN OPEN c1; LOOP FETCH c1 INTO emp_rec; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE ( 'Raise for employee #' || emp_rec.employee_id || ' is $' || emp_rec.raisee ); END LOOP; CLOSE c1; 
END; / DECLARE CURSOR c (job VARCHAR2, max_sal NUMBER) IS SELECT last_name, first_name, (salary - max_sal) overpayment FROM employees WHERE job_id = job AND salary > max_sal ORDER BY salary; PROCEDURE print_overpaid IS last_name_ employees.last_name%TYPE; first_name_ employees.first_name%TYPE; overpayment_ employees.salary%TYPE; BEGIN LOOP FETCH c INTO last_name_, first_name_, overpayment_; EXIT WHEN c%NOTFOUND; DBMS_OUTPUT.PUT_LINE(last_name_ || ', ' || first_name_ || ' (by ' || overpayment_ || ')'); END LOOP; END print_overpaid; BEGIN DBMS_OUTPUT.PUT_LINE('----------------------'); DBMS_OUTPUT.PUT_LINE('Overpaid Stock Clerks:'); DBMS_OUTPUT.PUT_LINE('----------------------'); OPEN c('ST_CLERK', 5000); print_overpaid(); CLOSE c; DBMS_OUTPUT.PUT_LINE('-------------------------------'); DBMS_OUTPUT.PUT_LINE('Overpaid Sales Representatives:'); DBMS_OUTPUT.PUT_LINE('-------------------------------'); OPEN c('SA_REP', 10000); print_overpaid(); CLOSE c; END; / DECLARE CURSOR c (location NUMBER DEFAULT 1700) IS SELECT d.department_name, e.last_name manager, l.city FROM departments d, employees e, locations l WHERE l.location_id = location AND l.location_id = d.location_id AND d.department_id = e.department_id ORDER BY d.department_id; PROCEDURE print_depts IS dept_name departments.department_name%TYPE; mgr_name employees.last_name%TYPE; city_name locations.city%TYPE; BEGIN LOOP FETCH c INTO dept_name, mgr_name, city_name; EXIT WHEN c%NOTFOUND; DBMS_OUTPUT.PUT_LINE(dept_name || ' (Manager: ' || mgr_name || ')'); END LOOP; END print_depts; BEGIN DBMS_OUTPUT.PUT_LINE('DEPARTMENTS AT HEADQUARTERS:'); DBMS_OUTPUT.PUT_LINE('--------------------------------'); OPEN c; print_depts(); DBMS_OUTPUT.PUT_LINE('--------------------------------'); CLOSE c; DBMS_OUTPUT.PUT_LINE('DEPARTMENTS IN CANADA:'); DBMS_OUTPUT.PUT_LINE('--------------------------------'); OPEN c(1800); -- Toronto print_depts(); CLOSE c; OPEN c(1900); -- Whitehorse print_depts(); CLOSE c; END; / DECLARE CURSOR c (job VARCHAR2, max_sal NUMBER, hired DATE DEFAULT TO_DATE('31-DEC-1999', 'DD-MON-YYYY')) IS SELECT last_name, first_name, (salary - max_sal) overpayment FROM employees WHERE job_id = job AND salary > max_sal AND hire_date > hired ORDER BY salary; PROCEDURE print_overpaid IS last_name_ employees.last_name%TYPE; first_name_ employees.first_name%TYPE; overpayment_ employees.salary%TYPE; BEGIN LOOP FETCH c INTO last_name_, first_name_, overpayment_; EXIT WHEN c%NOTFOUND; DBMS_OUTPUT.PUT_LINE(last_name_ || ', ' || first_name_ || ' (by ' || overpayment_ || ')'); END LOOP; END print_overpaid; BEGIN DBMS_OUTPUT.PUT_LINE('-------------------------------'); DBMS_OUTPUT.PUT_LINE('Overpaid Sales Representatives:'); DBMS_OUTPUT.PUT_LINE('-------------------------------'); OPEN c('SA_REP', 10000); -- existing reference print_overpaid(); CLOSE c; DBMS_OUTPUT.PUT_LINE('------------------------------------------------'); DBMS_OUTPUT.PUT_LINE('Overpaid Sales Representatives Hired After 2014:'); DBMS_OUTPUT.PUT_LINE('------------------------------------------------'); OPEN c('SA_REP', 10000, TO_DATE('31-DEC-2014', 'DD-MON-YYYY')); -- new reference print_overpaid(); CLOSE c; END; / DECLARE CURSOR c1 IS SELECT t1.department_id, department_name, staff FROM departments t1, ( SELECT department_id, COUNT(*) AS staff FROM employees GROUP BY department_id ) t2 WHERE (t1.department_id = t2.department_id) AND staff >= 5 ORDER BY staff; BEGIN FOR dept IN c1 LOOP DBMS_OUTPUT.PUT_LINE ('Department = ' || dept.department_name || ', staff = ' || dept.staff); END 
LOOP; END; / DECLARE CURSOR c1 IS SELECT department_id, last_name, salary FROM employees t WHERE salary > ( SELECT AVG(salary) FROM employees WHERE t.department_id = department_id ) ORDER BY department_id, last_name; BEGIN FOR person IN c1 LOOP DBMS_OUTPUT.PUT_LINE('Making above-average salary = ' || person.last_name); END LOOP; END; / DECLARE TYPE emp_cur_typ IS REF CURSOR; emp_cur emp_cur_typ; dept_name departments.department_name%TYPE; emp_name employees.last_name%TYPE; CURSOR c1 IS SELECT department_name, CURSOR ( SELECT e.last_name FROM employees e WHERE e.department_id = d.department_id ORDER BY e.last_name ) employees FROM departments d WHERE department_name LIKE 'A%' ORDER BY department_name; BEGIN OPEN c1; LOOP -- Process each row of query result set FETCH c1 INTO dept_name, emp_cur; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE('Department: ' || dept_name); LOOP -- Process each row of subquery result set FETCH emp_cur INTO emp_name; EXIT WHEN emp_cur%NOTFOUND; DBMS_OUTPUT.PUT_LINE('-- Employee: ' || emp_name); END LOOP; END LOOP; CLOSE c1; END; / DECLARE CURSOR c1 IS SELECT * FROM emp FOR UPDATE OF salary ORDER BY employee_id; emp_rec emp%ROWTYPE; BEGIN OPEN c1; LOOP FETCH c1 INTO emp_rec; -- fails on second iteration EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE ( 'emp_rec.employee_id = ' || TO_CHAR(emp_rec.employee_id) ); UPDATE emp SET salary = salary * 1.05 WHERE employee_id = 105; COMMIT; -- releases locks END LOOP; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/cursor.yml000066400000000000000000003001301503426445100232700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8de9165e146e0bc42edececd377ee8a4875b98776a40b90e91132dfe8f68a15b file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - ref_cursor_type: - keyword: TYPE - naked_identifier: empcurtyp - keyword: IS - keyword: REF - keyword: CURSOR - keyword: RETURN - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - ref_cursor_type: - keyword: TYPE - naked_identifier: genericcurtyp - keyword: IS - keyword: REF - keyword: CURSOR - statement_terminator: ; - naked_identifier: cursor1 - data_type: data_type_identifier: empcurtyp - statement_terminator: ; - naked_identifier: cursor2 - data_type: data_type_identifier: genericcurtyp - statement_terminator: ; - naked_identifier: my_cursor - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - ref_cursor_type: - keyword: TYPE - naked_identifier: deptcurtyp - keyword: IS - keyword: REF - keyword: CURSOR - keyword: RETURN - row_type_reference: table_reference: naked_identifier: departments binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: dept_cv - data_type: data_type_identifier: deptcurtyp - statement_terminator: ; - keyword: BEGIN - statement: null_statement: keyword: 'NULL' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: EmpRecTyp - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: employee_id - data_type: data_type_identifier: NUMBER - comma: ',' - naked_identifier: last_name - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '25' end_bracket: ) - comma: ',' - naked_identifier: salary - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '8' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - ref_cursor_type: - keyword: TYPE - naked_identifier: EmpCurTyp - keyword: IS - keyword: REF - keyword: CURSOR - keyword: RETURN - object_reference: naked_identifier: EmpRecTyp - statement_terminator: ; - naked_identifier: emp_cv - data_type: data_type_identifier: EmpCurTyp - statement_terminator: ; - keyword: BEGIN - statement: null_statement: keyword: 'NULL' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: sal - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: sal_multiple - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: factor - data_type: data_type_identifier: INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - naked_identifier: cv - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: salary - binary_operator: '*' - column_reference: naked_identifier: factor from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'AD_%'" - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: factor colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: factor binary_operator: + numeric_literal: '1' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: cv - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: sal - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: sal_multiple - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: factor - data_type: data_type_identifier: INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - naked_identifier: cv - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: salary - binary_operator: '*' - column_reference: naked_identifier: factor from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'AD_%'" - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: factor colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: factor binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: salary - binary_operator: '*' - column_reference: naked_identifier: factor from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'AD_%'" - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: cv - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: v1 - data_type: naked_identifier: pkg dot: . data_type_identifier: mytab - statement_terminator: ; - naked_identifier: v2 - data_type: naked_identifier: pkg dot: . data_type_identifier: rec - statement_terminator: ; - naked_identifier: c1 - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: v1 function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement: assignment_segment_statement: dot: . object_reference: naked_identifier: f1 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: function_name_identifier: v1 function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement: assignment_segment_statement: dot: . 
object_reference: naked_identifier: f2 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'one'" - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: c1 - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: TABLE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: v1 end_bracket: ) - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v2 - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Values in record are '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . - naked_identifier: f1 - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' and '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . - naked_identifier: f2 end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: RETURN - row_type_reference: table_reference: naked_identifier: departments binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c2 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: job_id - comma: ',' - select_clause_element: column_reference: naked_identifier: salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '>' numeric_literal: '2000' - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: RETURN - row_type_reference: table_reference: naked_identifier: departments binary_operator: '%' keyword: ROWTYPE - keyword: IS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '110' - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c3 - keyword: RETURN - row_type_reference: table_reference: naked_identifier: locations binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c3 - keyword: IS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: 
FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: locations where_clause: keyword: WHERE expression: column_reference: naked_identifier: country_id comparison_operator: raw_comparison_operator: '=' quoted_literal: "'JP'" - statement_terminator: ; - keyword: BEGIN - statement: null_statement: keyword: 'NULL' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: sal - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: sal_multiple - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: factor - data_type: data_type_identifier: INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: salary - binary_operator: '*' - column_reference: naked_identifier: factor from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'AD_%'" - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: factor colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: factor binary_operator: + numeric_literal: '1' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: sal - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: sal_multiple - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: factor - data_type: data_type_identifier: INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: salary - binary_operator: '*' - column_reference: naked_identifier: factor from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'AD_%'" - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: factor colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: factor binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'factor = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: factor end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: - keyword: INTO - naked_identifier: sal - comma: ',' - naked_identifier: sal_multiple - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'sal_multiple = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: sal_multiple end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '0.05' end_bracket: ) alias_expression: naked_identifier: raisee from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id keyword: LIKE quoted_literal: "'%_MAN'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; naked_identifier: emp_rec row_type_reference: table_reference: naked_identifier: c1 binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: emp_rec - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Raise for employee #'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: emp_rec - dot: . - naked_identifier: employee_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' is $'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: emp_rec - dot: . 
- naked_identifier: raisee end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c - function_parameter_list: bracketed: - start_bracket: ( - parameter: job - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: max_sal - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: salary - binary_operator: '-' - column_reference: naked_identifier: max_sal end_bracket: ) alias_expression: naked_identifier: overpayment from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: naked_identifier: job_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: job - binary_operator: AND - column_reference: naked_identifier: salary - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: max_sal orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary - statement_terminator: ; create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_overpaid - keyword: IS - declare_segment: - naked_identifier: last_name_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: first_name_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: first_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: overpayment_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: - keyword: INTO - naked_identifier: last_name_ - comma: ',' - naked_identifier: first_name_ - comma: ',' - naked_identifier: overpayment_ - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: last_name_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: first_name_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' (by '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: overpayment_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - object_reference: naked_identifier: print_overpaid - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'----------------------'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Overpaid Stock Clerks:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'----------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'ST_CLERK'" - comma: ',' - expression: numeric_literal: '5000' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_overpaid function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Overpaid Sales Representatives:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'SA_REP'" - comma: ',' - expression: numeric_literal: '10000' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_overpaid function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c - function_parameter_list: bracketed: start_bracket: ( parameter: location data_type: data_type_identifier: NUMBER keyword: DEFAULT expression: numeric_literal: '1700' end_bracket: ) - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: department_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: last_name alias_expression: naked_identifier: manager - comma: ',' - select_clause_element: column_reference: - naked_identifier: l - dot: . - naked_identifier: city from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: locations alias_expression: naked_identifier: l where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: l - dot: . - naked_identifier: location_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: location - binary_operator: AND - column_reference: - naked_identifier: l - dot: . - naked_identifier: location_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: location_id - binary_operator: AND - column_reference: - naked_identifier: d - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: d - dot: . - naked_identifier: department_id - statement_terminator: ; create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_depts - keyword: IS - declare_segment: - naked_identifier: dept_name - column_type_reference: column_reference: - naked_identifier: departments - dot: . - naked_identifier: department_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: mgr_name - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: city_name - column_type_reference: column_reference: - naked_identifier: locations - dot: . 
- naked_identifier: city binary_operator: '%' keyword: TYPE - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: - keyword: INTO - naked_identifier: dept_name - comma: ',' - naked_identifier: mgr_name - comma: ',' - naked_identifier: city_name - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: dept_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' (Manager: '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: mgr_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - object_reference: naked_identifier: print_depts - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'DEPARTMENTS AT HEADQUARTERS:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'--------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_depts function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'--------------------------------'" end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'DEPARTMENTS IN CANADA:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'--------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1800' end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_depts function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1900' end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_depts function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c - function_parameter_list: bracketed: - start_bracket: ( - parameter: job - data_type: data_type_identifier: VARCHAR2 - comma: ',' - parameter: max_sal - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: hired - data_type: data_type_identifier: DATE - keyword: DEFAULT - expression: function: function_name: function_name_identifier: TO_DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'31-DEC-1999'" - comma: ',' - expression: quoted_literal: "'DD-MON-YYYY'" - end_bracket: ) - end_bracket: ) - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: salary - binary_operator: '-' - column_reference: naked_identifier: max_sal end_bracket: ) alias_expression: naked_identifier: overpayment from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: naked_identifier: job_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: job - binary_operator: AND - column_reference: naked_identifier: salary - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: max_sal - binary_operator: AND - column_reference: naked_identifier: hire_date - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: hired orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary - statement_terminator: ; create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_overpaid - keyword: IS - declare_segment: - naked_identifier: last_name_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: first_name_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: first_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: overpayment_ - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: - keyword: INTO - naked_identifier: last_name_ - comma: ',' - naked_identifier: first_name_ - comma: ',' - naked_identifier: overpayment_ - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: last_name_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: first_name_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' (by '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: overpayment_ - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - object_reference: naked_identifier: print_overpaid - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Overpaid Sales Representatives:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'SA_REP'" - comma: ',' - expression: numeric_literal: '10000' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_overpaid function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'------------------------------------------------'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Overpaid Sales Representatives Hired After 2014:'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'------------------------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'SA_REP'" - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: function: function_name: function_name_identifier: TO_DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'31-DEC-2014'" - comma: ',' - expression: quoted_literal: "'DD-MON-YYYY'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_overpaid function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: department_id - comma: ',' - select_clause_element: column_reference: naked_identifier: department_name - comma: ',' - select_clause_element: column_reference: naked_identifier: staff from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: t1 - comma: ',' - from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: staff from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: department_id end_bracket: ) alias_expression: naked_identifier: t2 where_clause: keyword: WHERE expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: department_id end_bracket: ) binary_operator: AND column_reference: naked_identifier: staff comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '5' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: staff - statement_terminator: ; - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: dept - keyword: IN - expression: column_reference: naked_identifier: c1 - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Department = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: dept - dot: . 
- naked_identifier: department_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', staff = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: dept - dot: . - naked_identifier: staff end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: department_id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_id - comma: ',' - column_reference: naked_identifier: last_name - statement_terminator: ; - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: person - keyword: IN - expression: column_reference: naked_identifier: c1 - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Making above-average salary = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: person - dot: . - naked_identifier: last_name end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - ref_cursor_type: - keyword: TYPE - naked_identifier: emp_cur_typ - keyword: IS - keyword: REF - keyword: CURSOR - statement_terminator: ; - naked_identifier: emp_cur - data_type: data_type_identifier: emp_cur_typ - statement_terminator: ; - naked_identifier: dept_name - column_type_reference: column_reference: - naked_identifier: departments - dot: . - naked_identifier: department_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: emp_name - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURSOR function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: e - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: e - dot: . - naked_identifier: last_name end_bracket: ) alias_expression: naked_identifier: employees from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_name keyword: LIKE quoted_literal: "'A%'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_name - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: - keyword: INTO - naked_identifier: dept_name - comma: ',' - naked_identifier: emp_cur - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Department: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: dept_name end_bracket: ) - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: emp_cur into_clause: keyword: INTO naked_identifier: emp_name - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: emp_cur binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-- Employee: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: emp_name end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: - select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp - keyword: FOR - keyword: UPDATE - keyword: OF - table_reference: naked_identifier: salary - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; naked_identifier: emp_rec row_type_reference: table_reference: naked_identifier: emp binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: emp_rec - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'emp_rec.employee_id = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: emp_rec - dot: . - naked_identifier: employee_id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '1.05' where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '105' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/database_link.sql000066400000000000000000000002521503426445100245340ustar00rootroot00000000000000select * from foo@bar where 1 = 1; select baz.name from foo@bar baz where 1 = 1; select function_a@orcl() from dual; select pkg_test.function_a@orcl(1) from dual; sqlfluff-3.4.2/test/fixtures/dialects/oracle/database_link.yml000066400000000000000000000062631503426445100245460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
# To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5d58dab8c810a69405689c1ac6eb6f0416b3389c1901c5a08601b0f41ed0d2d4
file:
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: foo
              - at_sign: '@'
              - naked_identifier: bar
      where_clause:
        keyword: where
        expression:
        - numeric_literal: '1'
        - comparison_operator:
            raw_comparison_operator: '='
        - numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
          - naked_identifier: baz
          - dot: .
          - naked_identifier: name
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: foo
              - at_sign: '@'
              - naked_identifier: bar
            alias_expression:
              naked_identifier: baz
      where_clause:
        keyword: where
        expression:
        - numeric_literal: '1'
        - comparison_operator:
            raw_comparison_operator: '='
        - numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          function:
            function_name:
            - function_name_identifier: function_a
            - at_sign: '@'
            - function_name_identifier: orcl
            function_contents:
              bracketed:
                start_bracket: (
                end_bracket: )
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dual
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          function:
            function_name:
            - naked_identifier: pkg_test
            - dot: .
            - function_name_identifier: function_a
            - at_sign: '@'
            - function_name_identifier: orcl
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  numeric_literal: '1'
                end_bracket: )
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dual
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_package.sql
DROP PACKAGE emp_mgmt;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_package.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ad95a8c4c861de157b9cb1474c0e529c48b47f70c9cd28001f56c0c54f9f33f3
file:
  statement:
    drop_package_statement:
    - keyword: DROP
    - keyword: PACKAGE
    - package_reference:
        naked_identifier: emp_mgmt
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_procedure.sql
DROP PROCEDURE hr.remove_emp;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_procedure.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 11d2b406537784262f8d1ff4dcf1f7d7ca200480a0f958c6387e9d990db0a493
file:
  statement:
    drop_procedure_statement:
    - keyword: DROP
    - keyword: PROCEDURE
    - function_name:
        naked_identifier: hr
        dot: .
        function_name_identifier: remove_emp
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_table.sql
DROP TABLE foo.bar CASCADE CONSTRAINTS PURGE;
DROP TABLE foo.bar CASCADE CONSTRAINTS;
DROP TABLE foo.bar PURGE;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: db58b5ebc6efad7ee53b2673aefe4004241b0b3f282079aa6e02b48745243b16
file:
- statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - table_reference:
      - naked_identifier: foo
      - dot: .
      - naked_identifier: bar
    - keyword: CASCADE
    - keyword: CONSTRAINTS
    - keyword: PURGE
- statement_terminator: ;
- statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - table_reference:
      - naked_identifier: foo
      - dot: .
      - naked_identifier: bar
    - keyword: CASCADE
    - keyword: CONSTRAINTS
- statement_terminator: ;
- statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - table_reference:
      - naked_identifier: foo
      - dot: .
      - naked_identifier: bar
    - keyword: PURGE
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_type.sql
DROP TYPE IF EXISTS person_t FORCE;
DROP TYPE BODY data_typ1;
sqlfluff-3.4.2/test/fixtures/dialects/oracle/drop_type.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
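# A brief aside on the workflow the header above describes, before the `_hash`
# field below: after editing one of the neighbouring .sql fixtures, the paired
# .yml parse tree is regenerated rather than written by hand. A minimal sketch
# of that loop, assuming a local checkout with the development dependencies
# installed (only the bare command quoted in the header is taken from the
# source; any filtering options the script may accept are not asserted here):
#
#     python test/generate_parse_fixture_yml.py
#
# Regeneration rewrites the fixture YAML, including the `_hash` value, so the
# hash check in the test suite passes again.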
_hash: 12cc8c15b864c7b5b34bc9f57d9be238033e44de56a382c618f704c9d757d1cb file: - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: person_t - keyword: FORCE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: BODY - object_reference: naked_identifier: data_typ1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/exit.sql000066400000000000000000000010671503426445100227310ustar00rootroot00000000000000DECLARE x NUMBER := 0; BEGIN LOOP DBMS_OUTPUT.PUT_LINE ('Inside loop: x = ' || TO_CHAR(x)); x := x + 1; IF x > 3 THEN EXIT; END IF; END LOOP; -- After EXIT, control resumes here DBMS_OUTPUT.PUT_LINE(' After loop: x = ' || TO_CHAR(x)); END; / DECLARE x NUMBER := 0; BEGIN LOOP DBMS_OUTPUT.PUT_LINE('Inside loop: x = ' || TO_CHAR(x)); x := x + 1; -- prevents infinite loop EXIT WHEN x > 3; END LOOP; -- After EXIT statement, control resumes here DBMS_OUTPUT.PUT_LINE('After loop: x = ' || TO_CHAR(x)); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/exit.yml000066400000000000000000000155271503426445100227410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 02085cc6a1f1b2aeee4114071918bab0021460d3989d0fefad474eb4d15b72d4 file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '3' - keyword: THEN - statement: exit_statement: keyword: EXIT - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "' After loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - keyword: BEGIN - statement: loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '3' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'After loop: x = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/fetch.sql000066400000000000000000000177521503426445100230610ustar00rootroot00000000000000DECLARE TYPE EmpRecTyp IS RECORD ( emp_id employees.employee_id%TYPE, salary employees.salary%TYPE ); CURSOR desc_salary RETURN EmpRecTyp IS SELECT employee_id, salary FROM employees ORDER BY salary DESC; highest_paid_emp EmpRecTyp; next_highest_paid_emp EmpRecTyp; FUNCTION nth_highest_salary (n INTEGER) RETURN EmpRecTyp IS emp_rec EmpRecTyp; BEGIN OPEN desc_salary; FOR i IN 1..n LOOP FETCH desc_salary INTO emp_rec; END LOOP; CLOSE desc_salary; RETURN emp_rec; END nth_highest_salary; BEGIN highest_paid_emp := nth_highest_salary(1); next_highest_paid_emp := nth_highest_salary(2); DBMS_OUTPUT.PUT_LINE( 'Highest Paid: #' || highest_paid_emp.emp_id || ', $' || highest_paid_emp.salary ); DBMS_OUTPUT.PUT_LINE( 'Next Highest Paid: #' || next_highest_paid_emp.emp_id || ', $' || next_highest_paid_emp.salary ); END; / DECLARE CURSOR c1 IS SELECT last_name, job_id FROM employees WHERE REGEXP_LIKE (job_id, 'S[HT]_CLERK') ORDER BY last_name; v_lastname employees.last_name%TYPE; -- variable for last_name v_jobid employees.job_id%TYPE; -- variable for job_id CURSOR c2 IS SELECT * FROM employees WHERE REGEXP_LIKE (job_id, '[ACADFIMKSA]_M[ANGR]') ORDER BY job_id; v_employees employees%ROWTYPE; -- record variable for row of table BEGIN OPEN c1; LOOP -- Fetches 2 columns into variables FETCH c1 INTO v_lastname, v_jobid; EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_lastname, 25, ' ') || v_jobid ); END LOOP; CLOSE c1; DBMS_OUTPUT.PUT_LINE( '-------------------------------------' ); OPEN c2; LOOP -- Fetches entire row into the v_employees record FETCH c2 INTO v_employees; EXIT WHEN c2%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_employees.last_name, 25, ' ') || v_employees.job_id ); END LOOP; CLOSE c2; END; / DECLARE CURSOR c IS SELECT e.job_id, j.job_title FROM employees e, jobs j WHERE e.job_id = j.job_id AND e.manager_id = 100 ORDER BY last_name; -- Record variables for rows of cursor result set: job1 c%ROWTYPE; job2 c%ROWTYPE; job3 c%ROWTYPE; job4 c%ROWTYPE; job5 c%ROWTYPE; BEGIN OPEN c; FETCH c INTO job1; -- fetches first row FETCH c INTO job2; -- fetches second row FETCH c INTO job3; -- fetches third row FETCH c INTO job4; -- fetches fourth row FETCH c INTO job5; -- fetches fifth row CLOSE c; DBMS_OUTPUT.PUT_LINE(job1.job_title || ' (' || job1.job_id || ')'); DBMS_OUTPUT.PUT_LINE(job2.job_title || ' (' || job2.job_id || ')'); DBMS_OUTPUT.PUT_LINE(job3.job_title || ' (' || job3.job_id || ')'); DBMS_OUTPUT.PUT_LINE(job4.job_title || ' (' || job4.job_id || ')'); DBMS_OUTPUT.PUT_LINE(job5.job_title || ' (' || job5.job_id || ')'); END; / DECLARE cv SYS_REFCURSOR; -- cursor variable v_lastname employees.last_name%TYPE; -- variable for last_name v_jobid employees.job_id%TYPE; -- variable for job_id query_2 VARCHAR2(200) := 'SELECT * FROM employees WHERE REGEXP_LIKE (job_id, ''[ACADFIMKSA]_M[ANGR]'') ORDER BY job_id'; v_employees employees%ROWTYPE; -- record variable row of table BEGIN OPEN cv FOR SELECT last_name, job_id FROM employees 
WHERE REGEXP_LIKE (job_id, 'S[HT]_CLERK') ORDER BY last_name; LOOP -- Fetches 2 columns into variables FETCH cv INTO v_lastname, v_jobid; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_lastname, 25, ' ') || v_jobid ); END LOOP; DBMS_OUTPUT.PUT_LINE( '-------------------------------------' ); OPEN cv FOR query_2; LOOP -- Fetches entire row into the v_employees record FETCH cv INTO v_employees; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_employees.last_name, 25, ' ') || v_employees.job_id ); END LOOP; CLOSE cv; END; / DECLARE TYPE empcurtyp IS REF CURSOR; TYPE namelist IS TABLE OF employees.last_name%TYPE; TYPE sallist IS TABLE OF employees.salary%TYPE; emp_cv empcurtyp; names namelist; sals sallist; BEGIN OPEN emp_cv FOR SELECT last_name, salary FROM employees WHERE job_id = 'SA_REP' ORDER BY salary DESC; FETCH emp_cv BULK COLLECT INTO names, sals; CLOSE emp_cv; -- loop through the names and sals collections FOR i IN names.FIRST .. names.LAST LOOP DBMS_OUTPUT.PUT_LINE ('Name = ' || names(i) || ', salary = ' || sals(i)); END LOOP; END; / DECLARE CURSOR c1 IS SELECT * FROM emp FOR UPDATE OF salary ORDER BY employee_id; emp_rec emp%ROWTYPE; BEGIN OPEN c1; LOOP FETCH c1 INTO emp_rec; -- fails on second iteration EXIT WHEN c1%NOTFOUND; DBMS_OUTPUT.PUT_LINE ( 'emp_rec.employee_id = ' || TO_CHAR(emp_rec.employee_id) ); UPDATE emp SET salary = salary * 1.05 WHERE employee_id = 105; COMMIT; -- releases locks END LOOP; END; / DECLARE TYPE EmpCurTyp IS REF CURSOR; v_emp_cursor EmpCurTyp; emp_record employees%ROWTYPE; v_stmt_str VARCHAR2(200); v_e_job employees.job_id%TYPE; BEGIN -- Dynamic SQL statement with placeholder: v_stmt_str := 'SELECT * FROM employees WHERE job_id = :j'; -- Open cursor & specify bind variable in USING clause: OPEN v_emp_cursor FOR v_stmt_str USING 'MANAGER'; -- Fetch rows from result set one at a time: LOOP FETCH v_emp_cursor INTO emp_record; EXIT WHEN v_emp_cursor%NOTFOUND; END LOOP; -- Close cursor: CLOSE v_emp_cursor; END; / DECLARE TYPE NameList IS TABLE OF employees.last_name%TYPE; TYPE SalList IS TABLE OF employees.salary%TYPE; CURSOR c1 IS SELECT last_name, salary FROM employees WHERE salary > 10000 ORDER BY last_name; names NameList; sals SalList; TYPE RecList IS TABLE OF c1%ROWTYPE; recs RecList; v_limit PLS_INTEGER := 10; PROCEDURE print_results IS BEGIN -- Check if collections are empty: IF names IS NULL OR names.COUNT = 0 THEN DBMS_OUTPUT.PUT_LINE('No results!'); ELSE DBMS_OUTPUT.PUT_LINE('Result: '); FOR i IN names.FIRST .. names.LAST LOOP DBMS_OUTPUT.PUT_LINE(' Employee ' || names(i) || ': $' || sals(i)); END LOOP; END IF; END; BEGIN DBMS_OUTPUT.PUT_LINE ('--- Processing all results simultaneously ---'); OPEN c1; FETCH c1 BULK COLLECT INTO names, sals; CLOSE c1; print_results(); DBMS_OUTPUT.PUT_LINE ('--- Processing ' || v_limit || ' rows at a time ---'); OPEN c1; LOOP FETCH c1 BULK COLLECT INTO names, sals LIMIT v_limit; EXIT WHEN names.COUNT = 0; print_results(); END LOOP; CLOSE c1; DBMS_OUTPUT.PUT_LINE ('--- Fetching records rather than columns ---'); OPEN c1; FETCH c1 BULK COLLECT INTO recs; FOR i IN recs.FIRST .. 
recs.LAST LOOP -- Now all columns from result set come from one record DBMS_OUTPUT.PUT_LINE (1); END LOOP; END; / DECLARE CURSOR c1 IS SELECT first_name, last_name, hire_date FROM employees; TYPE NameSet IS TABLE OF c1%ROWTYPE; stock_managers NameSet; -- nested table of records TYPE cursor_var_type is REF CURSOR; cv cursor_var_type; BEGIN -- Assign values to nested table of records: OPEN cv FOR SELECT first_name, last_name, hire_date FROM employees WHERE job_id = 'ST_MAN' ORDER BY hire_date; FETCH cv BULK COLLECT INTO stock_managers; CLOSE cv; -- Print nested table of records: FOR i IN stock_managers.FIRST .. stock_managers.LAST LOOP DBMS_OUTPUT.PUT_LINE (1); END LOOP;END; / DECLARE TYPE numtab IS TABLE OF NUMBER INDEX BY PLS_INTEGER; CURSOR c1 IS SELECT employee_id FROM employees WHERE department_id = 80 ORDER BY employee_id; empids numtab; BEGIN OPEN c1; LOOP -- Fetch 10 rows or fewer in each iteration FETCH c1 BULK COLLECT INTO empids LIMIT 10; DBMS_OUTPUT.PUT_LINE ('------- Results from One Bulk Fetch --------'); FOR i IN 1..empids.COUNT LOOP DBMS_OUTPUT.PUT_LINE ('Employee Id: ' || empids(i)); END LOOP; EXIT WHEN c1%NOTFOUND; END LOOP; CLOSE c1; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/fetch.yml000066400000000000000000002070331503426445100230540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f798e59792782b96c49cc32cef036be06ede9a4d141ae27b70da8cf030148bf file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: EmpRecTyp - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: emp_id - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id binary_operator: '%' keyword: TYPE - comma: ',' - naked_identifier: salary - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: salary binary_operator: '%' keyword: TYPE - end_bracket: ) - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: desc_salary - keyword: RETURN - data_type: data_type_identifier: EmpRecTyp - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary - keyword: DESC - statement_terminator: ; - naked_identifier: highest_paid_emp - data_type: data_type_identifier: EmpRecTyp - statement_terminator: ; - naked_identifier: next_highest_paid_emp - data_type: data_type_identifier: EmpRecTyp - statement_terminator: ; - create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: nth_highest_salary - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: EmpRecTyp - keyword: IS - declare_segment: naked_identifier: emp_rec data_type: data_type_identifier: EmpRecTyp statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: desc_salary - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - naked_identifier: n - loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: desc_salary into_clause: keyword: INTO naked_identifier: emp_rec - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: desc_salary - statement_terminator: ; - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: emp_rec - statement_terminator: ; - keyword: END - object_reference: naked_identifier: nth_highest_salary - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: highest_paid_emp colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: nth_highest_salary function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: next_highest_paid_emp colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: nth_highest_salary function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Highest Paid: #'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: highest_paid_emp - dot: . - naked_identifier: emp_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', $'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: highest_paid_emp - dot: . 
- naked_identifier: salary end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Next Highest Paid: #'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: next_highest_paid_emp - dot: . - naked_identifier: emp_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', $'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: next_highest_paid_emp - dot: . - naked_identifier: salary end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: job_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: REGEXP_LIKE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: job_id - comma: ',' - expression: quoted_literal: "'S[HT]_CLERK'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - naked_identifier: v_lastname - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: v_jobid - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: job_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c2 - keyword: IS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: REGEXP_LIKE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: job_id - comma: ',' - expression: quoted_literal: "'[ACADFIMKSA]_M[ANGR]'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: job_id - statement_terminator: ; - naked_identifier: v_employees - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: - keyword: INTO - naked_identifier: v_lastname - comma: ',' - naked_identifier: v_jobid - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v_lastname - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: v_jobid end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c2 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c2 into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c2 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: last_name - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: job_id end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c2 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - cursor_variable: - keyword: CURSOR - naked_identifier: c - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: job_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: job_title from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: jobs alias_expression: naked_identifier: j where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: e - dot: . - naked_identifier: job_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: j - dot: . - naked_identifier: job_id - binary_operator: AND - column_reference: - naked_identifier: e - dot: . 
- naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - naked_identifier: job1 - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: job2 - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: job3 - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: job4 - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: job5 - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: keyword: INTO naked_identifier: job1 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: keyword: INTO naked_identifier: job2 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: keyword: INTO naked_identifier: job3 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: keyword: INTO naked_identifier: job4 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c into_clause: keyword: INTO naked_identifier: job5 - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: job1 - dot: . - naked_identifier: job_title - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' ('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: job1 - dot: . - naked_identifier: job_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: job2 - dot: . - naked_identifier: job_title - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' ('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: job2 - dot: . - naked_identifier: job_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: job3 - dot: . - naked_identifier: job_title - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' ('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: job3 - dot: . 
- naked_identifier: job_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: job4 - dot: . - naked_identifier: job_title - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' ('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: job4 - dot: . - naked_identifier: job_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: job5 - dot: . - naked_identifier: job_title - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' ('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: job5 - dot: . - naked_identifier: job_id - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: cv - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - naked_identifier: v_lastname - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: v_jobid - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: job_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: query_2 - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '200' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'SELECT * FROM employees\n WHERE REGEXP_LIKE (job_id,\ \ ''[ACADFIMKSA]_M[ANGR]'')\n ORDER BY job_id'" - statement_terminator: ; - naked_identifier: v_employees - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: job_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: REGEXP_LIKE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: job_id - comma: ',' - expression: quoted_literal: "'S[HT]_CLERK'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: - keyword: INTO - naked_identifier: v_lastname - comma: ',' - naked_identifier: v_jobid - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v_lastname - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: v_jobid end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - naked_identifier: query_2 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: last_name - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: job_id end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: cv - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - ref_cursor_type: - keyword: TYPE - naked_identifier: empcurtyp - keyword: IS - keyword: REF - keyword: CURSOR - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: namelist - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: sallist - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: emp_cv - data_type: data_type_identifier: empcurtyp - statement_terminator: ; - naked_identifier: names - data_type: data_type_identifier: namelist - statement_terminator: ; - naked_identifier: sals - data_type: data_type_identifier: sallist - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: emp_cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SA_REP'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary - keyword: DESC - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: emp_cv bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: names - comma: ',' - naked_identifier: sals - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: emp_cv - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: names - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: names - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Name = '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', salary = '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: sals function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: - select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp - keyword: FOR - keyword: UPDATE - keyword: OF - table_reference: naked_identifier: salary - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; naked_identifier: emp_rec row_type_reference: table_reference: naked_identifier: emp binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: emp_rec - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'emp_rec.employee_id = '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: emp_rec - dot: . 
- naked_identifier: employee_id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '1.05' where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '105' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - ref_cursor_type: - keyword: TYPE - naked_identifier: EmpCurTyp - keyword: IS - keyword: REF - keyword: CURSOR - statement_terminator: ; - naked_identifier: v_emp_cursor - data_type: data_type_identifier: EmpCurTyp - statement_terminator: ; - naked_identifier: emp_record - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: v_stmt_str - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '200' end_bracket: ) - statement_terminator: ; - naked_identifier: v_e_job - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: job_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: v_stmt_str colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'SELECT * FROM employees WHERE job_id = :j'" - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: v_emp_cursor - keyword: FOR - naked_identifier: v_stmt_str - keyword: USING - quoted_identifier: "'MANAGER'" - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: v_emp_cursor into_clause: keyword: INTO naked_identifier: emp_record - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: v_emp_cursor binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: v_emp_cursor - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NameList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: SalList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '>' numeric_literal: '10000' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - naked_identifier: names - data_type: data_type_identifier: NameList - statement_terminator: ; - naked_identifier: sals - data_type: data_type_identifier: SalList - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: RecList - keyword: IS - keyword: TABLE - keyword: OF - row_type_reference: table_reference: naked_identifier: c1 binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: recs - data_type: data_type_identifier: RecList - statement_terminator: ; - naked_identifier: v_limit - data_type: data_type_identifier: PLS_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '10' - statement_terminator: ; - create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: print_results - keyword: IS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: names - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: - naked_identifier: names - dot: . - naked_identifier: COUNT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'No results!'" end_bracket: ) - statement_terminator: ; - keyword: ELSE - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Result: '" end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: names - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: names - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "' Employee '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "': $'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: sals function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'--- Processing all results simultaneously ---'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c1 bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: names - comma: ',' - naked_identifier: sals - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_results function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'--- Processing '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: v_limit - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' rows at a time ---'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: - keyword: FETCH - naked_identifier: c1 - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: names - comma: ',' - naked_identifier: sals - keyword: LIMIT - naked_identifier: v_limit - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: column_reference: - naked_identifier: names - dot: . - naked_identifier: COUNT comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: function: function_name: function_name_identifier: print_results function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'--- Fetching records rather than columns ---'" end_bracket: ) - statement_terminator: ; - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c1 bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: recs - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: recs - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: recs - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: NameSet - keyword: IS - keyword: TABLE - keyword: OF - row_type_reference: table_reference: naked_identifier: c1 binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: stock_managers - data_type: data_type_identifier: NameSet - statement_terminator: ; - ref_cursor_type: - keyword: TYPE - naked_identifier: cursor_var_type - keyword: is - keyword: REF - keyword: CURSOR - statement_terminator: ; - naked_identifier: cv - data_type: data_type_identifier: cursor_var_type - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_id comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ST_MAN'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: cv bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: stock_managers - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: cv - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: stock_managers - dot: . - naked_identifier: FIRST - dot: . - dot: . 
- naked_identifier: stock_managers - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: numtab - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: NUMBER - keyword: INDEX - keyword: BY - data_type: data_type_identifier: PLS_INTEGER - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: IS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: employee_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '80' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; - naked_identifier: empids - data_type: data_type_identifier: numtab - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: - keyword: FETCH - naked_identifier: c1 - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: empids - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'------- Results from One Bulk Fetch --------'" end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - naked_identifier: empids - dot: . - naked_identifier: COUNT - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Employee Id: '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: empids function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/fetch_first_row_only.sql000066400000000000000000000003711503426445100262050ustar00rootroot00000000000000select column_name from table_name fetch first row only; select column_name from table_name fetch first rows only; select column_name from table_name fetch first 2 row only; select column_name from table_name fetch first 2 rows only; sqlfluff-3.4.2/test/fixtures/dialects/oracle/fetch_first_row_only.yml000066400000000000000000000047621503426445100262170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 536341383952d140b9f33fde33d846482376e3b154d8e8fd49cbd82d2ddfa34e file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: rows - keyword: only - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/for_loop.sql000066400000000000000000000044361503426445100236020ustar00rootroot00000000000000BEGIN FOR i IN 1..3 LOOP DBMS_OUTPUT.PUT_LINE (i); END LOOP; END; / BEGIN FOR i IN REVERSE 1..3 LOOP DBMS_OUTPUT.PUT_LINE (i); END LOOP; END; / 
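-- [Editor's sketch; not part of the generated fixture set for this file]
-- The two blocks above exercise plain and REVERSE range iteration. Later
-- blocks in this file close loops by name (END LOOP inner_loop, END LOOP
-- outer_loop), so, as a hedged illustration only, the matching <<label>>
-- declaration syntax looks like the following; the label name "demo_loop"
-- is invented for this example and appears nowhere else in the fixtures:
BEGIN
  <<demo_loop>>
  FOR i IN 1..3 LOOP
    DBMS_OUTPUT.PUT_LINE (i);
  END LOOP demo_loop;
END;
/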
BEGIN FOR power IN 1, REPEAT power*2 WHILE power <= 64 LOOP DBMS_OUTPUT.PUT_LINE(power); END LOOP; END; / BEGIN FOR power IN 2, REPEAT power*2 WHILE power <= 64 WHEN MOD(power, 32)= 0 LOOP DBMS_OUTPUT.PUT_LINE(power); END LOOP; END; / BEGIN FOR i IN 1..3 LOOP DBMS_OUTPUT.PUT_LINE ('Inside loop, i is ' || TO_CHAR(i)); END LOOP; DBMS_OUTPUT.PUT_LINE ('Outside loop, i is ' || TO_CHAR(i)); END; / DECLARE i NUMBER := 5; BEGIN FOR i IN 1..3 LOOP DBMS_OUTPUT.PUT_LINE ('Inside loop, i is ' || TO_CHAR(i)); END LOOP; DBMS_OUTPUT.PUT_LINE ('Outside loop, i is ' || TO_CHAR(i)); END; / DECLARE i NUMBER := 5; BEGIN FOR i IN 1..3 LOOP DBMS_OUTPUT.PUT_LINE ( 'local: ' || TO_CHAR(i) || ', global: ' || TO_CHAR(main.i) -- Qualify reference with block label. ); END LOOP; END main; / BEGIN FOR i IN 1..3 LOOP FOR i IN 1..3 LOOP IF outer_loop.i = 2 THEN DBMS_OUTPUT.PUT_LINE ('outer: ' || TO_CHAR(outer_loop.i) || ' inner: ' || TO_CHAR(inner_loop.i)); END IF; END LOOP inner_loop; END LOOP outer_loop; END; / DECLARE v_employees employees%ROWTYPE; CURSOR c1 is SELECT * FROM employees; BEGIN OPEN c1; -- Fetch entire row into v_employees record: FOR i IN 1..10 LOOP FETCH c1 INTO v_employees; EXIT WHEN c1%NOTFOUND; -- Process data here END LOOP; CLOSE c1; END; / DECLARE v_employees employees%ROWTYPE; CURSOR c1 is SELECT * FROM employees; BEGIN OPEN c1; -- Fetch entire row into v_employees record: FOR i IN 1..10 LOOP -- Process data here FOR j IN 1..10 LOOP FETCH c1 INTO v_employees; EXIT outer_loop WHEN c1%NOTFOUND; -- Process data here END LOOP; END LOOP outer_loop; CLOSE c1; END; / DECLARE v_employees employees%ROWTYPE; CURSOR c1 is SELECT * FROM employees; BEGIN OPEN c1; -- Fetch entire row into v_employees record: FOR i IN 1..10 LOOP -- Process data here FOR j IN 1..10 LOOP FETCH c1 INTO v_employees; CONTINUE outer_loop WHEN c1%NOTFOUND; -- Process data here END LOOP; END LOOP outer_loop; CLOSE c1; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/for_loop.yml000066400000000000000000000611741503426445100236060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8b31abe9a78fb23d45db60eaaa3ba6cb3a5abf9840e89ff17c211068515a17b1 file: - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - keyword: REVERSE - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: power - keyword: IN - expression: numeric_literal: '1' - comma: ',' - keyword: REPEAT - expression: column_reference: naked_identifier: power binary_operator: '*' numeric_literal: '2' - keyword: WHILE - expression: column_reference: naked_identifier: power comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '64' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: power end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: power - keyword: IN - expression: numeric_literal: '2' - comma: ',' - keyword: REPEAT - expression: column_reference: naked_identifier: power binary_operator: '*' numeric_literal: '2' - keyword: WHILE - expression: column_reference: naked_identifier: power comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '64' - keyword: WHEN - expression: function: function_name: function_name_identifier: MOD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: power - comma: ',' - expression: numeric_literal: '32' - end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: power end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop, i is '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Outside loop, i is '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: i data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '5' statement_terminator: ; - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside loop, i is '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Outside loop, i is '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: i data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '5' statement_terminator: ; - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'local: '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', global: '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: main - dot: . - naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - object_reference: naked_identifier: main - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . 
- numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: - naked_identifier: outer_loop - dot: . - naked_identifier: i comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'outer: '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: outer_loop - dot: . - naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' inner: '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: inner_loop - dot: . - naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: LOOP - naked_identifier: inner_loop - statement_terminator: ; - keyword: END - keyword: LOOP - naked_identifier: outer_loop - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: v_employees row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE statement_terminator: ; cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: is - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . 
- numeric_literal: '10' - loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: v_employees row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE statement_terminator: ; cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: is - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '10' - loop_statement: - keyword: LOOP - statement: for_loop_statement: - keyword: FOR - naked_identifier: j - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '10' - loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - naked_identifier: outer_loop - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - keyword: LOOP - naked_identifier: outer_loop - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: v_employees row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE statement_terminator: ; cursor_variable: - keyword: CURSOR - naked_identifier: c1 - keyword: is - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - keyword: BEGIN - statement: open_statement: keyword: OPEN naked_identifier: c1 - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - numeric_literal: '10' - loop_statement: - keyword: LOOP - statement: for_loop_statement: - keyword: FOR - naked_identifier: j - keyword: IN - numeric_literal: '1' - dot: . - dot: . 
- numeric_literal: '10' - loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: continue_statement: - keyword: CONTINUE - naked_identifier: outer_loop - keyword: WHEN - expression: naked_identifier: c1 binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - keyword: LOOP - naked_identifier: outer_loop - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/forall.sql000066400000000000000000000041021503426445100232300ustar00rootroot00000000000000DECLARE TYPE NumList IS VARRAY(20) OF NUMBER; depts NumList := NumList(10, 30, 70); -- department numbers BEGIN FORALL i IN depts.FIRST..depts.LAST DELETE FROM employees_temp WHERE department_id = depts(i); END; / DECLARE TYPE NumTab IS TABLE OF parts1.pnum%TYPE INDEX BY PLS_INTEGER; TYPE NameTab IS TABLE OF parts1.pname%TYPE INDEX BY PLS_INTEGER; pnums NumTab; pnames NameTab; iterations CONSTANT PLS_INTEGER := 50000; t1 INTEGER; t2 INTEGER; t3 INTEGER; BEGIN FOR j IN 1..iterations LOOP -- populate collections pnums(j) := j; pnames(j) := 'Part No. ' || TO_CHAR(j); END LOOP; t1 := DBMS_UTILITY.get_time; FOR i IN 1..iterations LOOP INSERT INTO parts1 (pnum, pname) VALUES (pnums(i), pnames(i)); END LOOP; t2 := DBMS_UTILITY.get_time; FORALL i IN 1..iterations INSERT INTO parts2 (pnum, pname) VALUES (pnums(i), pnames(i)); t3 := DBMS_UTILITY.get_time; DBMS_OUTPUT.PUT_LINE('Execution Time (secs)'); DBMS_OUTPUT.PUT_LINE('---------------------'); DBMS_OUTPUT.PUT_LINE('FOR LOOP: ' || TO_CHAR((t2 - t1)/100)); DBMS_OUTPUT.PUT_LINE('FORALL: ' || TO_CHAR((t3 - t2)/100)); COMMIT; END; / DECLARE TYPE NumList IS VARRAY(10) OF NUMBER; depts NumList := NumList(5,10,20,30,50,55,57,60,70,75); BEGIN FORALL j IN 4..7 DELETE FROM employees_temp WHERE department_id = depts(j); END; / CREATE OR REPLACE PROCEDURE p AUTHID DEFINER AS TYPE NumList IS TABLE OF NUMBER; depts NumList := NumList(10, 20, 30); error_message VARCHAR2(100); BEGIN -- Populate table: INSERT INTO emp_temp (deptno, job) VALUES (10, 'Clerk'); INSERT INTO emp_temp (deptno, job) VALUES (20, 'Bookkeeper'); INSERT INTO emp_temp (deptno, job) VALUES (30, 'Analyst'); COMMIT; -- Append 9-character string to each job: FORALL j IN depts.FIRST..depts.LAST UPDATE emp_temp SET job = job || ' (Senior)' WHERE deptno = depts(j); EXCEPTION WHEN OTHERS THEN error_message := SQLERRM; DBMS_OUTPUT.PUT_LINE (error_message); COMMIT; -- Commit results of successful updates RAISE; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/forall.yml000066400000000000000000000611711503426445100232430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
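# [Editor's note] As a minimal sketch of the regeneration workflow described
# in the comment above (the command is taken verbatim from that comment; no
# extra flags or arguments are assumed here), run from the repository root:
#   python test/generate_parse_fixture_yml.py
# after adding or altering the .sql fixtures, which should regenerate this
# file and recompute the "_hash" field below.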
_hash: c94c7ad0bc53b6dc0a0079fed3d8aca5d30676b2f370bce39cc1e2d87ae8d062 file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NumList - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - keyword: OF - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: depts - data_type: data_type_identifier: NumList - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NumList function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '70' - end_bracket: ) - statement_terminator: ; - keyword: BEGIN - statement: forall_statement: - keyword: FORALL - naked_identifier: i - keyword: IN - naked_identifier: depts - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: depts - dot: . - naked_identifier: LAST - delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees_temp where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: depts function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NumTab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: parts1 - dot: . - naked_identifier: pnum binary_operator: '%' keyword: TYPE - keyword: INDEX - keyword: BY - data_type: data_type_identifier: PLS_INTEGER - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: NameTab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: parts1 - dot: . - naked_identifier: pname binary_operator: '%' keyword: TYPE - keyword: INDEX - keyword: BY - data_type: data_type_identifier: PLS_INTEGER - statement_terminator: ; - naked_identifier: pnums - data_type: data_type_identifier: NumTab - statement_terminator: ; - naked_identifier: pnames - data_type: data_type_identifier: NameTab - statement_terminator: ; - naked_identifier: iterations - keyword: CONSTANT - data_type: data_type_identifier: PLS_INTEGER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '50000' - statement_terminator: ; - naked_identifier: t1 - data_type: data_type_identifier: INTEGER - statement_terminator: ; - naked_identifier: t2 - data_type: data_type_identifier: INTEGER - statement_terminator: ; - naked_identifier: t3 - data_type: data_type_identifier: INTEGER - statement_terminator: ; - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: j - keyword: IN - numeric_literal: '1' - dot: . - dot: . 
- naked_identifier: iterations - loop_statement: - keyword: LOOP - statement: assignment_segment_statement: object_reference: naked_identifier: pnums bracketed: start_bracket: ( object_reference: naked_identifier: j end_bracket: ) colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: j - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: pnames bracketed: start_bracket: ( object_reference: naked_identifier: j end_bracket: ) colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Part No. '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: t1 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: DBMS_UTILITY - dot: . - naked_identifier: get_time - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . - naked_identifier: iterations - loop_statement: - keyword: LOOP - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: parts1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: pnum - comma: ',' - column_reference: naked_identifier: pname - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: pnums function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: pnames function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: t2 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: DBMS_UTILITY - dot: . - naked_identifier: get_time - statement_terminator: ; - statement: forall_statement: - keyword: FORALL - naked_identifier: i - keyword: IN - numeric_literal: '1' - dot: . - dot: . 
- naked_identifier: iterations - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: parts2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: pnum - comma: ',' - column_reference: naked_identifier: pname - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: pnums function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: pnames function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: t3 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: DBMS_UTILITY - dot: . - naked_identifier: get_time - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Execution Time (secs)'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'---------------------'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'FOR LOOP: '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: t2 - binary_operator: '-' - column_reference: naked_identifier: t1 end_bracket: ) binary_operator: / numeric_literal: '100' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'FORALL: '" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: t3 - binary_operator: '-' - column_reference: naked_identifier: t2 end_bracket: ) binary_operator: / numeric_literal: '100' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NumList - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: OF - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: depts - data_type: data_type_identifier: NumList - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NumList function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '50' - comma: ',' - expression: numeric_literal: '55' - comma: ',' - expression: numeric_literal: '57' - comma: ',' - expression: numeric_literal: '60' - comma: ',' - expression: numeric_literal: '70' - comma: ',' - expression: numeric_literal: '75' - end_bracket: ) - statement_terminator: ; - keyword: BEGIN - statement: forall_statement: - keyword: FORALL - naked_identifier: j - keyword: IN - numeric_literal: '4' - dot: . - dot: . 
- numeric_literal: '7' - delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees_temp where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: depts function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: p - keyword: AUTHID - keyword: DEFINER - keyword: AS - declare_segment: - collection_type: - keyword: TYPE - naked_identifier: NumList - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: depts - data_type: data_type_identifier: NumList - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NumList function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - statement_terminator: ; - naked_identifier: error_message - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: emp_temp - bracketed: - start_bracket: ( - column_reference: naked_identifier: deptno - comma: ',' - column_reference: naked_identifier: job - end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '10' comma: ',' quoted_literal: "'Clerk'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: emp_temp - bracketed: - start_bracket: ( - column_reference: naked_identifier: deptno - comma: ',' - column_reference: naked_identifier: job - end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '20' comma: ',' quoted_literal: "'Bookkeeper'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: emp_temp - bracketed: - start_bracket: ( - column_reference: naked_identifier: deptno - comma: ',' - column_reference: naked_identifier: job - end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '30' comma: ',' quoted_literal: "'Analyst'" end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: forall_statement: - keyword: FORALL - naked_identifier: j - keyword: IN - naked_identifier: depts - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: depts - dot: . 
- naked_identifier: LAST - update_statement: keyword: UPDATE table_reference: naked_identifier: emp_temp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: job comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: job binary_operator: - pipe: '|' - pipe: '|' quoted_literal: "' (Senior)'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: deptno comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: depts function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: OTHERS - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: error_message colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: SQLERRM - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: error_message end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: raise_statement: keyword: RAISE - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: /

sqlfluff-3.4.2/test/fixtures/dialects/oracle/hierarchical_queries.sql

SELECT employee_id, last_name, manager_id
FROM employees
CONNECT BY PRIOR employee_id = manager_id;

SELECT employee_id, last_name, manager_id, LEVEL
FROM employees
CONNECT BY PRIOR employee_id = manager_id;

SELECT last_name, employee_id, manager_id, LEVEL
FROM employees
START WITH employee_id = 100
CONNECT BY PRIOR employee_id = manager_id
ORDER SIBLINGS BY last_name;

SELECT last_name "Employee", LEVEL, SYS_CONNECT_BY_PATH(last_name, '/') "Path"
FROM employees
WHERE level <= 3 AND department_id = 80
START WITH last_name = 'King'
CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 4;

SELECT last_name "Employee", CONNECT_BY_ISCYCLE "Cycle", LEVEL, SYS_CONNECT_BY_PATH(last_name, '/') "Path"
FROM employees
WHERE level <= 3 AND department_id = 80
START WITH last_name = 'King'
CONNECT BY NOCYCLE PRIOR employee_id = manager_id AND LEVEL <= 4
ORDER BY "Employee", "Cycle", LEVEL, "Path";

SELECT LTRIM(SYS_CONNECT_BY_PATH (warehouse_id,','),',')
FROM (SELECT ROWNUM r, warehouse_id FROM warehouses)
WHERE CONNECT_BY_ISLEAF = 1
START WITH r = 1
CONNECT BY r = PRIOR r + 1
ORDER BY warehouse_id;

SELECT last_name "Employee", CONNECT_BY_ROOT last_name "Manager", LEVEL-1 "Pathlen", SYS_CONNECT_BY_PATH(last_name, '/') "Path"
FROM employees
WHERE LEVEL > 1 and department_id = 110
CONNECT BY PRIOR employee_id = manager_id
ORDER BY "Employee", "Manager", "Pathlen", "Path";

SELECT name, SUM(salary) "Total_Salary"
FROM (
    SELECT CONNECT_BY_ROOT last_name as name, Salary
    FROM employees
    WHERE department_id = 110
    CONNECT BY PRIOR employee_id = manager_id)
GROUP BY name
ORDER BY name, "Total_Salary";

sqlfluff-3.4.2/test/fixtures/dialects/oracle/hierarchical_queries.yml

# YML test files are auto-generated from SQL files and should not be edited by # hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5cd22054b73c6bc9aebbea851f9df614763a44922e251854c368e9ce175ede44 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: keyword: LEVEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: keyword: LEVEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '100' connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id orderby_clause: - keyword: ORDER - keyword: SIBLINGS - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: keyword: LEVEL - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: 
table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - keyword: level - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '3' - binary_operator: AND - column_reference: naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'King'" connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - keyword: LEVEL - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: column_reference: naked_identifier: CONNECT_BY_ISCYCLE alias_expression: quoted_identifier: '"Cycle"' - comma: ',' - select_clause_element: keyword: LEVEL - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - keyword: level - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '3' - binary_operator: AND - column_reference: naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'King'" connectby_clause: - keyword: CONNECT - keyword: BY - keyword: NOCYCLE - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - keyword: LEVEL - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '4' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Employee"' - comma: ',' - column_reference: quoted_identifier: '"Cycle"' - comma: ',' - expression: keyword: LEVEL - comma: ',' - column_reference: quoted_identifier: '"Path"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LTRIM function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: warehouse_id - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: keyword: ROWNUM alias_expression: naked_identifier: r - comma: ',' - select_clause_element: column_reference: naked_identifier: warehouse_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: warehouses end_bracket: ) where_clause: keyword: WHERE expression: column_reference: naked_identifier: CONNECT_BY_ISLEAF comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: r comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' connectby_clause: - keyword: CONNECT - keyword: BY - expression: - column_reference: naked_identifier: r - comparison_operator: raw_comparison_operator: '=' - keyword: PRIOR - column_reference: naked_identifier: r - binary_operator: + - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: warehouse_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: keyword: CONNECT_BY_ROOT naked_identifier: last_name alias_expression: quoted_identifier: '"Manager"' - comma: ',' - select_clause_element: expression: keyword: LEVEL binary_operator: '-' numeric_literal: '1' alias_expression: quoted_identifier: '"Pathlen"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - keyword: LEVEL - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - binary_operator: and - column_reference: naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '110' hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Employee"' - comma: ',' - column_reference: quoted_identifier: '"Manager"' - comma: ',' - column_reference: quoted_identifier: '"Pathlen"' - comma: ',' - column_reference: quoted_identifier: '"Path"' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) alias_expression: quoted_identifier: '"Total_Salary"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( 
select_statement: select_clause: - keyword: SELECT - select_clause_element: keyword: CONNECT_BY_ROOT naked_identifier: last_name alias_expression: alias_operator: keyword: as naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: Salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '110' hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: quoted_identifier: '"Total_Salary"' - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/if.sql

DECLARE
  PROCEDURE p (
    sales  NUMBER,
    quota  NUMBER,
    emp_id NUMBER
  )
  IS
    bonus   NUMBER := 0;
    updated VARCHAR2(3) := 'No';
  BEGIN
    IF sales > (quota + 200) THEN
      bonus := (sales - quota)/4;

      UPDATE employees
      SET salary = salary + bonus
      WHERE employee_id = emp_id;

      updated := 'Yes';
    END IF;

    DBMS_OUTPUT.PUT_LINE (
      'Table updated? ' || updated || ', ' || 'bonus = ' || bonus || '.'
    );
  END p;
BEGIN
  p(10100, 10000, 120);
  p(10500, 10000, 121);
END;
/

DECLARE
  PROCEDURE p (
    sales  NUMBER,
    quota  NUMBER,
    emp_id NUMBER
  )
  IS
    bonus NUMBER := 0;
  BEGIN
    IF sales > (quota + 200) THEN
      bonus := (sales - quota)/4;
    ELSE
      bonus := 50;
    END IF;

    DBMS_OUTPUT.PUT_LINE('bonus = ' || bonus);

    UPDATE employees
    SET salary = salary + bonus
    WHERE employee_id = emp_id;
  END p;
BEGIN
  p(10100, 10000, 120);
  p(10500, 10000, 121);
END;
/

DECLARE
  PROCEDURE p (
    sales  NUMBER,
    quota  NUMBER,
    emp_id NUMBER
  )
  IS
    bonus NUMBER := 0;
  BEGIN
    IF sales > (quota + 200) THEN
      bonus := (sales - quota)/4;
    ELSE
      IF sales > quota THEN
        bonus := 50;
      ELSE
        bonus := 0;
      END IF;
    END IF;

    DBMS_OUTPUT.PUT_LINE('bonus = ' || bonus);

    UPDATE employees
    SET salary = salary + bonus
    WHERE employee_id = emp_id;
  END p;
BEGIN
  p(10100, 10000, 120);
  p(10500, 10000, 121);
  p(9500, 10000, 122);
END;
/

DECLARE
  PROCEDURE p (sales NUMBER)
  IS
    bonus NUMBER := 0;
  BEGIN
    IF sales > 50000 THEN
      bonus := 1500;
    ELSIF sales > 35000 THEN
      bonus := 500;
    ELSE
      bonus := 100;
    END IF;

    DBMS_OUTPUT.PUT_LINE (
      'Sales = ' || sales || ', bonus = ' || bonus || '.'
    );
  END p;
BEGIN
  p(55000);
  p(40000);
  p(30000);
END;
/

sqlfluff-3.4.2/test/fixtures/dialects/oracle/if.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e67ce96f8afb40f1f0d3294f475582134acf57bc2b9c31635c2c8095a9878978 file: - statement: begin_end_block: - declare_segment: keyword: DECLARE create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: - start_bracket: ( - parameter: sales - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: quota - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: emp_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: - naked_identifier: bonus - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - statement_terminator: ; - naked_identifier: updated - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'No'" - statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: column_reference: naked_identifier: quota binary_operator: + numeric_literal: '200' end_bracket: ) - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales - binary_operator: '-' - column_reference: naked_identifier: quota end_bracket: ) binary_operator: / numeric_literal: '4' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: bonus where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: updated colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Yes'" - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Table updated? 
'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: updated - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'bonus = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: bonus - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" end_bracket: ) - statement_terminator: ; - keyword: END - object_reference: naked_identifier: p - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10100' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '120' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10500' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '121' - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: - start_bracket: ( - parameter: sales - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: quota - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: emp_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: bonus data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: column_reference: naked_identifier: quota binary_operator: + numeric_literal: '200' end_bracket: ) - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales - binary_operator: '-' - column_reference: naked_identifier: quota end_bracket: ) binary_operator: / numeric_literal: '4' - statement_terminator: ; - keyword: ELSE - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '50' - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'bonus = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: bonus end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: bonus where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - keyword: END - object_reference: naked_identifier: p - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10100' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '120' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10500' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '121' - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: - start_bracket: ( - parameter: sales - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: quota - data_type: data_type_identifier: NUMBER - comma: ',' - parameter: emp_id - data_type: data_type_identifier: NUMBER - end_bracket: ) - keyword: IS - declare_segment: naked_identifier: bonus data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: column_reference: naked_identifier: quota binary_operator: + numeric_literal: '200' end_bracket: ) - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales - binary_operator: '-' - column_reference: naked_identifier: quota end_bracket: ) binary_operator: / numeric_literal: '4' - statement_terminator: ; - keyword: ELSE - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: sales - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: quota - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '50' - statement_terminator: ; - keyword: ELSE - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' 
comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'bonus = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: bonus end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: salary - binary_operator: + - column_reference: naked_identifier: bonus where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - keyword: END - object_reference: naked_identifier: p - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10100' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '120' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10500' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '121' - end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '9500' - comma: ',' - expression: numeric_literal: '10000' - comma: ',' - expression: numeric_literal: '122' - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: start_bracket: ( parameter: sales data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: IS - declare_segment: naked_identifier: bonus data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' numeric_literal: '50000' - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1500' - statement_terminator: ; - keyword: ELSIF - expression: column_reference: naked_identifier: sales comparison_operator: raw_comparison_operator: '>' numeric_literal: '35000' - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '500' - statement_terminator: ; - keyword: ELSE - statement: assignment_segment_statement: 
object_reference: naked_identifier: bonus colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '100' - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Sales = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: sales - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', bonus = '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: bonus - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" end_bracket: ) - statement_terminator: ; - keyword: END - object_reference: naked_identifier: p - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: p function_contents: bracketed: start_bracket: ( expression: numeric_literal: '55000' end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: start_bracket: ( expression: numeric_literal: '40000' end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: p function_contents: bracketed: start_bracket: ( expression: numeric_literal: '30000' end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: /

sqlfluff-3.4.2/test/fixtures/dialects/oracle/interval_operations.sql

select 1
from dual
where sysdate > sysdate - interval '2' hour;

select sysdate - interval '3' year from dual;

select interval '2 3:04:11.333' day to second from dual;

select 1
from dual
where sysdate > to_date('01/01/1970', 'dd/mm/yyyy') + interval '600' month;

select sysdate + interval '10' minute from dual;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/interval_operations.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9cd1d5fc1f9dde36e2cffb518e1baf9011a19d7b21ca9280d6075009b7d11c54
file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - bare_function: sysdate - comparison_operator: raw_comparison_operator: '>' - bare_function: sysdate - binary_operator: '-' - keyword: interval - date_constructor_literal: "'2'" - keyword: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - bare_function: sysdate - binary_operator: '-' - keyword: interval - date_constructor_literal: "'3'" - keyword: year from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: - keyword: interval - date_constructor_literal: "'2 3:04:11.333'" - keyword: day - keyword: to - keyword: second from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - bare_function: sysdate - comparison_operator: raw_comparison_operator: '>' - function: function_name: function_name_identifier: to_date function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'01/01/1970'" - comma: ',' - expression: quoted_literal: "'dd/mm/yyyy'" - end_bracket: ) - binary_operator: + - keyword: interval - date_constructor_literal: "'600'" - keyword: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - bare_function: sysdate - binary_operator: + - keyword: interval - date_constructor_literal: "'10'" - keyword: minute from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/join_types.sql

-- inner join
SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
INNER JOIN department ON employee.deptno = department.deptno;

-- left join
SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
LEFT JOIN department ON employee.deptno = department.deptno;

SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
LEFT OUTER JOIN department ON employee.deptno = department.deptno;

-- right join
SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
RIGHT JOIN department ON employee.deptno = department.deptno;

SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
RIGHT OUTER JOIN department ON employee.deptno = department.deptno;

-- full join
SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
FULL JOIN department ON employee.deptno = department.deptno;

SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
FULL OUTER JOIN department ON employee.deptno = department.deptno;

-- cross join
SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee
CROSS JOIN department;

SELECT employee.id, employee.name, employee.deptno, department.deptname
FROM employee, department;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/join_types.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 90e19f9063ac31d3d2dba6ebfbd6c2ddaceb381a3fc409787f58bb4f83ad59a2
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: .
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/json_object.sql

SELECT JSON_OBJECT(
    'name' : first_name || ' ' || last_name,
    'email' : email,
    'phone' : phone_number,
    'hire_date' : hire_date
)
FROM employees
WHERE employee_id = 140;

SELECT JSON_OBJECT(*) FROM employees WHERE employee_id = 140;

SELECT JSON_OBJECT('NAME' VALUE first_name)
FROM employees e, departments d
WHERE e.department_id = d.department_id AND e.employee_id = 140;

SELECT JSON_ARRAYAGG(JSON_OBJECT(*)) FROM departments;

SELECT JSON_OBJECT ('name' value 'Foo') FROM DUAL;

SELECT JSON_OBJECT ('name' value 'Foo' FORMAT JSON ) FROM DUAL;

SELECT JSON_OBJECT (
    KEY 'deptno' VALUE d.department_id,
    KEY 'deptname' VALUE d.department_name
) "Department Objects"
FROM departments d
ORDER BY d.department_id;

SELECT JSON_OBJECT(first_name, last_name, email, hire_date)
FROM employees
WHERE employee_id = 140;

SELECT JSON_OBJECT(eMail) FROM employees WHERE employee_id = 140;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/json_object.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9a8e4e050743f228c2747538c6facfef1c90a9fdf99b5a11036c10699d4d7f0f
file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'name'" - colon: ':' - expression: - column_reference: naked_identifier: first_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'email'" - colon: ':' - expression: column_reference: naked_identifier: email - comma: ',' - expression: quoted_literal: "'phone'" - colon: ':' - expression: column_reference: naked_identifier: phone_number - comma: ',' - expression: quoted_literal: "'hire_date'" - colon: ':' - expression: column_reference: naked_identifier: hire_date - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '140' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal:
'140' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: start_bracket: ( quoted_literal: "'NAME'" keyword: VALUE expression: column_reference: naked_identifier: first_name end_bracket: ) from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: e - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: department_id - binary_operator: AND - column_reference: - naked_identifier: e - dot: . - naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '140' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_ARRAYAGG function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: start_bracket: ( quoted_literal: "'name'" keyword: value expression: quoted_literal: "'Foo'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DUAL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - keyword: value - expression: quoted_literal: "'Foo'" - keyword: FORMAT - keyword: JSON - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DUAL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - keyword: KEY - quoted_literal: "'deptno'" - keyword: VALUE - expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: department_id - comma: ',' - keyword: KEY - quoted_literal: "'deptname'" - keyword: VALUE - expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: department_name - end_bracket: ) alias_expression: quoted_identifier: '"Department Objects"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: d - dot: . 
- naked_identifier: department_id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: first_name - comma: ',' - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: column_reference: naked_identifier: email - comma: ',' - expression: column_reference: naked_identifier: hire_date - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '140' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_OBJECT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: eMail end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '140' - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/lateral.sql

-- inner join with a lateral
SELECT t1.id,
    t2.id AS t2_id,
    t2.col1
FROM tbl1 t1
INNER JOIN LATERAL (SELECT id, col1 FROM tbl2) t2 ON t1.id = t2.id;

-- cross join with a lateral
SELECT t1.id,
    t2.id AS t2_id,
    t2.col1
FROM tbl1 t1
CROSS JOIN LATERAL (
    SELECT id, col1
    FROM tbl2
) t2;

-- comma cross join with a lateral
SELECT t1.id,
    t2.id AS t2_id,
    t2.col1
FROM tbl1 t1,
    LATERAL (SELECT id, col1 FROM tbl2) t2;

sqlfluff-3.4.2/test/fixtures/dialects/oracle/lateral.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: acd2d7ccea6b8957dd10d36e45d32f26fb39e85e66e1b179bebe4b6fbd3b7eb9
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: t2_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: .
- naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: keyword: LATERAL table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) alias_expression: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: t2_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: naked_identifier: t1 join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: keyword: LATERAL table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) alias_expression: naked_identifier: t2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: t2_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: col1 from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: naked_identifier: t1 - comma: ',' - from_expression: from_expression_element: keyword: LATERAL table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) alias_expression: naked_identifier: t2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/named_argument.sql000066400000000000000000000004311503426445100247400ustar00rootroot00000000000000--https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Function-Expressions.html#GUID-C47F0B7D-9058-481F-815E-A31FB21F3BD5 select my_function(arg1 => 3, arg2 => 4) from dual; select my_function(3, arg2 => 4) from dual; select my_function(arg1 => 3, 4) from dual; sqlfluff-3.4.2/test/fixtures/dialects/oracle/named_argument.yml000066400000000000000000000056601503426445100247530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a5fdbbf6eaa08251ecd5c0f8ccd12e22cf310f3710e32772b2131433c037dfa5 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: - start_bracket: ( - named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' - comma: ',' - named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' comma: ',' named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' comma: ',' expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/non_ansi_joins.sql000066400000000000000000000013571503426445100247700ustar00rootroot00000000000000SELECT suppliers.supplier_id, suppliers.supplier_name, orders.order_date FROM suppliers, orders WHERE suppliers.supplier_id (+) = orders.supplier_id; SELECT suppliers.supplier_id, 
suppliers.supplier_name, orders.order_date FROM suppliers, orders WHERE suppliers.supplier_id = orders.supplier_id(+); SELECT suppliers.supplier_id, suppliers.supplier_name, orders.order_date FROM suppliers, orders, customers WHERE suppliers.supplier_id = orders.supplier_id AND orders.customer_id = customers.customer_id (+); SELECT * FROM table_a, table_b WHERE column_a(+) = nvl(column_b, 1); SELECT * FROM table_a, table_b WHERE nvl(column_b, 1) = column_a(+); sqlfluff-3.4.2/test/fixtures/dialects/oracle/non_ansi_joins.yml000066400000000000000000000177101503426445100247720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 63c86ad061dc379a372315194dc7458692492d468a50742a822af96f1f6ae027 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . 
- naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - binary_operator: AND - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_b where_clause: keyword: WHERE expression: column_reference: naked_identifier: column_a bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: nvl function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: column_b - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_b where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: nvl function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: column_b - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: column_a bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/open_for.sql000066400000000000000000000042401503426445100235630ustar00rootroot00000000000000DECLARE cv SYS_REFCURSOR; -- cursor variable v_lastname employees.last_name%TYPE; -- variable for last_name v_jobid employees.job_id%TYPE; -- variable for job_id query_2 VARCHAR2(200) := 'SELECT * FROM employees WHERE REGEXP_LIKE (job_id, ''[ACADFIMKSA]_M[ANGR]'') ORDER BY job_id'; v_employees employees%ROWTYPE; -- record variable row of table BEGIN OPEN cv FOR SELECT last_name, job_id FROM employees WHERE REGEXP_LIKE (job_id, 'S[HT]_CLERK') ORDER BY last_name; LOOP -- Fetches 2 columns into variables FETCH cv INTO v_lastname, v_jobid; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_lastname, 25, ' ') || v_jobid ); END LOOP; DBMS_OUTPUT.PUT_LINE( '-------------------------------------' ); OPEN cv 
FOR query_2; LOOP -- Fetches entire row into the v_employees record FETCH cv INTO v_employees; EXIT WHEN cv%NOTFOUND; DBMS_OUTPUT.PUT_LINE( RPAD(v_employees.last_name, 25, ' ') || v_employees.job_id ); END LOOP; CLOSE cv; END; / DECLARE v1 pkg.mytab; -- collection of records v2 pkg.rec; c1 SYS_REFCURSOR; BEGIN v1(1).f1 := 1; v1(1).f2 := 'one'; OPEN c1 FOR SELECT * FROM TABLE(v1); FETCH c1 INTO v2; CLOSE c1; DBMS_OUTPUT.PUT_LINE('Values in record are ' || v2.f1 || ' and ' || v2.f2); END; / DECLARE TYPE EmpCurTyp IS REF CURSOR; v_emp_cursor EmpCurTyp; emp_record employees%ROWTYPE; v_stmt_str VARCHAR2(200); v_e_job employees.job_id%TYPE; BEGIN -- Dynamic SQL statement with placeholder: v_stmt_str := 'SELECT * FROM employees WHERE job_id = :j'; -- Open cursor & specify bind variable in USING clause: OPEN v_emp_cursor FOR v_stmt_str USING 'MANAGER'; -- Fetch rows from result set one at a time: LOOP FETCH v_emp_cursor INTO emp_record; EXIT WHEN v_emp_cursor%NOTFOUND; END LOOP; -- Close cursor: CLOSE v_emp_cursor; END; / DECLARE v1 pkg.mytab; -- collection of records v2 pkg.rec; c1 SYS_REFCURSOR; BEGIN OPEN c1 FOR 'SELECT * FROM TABLE(:1)' USING v1; FETCH c1 INTO v2; CLOSE c1; DBMS_OUTPUT.PUT_LINE('Values in record are ' || v2.f1 || ' and ' || v2.f2); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/open_for.yml000066400000000000000000000416241503426445100235740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ba630bd9468bf48d98f40ad379546cb4fa327007e81cdcdc708e49321538f6cc file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: cv - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - naked_identifier: v_lastname - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: v_jobid - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: job_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: query_2 - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '200' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'SELECT * FROM employees\n WHERE REGEXP_LIKE (job_id,\ \ ''[ACADFIMKSA]_M[ANGR]'')\n ORDER BY job_id'" - statement_terminator: ; - naked_identifier: v_employees - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: job_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: REGEXP_LIKE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: job_id - comma: ',' - expression: quoted_literal: "'S[HT]_CLERK'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: - keyword: INTO - naked_identifier: v_lastname - comma: ',' - naked_identifier: v_jobid - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v_lastname - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: v_jobid end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'-------------------------------------'" end_bracket: ) - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: cv - keyword: FOR - naked_identifier: query_2 - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: cv into_clause: keyword: INTO naked_identifier: v_employees - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: cv binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: RPAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: last_name - comma: ',' - expression: numeric_literal: '25' - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: v_employees - dot: . - naked_identifier: job_id end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: cv - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: v1 - data_type: naked_identifier: pkg dot: . data_type_identifier: mytab - statement_terminator: ; - naked_identifier: v2 - data_type: naked_identifier: pkg dot: . data_type_identifier: rec - statement_terminator: ; - naked_identifier: c1 - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: v1 function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement: assignment_segment_statement: dot: . object_reference: naked_identifier: f1 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' - statement_terminator: ; - statement: function: function_name: function_name_identifier: v1 function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement: assignment_segment_statement: dot: . object_reference: naked_identifier: f2 colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'one'" - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: c1 - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: TABLE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: v1 end_bracket: ) - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v2 - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Values in record are '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . - naked_identifier: f1 - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' and '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . 
- naked_identifier: f2 end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - ref_cursor_type: - keyword: TYPE - naked_identifier: EmpCurTyp - keyword: IS - keyword: REF - keyword: CURSOR - statement_terminator: ; - naked_identifier: v_emp_cursor - data_type: data_type_identifier: EmpCurTyp - statement_terminator: ; - naked_identifier: emp_record - row_type_reference: table_reference: naked_identifier: employees binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - naked_identifier: v_stmt_str - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '200' end_bracket: ) - statement_terminator: ; - naked_identifier: v_e_job - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: job_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: naked_identifier: v_stmt_str colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'SELECT * FROM employees WHERE job_id = :j'" - statement_terminator: ; - statement: open_for_statement: - keyword: OPEN - naked_identifier: v_emp_cursor - keyword: FOR - naked_identifier: v_stmt_str - keyword: USING - quoted_identifier: "'MANAGER'" - statement_terminator: ; - statement: loop_statement: - keyword: LOOP - statement: fetch_statement: keyword: FETCH naked_identifier: v_emp_cursor into_clause: keyword: INTO naked_identifier: emp_record - statement_terminator: ; - statement: exit_statement: - keyword: EXIT - keyword: WHEN - expression: naked_identifier: v_emp_cursor binary_operator: '%' keyword: NOTFOUND - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: v_emp_cursor - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: v1 - data_type: naked_identifier: pkg dot: . data_type_identifier: mytab - statement_terminator: ; - naked_identifier: v2 - data_type: naked_identifier: pkg dot: . data_type_identifier: rec - statement_terminator: ; - naked_identifier: c1 - data_type: data_type_identifier: SYS_REFCURSOR - statement_terminator: ; - keyword: BEGIN - statement: open_for_statement: - keyword: OPEN - naked_identifier: c1 - keyword: FOR - quoted_identifier: "'SELECT * FROM TABLE(:1)'" - keyword: USING - naked_identifier: v1 - statement_terminator: ; - statement: fetch_statement: keyword: FETCH naked_identifier: c1 into_clause: keyword: INTO naked_identifier: v2 - statement_terminator: ; - statement: close_statement: keyword: CLOSE naked_identifier: c1 - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Values in record are '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . - naked_identifier: f1 - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' and '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: v2 - dot: . 
- naked_identifier: f2 end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/pivot_unpivot.sql000066400000000000000000000022011503426445100246740ustar00rootroot00000000000000select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in ('NY' as new_york,'CT','NJ','FL','MO') ); select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in (select distinct state_code from state) ); select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in (any) ); select * from sale_stats unpivot ( quantity for product_code in ( product_a AS 'A', product_b AS 'B', product_c AS 'C' ) ); select * from sale_stats unpivot include nulls ( quantity for product_code in ( product_a AS 'A', product_b AS 'B', product_c AS 'C' ) ); select * from sale_stats unpivot ( (quantity, amount) for product_code in ( (a_qty, a_value) as 'A', (b_qty, b_value) as 'B' ) ); select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) as state_code for state_code in (any) ); sqlfluff-3.4.2/test/fixtures/dialects/oracle/pivot_unpivot.yml000066400000000000000000000325761503426445100247200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 689eb8e9c6be114a8b8e8bf0fafdcebe5d65d2d9d874f6e6cd3f7f1d278b722c file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: - start_bracket: ( - quoted_literal: "'NY'" - alias_expression: alias_operator: keyword: as naked_identifier: new_york - comma: ',' - quoted_literal: "'CT'" - comma: ',' - quoted_literal: "'NJ'" - comma: ',' - quoted_literal: "'FL'" - comma: ',' - quoted_literal: "'MO'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code 
from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: state end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: start_bracket: ( keyword: any end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: keyword: unpivot bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_a - alias_expression: alias_operator: keyword: AS quoted_identifier: "'A'" - comma: ',' - column_reference: naked_identifier: product_b - alias_expression: alias_operator: keyword: AS quoted_identifier: "'B'" - comma: ',' - column_reference: naked_identifier: product_c - alias_expression: alias_operator: keyword: AS quoted_identifier: "'C'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: - keyword: unpivot - keyword: include - keyword: nulls - bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_a - alias_expression: alias_operator: keyword: AS quoted_identifier: "'A'" - comma: ',' 
- column_reference: naked_identifier: product_b - alias_expression: alias_operator: keyword: AS quoted_identifier: "'B'" - comma: ',' - column_reference: naked_identifier: product_c - alias_expression: alias_operator: keyword: AS quoted_identifier: "'C'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: keyword: unpivot bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: amount - end_bracket: ) - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: a_qty - comma: ',' - column_reference: naked_identifier: a_value - end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: "'A'" - comma: ',' - bracketed: - start_bracket: ( - column_reference: naked_identifier: b_qty - comma: ',' - column_reference: naked_identifier: b_value - end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: "'B'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: state_code - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: start_bracket: ( keyword: any end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/prompt.sql000066400000000000000000000001731503426445100232760ustar00rootroot00000000000000PROMPT this is an Oracle SQL newline delimited prompt statement PROMPT PROMPT another prompt SELECT job_id from employees; sqlfluff-3.4.2/test/fixtures/dialects/oracle/prompt.yml000066400000000000000000000014421503426445100233000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 644b16a31198e9167e689a0a8420ae5b78ab8aa0a39e43b68c8328503d12790f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: job_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/quoted_slash.sql000066400000000000000000000003151503426445100244460ustar00rootroot00000000000000select a.column_a || '\' || a.column_b test from test_table a; select * from test_table a where a.column_a || '\' || a.column_b = '10\10'; select 'Test\ ' from dual; select '\Test\' from dual; sqlfluff-3.4.2/test/fixtures/dialects/oracle/quoted_slash.yml000066400000000000000000000061301503426445100244510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fc6be259bf3c4a5351db0bcea71a973b9a94c1ae2f1a70e9194f09b18356debe file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'\\'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_b alias_expression: naked_identifier: test from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'\\'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: column_b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'10\\10'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'Test\\ '" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'\\Test\\'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/raise.sql000066400000000000000000000030531503426445100230600ustar00rootroot00000000000000CREATE PROCEDURE account_status ( due_date DATE, today DATE ) AUTHID DEFINER IS past_due EXCEPTION; -- declare exception BEGIN IF due_date < today THEN RAISE past_due; -- explicitly raise exception END IF; EXCEPTION WHEN past_due THEN -- handle exception DBMS_OUTPUT.PUT_LINE ('Account past due.'); END; / CREATE PROCEDURE p (n NUMBER) AUTHID DEFINER IS default_number NUMBER := 0; BEGIN IF n < 0 THEN RAISE INVALID_NUMBER; -- raise explicitly ELSE INSERT INTO t VALUES(TO_NUMBER('100.00', '9G999')); -- raise implicitly END IF; EXCEPTION WHEN INVALID_NUMBER THEN DBMS_OUTPUT.PUT_LINE('Substituting default value for invalid number.'); INSERT INTO t VALUES(default_number); END; / DECLARE salary_too_high EXCEPTION; current_salary NUMBER := 20000; max_salary NUMBER := 10000; erroneous_salary NUMBER; BEGIN BEGIN IF current_salary > max_salary THEN RAISE salary_too_high; -- raise exception END IF; EXCEPTION WHEN salary_too_high THEN -- start handling exception erroneous_salary := current_salary; DBMS_OUTPUT.PUT_LINE('Salary ' || erroneous_salary ||' is out of range.'); DBMS_OUTPUT.PUT_LINE ('Maximum salary is ' || max_salary || '.'); RAISE; -- reraise current exception (exception name is optional) END; EXCEPTION WHEN salary_too_high THEN -- finish handling exception current_salary := max_salary; DBMS_OUTPUT.PUT_LINE ( 'Revising salary from ' || erroneous_salary || ' to ' || current_salary || '.' ); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/raise.yml000066400000000000000000000257021503426445100230670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d600b2ec2d61a0bbc758596b53549e857b5f86d7f0081b8dcd1932f8cfa42669 file: - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: account_status - function_parameter_list: bracketed: - start_bracket: ( - parameter: due_date - data_type: data_type_identifier: DATE - comma: ',' - parameter: today - data_type: data_type_identifier: DATE - end_bracket: ) - keyword: AUTHID - keyword: DEFINER - keyword: IS - declare_segment: naked_identifier: past_due data_type: data_type_identifier: EXCEPTION statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: due_date - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: today - keyword: THEN - statement: raise_statement: keyword: RAISE naked_identifier: past_due - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: past_due - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Account past due.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: p - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: AUTHID - keyword: DEFINER - keyword: IS - declare_segment: naked_identifier: default_number data_type: data_type_identifier: NUMBER colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: < numeric_literal: '0' - keyword: THEN - statement: raise_statement: keyword: RAISE naked_identifier: INVALID_NUMBER - statement_terminator: ; - keyword: ELSE - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TO_NUMBER function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'100.00'" - comma: ',' - expression: quoted_literal: "'9G999'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: INVALID_NUMBER - keyword: THEN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Substituting default value for invalid number.'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: default_number end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: salary_too_high - data_type: data_type_identifier: EXCEPTION - statement_terminator: ; - naked_identifier: current_salary - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '20000' - statement_terminator: ; - naked_identifier: max_salary - data_type: data_type_identifier: NUMBER - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '10000' - statement_terminator: ; - naked_identifier: erroneous_salary - data_type: data_type_identifier: NUMBER - statement_terminator: ; - keyword: BEGIN - statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: - column_reference: naked_identifier: current_salary - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: max_salary - keyword: THEN - statement: raise_statement: keyword: RAISE naked_identifier: salary_too_high - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: salary_too_high - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: erroneous_salary colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: current_salary - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Salary '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: erroneous_salary - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' is out of range.'" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Maximum salary is '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: max_salary - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" end_bracket: ) - statement_terminator: ; - statement: raise_statement: keyword: RAISE - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - naked_identifier: salary_too_high - keyword: THEN - statement: assignment_segment_statement: object_reference: naked_identifier: current_salary colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: max_salary - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Revising salary from '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: erroneous_salary - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' to '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: current_salary - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/record_type.sql000066400000000000000000000024731503426445100243010ustar00rootroot00000000000000DECLARE TYPE DeptRecTyp IS RECORD ( dept_id NUMBER(4) NOT NULL := 10, dept_name VARCHAR2(30) NOT NULL := 'Administration', mgr_id NUMBER(6) := 200, loc_id NUMBER(4) := 1700 ); dept_rec DeptRecTyp; BEGIN DBMS_OUTPUT.PUT_LINE('dept_id: ' || dept_rec.dept_id); DBMS_OUTPUT.PUT_LINE('dept_name: ' || dept_rec.dept_name); DBMS_OUTPUT.PUT_LINE('mgr_id: ' || dept_rec.mgr_id); DBMS_OUTPUT.PUT_LINE('loc_id: ' || dept_rec.loc_id); END; / DECLARE TYPE name_rec IS RECORD ( first employees.first_name%TYPE, last employees.last_name%TYPE ); TYPE contact IS RECORD ( name name_rec, -- nested record phone employees.phone_number%TYPE ); friend contact; BEGIN friend.name.first := 'John'; friend.name.last := 'Smith'; friend.phone := '1-650-555-1234'; DBMS_OUTPUT.PUT_LINE ( friend.name.first || ' ' || friend.name.last || ', ' || friend.phone ); END; / DECLARE TYPE full_name IS VARRAY(2) OF VARCHAR2(20); TYPE contact IS RECORD ( name full_name := full_name('John', 'Smith'), -- varray field phone employees.phone_number%TYPE ); friend contact; BEGIN friend.phone := '1-650-555-1234'; DBMS_OUTPUT.PUT_LINE ( friend.name(1) || ' ' || friend.name(2) || ', ' || friend.phone ); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/record_type.yml000066400000000000000000000327761503426445100243140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 56fea9914ddf682daf9df231a0168f8ed291e7d2c8447eb24820d170eb446a42 file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: DeptRecTyp - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: dept_id - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: NOT - keyword: 'NULL' - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '10' - comma: ',' - naked_identifier: dept_name - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - keyword: NOT - keyword: 'NULL' - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Administration'" - comma: ',' - naked_identifier: mgr_id - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '200' - comma: ',' - naked_identifier: loc_id - data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1700' - end_bracket: ) - statement_terminator: ; - naked_identifier: dept_rec - data_type: data_type_identifier: DeptRecTyp - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'dept_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: dept_id end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'dept_name: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: dept_name end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'mgr_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: mgr_id end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'loc_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: loc_id end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: name_rec - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: first - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: first_name binary_operator: '%' keyword: TYPE - comma: ',' - naked_identifier: last - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - end_bracket: ) - statement_terminator: ; - record_type: - keyword: TYPE - naked_identifier: contact - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: name - data_type: data_type_identifier: name_rec - comma: ',' - naked_identifier: phone - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: phone_number binary_operator: '%' keyword: TYPE - end_bracket: ) - statement_terminator: ; - naked_identifier: friend - data_type: data_type_identifier: contact - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: name - dot: . - naked_identifier: first colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'John'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: name - dot: . - naked_identifier: last colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Smith'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: phone colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'1-650-555-1234'" - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: friend - dot: . - naked_identifier: name - dot: . - naked_identifier: first - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: friend - dot: . - naked_identifier: name - dot: . - naked_identifier: last - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: friend - dot: . - naked_identifier: phone end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: full_name - keyword: IS - data_type: data_type_identifier: VARRAY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - keyword: OF - data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - statement_terminator: ; - record_type: - keyword: TYPE - naked_identifier: contact - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: name - data_type: data_type_identifier: full_name - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: full_name function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: quoted_literal: "'Smith'" - end_bracket: ) - comma: ',' - naked_identifier: phone - column_type_reference: column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: phone_number binary_operator: '%' keyword: TYPE - end_bracket: ) - statement_terminator: ; - naked_identifier: friend - data_type: data_type_identifier: contact - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: phone colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'1-650-555-1234'" - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - function: function_name: naked_identifier: friend dot: . function_name_identifier: name function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: naked_identifier: friend dot: . function_name_identifier: name function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: friend - dot: . - naked_identifier: phone end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/return.sql000066400000000000000000000022041503426445100232710ustar00rootroot00000000000000DECLARE x INTEGER; FUNCTION f (n INTEGER) RETURN INTEGER IS BEGIN RETURN (n*n); END; BEGIN DBMS_OUTPUT.PUT_LINE ( 'f returns ' || f(2) || '. Execution returns here (1).' ); x := f(2); DBMS_OUTPUT.PUT_LINE('Execution returns here (2).'); END; / CREATE OR REPLACE FUNCTION f (n INTEGER) RETURN INTEGER AUTHID DEFINER IS BEGIN IF n = 0 THEN RETURN 1; ELSIF n = 1 THEN RETURN n; END IF; END; / CREATE OR REPLACE FUNCTION f (n INTEGER) RETURN INTEGER AUTHID DEFINER IS BEGIN IF n = 0 THEN RETURN 1; ELSIF n = 1 THEN RETURN n; ELSE RETURN n*n; END IF; END; / BEGIN FOR i IN 0 .. 3 LOOP DBMS_OUTPUT.PUT_LINE('f(' || i || ') = ' || f(i)); END LOOP; END; / DECLARE PROCEDURE p IS BEGIN DBMS_OUTPUT.PUT_LINE('Inside p'); RETURN; DBMS_OUTPUT.PUT_LINE('Unreachable statement.'); END; BEGIN p(); DBMS_OUTPUT.PUT_LINE('Control returns here.'); END; / BEGIN BEGIN DBMS_OUTPUT.PUT_LINE('Inside inner block.'); RETURN; DBMS_OUTPUT.PUT_LINE('Unreachable statement.'); END; DBMS_OUTPUT.PUT_LINE('Inside outer block. Unreachable statement.'); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/return.yml000066400000000000000000000326031503426445100233010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2026601572482c6740fe1fc33cff9bfdd379e693d307748a7cabc0a89219d4cd file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: x data_type: data_type_identifier: INTEGER statement_terminator: ; create_function_statement: - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: function_name_identifier: RETURN function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: n - binary_operator: '*' - column_reference: naked_identifier: n end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'f returns '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'. Execution returns here (1).'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: x colon: ':' comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Execution returns here (2).'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: AUTHID - keyword: DEFINER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '1' - statement_terminator: ; - keyword: ELSIF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: n - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: f - function_parameter_list: bracketed: start_bracket: ( parameter: n data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: RETURN - data_type: data_type_identifier: INTEGER - keyword: AUTHID - keyword: DEFINER - keyword: IS - begin_end_block: - keyword: BEGIN - statement: if_then_statement: - if_clause: - keyword: IF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '1' - statement_terminator: ; - keyword: ELSIF - expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: n - statement_terminator: ; - keyword: ELSE - statement: return_statement: keyword: RETURN expression: - column_reference: naked_identifier: n - binary_operator: '*' - column_reference: naked_identifier: n - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - keyword: BEGIN - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - numeric_literal: '0' - dot: . - dot: . - numeric_literal: '3' - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'f('" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: i - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "') = '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: f function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE create_procedure_statement: - keyword: PROCEDURE - function_name: function_name_identifier: p - keyword: IS - begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside p'" end_bracket: ) - statement_terminator: ; - statement: return_statement: keyword: RETURN - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Unreachable statement.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - keyword: BEGIN - statement: function: function_name: function_name_identifier: p function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Control returns here.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - keyword: BEGIN - statement: begin_end_block: - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside inner block.'" end_bracket: ) - statement_terminator: ; - statement: return_statement: keyword: RETURN - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Unreachable statement.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Inside outer block. 
Unreachable statement.'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/returning_into.sql000066400000000000000000000077441503426445100250360ustar00rootroot00000000000000DECLARE TYPE EmpRec IS RECORD ( last_name employees.last_name%TYPE, salary employees.salary%TYPE ); emp_info EmpRec; old_salary employees.salary%TYPE; BEGIN SELECT salary INTO old_salary FROM employees WHERE employee_id = 100; UPDATE employees SET salary = salary * 1.1 WHERE employee_id = 100 RETURNING last_name, salary INTO emp_info; DBMS_OUTPUT.PUT_LINE ( 'Salary of ' || emp_info.last_name || ' raised from ' || old_salary || ' to ' || emp_info.salary ); END; / DECLARE emp_id employees_temp.employee_id%TYPE := 299; emp_first_name employees_temp.first_name%TYPE := 'Bob'; emp_last_name employees_temp.last_name%TYPE := 'Henry'; BEGIN INSERT INTO employees_temp (employee_id, first_name, last_name) VALUES (emp_id, emp_first_name, emp_last_name); UPDATE employees_temp SET first_name = 'Robert' WHERE employee_id = emp_id; DELETE FROM employees_temp WHERE employee_id = emp_id RETURNING first_name, last_name INTO emp_first_name, emp_last_name; COMMIT; DBMS_OUTPUT.PUT_LINE (emp_first_name || ' ' || emp_last_name); END; / DECLARE TYPE NumList IS TABLE OF employees.employee_id%TYPE; enums NumList; TYPE NameList IS TABLE OF employees.last_name%TYPE; names NameList; BEGIN DELETE FROM emp_temp WHERE department_id = 30 RETURNING employee_id, last_name BULK COLLECT INTO enums, names; DBMS_OUTPUT.PUT_LINE ('Deleted ' || SQL%ROWCOUNT || ' rows:'); FOR i IN enums.FIRST .. enums.LAST LOOP DBMS_OUTPUT.PUT_LINE ('Employee #' || enums(i) || ': ' || names(i)); END LOOP; END; / DECLARE TYPE SalList IS TABLE OF employees.salary%TYPE; old_sals SalList; new_sals SalList; TYPE NameList IS TABLE OF employees.last_name%TYPE; names NameList; BEGIN UPDATE emp_temp SET salary = salary * 1.15 WHERE salary < 2500 RETURNING OLD salary, NEW salary, last_name BULK COLLECT INTO old_sals, new_sals, names; DBMS_OUTPUT.PUT_LINE('Updated ' || SQL%ROWCOUNT || ' rows: '); FOR i IN old_sals.FIRST .. old_sals.LAST LOOP DBMS_OUTPUT.PUT_LINE(names(i) || ': Old Salary $' || old_sals(i) || ', New Salary $' || new_sals(i)); END LOOP; END; / DECLARE TYPE NumList IS TABLE OF NUMBER; depts NumList := NumList(10,20,30); TYPE enum_t IS TABLE OF employees.employee_id%TYPE; e_ids enum_t; TYPE dept_t IS TABLE OF employees.department_id%TYPE; d_ids dept_t; BEGIN FORALL j IN depts.FIRST..depts.LAST DELETE FROM emp_temp WHERE department_id = depts(j) RETURNING employee_id, department_id BULK COLLECT INTO e_ids, d_ids; DBMS_OUTPUT.PUT_LINE ('Deleted ' || SQL%ROWCOUNT || ' rows:'); FOR i IN e_ids.FIRST .. e_ids.LAST LOOP DBMS_OUTPUT.PUT_LINE ( 'Employee #' || e_ids(i) || ' from dept #' || d_ids(i) ); END LOOP; END; / DECLARE TYPE t_desc_tab IS TABLE OF t1.description%TYPE; TYPE t_id_tab IS TABLE OF t1.id%TYPE; TYPE t_desc_out_tab IS TABLE OF t1.description%TYPE; l_desc_tab t_desc_tab := t_desc_tab('FIVE', 'SIX', 'SEVEN'); l_id_tab t_id_tab; l_desc_out_tab t_desc_out_tab; BEGIN FORALL i IN l_desc_tab.FIRST .. l_desc_tab.LAST INSERT INTO t1 VALUES (t1_seq.NEXTVAL, l_desc_tab(i)) RETURNING id, description BULK COLLECT INTO l_id_tab, l_desc_out_tab; FOR i IN l_id_tab.FIRST .. 
l_id_tab.LAST LOOP DBMS_OUTPUT.put_line('INSERT ID=' || l_id_tab(i) || ' DESC=' || l_desc_out_tab(i)); END LOOP; COMMIT; END; / DECLARE TYPE t_sal_tab IS TABLE OF emp.sal%TYPE; TYPE t_empno_tab IS TABLE OF emp.empno%TYPE; l_empno t_empno_tab; l_salo t_sal_tab; l_saln t_sal_tab; BEGIN MERGE INTO emp t USING emp_sal_increase q ON (t.deptno = q.deptno) WHEN MATCHED THEN UPDATE SET t.sal=t.sal*(1+q.increase_pct/100) RETURNING empno, OLD sal, NEW sal BULK COLLECT INTO l_empno, l_salo, l_saln; FOR i IN l_salo.first .. l_salo.last LOOP DBMS_OUTPUT.put_line('EMPNO=' || l_empno(i)|| ', SAL changed from '||l_salo(i) ||' to ' ||l_saln(i)); END LOOP; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/returning_into.yml000066400000000000000000001213541503426445100250320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d0edcc692653b622543df1114b4d3ccdb6e9d087105fb91acaf6b933ddf32c8 file: - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: EmpRec - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: last_name - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - comma: ',' - naked_identifier: salary - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - end_bracket: ) - statement_terminator: ; - naked_identifier: emp_info - data_type: data_type_identifier: EmpRec - statement_terminator: ; - naked_identifier: old_salary - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: salary into_clause: keyword: INTO naked_identifier: old_salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '100' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '1.1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '100' returning_clause: - keyword: RETURNING - naked_identifier: last_name - comma: ',' - naked_identifier: salary - into_clause: keyword: INTO naked_identifier: emp_info - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Salary of '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: emp_info - dot: . - naked_identifier: last_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' raised from '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: old_salary - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' to '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: emp_info - dot: . - naked_identifier: salary end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - naked_identifier: emp_id - column_type_reference: column_reference: - naked_identifier: employees_temp - dot: . - naked_identifier: employee_id binary_operator: '%' keyword: TYPE - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '299' - statement_terminator: ; - naked_identifier: emp_first_name - column_type_reference: column_reference: - naked_identifier: employees_temp - dot: . - naked_identifier: first_name binary_operator: '%' keyword: TYPE - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Bob'" - statement_terminator: ; - naked_identifier: emp_last_name - column_type_reference: column_reference: - naked_identifier: employees_temp - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Henry'" - statement_terminator: ; - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: employees_temp - bracketed: - start_bracket: ( - column_reference: naked_identifier: employee_id - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: last_name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: emp_id - comma: ',' - expression: column_reference: naked_identifier: emp_first_name - comma: ',' - expression: column_reference: naked_identifier: emp_last_name - end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees_temp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: first_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Robert'" where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees_temp where_clause: keyword: WHERE expression: - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: emp_id returning_clause: - keyword: RETURNING - naked_identifier: first_name - comma: ',' - naked_identifier: last_name - into_clause: - keyword: INTO - naked_identifier: emp_first_name - comma: ',' - naked_identifier: emp_last_name - statement_terminator: ; - statement: 
transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: emp_first_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: emp_last_name end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NumList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: enums - data_type: data_type_identifier: NumList - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: NameList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: names - data_type: data_type_identifier: NameList - statement_terminator: ; - keyword: BEGIN - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp_temp where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '30' returning_clause: - keyword: RETURNING - naked_identifier: employee_id - comma: ',' - naked_identifier: last_name - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: enums - comma: ',' - naked_identifier: names - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Deleted '" - binary_operator: - pipe: '|' - pipe: '|' - naked_identifier: SQL - binary_operator: '%' - keyword: ROWCOUNT - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' rows:'" end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: enums - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: enums - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Employee #'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: enums function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "': '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: SalList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: salary binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: old_sals - data_type: data_type_identifier: SalList - statement_terminator: ; - naked_identifier: new_sals - data_type: data_type_identifier: SalList - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: NameList - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: names - data_type: data_type_identifier: NameList - statement_terminator: ; - keyword: BEGIN - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: emp_temp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: salary binary_operator: '*' numeric_literal: '1.15' where_clause: keyword: WHERE expression: column_reference: naked_identifier: salary comparison_operator: raw_comparison_operator: < numeric_literal: '2500' returning_clause: - keyword: RETURNING - keyword: OLD - naked_identifier: salary - comma: ',' - keyword: NEW - naked_identifier: salary - comma: ',' - naked_identifier: last_name - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: old_sals - comma: ',' - naked_identifier: new_sals - comma: ',' - naked_identifier: names - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Updated '" - binary_operator: - pipe: '|' - pipe: '|' - naked_identifier: SQL - binary_operator: '%' - keyword: ROWCOUNT - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' rows: '" end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: old_sals - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: old_sals - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - function: function_name: function_name_identifier: names function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "': Old Salary $'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: old_sals function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', New Salary $'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: new_sals function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: NumList - keyword: IS - keyword: TABLE - keyword: OF - data_type: data_type_identifier: NUMBER - statement_terminator: ; - naked_identifier: depts - data_type: data_type_identifier: NumList - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NumList function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: enum_t - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: employee_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: e_ids - data_type: data_type_identifier: enum_t - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: dept_t - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: department_id binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: d_ids - data_type: data_type_identifier: dept_t - statement_terminator: ; - keyword: BEGIN - statement: forall_statement: - keyword: FORALL - naked_identifier: j - keyword: IN - naked_identifier: depts - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: depts - dot: . - naked_identifier: LAST - delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp_temp where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: depts function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) returning_clause: - keyword: RETURNING - naked_identifier: employee_id - comma: ',' - naked_identifier: department_id - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: e_ids - comma: ',' - naked_identifier: d_ids - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Deleted '" - binary_operator: - pipe: '|' - pipe: '|' - naked_identifier: SQL - binary_operator: '%' - keyword: ROWCOUNT - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' rows:'" end_bracket: ) - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: e_ids - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: e_ids - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'Employee #'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: e_ids function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' from dept #'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: d_ids function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: t_desc_tab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: description binary_operator: '%' keyword: TYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: t_id_tab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id binary_operator: '%' keyword: TYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: t_desc_out_tab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: description binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: l_desc_tab - data_type: data_type_identifier: t_desc_tab - colon: ':' - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: t_desc_tab function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'FIVE'" - comma: ',' - expression: quoted_literal: "'SIX'" - comma: ',' - expression: quoted_literal: "'SEVEN'" - end_bracket: ) - statement_terminator: ; - naked_identifier: l_id_tab - data_type: data_type_identifier: t_id_tab - statement_terminator: ; - naked_identifier: l_desc_out_tab - data_type: data_type_identifier: t_desc_out_tab - statement_terminator: ; - keyword: BEGIN - statement: forall_statement: - keyword: FORALL - naked_identifier: i - keyword: IN - naked_identifier: l_desc_tab - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: l_desc_tab - dot: . - naked_identifier: LAST - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: t1_seq - dot: . 
- naked_identifier: NEXTVAL - comma: ',' - expression: function: function_name: function_name_identifier: l_desc_tab function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - end_bracket: ) - returning_clause: - keyword: RETURNING - naked_identifier: id - comma: ',' - naked_identifier: description - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: l_id_tab - comma: ',' - naked_identifier: l_desc_out_tab - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: l_id_tab - dot: . - naked_identifier: FIRST - dot: . - dot: . - naked_identifier: l_id_tab - dot: . - naked_identifier: LAST - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: put_line function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'INSERT ID='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: l_id_tab function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' DESC='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: l_desc_out_tab function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - collection_type: - keyword: TYPE - naked_identifier: t_sal_tab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: emp - dot: . - naked_identifier: sal binary_operator: '%' keyword: TYPE - statement_terminator: ; - collection_type: - keyword: TYPE - naked_identifier: t_empno_tab - keyword: IS - keyword: TABLE - keyword: OF - column_type_reference: column_reference: - naked_identifier: emp - dot: . - naked_identifier: empno binary_operator: '%' keyword: TYPE - statement_terminator: ; - naked_identifier: l_empno - data_type: data_type_identifier: t_empno_tab - statement_terminator: ; - naked_identifier: l_salo - data_type: data_type_identifier: t_sal_tab - statement_terminator: ; - naked_identifier: l_saln - data_type: data_type_identifier: t_sal_tab - statement_terminator: ; - keyword: BEGIN - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: emp - alias_expression: naked_identifier: t - keyword: USING - table_reference: naked_identifier: emp_sal_increase - alias_expression: naked_identifier: q - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: q - dot: . - naked_identifier: deptno end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: sal comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: sal binary_operator: '*' bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - column_reference: - naked_identifier: q - dot: . - naked_identifier: increase_pct - binary_operator: / - numeric_literal: '100' end_bracket: ) returning_clause: - keyword: RETURNING - naked_identifier: empno - comma: ',' - keyword: OLD - naked_identifier: sal - comma: ',' - keyword: NEW - naked_identifier: sal - bulk_collect_into_clause: - keyword: BULK - keyword: COLLECT - keyword: INTO - naked_identifier: l_empno - comma: ',' - naked_identifier: l_salo - comma: ',' - naked_identifier: l_saln - statement_terminator: ; - statement: for_loop_statement: - keyword: FOR - naked_identifier: i - keyword: IN - naked_identifier: l_salo - dot: . - naked_identifier: first - dot: . - dot: . - naked_identifier: l_salo - dot: . - naked_identifier: last - loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: put_line function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'EMPNO='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: l_empno function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', SAL changed from '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: l_salo function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' to '" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: l_saln function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/rowtype.sql000066400000000000000000000040601503426445100234650ustar00rootroot00000000000000DECLARE dept_rec departments%ROWTYPE; BEGIN -- Assign values to fields: dept_rec.department_id := 10; dept_rec.department_name := 'Administration'; dept_rec.manager_id := 200; dept_rec.location_id := 1700; -- Print fields: DBMS_OUTPUT.PUT_LINE('dept_id: ' || dept_rec.department_id); DBMS_OUTPUT.PUT_LINE('dept_name: ' || dept_rec.department_name); DBMS_OUTPUT.PUT_LINE('mgr_id: ' || dept_rec.manager_id); DBMS_OUTPUT.PUT_LINE('loc_id: ' || dept_rec.location_id); END; / DECLARE t1_row t1%ROWTYPE; BEGIN DBMS_OUTPUT.PUT('t1.c1 = '); DBMS_OUTPUT.PUT_LINE(NVL(TO_CHAR(t1_row.c1), 'NULL')); DBMS_OUTPUT.PUT('t1.c2 = '); print(t1_row.c2); DBMS_OUTPUT.PUT_LINE(NVL(TO_CHAR(t1_row.c2), 'NULL')); END; / DECLARE CURSOR c IS SELECT first_name, last_name, phone_number FROM employees; friend c%ROWTYPE; BEGIN friend.first_name := 'John'; friend.last_name := 'Smith'; friend.phone_number := '1-650-555-1234'; DBMS_OUTPUT.PUT_LINE ( friend.first_name || ' ' || friend.last_name || ', ' || friend.phone_number ); END; / DECLARE CURSOR c2 IS SELECT employee_id, email, employees.manager_id, location_id FROM employees, departments WHERE employees.department_id = departments.department_id; join_rec c2%ROWTYPE; -- 
includes columns from two tables BEGIN NULL; END; / DECLARE t_rec t%ROWTYPE; -- t_rec has fields a and b, but not c BEGIN SELECT * INTO t_rec FROM t WHERE ROWNUM < 2; -- t_rec(a)=1, t_rec(b)=2 DBMS_OUTPUT.PUT_LINE('c = ' || t_rec.c); END; / DECLARE TYPE name_rec IS RECORD ( first employees.first_name%TYPE DEFAULT 'John', last employees.last_name%TYPE DEFAULT 'Doe' ); CURSOR c IS SELECT first_name, last_name FROM employees; target name_rec; source c%ROWTYPE; BEGIN source.first_name := 'Jane'; source.last_name := 'Smith'; DBMS_OUTPUT.PUT_LINE ( 'source: ' || source.first_name || ' ' || source.last_name ); target := source; DBMS_OUTPUT.PUT_LINE ( 'target: ' || target.first || ' ' || target.last ); END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/rowtype.yml000066400000000000000000000524311503426445100234740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 68079a7f079026780f4052ad59b5aa8d021a1b94b91ab755bf49b0eae4d425c3 file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: dept_rec row_type_reference: table_reference: naked_identifier: departments binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: department_id colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '10' - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: department_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Administration'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: manager_id colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '200' - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: location_id colon: ':' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1700' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'dept_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: department_id end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'dept_name: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: department_name end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'mgr_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . 
- naked_identifier: manager_id end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'loc_id: '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: dept_rec - dot: . - naked_identifier: location_id end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: t1_row row_type_reference: table_reference: naked_identifier: t1 binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'t1.c1 = '" end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: NVL function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t1_row - dot: . - naked_identifier: c1 end_bracket: ) - comma: ',' - expression: quoted_literal: "'NULL'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'t1.c2 = '" end_bracket: ) - statement_terminator: ; - statement: function: function_name: function_name_identifier: print function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t1_row - dot: . - naked_identifier: c2 end_bracket: ) - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: NVL function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: TO_CHAR function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t1_row - dot: . 
- naked_identifier: c2 end_bracket: ) - comma: ',' - expression: quoted_literal: "'NULL'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: phone_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; naked_identifier: friend row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: first_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'John'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: last_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Smith'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: friend - dot: . - naked_identifier: phone_number colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'1-650-555-1234'" - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: friend - dot: . - naked_identifier: first_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: friend - dot: . - naked_identifier: last_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: friend - dot: . - naked_identifier: phone_number end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE cursor_variable: - keyword: CURSOR - naked_identifier: c2 - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: email - comma: ',' - select_clause_element: column_reference: - naked_identifier: employees - dot: . - naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: location_id from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: departments - dot: . - naked_identifier: department_id - statement_terminator: ; naked_identifier: join_rec row_type_reference: table_reference: naked_identifier: c2 binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: null_statement: keyword: 'NULL' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: t_rec row_type_reference: table_reference: naked_identifier: t binary_operator: '%' keyword: ROWTYPE statement_terminator: ; - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: keyword: INTO naked_identifier: t_rec from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: keyword: ROWNUM comparison_operator: raw_comparison_operator: < numeric_literal: '2' - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'c = '" binary_operator: - pipe: '|' - pipe: '|' column_reference: - naked_identifier: t_rec - dot: . - naked_identifier: c end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / - statement: begin_end_block: - declare_segment: - keyword: DECLARE - record_type: - keyword: TYPE - naked_identifier: name_rec - keyword: IS - keyword: RECORD - bracketed: - start_bracket: ( - naked_identifier: first - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: first_name binary_operator: '%' keyword: TYPE - keyword: DEFAULT - expression: quoted_literal: "'John'" - comma: ',' - naked_identifier: last - column_type_reference: column_reference: - naked_identifier: employees - dot: . - naked_identifier: last_name binary_operator: '%' keyword: TYPE - keyword: DEFAULT - expression: quoted_literal: "'Doe'" - end_bracket: ) - statement_terminator: ; - cursor_variable: - keyword: CURSOR - naked_identifier: c - keyword: IS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - naked_identifier: target - data_type: data_type_identifier: name_rec - statement_terminator: ; - naked_identifier: source - row_type_reference: table_reference: naked_identifier: c binary_operator: '%' keyword: ROWTYPE - statement_terminator: ; - keyword: BEGIN - statement: assignment_segment_statement: object_reference: - naked_identifier: source - dot: . - naked_identifier: first_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Jane'" - statement_terminator: ; - statement: assignment_segment_statement: object_reference: - naked_identifier: source - dot: . 
- naked_identifier: last_name colon: ':' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Smith'" - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'source: '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: source - dot: . - naked_identifier: first_name - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: source - dot: . - naked_identifier: last_name end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: target colon: ':' comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: source - statement_terminator: ; - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: - quoted_literal: "'target: '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: target - dot: . - naked_identifier: first - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: target - dot: . - naked_identifier: last end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/select.sql000066400000000000000000000001531503426445100232320ustar00rootroot00000000000000select created_at, status, code from item_errors error; select quantity / 100 from inventory; sqlfluff-3.4.2/test/fixtures/dialects/oracle/select.yml000066400000000000000000000031061503426445100232350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
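# A quick way to inspect the parse tree that this file mirrors (a sketch,
# assuming sqlfluff is installed in the active environment):
#
#   sqlfluff parse --dialect oracle test/fixtures/dialects/oracle/select.sql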
_hash: 7af02b3736ee5d5184d7f9a3145506f5ef95a3af84d202ff38f999784ea48d94 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: created_at - comma: ',' - select_clause_element: column_reference: naked_identifier: status - comma: ',' - select_clause_element: column_reference: naked_identifier: code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: item_errors alias_expression: naked_identifier: error - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: column_reference: naked_identifier: quantity binary_operator: / numeric_literal: '100' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: inventory - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/select_for_update.sql000066400000000000000000000011451503426445100254440ustar00rootroot00000000000000SELECT e.employee_id, e.salary, e.commission_pct FROM employees e, departments d WHERE job_id = 'SA_REP' AND e.department_id = d.department_id AND location_id = 2500 ORDER BY e.employee_id FOR UPDATE; SELECT e.employee_id, e.salary, e.commission_pct FROM employees e JOIN departments d USING (department_id) WHERE job_id = 'SA_REP' AND location_id = 2500 ORDER BY e.employee_id FOR UPDATE OF e.salary; SELECT employee_id FROM (SELECT * FROM employees) FOR UPDATE OF employee_id; SELECT employee_id FROM (SELECT employee_id+1 AS employee_id FROM employees) FOR UPDATE; sqlfluff-3.4.2/test/fixtures/dialects/oracle/select_for_update.yml000066400000000000000000000155261503426445100254560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cca773e16b14f61a50a4dc183e640ff15a57294c008d06ca26b7982071b3e361 file: - statement: select_statement: - select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: salary - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: commission_pct - from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: job_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SA_REP'" - binary_operator: AND - column_reference: - naked_identifier: e - dot: . - naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . 
- naked_identifier: department_id - binary_operator: AND - column_reference: naked_identifier: location_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2500' - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: e - dot: . - naked_identifier: employee_id - keyword: FOR - keyword: UPDATE - statement_terminator: ; - statement: select_statement: - select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: salary - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: commission_pct - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees alias_expression: naked_identifier: e join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: departments alias_expression: naked_identifier: d - keyword: USING - bracketed: start_bracket: ( naked_identifier: department_id end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: job_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SA_REP'" - binary_operator: AND - column_reference: naked_identifier: location_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2500' - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: e - dot: . - naked_identifier: employee_id - keyword: FOR - keyword: UPDATE - keyword: OF - table_reference: - naked_identifier: e - dot: . - naked_identifier: salary - statement_terminator: ; - statement: select_statement: - select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: employee_id - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees end_bracket: ) - keyword: FOR - keyword: UPDATE - keyword: OF - table_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: - select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: employee_id - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: employee_id binary_operator: + numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: employee_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees end_bracket: ) - keyword: FOR - keyword: UPDATE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/select_natural_join.sql000066400000000000000000000006171503426445100260040ustar00rootroot00000000000000SELECT * FROM table1 NATURAL JOIN table2; SELECT * FROM table1 NATURAL INNER JOIN table2; SELECT * FROM table1 NATURAL LEFT JOIN table2; SELECT * FROM table1 NATURAL LEFT OUTER JOIN table2; SELECT * FROM table1 NATURAL RIGHT JOIN table2; SELECT 
* FROM table1 NATURAL RIGHT OUTER JOIN table2; SELECT * FROM table1 NATURAL FULL JOIN table2; SELECT * FROM table1 NATURAL FULL OUTER JOIN table2; sqlfluff-3.4.2/test/fixtures/dialects/oracle/select_natural_join.yml000066400000000000000000000133541503426445100260100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83e3312a773ee3e3dee22bd7341b922507c1aaee51ef41c02c3ea414ef6b8c82 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/space_between_alias_and_column.sql000066400000000000000000000003001503426445100301210ustar00rootroot00000000000000select a.column_a from test_table a where a. column_b = 1; select a.column_a from test_table a where 1 = a. column_a; select a. column_a from test_table a where 1 = a.column_a; sqlfluff-3.4.2/test/fixtures/dialects/oracle/space_between_alias_and_column.yml000066400000000000000000000054101503426445100301320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e06e67cb2a2be631371438386fa46d37d2c019b329733210725cbeb6aa392b85 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_b comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: numeric_literal: '1' comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: numeric_literal: '1' comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: a - dot: . 
- naked_identifier: column_a - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/substitution_variable.sql000066400000000000000000000006721503426445100264020ustar00rootroot00000000000000grant select on my_table to &REGISTRY; SELECT &SORTCOL, SALARY FROM &MYTABLE WHERE SALARY>12000; select employee_id from employees where last_name = '&myv'; select * from employees where employee_id = &myv; SELECT SALARY FROM EMP_DETAILS_VIEW WHERE EMPLOYEE_ID='&X.5'; SELECT &GROUP_COL, MAX(&NUMBER_COL) MAXIMUM FROM &MY_TABLE GROUP BY &GROUP_COL; select * from employees where employee_id = &&myv; insert into mytable values (&myv); sqlfluff-3.4.2/test/fixtures/dialects/oracle/substitution_variable.yml000066400000000000000000000133651503426445100264070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 21f949c3e22dc36d82f0e1ec032237d865836e8822be6f181ed39efd44f6cce6 file: - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - object_reference: naked_identifier: my_table - keyword: to - role_reference: sqlplus_variable: ampersand: '&' naked_identifier: REGISTRY - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: sqlplus_variable: ampersand: '&' naked_identifier: SORTCOL - comma: ',' - select_clause_element: column_reference: naked_identifier: SALARY from_clause: keyword: FROM from_expression: from_expression_element: table_expression: sqlplus_variable: ampersand: '&' naked_identifier: MYTABLE where_clause: keyword: WHERE expression: column_reference: naked_identifier: SALARY comparison_operator: raw_comparison_operator: '>' numeric_literal: '12000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: employee_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: where expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'&myv'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: where expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' sqlplus_variable: ampersand: '&' naked_identifier: myv - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: SALARY from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EMP_DETAILS_VIEW where_clause: keyword: WHERE expression: column_reference: naked_identifier: EMPLOYEE_ID comparison_operator: raw_comparison_operator: '=' quoted_literal: "'&X.5'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: sqlplus_variable: ampersand: '&' naked_identifier:
GROUP_COL - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: sqlplus_variable: ampersand: '&' naked_identifier: NUMBER_COL end_bracket: ) alias_expression: naked_identifier: MAXIMUM from_clause: keyword: FROM from_expression: from_expression_element: table_expression: sqlplus_variable: ampersand: '&' naked_identifier: MY_TABLE groupby_clause: - keyword: GROUP - keyword: BY - expression: sqlplus_variable: ampersand: '&' naked_identifier: GROUP_COL - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: where expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' sqlplus_variable: - ampersand: '&' - ampersand: '&' - naked_identifier: myv - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: mytable - values_clause: keyword: values bracketed: start_bracket: ( expression: sqlplus_variable: ampersand: '&' naked_identifier: myv end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/temporary_table.sql000066400000000000000000000014521503426445100251470ustar00rootroot00000000000000CREATE GLOBAL TEMPORARY TABLE today_sales ON COMMIT PRESERVE ROWS AS SELECT * FROM orders WHERE order_date = SYSDATE; CREATE GLOBAL TEMPORARY TABLE HT_AFFAIRES (ID CHAR (36 CHAR)) ON COMMIT DELETE ROWS; CREATE GLOBAL TEMPORARY TABLE my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT DELETE ROWS; CREATE GLOBAL TEMPORARY TABLE my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT PRESERVE ROWS; CREATE PRIVATE TEMPORARY TABLE ora$ptt_my_temp_table ( id NUMBER, description VARCHAR2(20 BYTE) ) ON COMMIT DROP DEFINITION; CREATE PRIVATE TEMPORARY TABLE ora$ptt_my_temp_table ( id NUMBER, description VARCHAR2(20 CHAR) ) ON COMMIT PRESERVE DEFINITION; CREATE PRIVATE TEMPORARY TABLE ora$ptt_emp AS SELECT * FROM emp; sqlfluff-3.4.2/test/fixtures/dialects/oracle/temporary_table.yml000066400000000000000000000133101503426445100251450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ca868384d802f535ba528cb300ea943a1e0e47a38c09240a0fda06b65c8123e1 file: - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: today_sales - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: column_reference: naked_identifier: order_date comparison_operator: raw_comparison_operator: '=' bare_function: SYSDATE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: HT_AFFAIRES - bracketed: start_bracket: ( column_definition: naked_identifier: ID data_type: data_type_identifier: CHAR bracketed: start_bracket: ( numeric_literal: '36' word: CHAR end_bracket: ) end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed: start_bracket: ( numeric_literal: '20' word: BYTE end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DROP - keyword: DEFINITION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed: start_bracket: ( numeric_literal: '20' word: CHAR end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: DEFINITION - statement_terminator: ; - 
statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_emp - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/oracle/while_loop.sql000066400000000000000000000004311503426445100241130ustar00rootroot00000000000000DECLARE done BOOLEAN := FALSE; BEGIN WHILE done LOOP DBMS_OUTPUT.PUT_LINE ('This line does not print.'); done := TRUE; -- This assignment is not made. END LOOP; WHILE NOT done LOOP DBMS_OUTPUT.PUT_LINE ('Hello, world!'); done := TRUE; END LOOP; END; / sqlfluff-3.4.2/test/fixtures/dialects/oracle/while_loop.yml000066400000000000000000000060371503426445100241250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eee3100def0953c3fd62304824d0a552bc8c1b327cbbbdf3ea6ceff9c293897c file: - statement: begin_end_block: - declare_segment: keyword: DECLARE naked_identifier: done data_type: data_type_identifier: BOOLEAN colon: ':' comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'FALSE' statement_terminator: ; - keyword: BEGIN - statement: while_loop_statement: keyword: WHILE expression: column_reference: naked_identifier: done loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'This line does not print.'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: done colon: ':' comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'TRUE' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: while_loop_statement: keyword: WHILE expression: keyword: NOT column_reference: naked_identifier: done loop_statement: - keyword: LOOP - statement: function: function_name: naked_identifier: DBMS_OUTPUT dot: . 
function_name_identifier: PUT_LINE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'Hello, world!'" end_bracket: ) - statement_terminator: ; - statement: assignment_segment_statement: object_reference: naked_identifier: done colon: ':' comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'TRUE' - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: END - statement_terminator: ; - statement_terminator: / sqlfluff-3.4.2/test/fixtures/dialects/oracle/within_group.sql000066400000000000000000000017301503426445100244730ustar00rootroot00000000000000--https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/LISTAGG.html#GUID-B6E50D8E-F467-425B-9436-F7F8BF38D466 SELECT LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date, last_name) "Emp_list", MIN(hire_date) "Earliest" FROM employees WHERE department_id = 30; SELECT department_id "Dept.", LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date) "Employees" FROM employees GROUP BY department_id ORDER BY department_id; SELECT department_id "Dept.", LISTAGG(last_name, '; ' ON OVERFLOW TRUNCATE '...') WITHIN GROUP (ORDER BY hire_date) "Employees" FROM employees GROUP BY department_id ORDER BY department_id; SELECT department_id "Dept", hire_date "Date", last_name "Name", LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date, last_name) OVER (PARTITION BY department_id) as "Emp_list" FROM employees WHERE hire_date < '01-SEP-2003' ORDER BY "Dept", "Date", "Name"; sqlfluff-3.4.2/test/fixtures/dialects/oracle/within_group.yml000066400000000000000000000211011503426445100244670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 088c1674b18b21fef3f58ffe274a3fc6854f5d490f599e5f8e4cf5c418723b07 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date - comma: ',' - column_reference: naked_identifier: last_name end_bracket: ) alias_expression: quoted_identifier: '"Emp_list"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Earliest"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '30' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept."' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Employees"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept."' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: TRUNCATE - quoted_identifier: "'...'" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Employees"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_id - statement_terminator: ; - statement: 
select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept"' - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date alias_expression: quoted_identifier: '"Date"' - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Name"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date - comma: ',' - column_reference: naked_identifier: last_name end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: department_id end_bracket: ) alias_expression: alias_operator: keyword: as quoted_identifier: '"Emp_list"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: hire_date comparison_operator: raw_comparison_operator: < quoted_literal: "'01-SEP-2003'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Dept"' - comma: ',' - column_reference: quoted_identifier: '"Date"' - comma: ',' - column_reference: quoted_identifier: '"Name"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/000077500000000000000000000000001503426445100216345ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/postgres/.sqlfluff000066400000000000000000000000361503426445100234560ustar00rootroot00000000000000[sqlfluff] dialect = postgres sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_aggregate.sql000066400000000000000000000006571503426445100255020ustar00rootroot00000000000000ALTER AGGREGATE range_agg_preserve_gaps (TSTZRANGE) RENAME TO my_agg; ALTER AGGREGATE my_agg (TSTZRANGE) OWNER TO me; ALTER AGGREGATE my_agg (TSTZRANGE) OWNER TO CURRENT_ROLE; ALTER AGGREGATE my_agg (TSTZRANGE) OWNER TO CURRENT_USER; ALTER AGGREGATE my_agg (TSTZRANGE) OWNER TO SESSION_USER; ALTER AGGREGATE my_agg (*) SET SCHEMA api; ALTER AGGREGATE complex_agg_function(integer, text, numeric) RENAME TO renamed_agg_function; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_aggregate.yml000066400000000000000000000055661503426445100255100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2bab70cb3d96e59b198ec8bdc4355e0b9e1433b58324de800aec8457a62ee1f7 file: - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: range_agg_preserve_gaps - bracketed: start_bracket: ( word: TSTZRANGE end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: my_agg - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: my_agg - bracketed: start_bracket: ( word: TSTZRANGE end_bracket: ) - keyword: OWNER - keyword: TO - role_reference: naked_identifier: me - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: my_agg - bracketed: start_bracket: ( word: TSTZRANGE end_bracket: ) - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: my_agg - bracketed: start_bracket: ( word: TSTZRANGE end_bracket: ) - keyword: OWNER - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: my_agg - bracketed: start_bracket: ( word: TSTZRANGE end_bracket: ) - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: my_agg - bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: api - statement_terminator: ; - statement: alter_aggregate_statement: - keyword: ALTER - keyword: AGGREGATE - object_reference: naked_identifier: complex_agg_function - bracketed: - start_bracket: ( - word: integer - comma: ',' - word: text - comma: ',' - word: numeric - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: renamed_agg_function - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_database.sql000066400000000000000000000026171503426445100253160ustar00rootroot00000000000000ALTER DATABASE db; ALTER DATABASE db ALLOW_CONNECTIONS true; ALTER DATABASE db WITH ALLOW_CONNECTIONS true; ALTER DATABASE db CONNECTION LIMIT 10; ALTER DATABASE db WITH CONNECTION LIMIT 10; ALTER DATABASE db IS_TEMPLATE true; ALTER DATABASE db WITH IS_TEMPLATE true; ALTER DATABASE db IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db WITH IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db CONNECTION LIMIT 10 IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db WITH CONNECTION LIMIT 10 IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db RENAME TO new_db; ALTER DATABASE db OWNER TO other_role; ALTER DATABASE db OWNER TO CURRENT_ROLE; ALTER DATABASE db OWNER TO CURRENT_USER; ALTER DATABASE db OWNER TO SESSION_USER; -- Issue:2017 ALTER DATABASE postgres SET password_encryption TO 'scram-sha-256'; ALTER DATABASE db SET TABLESPACE new_tablespace; ALTER DATABASE db SET parameter1 TO 1; ALTER DATABASE db SET parameter1 TO 'some_value'; ALTER DATABASE db SET parameter1 TO DEFAULT; ALTER DATABASE db SET parameter1 = 1; ALTER DATABASE db SET parameter1 = 'some_value'; ALTER DATABASE db SET parameter1 = DEFAULT; ALTER DATABASE db SET parameter1 FROM CURRENT; ALTER DATABASE db SET search_path TO my_schema; ALTER DATABASE db SET search_path TO 
"my_schema"; ALTER USER some_user SET default_transaction_read_only = ON; ALTER DATABASE db RESET parameter1; ALTER DATABASE db RESET ALL; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_database.yml000066400000000000000000000204551503426445100253200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ebe9b3efe68efa22841747ec417e0a3edf9a574f5a1f0a29ae65a070a14437ec file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RENAME - keyword: TO - database_reference: naked_identifier: new_db - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: 
naked_identifier: db - keyword: OWNER - keyword: TO - object_reference: naked_identifier: other_role - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - object_reference: naked_identifier: CURRENT_USER - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: postgres - keyword: SET - parameter: password_encryption - keyword: TO - quoted_literal: "'scram-sha-256'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - quoted_literal: "'some_value'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: search_path - keyword: TO - naked_identifier: my_schema - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: search_path - keyword: TO - quoted_identifier: '"my_schema"' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: some_user - keyword: SET - parameter: default_transaction_read_only - 
comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'ON' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RESET - parameter: parameter1 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RESET - keyword: ALL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_default_privileges.sql000066400000000000000000000045541503426445100274310ustar00rootroot00000000000000ALTER DEFAULT PRIVILEGES FOR USER my_user GRANT SELECT ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user IN SCHEMA my_schema GRANT INSERT ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT UPDATE ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema, your_schema GRANT DELETE ON TABLES TO my_group WITH GRANT OPTION; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user GRANT TRUNCATE ON TABLES TO GROUP my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT REFERENCES, TRIGGER ON TABLES TO PUBLIC; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT ALL ON SEQUENCES TO GROUP my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT EXECUTE ON ROUTINES TO my_group; ALTER DEFAULT PRIVILEGES GRANT ALL PRIVILEGES ON FUNCTIONS TO my_group; ALTER DEFAULT PRIVILEGES IN SCHEMA my_schema, your_schema GRANT USAGE ON TYPES TO my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT USAGE ON SCHEMAS TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT CREATE ON SCHEMAS TO my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user, your_user IN SCHEMA my_schema, your_schema GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES TO GROUP my_group, PUBLIC WITH GRANT OPTION; ALTER DEFAULT PRIVILEGES FOR USER my_user REVOKE ALL ON TABLES FROM my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user IN SCHEMA my_schema REVOKE SELECT ON TABLES FROM GROUP my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema, your_schema REVOKE INSERT ON TABLES FROM PUBLIC CASCADE; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema REVOKE UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES FROM PUBLIC RESTRICT; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user REVOKE ALL PRIVILEGES ON SEQUENCES FROM my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user REVOKE EXECUTE ON FUNCTIONS FROM my_group; ALTER DEFAULT PRIVILEGES REVOKE EXECUTE ON ROUTINES FROM my_group; ALTER DEFAULT PRIVILEGES IN SCHEMA my_schema, your_schema REVOKE USAGE ON TYPES FROM my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user REVOKE USAGE, CREATE ON TYPES FROM my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user IN SCHEMA my_schema, your_schema REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES FROM GROUP my_group, PUBLIC CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_default_privileges.yml000066400000000000000000000426511503426445100274330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6cecbb7ede0ff8bce72a86691b5b2d4883afc280921f2f35fb7c28392b1ef09e file: - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: SELECT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: INSERT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: UPDATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: DELETE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: TRUNCATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: 
FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: ALL - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SEQUENCES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - alter_default_privileges_schema_object: keyword: FUNCTIONS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SCHEMAS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: CREATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SCHEMAS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - 
alter_default_privileges_object_privilege: - keyword: SELECT - comma: ',' - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: ALL - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: SELECT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: INSERT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: CASCADE - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: RESTRICT - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SEQUENCES - 
keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: FUNCTIONS - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: USAGE - comma: ',' - keyword: CREATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: SELECT - comma: ',' - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_domain.sql000066400000000000000000000005631503426445100250170ustar00rootroot00000000000000ALTER DOMAIN zipcode SET NOT NULL; ALTER DOMAIN zipcode DROP NOT NULL; ALTER DOMAIN zipcode ADD CONSTRAINT zipchk CHECK 
(char_length(VALUE) = 5); ALTER DOMAIN zipcode DROP CONSTRAINT zipchk; ALTER DOMAIN zipcode RENAME CONSTRAINT zipchk TO zip_check; ALTER DOMAIN zipcode SET SCHEMA customers; alter domain oname add constraint "test" check (length(value) < 512); sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_domain.yml000066400000000000000000000065761503426445100250330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f581f132a8df0f9dcef9165ba325053bded2a28444e08601750dd13aacfa6674 file: - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: SET - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: ADD - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: char_length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: VALUE end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: RENAME - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: TO - object_reference: naked_identifier: zip_check - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: SET - keyword: SCHEMA - object_reference: naked_identifier: customers - statement_terminator: ; - statement: alter_domain_statement: - keyword: alter - keyword: domain - object_reference: naked_identifier: oname - keyword: add - keyword: constraint - object_reference: quoted_identifier: '"test"' - keyword: check - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '512' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_extension.sql000066400000000000000000000003571503426445100255650ustar00rootroot00000000000000ALTER EXTENSION hstore SET SCHEMA utils; ALTER EXTENSION hstore ADD FUNCTION populate_record(anyelement, hstore); ALTER EXTENSION "hstore" DROP TABLE public.ref_table; ALTER EXTENSION hstore UPDATE TO '2.0'; ALTER EXTENSION repmgr UPDATE; 
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_extension.yml000066400000000000000000000036751503426445100255750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 438001101b37641a260ff62f2d66dad4a72bfdc791834090fac15d5d1035fd0b file: - statement: alter_extension_statement: - keyword: ALTER - keyword: EXTENSION - extension_reference: naked_identifier: hstore - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: utils - statement_terminator: ; - statement: alter_extension_statement: - keyword: ALTER - keyword: EXTENSION - extension_reference: naked_identifier: hstore - keyword: ADD - keyword: FUNCTION - function_name: function_name_identifier: populate_record - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: anyelement - comma: ',' - data_type: data_type_identifier: hstore - end_bracket: ) - statement_terminator: ; - statement: alter_extension_statement: - keyword: ALTER - keyword: EXTENSION - extension_reference: quoted_identifier: '"hstore"' - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: ref_table - statement_terminator: ; - statement: alter_extension_statement: - keyword: ALTER - keyword: EXTENSION - extension_reference: naked_identifier: hstore - keyword: UPDATE - keyword: TO - quoted_literal: "'2.0'" - statement_terminator: ; - statement: alter_extension_statement: - keyword: ALTER - keyword: EXTENSION - extension_reference: naked_identifier: repmgr - keyword: UPDATE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_foreign_table.sql000066400000000000000000000007231503426445100263460ustar00rootroot00000000000000ALTER FOREIGN TABLE distributors ALTER COLUMN street SET NOT NULL; ALTER FOREIGN TABLE t_user ADD COLUMN my_column text; ALTER TABLE bar_fdw.foo ADD test varchar NULL; ALTER FOREIGN TABLE myschema.distributors OPTIONS (ADD opt1 'value', SET opt2 'value2', DROP opt3); ALTER FOREIGN TABLE test OPTIONS (SET table $$(select my_column from my_table)$$); ALTER FOREIGN TABLE test ADD COLUMN new_column int8, OPTIONS (SET table $$(select my_column from my_table)$$); sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_foreign_table.yml000066400000000000000000000065321503426445100263540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c04f3f1caaed61735224ce3ab6ef577922471c3baa76a318b2f819daf9d5c087 file: - statement: alter_foreign_table_statement: - keyword: ALTER - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: distributors - alter_foreign_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: street - keyword: SET - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_foreign_table_statement: - keyword: ALTER - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: t_user - alter_foreign_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: keyword: text - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: bar_fdw - dot: . - naked_identifier: foo - alter_table_action_segment: keyword: ADD column_reference: naked_identifier: test data_type: keyword: varchar column_constraint_segment: keyword: 'NULL' - statement_terminator: ; - statement: alter_foreign_table_statement: - keyword: ALTER - keyword: FOREIGN - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: distributors - alter_foreign_table_action_segment: keyword: OPTIONS bracketed: - start_bracket: ( - keyword: ADD - naked_identifier: opt1 - quoted_literal: "'value'" - comma: ',' - keyword: SET - naked_identifier: opt2 - quoted_literal: "'value2'" - comma: ',' - keyword: DROP - naked_identifier: opt3 - end_bracket: ) - statement_terminator: ; - statement: alter_foreign_table_statement: - keyword: ALTER - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: test - alter_foreign_table_action_segment: keyword: OPTIONS bracketed: start_bracket: ( keyword: SET naked_identifier: table quoted_literal: $$(select my_column from my_table)$$ end_bracket: ) - statement_terminator: ; - statement: alter_foreign_table_statement: - keyword: ALTER - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: test - alter_foreign_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: new_column - data_type: keyword: int8 - comma: ',' - alter_foreign_table_action_segment: keyword: OPTIONS bracketed: start_bracket: ( keyword: SET naked_identifier: table quoted_literal: $$(select my_column from my_table)$$ end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_function.sql000066400000000000000000000113331503426445100253720ustar00rootroot00000000000000-- Issue:2089 ALTER FUNCTION fn OWNER TO auser; ALTER FUNCTION fn(int, arg2 text) OWNER TO auser; ALTER FUNCTION fn OWNER TO auser; ALTER FUNCTION fn OWNER TO CURRENT_ROLE; ALTER FUNCTION fn OWNER TO CURRENT_USER; ALTER FUNCTION fn OWNER TO SESSION_USER; ALTER FUNCTION public.fn OWNER TO auser; ALTER FUNCTION public.fn OWNER TO CURRENT_USER; ALTER FUNCTION public.fn OWNER TO CURRENT_ROLE; ALTER FUNCTION public.fn OWNER TO SESSION_USER; ALTER FUNCTION fn CALLED ON NULL INPUT; ALTER FUNCTION public.fn CALLED ON NULL INPUT; ALTER FUNCTION fn CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION fn(arg1 int) CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION public.fn(arg1 int) CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION fn RETURNS NULL ON NULL INPUT; ALTER FUNCTION fn(int, text) RETURNS NULL ON NULL INPUT; ALTER FUNCTION fn RETURNS NULL ON NULL INPUT RESTRICT; ALTER FUNCTION public.fn RETURNS NULL ON NULL INPUT RESTRICT; ALTER FUNCTION fn(int, text) RETURNS NULL 
ON NULL INPUT RESTRICT; ALTER FUNCTION fn STRICT; ALTER FUNCTION fn(int) STRICT; ALTER FUNCTION fn STRICT RESTRICT; ALTER FUNCTION public.fn STRICT RESTRICT; ALTER FUNCTION fn(arg1 int) STRICT RESTRICT; ALTER FUNCTION fn IMMUTABLE; ALTER FUNCTION fn IMMUTABLE RESTRICT; ALTER FUNCTION public.fn IMMUTABLE RESTRICT; ALTER FUNCTION fn STABLE; ALTER FUNCTION public.fn STABLE; ALTER FUNCTION fn STABLE RESTRICT; ALTER FUNCTION fn VOLATILE; ALTER FUNCTION fn VOLATILE RESTRICT; ALTER FUNCTION fn(int, arg2 text) IMMUTABLE; ALTER FUNCTION fn(int, arg2 text) IMMUTABLE RESTRICT; ALTER FUNCTION public.fn(int, arg2 text) IMMUTABLE RESTRICT; ALTER FUNCTION fn(int) STABLE; ALTER FUNCTION fn(int) STABLE RESTRICT; ALTER FUNCTION fn(arg int) VOLATILE; ALTER FUNCTION fn(arg int) VOLATILE RESTRICT; ALTER FUNCTION fn LEAKPROOF; ALTER FUNCTION fn LEAKPROOF RESTRICT; ALTER FUNCTION fn(int, arg2 text) LEAKPROOF; ALTER FUNCTION fn(int, arg2 text) LEAKPROOF RESTRICT; ALTER FUNCTION fn NOT LEAKPROOF; ALTER FUNCTION fn NOT LEAKPROOF RESTRICT; ALTER FUNCTION fn(arg int) NOT LEAKPROOF; ALTER FUNCTION fn(arg int) NOT LEAKPROOF RESTRICT; ALTER FUNCTION fn SECURITY INVOKER; ALTER FUNCTION fn SECURITY INVOKER RESTRICT; ALTER FUNCTION fn(int, text, boolean) SECURITY INVOKER; ALTER FUNCTION fn(int, text, boolean) SECURITY INVOKER RESTRICT; ALTER FUNCTION fn EXTERNAL SECURITY INVOKER; ALTER FUNCTION fn EXTERNAL SECURITY INVOKER RESTRICT; ALTER FUNCTION fn(int, text) EXTERNAL SECURITY INVOKER; ALTER FUNCTION fn(int, text) EXTERNAL SECURITY INVOKER RESTRICT; ALTER FUNCTION fn SECURITY DEFINER; ALTER FUNCTION fn SECURITY DEFINER RESTRICT; ALTER FUNCTION fn EXTERNAL SECURITY DEFINER; ALTER FUNCTION fn EXTERNAL SECURITY DEFINER RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text, boolean) EXTERNAL SECURITY DEFINER; ALTER FUNCTION fn(arg1 int, arg2 text, boolean) EXTERNAL SECURITY DEFINER RESTRICT; ALTER FUNCTION fn PARALLEL UNSAFE; ALTER FUNCTION fn(arg int) PARALLEL UNSAFE; ALTER FUNCTION fn PARALLEL UNSAFE RESTRICT; ALTER FUNCTION fn(int) PARALLEL UNSAFE RESTRICT; ALTER FUNCTION fn PARALLEL RESTRICTED; ALTER FUNCTION fn PARALLEL RESTRICTED RESTRICT; ALTER FUNCTION fn(int, text) PARALLEL RESTRICTED; ALTER FUNCTION fn(text) PARALLEL RESTRICTED RESTRICT; ALTER FUNCTION fn PARALLEL SAFE; ALTER FUNCTION public.fn PARALLEL SAFE RESTRICT; ALTER FUNCTION fn(text, arg2 int) PARALLEL SAFE; ALTER FUNCTION fn(text, text) PARALLEL SAFE RESTRICT; ALTER FUNCTION fn COST 10; ALTER FUNCTION fn COST 10 RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text) COST 10; ALTER FUNCTION fn(arg1 int, arg2 text) COST 10 RESTRICT; ALTER FUNCTION fn ROWS 10; ALTER FUNCTION fn ROWS 10 RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text, int, int) ROWS 10; ALTER FUNCTION fn(arg1 int, arg2 text, int, int) ROWS 10 RESTRICT; ALTER FUNCTION fn SUPPORT supportfn; ALTER FUNCTION public.fn SUPPORT supportfn; ALTER FUNCTION fn(int, int) SUPPORT supportfn; ALTER FUNCTION fn(int, int) SUPPORT supportfn RESTRICT; ALTER FUNCTION fn SET param1 TO 1; ALTER FUNCTION fn SET param1 TO 'value'; ALTER FUNCTION fn SET param1 TO DEFAULT; ALTER FUNCTION fn SET param1 TO 'value' RESTRICT; ALTER FUNCTION fn(int, int) SET param1 TO DEFAULT; ALTER FUNCTION fn(int, int) SET param1 TO DEFAULT RESTRICT; ALTER FUNCTION fn SET param1 TO avalue; ALTER FUNCTION fn SET param1 = 1; ALTER FUNCTION fn SET param1 = 'value'; ALTER FUNCTION fn SET param1 = avalue; ALTER FUNCTION fn SET param1 = DEFAULT; ALTER FUNCTION fn(arg1 int, arg2 text) SET param1 = 'value'; ALTER FUNCTION fn(int) SET param1 = avalue; ALTER 
FUNCTION fn(text, int) SET param1 = avalue; ALTER FUNCTION fn(int, int) SUPPORT supportfn; ALTER FUNCTION fn SET param1 FROM CURRENT; ALTER FUNCTION fn(boolean) SET param1 FROM CURRENT; ALTER FUNCTION public.fn(boolean, int) SET param1 FROM CURRENT; ALTER FUNCTION fn RESET param1; ALTER FUNCTION fn RESET ALL; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_function.yml000066400000000000000000001171071503426445100254020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 24246d19967abbaf8f3d13a9a26e338003476e9fbe7505a9ed735e1907e20e68 file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_ROLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: SESSION_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_ROLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: SESSION_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: STRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: STRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: IMMUTABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: STABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: VOLATILE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: VOLATILE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: IMMUTABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: STABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: VOLATILE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: VOLATILE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: 
; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - 
keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - keyword: RESTRICT - 
statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - parameter: arg2 - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: COST numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: COST - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: COST numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: COST - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: ROWS numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: ROWS - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: ROWS numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: ROWS - numeric_literal: '10' - keyword: 
RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SUPPORT - parameter: supportfn - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - numeric_literal: '1' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - quoted_literal: "'value'" - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: 
raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: boolean end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: boolean - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: RESET parameter: param1 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RESET - keyword: ALL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_index.sql000066400000000000000000000007571503426445100246640ustar00rootroot00000000000000ALTER INDEX distributors RENAME TO suppliers; ALTER INDEX distributors SET TABLESPACE fasttablespace; ALTER INDEX distributors SET (fillfactor = 75); ALTER INDEX coord_idx ALTER COLUMN 3 SET STATISTICS 1000; ALTER INDEX IF EXISTS foo ATTACH PARTITION bar; ALTER INDEX foo NO DEPENDS ON EXTENSION barr; ALTER INDEX foo RESET (thing, other_thing); ALTER INDEX foo ALTER 4 SET STATISTICS 7; ALTER INDEX ALL IN TABLESPACE foo OWNED BY role_1, account_admin, steve SET TABLESPACE bar NOWAIT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_index.yml000066400000000000000000000065721503426445100246670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5cf064cb4d9880172559d9988a04e7ea296d8329e620aeb900c6237cf9ba5ee3 file: - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: RENAME - keyword: TO - index_reference: naked_identifier: suppliers - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: fasttablespace - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: SET - bracketed: start_bracket: ( parameter: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '75' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: coord_idx - keyword: ALTER - keyword: COLUMN - numeric_literal: '3' - keyword: SET - keyword: STATISTICS - numeric_literal: '1000' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: foo - keyword: ATTACH - keyword: PARTITION - index_reference: naked_identifier: bar - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: 'NO' - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: barr - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: RESET - bracketed: - start_bracket: ( - parameter: thing - comma: ',' - parameter: other_thing - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: ALTER - numeric_literal: '4' - keyword: SET - keyword: STATISTICS - numeric_literal: '7' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: foo - keyword: OWNED - keyword: BY - role_reference: naked_identifier: role_1 - comma: ',' - role_reference: naked_identifier: account_admin - comma: ',' - role_reference: naked_identifier: steve - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: bar - keyword: NOWAIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_materialized_view.sql000066400000000000000000000047161503426445100272600ustar00rootroot00000000000000ALTER MATERIALIZED VIEW bar ALTER column_name SET STATISTICS 1; ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET STATISTICS 1; ALTER MATERIALIZED VIEW bar ALTER column_name SET ( attribute_option = 1); ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET ( attribute_option1 = 'avalue', attribute_option2 = 'avalue' ); ALTER MATERIALIZED VIEW bar ALTER column_name RESET ( attribute_option ); ALTER MATERIALIZED VIEW bar ALTER column_name RESET ( attribute_option, attribute_option2 ); ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name RESET ( attribute_option ); ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name RESET ( attribute_option, attribute_option2 ); ALTER MATERIALIZED VIEW bar ALTER column_name SET STORAGE PLAIN; ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET 
STORAGE EXTENDED; ALTER MATERIALIZED VIEW bar CLUSTER ON index_name; ALTER MATERIALIZED VIEW bar SET WITHOUT CLUSTER; ALTER MATERIALIZED VIEW bar SET ( storage_parameter); ALTER MATERIALIZED VIEW bar SET ( storage_parameter, storage_parameter); ALTER MATERIALIZED VIEW bar SET ( storage_parameter = 'some_value', storage_parameter ); ALTER MATERIALIZED VIEW bar RESET ( storage_parameter); ALTER MATERIALIZED VIEW bar RESET ( storage_parameter, storage_parameter); ALTER MATERIALIZED VIEW bar OWNER TO baz_role; ALTER MATERIALIZED VIEW bar OWNER TO "baz-role"; ALTER MATERIALIZED VIEW bar DEPENDS ON EXTENSION baz; ALTER MATERIALIZED VIEW bar NO DEPENDS ON EXTENSION baz; ALTER MATERIALIZED VIEW bar RENAME column_name TO new_column_name; ALTER MATERIALIZED VIEW IF EXISTS bar RENAME COLUMN column_name TO new_column_name; ALTER MATERIALIZED VIEW bar RENAME TO baz; ALTER MATERIALIZED VIEW IF EXISTS bar RENAME TO baz; ALTER MATERIALIZED VIEW bar SET SCHEMA new_schema; ALTER MATERIALIZED VIEW IF EXISTS bar SET SCHEMA new_schema; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace SET TABLESPACE new_tablespace; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace SET TABLESPACE new_tablespace NOWAIT; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name SET TABLESPACE new_tablespace; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name SET TABLESPACE new_tablespace NOWAIT; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name, role_name_2 SET TABLESPACE new_tablespace; ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name, role_name_2 SET TABLESPACE new_tablespace NOWAIT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_materialized_view.yml000066400000000000000000000345521503426445100272630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3827e0a24b512773b329bff3e882cb66d93c1d21315b2c99482da5948691f869 file: - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - keyword: STATISTICS - numeric_literal: '1' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - keyword: STATISTICS - numeric_literal: '1' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - bracketed: start_bracket: ( parameter: attribute_option comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - bracketed: - start_bracket: ( - parameter: attribute_option1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'avalue'" - comma: ',' - parameter: attribute_option2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'avalue'" - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: start_bracket: ( parameter: attribute_option end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: - start_bracket: ( - parameter: attribute_option - comma: ',' - parameter: attribute_option2 - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: start_bracket: ( parameter: attribute_option end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: - start_bracket: ( - parameter: attribute_option - comma: ',' - parameter: attribute_option2 - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - 
table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - keyword: STORAGE - keyword: PLAIN - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - keyword: STORAGE - keyword: EXTENDED - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: CLUSTER - keyword: 'ON' - parameter: index_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: SET - keyword: WITHOUT - keyword: CLUSTER - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: start_bracket: ( parameter: storage_parameter end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: - start_bracket: ( - parameter: storage_parameter - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: - start_bracket: ( - parameter: storage_parameter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: RESET bracketed: start_bracket: ( parameter: storage_parameter end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: RESET bracketed: - start_bracket: ( - parameter: storage_parameter - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: OWNER - keyword: TO - object_reference: naked_identifier: baz_role - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: OWNER - keyword: TO - object_reference: quoted_identifier: '"baz-role"' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - 
table_reference: naked_identifier: bar - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: 'NO' - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RENAME - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RENAME - keyword: TO - table_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RENAME - keyword: TO - table_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: 
naked_identifier: role_name - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - comma: ',' - object_reference: naked_identifier: role_name_2 - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - comma: ',' - object_reference: naked_identifier: role_name_2 - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_policy.sql000066400000000000000000000013761503426445100250520ustar00rootroot00000000000000ALTER POLICY account_managers ON accounts RENAME TO account_users; ALTER POLICY account_managers ON accounts TO current_user; ALTER POLICY account_managers ON accounts TO public, session_user; ALTER POLICY account_managers ON accounts WITH CHECK ( NOT accounts_is_excluded_full_name(full_name) ); ALTER POLICY account_managers ON accounts WITH CHECK ( col > 10 ); ALTER POLICY account_managers ON accounts USING (username = current_user); ALTER POLICY sales_rep_is_self ON invoices WITH CHECK (sales_rep = CURRENT_USER AND CURRENT_USER IN ( SELECT user_id FROM allowed_users )); ALTER POLICY test_policy ON test_table TO public, session_user USING (username = current_user) WITH CHECK (col > 10); sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_policy.yml000066400000000000000000000125121503426445100250460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
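# Illustration only (an assumed workflow sketch, not generated content): a
# typical round-trip after editing the paired alter_policy.sql is, from the
# repository root,
#   python test/generate_parse_fixture_yml.py
# exactly as instructed above, then re-running the dialect tests; whether the
# script also accepts narrower arguments (for example a single dialect) is an
# assumption and is not confirmed by this file.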
_hash: e78af7a252b3bef96469b1a43141d16eeccc9304424d52eb076f718ed1a814cd file: - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RENAME - keyword: TO - object_reference: naked_identifier: account_users - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - role_reference: naked_identifier: current_user - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - role_reference: naked_identifier: public - comma: ',' - keyword: session_user - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: keyword: NOT function: function_name: function_name_identifier: accounts_is_excluded_full_name function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: full_name end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: username comparison_operator: raw_comparison_operator: '=' bare_function: current_user end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: sales_rep_is_self - keyword: 'ON' - table_reference: naked_identifier: invoices - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales_rep - comparison_operator: raw_comparison_operator: '=' - bare_function: CURRENT_USER - binary_operator: AND - bare_function: CURRENT_USER - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: allowed_users end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: test_policy - keyword: 'ON' - table_reference: naked_identifier: test_table - keyword: TO - role_reference: naked_identifier: public - comma: ',' - keyword: session_user - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: username comparison_operator: raw_comparison_operator: '=' 
bare_function: current_user end_bracket: ) - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_procedure.sql000066400000000000000000000007261503426445100255410ustar00rootroot00000000000000ALTER PROCEDURE insert_data(integer, integer) RENAME TO insert_record; ALTER PROCEDURE insert_data(integer, integer) OWNER TO joe; ALTER PROCEDURE insert_data(integer, integer) OWNER TO CURRENT_USER; ALTER PROCEDURE insert_data(integer, integer) SET SCHEMA accounting; ALTER PROCEDURE insert_data(integer, integer) DEPENDS ON EXTENSION myext; ALTER PROCEDURE check_password(text) SET search_path = admin, pg_temp; ALTER PROCEDURE check_password(text) RESET search_path; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_procedure.yml000066400000000000000000000073661503426445100255520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1778fedc288d89d634477c6fc5d8e8f07e816b42b47d6a2ce491e73b64386e16 file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: insert_record - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: joe - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: accounting - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: myext - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text 
end_bracket: ) - keyword: SET - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - parameter: admin - comma: ',' - parameter: pg_temp - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text end_bracket: ) - alter_procedure_action_segment: keyword: RESET parameter: search_path - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_publication.sql000066400000000000000000000021621503426445100260560ustar00rootroot00000000000000-- More thorough testing of the PublicationObjectsSegment is in postgres_create_publication.sql. ALTER PUBLICATION abc ADD TABLE def; ALTER PUBLICATION abc ADD TABLE def, TABLE ghi; ALTER PUBLICATION abc ADD TABLE def, ghi*, ONLY jkl, ONLY (mno); ALTER PUBLICATION abc SET TABLE def, ghi, TABLES IN SCHEMA y, z, CURRENT_SCHEMA; ALTER PUBLICATION abc SET (publish = 'insert,update', publish_via_partition_root = TRUE); ALTER PUBLICATION abc OWNER TO bob; ALTER PUBLICATION abc OWNER TO CURRENT_ROLE; ALTER PUBLICATION abc OWNER TO CURRENT_USER; ALTER PUBLICATION abc OWNER TO SESSION_USER; ALTER PUBLICATION abc RENAME TO def; -- examples from https://www.postgresql.org/docs/15/sql-alterpublication.html ALTER PUBLICATION noinsert SET (publish = 'update, delete'); ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments; ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments; ALTER PUBLICATION sales_publication ADD TABLES IN SCHEMA marketing, sales; ALTER PUBLICATION production_publication ADD TABLE users, departments, TABLES IN SCHEMA production; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_publication.yml000066400000000000000000000174061503426445100260670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3090d22ed8d674194f74900d99e51ec931338de29f73450737f52cc62ff7c088 file: - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: ADD - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: def - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: ADD - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: def - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: ghi - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: ADD - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: def - comma: ',' - publication_table: table_reference: naked_identifier: ghi star: '*' - comma: ',' - publication_table: keyword: ONLY table_reference: naked_identifier: jkl - comma: ',' - publication_table: keyword: ONLY bracketed: start_bracket: ( table_reference: naked_identifier: mno end_bracket: ) - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: SET - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: def - comma: ',' - publication_table: table_reference: naked_identifier: ghi - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: y - comma: ',' - schema_reference: naked_identifier: z - comma: ',' - keyword: CURRENT_SCHEMA - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: SET - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert,update'" - comma: ',' - definition_parameter: properties_naked_identifier: publish_via_partition_root comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - end_bracket: ) - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - role_reference: naked_identifier: bob - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: RENAME - keyword: TO - publication_reference: naked_identifier: def - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - 
keyword: PUBLICATION - publication_reference: naked_identifier: noinsert - keyword: SET - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'update, delete'" end_bracket: ) - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: mypublication - keyword: ADD - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: firstname - end_bracket: ) - comma: ',' - publication_table: table_reference: naked_identifier: departments - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: mypublication - keyword: SET - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: users bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: firstname - comma: ',' - column_reference: naked_identifier: lastname - end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: departments - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: sales_publication - keyword: ADD - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: marketing - comma: ',' - schema_reference: naked_identifier: sales - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: production_publication - keyword: ADD - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users - comma: ',' - publication_table: table_reference: naked_identifier: departments - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: production - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_role.sql000066400000000000000000000037371503426445100245170ustar00rootroot00000000000000ALTER ROLE davide WITH PASSWORD 'hu8jmn3'; ALTER ROLE davide WITH PASSWORD NULL; ALTER ROLE chris VALID UNTIL 'May 4 12:00:00 2015 +1'; ALTER ROLE fred VALID UNTIL 'infinity'; ALTER ROLE worker_bee SET maintenance_work_mem = '100000'; ALTER ROLE fred IN DATABASE devel SET client_min_messages TO DEFAULT; ALTER ROLE miriam CREATEROLE CREATEDB; ALTER USER davide WITH PASSWORD 'hu8jmn3'; ALTER USER davide WITH PASSWORD NULL; ALTER USER chris VALID UNTIL 'May 4 12:00:00 2015 +1'; ALTER USER fred VALID UNTIL 'infinity'; ALTER USER worker_bee SET maintenance_work_mem = '100000'; ALTER USER fred IN DATABASE devel SET client_min_messages TO DEFAULT; ALTER USER miriam CREATEROLE CREATEDB; -- more SET tests: ALTER ROLE fred SET testing FROM CURRENT; ALTER ROLE fred IN DATABASE devel SET testing FROM CURRENT; ALTER ROLE fred IN DATABASE devel SET testing TO 1234; ALTER ROLE fred IN DATABASE devel SET testing = 1234; ALTER ROLE fred IN DATABASE devel SET testing TO DEFAULT; ALTER ROLE fred IN DATABASE devel SET testing = DEFAULT; ALTER ROLE fred IN DATABASE devel SET testing = TRUE; ALTER 
ROLE fred IN DATABASE devel SET testing = FALSE; ALTER ROLE fred IN DATABASE devel SET testing = 'string value'; ALTER ROLE fred IN DATABASE devel SET testing = on, off, auto; ALTER ROLE fred RESET ALL; ALTER ROLE fred RESET testing; ALTER ROLE fred IN DATABASE devel RESET ALL; ALTER ROLE fred IN DATABASE devel RESET testing; -- CURRENT_ROLE/CURRENT_USER/SESSION_USER and ALL ALTER USER CURRENT_ROLE WITH PASSWORD NULL; ALTER USER CURRENT_USER WITH PASSWORD NULL; ALTER USER SESSION_USER WITH PASSWORD NULL; ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing FROM CURRENT; ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing TO 1234; ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing = 1234; ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing TO DEFAULT; ALTER ROLE ALL RESET ALL; ALTER ROLE ALL RESET testing; ALTER USER ALL IN DATABASE devel RESET ALL; ALTER ROLE ALL IN DATABASE devel RESET testing; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_role.yml000066400000000000000000000266161503426445100245220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c42b380e90f54eebc0856cf39254d7d6c3326462faa6e7a9bc7c9552c6da4dff file: - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: davide - keyword: WITH - keyword: PASSWORD - quoted_literal: "'hu8jmn3'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: davide - keyword: WITH - keyword: PASSWORD - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: chris - keyword: VALID - keyword: UNTIL - quoted_literal: "'May 4 12:00:00 2015 +1'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: VALID - keyword: UNTIL - quoted_literal: "'infinity'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: worker_bee - keyword: SET - parameter: maintenance_work_mem - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'100000'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: client_min_messages - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: miriam - keyword: CREATEROLE - keyword: CREATEDB - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: davide - keyword: WITH - keyword: PASSWORD - quoted_literal: "'hu8jmn3'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: davide - keyword: WITH - keyword: PASSWORD - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: chris - keyword: VALID - keyword: UNTIL - 
quoted_literal: "'May 4 12:00:00 2015 +1'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: fred - keyword: VALID - keyword: UNTIL - quoted_literal: "'infinity'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: worker_bee - keyword: SET - parameter: maintenance_work_mem - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'100000'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: client_min_messages - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: miriam - keyword: CREATEROLE - keyword: CREATEDB - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: SET - parameter: testing - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: TO - numeric_literal: '1234' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1234' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - 
database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'string value'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'on' - comma: ',' - naked_identifier: 'off' - comma: ',' - naked_identifier: auto - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: RESET - parameter: ALL - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: RESET - parameter: testing - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: RESET - parameter: ALL - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: fred - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: RESET - parameter: testing - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - keyword: CURRENT_ROLE - keyword: WITH - keyword: PASSWORD - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - keyword: CURRENT_USER - keyword: WITH - keyword: PASSWORD - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - keyword: SESSION_USER - keyword: WITH - keyword: PASSWORD - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: CURRENT_ROLE - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: CURRENT_ROLE - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: TO - numeric_literal: '1234' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: CURRENT_ROLE - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1234' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: CURRENT_ROLE - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: SET - parameter: testing - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: ALL - keyword: RESET - parameter: ALL - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: ALL - keyword: RESET - parameter: testing - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - keyword: ALL - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: RESET - 
parameter: ALL - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: ALL - keyword: IN - keyword: DATABASE - database_reference: naked_identifier: devel - keyword: RESET - parameter: testing - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_schema.sql000066400000000000000000000001211503426445100247760ustar00rootroot00000000000000ALTER SCHEMA schema1 RENAME TO schema2; ALTER SCHEMA schema1 OWNER TO new_owner; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_schema.yml000066400000000000000000000016351503426445100250130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cfb0c467bd784e5970002e70123a796062479878220480a29e8387b5e0babbd8 file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: OWNER - keyword: TO - role_reference: naked_identifier: new_owner - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_sequence.sql000066400000000000000000000011141503426445100253510ustar00rootroot00000000000000ALTER SEQUENCE IF EXISTS foo AS integer; ALTER SEQUENCE foo INCREMENT BY 4 NO MINVALUE MAXVALUE 56; ALTER SEQUENCE foo INCREMENT 3 NO MAXVALUE MINVALUE 3; ALTER SEQUENCE foo START 7 CACHE 4; ALTER SEQUENCE foo RESTART WITH 14 NO CYCLE; ALTER SEQUENCE foo OWNED BY foo.foo; ALTER SEQUENCE foo OWNED BY NONE; ALTER SEQUENCE IF EXISTS foo OWNER TO my_user; ALTER SEQUENCE foo OWNER TO CURRENT_USER; ALTER SEQUENCE foo OWNER TO SESSION_USER; ALTER SEQUENCE foo RENAME TO foo2; ALTER SEQUENCE foo SET SCHEMA my_schema; ALTER SEQUENCE foo INCREMENT BY -4 MINVALUE -100 MAXVALUE +2; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_sequence.yml000066400000000000000000000113101503426445100253520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0d959f82af31851b39c885cc891c8ace400e3e5281dc3fea415ffea82d061c86 file: - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: AS data_type: keyword: integer - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '4' - alter_sequence_options_segment: - keyword: 'NO' - keyword: MINVALUE - alter_sequence_options_segment: keyword: MAXVALUE numeric_literal: '56' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: INCREMENT numeric_literal: '3' - alter_sequence_options_segment: - keyword: 'NO' - keyword: MAXVALUE - alter_sequence_options_segment: keyword: MINVALUE numeric_literal: '3' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: START numeric_literal: '7' - alter_sequence_options_segment: keyword: CACHE numeric_literal: '4' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: RESTART - keyword: WITH - numeric_literal: '14' - alter_sequence_options_segment: - keyword: 'NO' - keyword: CYCLE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: OWNED - keyword: BY - column_reference: - naked_identifier: foo - dot: . 
- naked_identifier: foo - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: OWNED - keyword: BY - keyword: NONE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - parameter: my_user - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - parameter: SESSION_USER - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: RENAME - keyword: TO - sequence_reference: naked_identifier: foo2 - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: INCREMENT - keyword: BY - sign_indicator: '-' - numeric_literal: '4' - alter_sequence_options_segment: keyword: MINVALUE sign_indicator: '-' numeric_literal: '100' - alter_sequence_options_segment: keyword: MAXVALUE sign_indicator: + numeric_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_statistics.sql000066400000000000000000000002421503426445100257340ustar00rootroot00000000000000ALTER STATISTICS foo OWNER TO CURRENT_USER; ALTER STATISTICS foo RENAME TO bar; ALTER STATISTICS foo SET SCHEMA my_schema; ALTER STATISTICS foo SET STATISTICS 4; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_statistics.yml000066400000000000000000000026361503426445100257470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
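# Illustration only (not generated content): each statement in the paired
# alter_statistics.sql becomes one `- statement:` entry below, listing its
# keywords and references in source order and closing with
# `- statement_terminator: ;`.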
_hash: d229332af88fd940c50ebd3bbf4504adc8d425609d42c4ed66e219e49eb267f1 file: - statement: alter_statistics_statement: - keyword: ALTER - keyword: STATISTICS - statistics_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_statistics_statement: - keyword: ALTER - keyword: STATISTICS - statistics_reference: naked_identifier: foo - keyword: RENAME - keyword: TO - statistics_reference: naked_identifier: bar - statement_terminator: ; - statement: alter_statistics_statement: - keyword: ALTER - keyword: STATISTICS - statistics_reference: naked_identifier: foo - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: alter_statistics_statement: - keyword: ALTER - keyword: STATISTICS - statistics_reference: naked_identifier: foo - keyword: SET - keyword: STATISTICS - numeric_literal: '4' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_subscription.sql000066400000000000000000000001431503426445100262660ustar00rootroot00000000000000ALTER SUBSCRIPTION my_subscription DISABLE; ALTER SUBSCRIPTION mysub SET PUBLICATION insert_only; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_subscription.yml000066400000000000000000000015701503426445100262750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 80f75ff91af0193f6cee4b0bd0906f7592a44d343373cac63a7e7f97d2ee55c3 file: - statement: alter_subscription: - keyword: ALTER - keyword: SUBSCRIPTION - subscription_reference: naked_identifier: my_subscription - keyword: DISABLE - statement_terminator: ; - statement: alter_subscription: - keyword: ALTER - keyword: SUBSCRIPTION - subscription_reference: naked_identifier: mysub - keyword: SET - keyword: PUBLICATION - publication_reference: naked_identifier: insert_only - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_table.sql000066400000000000000000000176031503426445100246420ustar00rootroot00000000000000ALTER TABLE distributors ADD COLUMN address varchar(30); ALTER TABLE measurements ADD COLUMN mtime timestamp with time zone DEFAULT now(); ALTER TABLE transactions ADD COLUMN status varchar(30) DEFAULT 'old', ALTER COLUMN status SET default 'current'; ALTER TABLE distributors DROP COLUMN address RESTRICT; ALTER TABLE distributors ALTER COLUMN address TYPE varchar(80), ALTER COLUMN name TYPE varchar(100); ALTER TABLE foo ALTER COLUMN foo_timestamp SET DATA TYPE timestamp with time zone USING timestamp with time zone 'epoch' + foo_timestamp * interval '1 second'; ALTER TABLE foo ALTER COLUMN foo_timestamp DROP DEFAULT, ALTER COLUMN foo_timestamp TYPE timestamp with time zone USING timestamp 'epoch' + foo_timestamp * interval '1 second', ALTER COLUMN foo_timestamp SET DEFAULT now(); ALTER TABLE mytable ALTER date_column SET DEFAULT NOW(); ALTER TABLE mytable ALTER int_column SET DEFAULT 1; ALTER TABLE mytable ALTER text_column SET DEFAULT 'value'; ALTER TABLE mytable ALTER bool_column SET DEFAULT false; ALTER TABLE mytable ALTER other_column SET DEFAULT other_value; ALTER TABLE mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP; ALTER TABLE mytable ALTER other_column SET DEFAULT a_function(a_parameter); ALTER TABLE 
mytable ALTER other_column SET DEFAULT a_function('a_parameter'); ALTER TABLE mytable ALTER other_column SET DEFAULT 1 + 2 + 3; ALTER TABLE mytable ALTER other_column SET DEFAULT (1 + 2 + 3); ALTER TABLE mytable ALTER other_column DROP DEFAULT; ALTER TABLE IF EXISTS mytable ALTER date_column SET DEFAULT NOW(); ALTER TABLE IF EXISTS mytable ALTER int_column SET DEFAULT 1; ALTER TABLE IF EXISTS mytable ALTER text_column SET DEFAULT 'value'; ALTER TABLE IF EXISTS mytable ALTER bool_column SET DEFAULT false; ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT other_value; ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP; ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function(a_parameter); ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function('a_parameter'); ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT 1 + 2 + 3; ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT (1 + 2 + 3); ALTER TABLE IF EXISTS mytable ALTER other_column DROP DEFAULT; ALTER TABLE distributors RENAME COLUMN address TO city; ALTER TABLE distributors RENAME TO suppliers; ALTER TABLE distributors RENAME CONSTRAINT zipchk TO zip_check; ALTER TABLE distributors ALTER COLUMN street SET NOT NULL; ALTER TABLE distributors ALTER COLUMN street DROP NOT NULL; ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5); ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5) NO INHERIT; ALTER TABLE distributors DROP CONSTRAINT zipchk; -- constraints can optionally have their names double-quoted ALTER TABLE distributors DROP CONSTRAINT "zipchk"; ALTER TABLE ONLY distributors DROP CONSTRAINT zipchk; ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address); ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) MATCH FULL; ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) ON DELETE RESTRICT ON UPDATE CASCADE; ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID; ALTER TABLE distributors VALIDATE CONSTRAINT distfk; ALTER TABLE distributors ADD CONSTRAINT dist_id_zipcode_key UNIQUE (dist_id, zipcode); ALTER TABLE distributors ADD PRIMARY KEY (dist_id); ALTER TABLE distributors SET TABLESPACE fasttablespace; -- Issue:2071 ALTER TABLE distributors SET (parameter_1 = 'value'); ALTER TABLE distributors SET (parameter_1 = 1); ALTER TABLE distributors SET (parameter_1 = 1, parameter_2 = 'value'); ALTER TABLE myschema.distributors SET SCHEMA yourschema; ALTER TABLE distributors DROP CONSTRAINT distributors_pkey, ADD CONSTRAINT distributors_pkey PRIMARY KEY USING INDEX dist_id_temp_idx; ALTER TABLE measurement ATTACH PARTITION measurement_y2016m07 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); ALTER TABLE cities ATTACH PARTITION cities_ab FOR VALUES IN ('a', 'b'); ALTER TABLE orders ATTACH PARTITION orders_p4 FOR VALUES WITH (MODULUS 4, REMAINDER 3); ALTER TABLE cities ATTACH PARTITION cities_partdef DEFAULT; ALTER TABLE measurement DETACH PARTITION measurement_y2015m12; ALTER TABLE measurement DETACH PARTITION measurement_y2021m10 CONCURRENTLY FINALIZE; ALTER TABLE landing.workorderhistory ADD CONSTRAINT workorder_id_foreign_key FOREIGN KEY(workorderid) REFERENCES landing.workorder(id); ALTER TABLE my_table ADD COLUMN IF NOT EXISTS foo TEXT; ALTER TABLE public.obm_buildings OWNER to postgres; ALTER TABLE 
distributors ALTER COLUMN street ADD GENERATED ALWAYS AS IDENTITY (INCREMENT 4 NO MAXVALUE); ALTER TABLE distributors ALTER COLUMN street SET RESTART WITH 3; ALTER TABLE distributors ADD my_column int GENERATED BY DEFAULT AS IDENTITY (CACHE 3 MAXVALUE 63 OWNED BY NONE); ALTER TABLE public.test OWNER TO "ID"; ALTER TABLE public.test OWNER TO ID; ALTER TABLE IF EXISTS ONLY public.test OWNER TO CURRENT_ROLE; ALTER TABLE public.history ALTER COLUMN id ADD GENERATED ALWAYS AS IDENTITY ( SEQUENCE NAME public.history_id_seq ); -- Test adding columns with UNIQUE and PRIMARY KEY constraints ALTER TABLE tbl ADD COLUMN nulls_distinct text UNIQUE NULLS DISTINCT, ADD COLUMN nulls_not_distinct text UNIQUE NULLS NOT DISTINCT, ADD everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str') USING INDEX TABLESPACE spc; ALTER TABLE tbl ADD pk text DEFAULT 'hello' PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL; ALTER TABLE tbl ADD CONSTRAINT foo1 UNIQUE (fld, col), ADD CONSTRAINT foo2 UNIQUE NULLS DISTINCT (fld), ADD CONSTRAINT foo3 UNIQUE NULLS NOT DISTINCT (fld), ADD CONSTRAINT everything UNIQUE NULLS DISTINCT (fld, col) INCLUDE (two, three) WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspc, ADD CONSTRAINT pk PRIMARY KEY (fld, col) INCLUDE (four) WITH (ff=auto, gg=stuff) USING INDEX TABLESPACE tblspc; -- Test SET/RESET actions on both table and column ALTER TABLE foo SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC); ALTER TABLE foo RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC); ALTER TABLE foo ALTER COLUMN baz SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC); ALTER TABLE foo ALTER COLUMN baz RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC); -- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints -- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage (adapted for ALTER TABLE) ALTER TABLE reservation ADD EXCLUDE USING gist (during WITH &&); ALTER TABLE room_reservation ADD CONSTRAINT cons EXCLUDE USING gist (room WITH =, during WITH &&); -- all the gnarly options: not every option is valid, but this will parse successfully on PG 15. ALTER TABLE no_using ADD EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT; ALTER TABLE many_options ADD EXCLUDE USING gist ( one WITH =, nulls_opclass nulls WITH =, nulls_last NULLS LAST WITH =, two COLLATE "en-US" opclass (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) ASC NULLS FIRST WITH =, (two + 5) WITH =, myfunc(a, b) WITH =, myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =, only_opclass opclass WITH =, desc_order DESC WITH = ) INCLUDE (a, b) WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC) USING INDEX TABLESPACE tblspc WHERE (field != 'def') DEFERRABLE NOT VALID INITIALLY DEFERRED; sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_table.yml000066400000000000000000001762261503426445100246530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
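# Illustration only (an assumed analogy, not generated content): the `_hash`
# below acts as a checksum guard over the generated body — conceptually like
# Python's hashlib.sha256(body_bytes).hexdigest(), which also yields a
# 64-character hex digest — but the exact input and algorithm SQLFluff uses
# are internal to its test suite and are an assumption here.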
_hash: 098a51ee4994b04f7bf36fb971f45cc33acd908bbf261ef50faca80d0199026d file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: address - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: measurements - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: mtime - data_type: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone - column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: now function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: transactions - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: status - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "'old'" - comma: ',' - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: status - keyword: SET - keyword: default - quoted_literal: "'current'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: address - keyword: RESTRICT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: address - keyword: TYPE - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '80' end_bracket: ) - comma: ',' - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: name - keyword: TYPE - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: foo_timestamp - keyword: SET - keyword: DATA - keyword: TYPE - data_type: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone - keyword: USING - expression: - datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'epoch'" - binary_operator: + - column_reference: naked_identifier: foo_timestamp - binary_operator: '*' - datetime_literal: datetime_type_identifier: keyword: interval quoted_literal: "'1 second'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: foo_timestamp - keyword: DROP - keyword: DEFAULT - 
comma: ',' - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: foo_timestamp - keyword: TYPE - data_type: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone - keyword: USING - expression: - datetime_literal: datetime_type_identifier: keyword: timestamp quoted_literal: "'epoch'" - binary_operator: + - column_reference: naked_identifier: foo_timestamp - binary_operator: '*' - datetime_literal: datetime_type_identifier: keyword: interval quoted_literal: "'1 second'" - comma: ',' - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: foo_timestamp - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: now function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: 
function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: + - numeric_literal: '3' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: + - numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: 
mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: + - numeric_literal: '3' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: + - numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: mytable - alter_table_action_segment: - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: address - keyword: TO - column_reference: naked_identifier: city - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - keyword: RENAME - keyword: TO - table_reference: naked_identifier: suppliers - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - keyword: RENAME - keyword: CONSTRAINT - parameter: zipchk - keyword: TO - parameter: zip_check - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: street - keyword: SET - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: street - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - 
alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: CHECK - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: char_length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zipcode end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: CHECK - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: char_length function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zipcode end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) - keyword: 'NO' - keyword: INHERIT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: DROP - keyword: CONSTRAINT - parameter: zipchk - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: DROP - keyword: CONSTRAINT - parameter: '"zipchk"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: ONLY - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: DROP - keyword: CONSTRAINT - parameter: zipchk - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: distfk - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: distfk - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: MATCH - keyword: FULL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: distfk - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: 
RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: distfk - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: NOT - keyword: VALID - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: VALIDATE - keyword: CONSTRAINT - parameter: distfk - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: dist_id_zipcode_key - keyword: UNIQUE - bracketed: - start_bracket: ( - column_reference: naked_identifier: dist_id - comma: ',' - column_reference: naked_identifier: zipcode - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: dist_id end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: fasttablespace - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: SET relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: parameter_1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: SET relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: parameter_1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: SET relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: parameter_1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: parameter_2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . 
- naked_identifier: distributors - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: yourschema - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: DROP - keyword: CONSTRAINT - parameter: distributors_pkey - comma: ',' - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: distributors_pkey - keyword: PRIMARY - keyword: KEY - keyword: USING - keyword: INDEX - index_reference: naked_identifier: dist_id_temp_idx - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: measurement - keyword: ATTACH - keyword: PARTITION - parameter: measurement_y2016m07 - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: cities - keyword: ATTACH - keyword: PARTITION - parameter: cities_ab - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: orders - keyword: ATTACH - keyword: PARTITION - parameter: orders_p4 - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '4' - comma: ',' - keyword: REMAINDER - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: cities - keyword: ATTACH - keyword: PARTITION - parameter: cities_partdef - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: measurement - keyword: DETACH - keyword: PARTITION - parameter: measurement_y2015m12 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: measurement - keyword: DETACH - keyword: PARTITION - parameter: measurement_y2021m10 - keyword: CONCURRENTLY - keyword: FINALIZE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: landing - dot: . - naked_identifier: workorderhistory - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: workorder_id_foreign_key - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: workorderid end_bracket: ) - keyword: REFERENCES - table_reference: - naked_identifier: landing - dot: . 
- naked_identifier: workorder - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: foo - data_type: keyword: TEXT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: obm_buildings - alter_table_action_segment: - keyword: OWNER - keyword: to - parameter: postgres - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: street - keyword: ADD - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - alter_sequence_options_segment: keyword: INCREMENT numeric_literal: '4' - alter_sequence_options_segment: - keyword: 'NO' - keyword: MAXVALUE - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: street - keyword: SET - alter_sequence_options_segment: - keyword: RESTART - keyword: WITH - numeric_literal: '3' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: distributors - alter_table_action_segment: keyword: ADD column_reference: naked_identifier: my_column data_type: keyword: int column_constraint_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - alter_sequence_options_segment: keyword: CACHE numeric_literal: '3' - alter_sequence_options_segment: keyword: MAXVALUE numeric_literal: '63' - alter_sequence_options_segment: - keyword: OWNED - keyword: BY - keyword: NONE - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: test - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: '"ID"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: test - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: ID - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - keyword: ONLY - table_reference: - naked_identifier: public - dot: . - naked_identifier: test - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: CURRENT_ROLE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: history - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: id - keyword: ADD - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: IDENTITY - bracketed: start_bracket: ( alter_sequence_options_segment: - keyword: SEQUENCE - keyword: NAME - sequence_reference: - naked_identifier: public - dot: . - naked_identifier: history_id_seq end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tbl - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: nulls_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - comma: ',' - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: nulls_not_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: NOT - keyword: DISTINCT - comma: ',' - alter_table_action_segment: keyword: ADD column_reference: naked_identifier: everything data_type: keyword: text column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: spc - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tbl - alter_table_action_segment: - keyword: ADD - column_reference: naked_identifier: pk - data_type: keyword: text - column_constraint_segment: keyword: DEFAULT quoted_literal: "'hello'" - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspace - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: tbl - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: foo1 - keyword: UNIQUE - bracketed: - start_bracket: ( - column_reference: naked_identifier: fld - comma: ',' - column_reference: naked_identifier: col - end_bracket: ) - comma: ',' - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: foo2 - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - bracketed: start_bracket: ( column_reference: naked_identifier: fld end_bracket: ) - comma: ',' - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: foo3 - keyword: UNIQUE - keyword: NULLS - keyword: NOT - keyword: 
DISTINCT - bracketed: start_bracket: ( column_reference: naked_identifier: fld end_bracket: ) - comma: ',' - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: everything - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - bracketed: - start_bracket: ( - column_reference: naked_identifier: fld - comma: ',' - column_reference: naked_identifier: col - end_bracket: ) - index_parameters: - keyword: INCLUDE - bracketed: - start_bracket: ( - column_reference: naked_identifier: two - comma: ',' - column_reference: naked_identifier: three - end_bracket: ) - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspc - comma: ',' - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: pk - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: fld - comma: ',' - column_reference: naked_identifier: col - end_bracket: ) - index_parameters: - keyword: INCLUDE - bracketed: start_bracket: ( column_reference: naked_identifier: four end_bracket: ) - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: - properties_naked_identifier: ff - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: auto - comma: ',' - definition_parameter: - properties_naked_identifier: gg - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: stuff - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspc - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: keyword: SET relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . 
- properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: keyword: RESET relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: baz - keyword: SET - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: foo - alter_table_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: baz - keyword: RESET - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . 
- properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: reservation - alter_table_action_segment: keyword: ADD table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: room_reservation - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: cons - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: room keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: no_using - alter_table_action_segment: keyword: ADD table_constraint: - keyword: EXCLUDE - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: field keyword: WITH comparison_operator: raw_comparison_operator: '=' end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - keyword: 'NO' - keyword: INHERIT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: many_options - alter_table_action_segment: keyword: ADD table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: one keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_opclass index_element_options: operator_class_reference: naked_identifier: nulls keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_last index_element_options: - keyword: NULLS - keyword: LAST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: two index_element_options: - keyword: COLLATE - collation_reference: quoted_identifier: '"en-US"' - operator_class_reference: naked_identifier: opclass - relation_options: 
bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: ASC - keyword: NULLS - keyword: FIRST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: bracketed: start_bracket: ( expression: column_reference: naked_identifier: two binary_operator: + numeric_literal: '5' end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc_opclass function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) index_element_options: operator_class_reference: naked_identifier: fop relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: only_opclass index_element_options: operator_class_reference: naked_identifier: opclass keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: desc_order index_element_options: keyword: DESC keyword: WITH comparison_operator: raw_comparison_operator: '=' - end_bracket: ) - index_parameters: - keyword: INCLUDE - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: idx_num comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - definition_parameter: properties_naked_identifier: idx_str comparison_operator: raw_comparison_operator: '=' quoted_literal: "'idx_value'" - comma: ',' - 
definition_parameter: - properties_naked_identifier: idx_kw - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: DESC - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspc - keyword: WHERE - bracketed: start_bracket: ( expression: column_reference: naked_identifier: field comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' quoted_literal: "'def'" end_bracket: ) - keyword: DEFERRABLE - keyword: NOT - keyword: VALID - keyword: INITIALLY - keyword: DEFERRED - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_trigger.sql000066400000000000000000000004721503426445100252120ustar00rootroot00000000000000
ALTER TRIGGER foo ON table_name RENAME TO new_name;
ALTER TRIGGER foo ON table_name DEPENDS ON EXTENSION extension_name;
ALTER TRIGGER foo ON table_name NO DEPENDS ON EXTENSION extension_name;
ALTER TRIGGER emp_stamp ON emp RENAME TO emp_track_chgs;
ALTER TRIGGER emp_stamp ON emp DEPENDS ON EXTENSION emplib;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_trigger.yml000066400000000000000000000042331503426445100252130ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 13e3b4d4c698c500750e8957a91ac71d8ee8d711d531847e9b197400f95fd866
file:
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: RENAME
    - keyword: TO
    - trigger_reference:
        naked_identifier: new_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: extension_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: 'NO'
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: extension_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: emp_stamp
    - keyword: 'ON'
    - table_reference:
        naked_identifier: emp
    - keyword: RENAME
    - keyword: TO
    - trigger_reference:
        naked_identifier: emp_track_chgs
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: emp_stamp
    - keyword: 'ON'
    - table_reference:
        naked_identifier: emp
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: emplib
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_type.sql000066400000000000000000000011421503426445100245230ustar00rootroot00000000000000
-- https://www.postgresql.org/docs/current/sql-altertype.html
ALTER TYPE foo RENAME TO bar;
ALTER TYPE foo OWNER TO CURRENT_USER;
ALTER TYPE foo OWNER TO new_owner;
ALTER TYPE foo SET SCHEMA new_schema;
ALTER TYPE compfoo ADD ATTRIBUTE f3 int, DROP ATTRIBUTE IF EXISTS f4, ALTER ATTRIBUTE f5 TYPE int;
ALTER TYPE compfoo RENAME ATTRIBUTE f6 TO f7;
ALTER TYPE colors ADD VALUE 'orange' AFTER 'red';
ALTER TYPE foo ADD VALUE 'baz';
ALTER TYPE foo ADD VALUE 'qux' BEFORE 'baz';
ALTER TYPE foo ADD VALUE 'quux' AFTER 'baz';
ALTER TYPE financial.reporting_statuses RENAME VALUE 'partially' TO 'partially-reported';
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_type.yml000066400000000000000000000073401503426445100245330ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2a621393789f616aeff908fe9e6dd6481829a9b515fe3fdeb69f637c4a1ef7c9
file:
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: bar
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: OWNER
    - keyword: TO
    - keyword: CURRENT_USER
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: OWNER
    - keyword: TO
    - object_reference:
        naked_identifier: new_owner
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: SET
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: new_schema
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: compfoo
    - keyword: ADD
    - keyword: ATTRIBUTE
    - column_reference:
        naked_identifier: f3
    - data_type:
        keyword: int
    - comma: ','
    - keyword: DROP
    - keyword: ATTRIBUTE
    - keyword: IF
    - keyword: EXISTS
    - column_reference:
        naked_identifier: f4
    - comma: ','
    - keyword: ALTER
    - keyword: ATTRIBUTE
    - column_reference:
        naked_identifier: f5
    - keyword: TYPE
    - data_type:
        keyword: int
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: compfoo
    - keyword: RENAME
    - keyword: ATTRIBUTE
    - column_reference:
        naked_identifier: f6
    - keyword: TO
    - column_reference:
        naked_identifier: f7
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: colors
    - keyword: ADD
    - keyword: VALUE
    - quoted_literal: "'orange'"
    - keyword: AFTER
    - quoted_literal: "'red'"
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: ADD
    - keyword: VALUE
    - quoted_literal: "'baz'"
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: ADD
    - keyword: VALUE
    - quoted_literal: "'qux'"
    - keyword: BEFORE
    - quoted_literal: "'baz'"
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
    - keyword: ADD
    - keyword: VALUE
    - quoted_literal: "'quux'"
    - keyword: AFTER
    - quoted_literal: "'baz'"
- statement_terminator: ;
- statement:
    alter_type_statement:
    - keyword: ALTER
    - keyword: TYPE
    - object_reference:
      - naked_identifier: financial
      - dot: .
      - naked_identifier: reporting_statuses
    - keyword: RENAME
    - keyword: VALUE
    - quoted_literal: "'partially'"
    - keyword: TO
    - quoted_literal: "'partially-reported'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_view.sql000066400000000000000000000053061503426445100245220ustar00rootroot00000000000000
-- issue:2094
ALTER VIEW myview ALTER date_column SET DEFAULT NOW();
ALTER VIEW myview ALTER int_column SET DEFAULT 1;
ALTER VIEW myview ALTER text_column SET DEFAULT 'value';
ALTER VIEW myview ALTER bool_column SET DEFAULT false;
ALTER VIEW myview ALTER other_column SET DEFAULT other_value;
ALTER VIEW myview ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
ALTER VIEW myview ALTER other_column SET DEFAULT a_function(a_parameter);
ALTER VIEW myview ALTER other_column SET DEFAULT a_function('a_parameter');
ALTER VIEW myview ALTER other_column DROP DEFAULT;
ALTER VIEW IF EXISTS myview ALTER date_column SET DEFAULT NOW();
ALTER VIEW IF EXISTS myview ALTER int_column SET DEFAULT 1;
ALTER VIEW IF EXISTS myview ALTER text_column SET DEFAULT 'value';
ALTER VIEW IF EXISTS myview ALTER bool_column SET DEFAULT false;
ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT other_value;
ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT a_function(a_parameter);
ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT a_function('a_parameter');
ALTER VIEW IF EXISTS myview ALTER other_column DROP DEFAULT;
ALTER VIEW myview OWNER TO baz_role;
ALTER VIEW myview OWNER TO "baz-role";
ALTER VIEW myview OWNER TO CURRENT_ROLE;
ALTER VIEW myview OWNER TO CURRENT_USER;
ALTER VIEW myview OWNER TO SESSION_USER;
ALTER VIEW IF EXISTS myview OWNER TO baz_role;
ALTER VIEW IF EXISTS myview OWNER TO "baz-role";
ALTER VIEW IF EXISTS myview OWNER TO CURRENT_ROLE;
ALTER VIEW IF EXISTS myview OWNER TO CURRENT_USER;
ALTER VIEW IF EXISTS myview OWNER TO SESSION_USER;
ALTER VIEW myview RENAME column_name TO new_column_name;
ALTER VIEW myview RENAME COLUMN column_name TO new_column_name;
ALTER VIEW IF EXISTS myview RENAME column_name TO new_column_name;
ALTER VIEW IF EXISTS myview RENAME COLUMN column_name TO new_column_name;
ALTER VIEW myview RENAME TO new_name;
ALTER VIEW IF EXISTS myview RENAME TO new_name;
ALTER VIEW myview SET SCHEMA new_schema;
ALTER VIEW IF EXISTS myview SET SCHEMA new_schema;
ALTER VIEW myview SET ( view_option_name );
ALTER VIEW myview SET ( view_option_name = 1);
ALTER VIEW myview SET ( view_option_name = 1, view_option_name2 = 'value', view_option_name3, view_option_name4 = false);
ALTER VIEW IF EXISTS myview SET ( view_option_name );
ALTER VIEW IF EXISTS myview SET ( view_option_name = 1);
ALTER VIEW IF EXISTS myview SET ( view_option_name, view_option_name2 = 1, view_option_name3);
ALTER VIEW myview RESET ( view_option_name );
ALTER VIEW myview RESET ( view_option_name, view_option_name2 );
ALTER VIEW IF EXISTS myview RESET ( view_option_name );
ALTER VIEW IF EXISTS myview RESET ( view_option_name, view_option_name2 );
sqlfluff-3.4.2/test/fixtures/dialects/postgres/alter_view.yml000066400000000000000000000413171503426445100245260ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 75bd79907979e6ea7aebeb6f7d48fc286a783a67d2656f26fe0d620845221e4d file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - 
statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: naked_identifier: baz_role - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: quoted_identifier: '"baz-role"' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: naked_identifier: CURRENT_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - 
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: OWNER
    - keyword: TO
    - object_reference:
        naked_identifier: baz_role
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: OWNER
    - keyword: TO
    - object_reference:
        quoted_identifier: '"baz-role"'
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: OWNER
    - keyword: TO
    - keyword: CURRENT_ROLE
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: OWNER
    - keyword: TO
    - object_reference:
        naked_identifier: CURRENT_USER
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: OWNER
    - keyword: TO
    - keyword: SESSION_USER
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - column_reference:
        naked_identifier: column_name
    - keyword: TO
    - column_reference:
        naked_identifier: new_column_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        naked_identifier: column_name
    - keyword: TO
    - column_reference:
        naked_identifier: new_column_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - column_reference:
        naked_identifier: column_name
    - keyword: TO
    - column_reference:
        naked_identifier: new_column_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        naked_identifier: column_name
    - keyword: TO
    - column_reference:
        naked_identifier: new_column_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - keyword: TO
    - table_reference:
        naked_identifier: new_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: RENAME
    - keyword: TO
    - table_reference:
        naked_identifier: new_name
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: new_schema
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: new_schema
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '1'
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
      - start_bracket: (
      - parameter: view_option_name
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '1'
      - comma: ','
      - parameter: view_option_name2
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'value'"
      - comma: ','
      - parameter: view_option_name3
      - comma: ','
      - parameter: view_option_name4
      - comparison_operator:
          raw_comparison_operator: '='
      - boolean_literal: 'false'
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '1'
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: SET
    - bracketed:
      - start_bracket: (
      - parameter: view_option_name
      - comma: ','
      - parameter: view_option_name2
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '1'
      - comma: ','
      - parameter: view_option_name3
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: RESET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - table_reference:
        naked_identifier: myview
    - keyword: RESET
    - bracketed:
      - start_bracket: (
      - parameter: view_option_name
      - comma: ','
      - parameter: view_option_name2
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: RESET
    - bracketed:
        start_bracket: (
        parameter: view_option_name
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_view_statement:
    - keyword: ALTER
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: myview
    - keyword: RESET
    - bracketed:
      - start_bracket: (
      - parameter: view_option_name
      - comma: ','
      - parameter: view_option_name2
      - end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/analyze.sql

ANALYZE foo;
ANALYSE foo;
ANALYZE VERBOSE foo;
ANALYZE (VERBOSE, SKIP_LOCKED) foo;
ANALYZE (VERBOSE FALSE, SKIP_LOCKED TRUE) foo;
ANALYZE (SKIP_LOCKED, VERBOSE FALSE) foo;
ANALYZE VERBOSE foo (bar, bat);
ANALYZE foo (bar, bat), foo2 (bar2, bat2);

sqlfluff-3.4.2/test/fixtures/dialects/postgres/analyze.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c6b6c8641a16f20ac8898a51c1dd2ff9b1f1c79eef7aaac5cf250133ffb09727
file:
- statement:
    analyze_statement:
      keyword: ANALYZE
      table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
      keyword: ANALYSE
      table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
    - keyword: ANALYZE
    - keyword: VERBOSE
    - table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
      keyword: ANALYZE
      bracketed:
      - start_bracket: (
      - keyword: VERBOSE
      - comma: ','
      - keyword: SKIP_LOCKED
      - end_bracket: )
      table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
      keyword: ANALYZE
      bracketed:
      - start_bracket: (
      - keyword: VERBOSE
      - boolean_literal: 'FALSE'
      - comma: ','
      - keyword: SKIP_LOCKED
      - boolean_literal: 'TRUE'
      - end_bracket: )
      table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
      keyword: ANALYZE
      bracketed:
      - start_bracket: (
      - keyword: SKIP_LOCKED
      - comma: ','
      - keyword: VERBOSE
      - boolean_literal: 'FALSE'
      - end_bracket: )
      table_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    analyze_statement:
    - keyword: ANALYZE
    - keyword: VERBOSE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: bar
      - comma: ','
      - column_reference:
          naked_identifier: bat
      - end_bracket: )
- statement_terminator: ;
- statement:
    analyze_statement:
    - keyword: ANALYZE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: bar
      - comma: ','
      - column_reference:
          naked_identifier: bat
      - end_bracket: )
    - comma: ','
    - table_reference:
        naked_identifier: foo2
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: bar2
      - comma: ','
      - column_reference:
          naked_identifier: bat2
      - end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/array.sql

SELECT ARRAY[1,2] || ARRAY[3,4];
SELECT ARRAY[['meeting', 'lunch'], ['training', 'presentation']];
CREATE TABLE sal_emp (
    name text,
    pay_by_quarter integer[],
    schedule text[][]
);
CREATE TABLE tictactoe (
    squares integer[3][3]
);
SELECT * FROM sal_emp WHERE pay_by_quarter[1] = 10000 OR
    pay_by_quarter[2] = 10000 OR
    pay_by_quarter[3] = 10000 OR
    pay_by_quarter[4] = 10000;
INSERT INTO sal_emp VALUES ('Bill',
    ARRAY[10000, 10000, 10000, 10000],
    ARRAY[['meeting', 'lunch'], ['training', 'presentation']]);
INSERT INTO sal_emp VALUES ('Carol',
    ARRAY[20000, 25000, 25000, 25000],
    ARRAY[['breakfast', 'consulting'], ['meeting', 'lunch']]);
SELECT name FROM sal_emp WHERE pay_by_quarter[1] <> pay_by_quarter[2];
SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill';
UPDATE sal_emp SET pay_by_quarter[4] = 15000 WHERE name = 'Bill';
UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}' WHERE name = 'Carol';
SELECT array_dims(ARRAY[1,2] || ARRAY[3,4,5]);
SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]);
SELECT ARRAY[1, 2] || '{3, 4}';
SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'mon');
SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
    FROM (SELECT '[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}'::int[] AS f1) AS ss;
SELECT
'{Hello,World}'::_text AS text_array;
SELECT ARRAY['A', 'B', 'C']::_TEXT;
SELECT
    SUM(CASE WHEN direction = 'forward' THEN unit ELSE 0 END
    ) * (MAX(ARRAY[id, vertical]))[2]
FROM direction_with_vertical_change;
-- More advanced cases with expressions and missing slice start/end when accessing
SELECT a[:], b[:1], c[2:], d[2:3];
SELECT a[1+2:3+4], b[5+6];

sqlfluff-3.4.2/test/fixtures/dialects/postgres/array.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6b4fba2da58dafdeec56d824a628616cdc58954bfcf1a626c31cccd35bafcb28
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
          - typed_array_literal:
              array_type:
                keyword: ARRAY
              array_literal:
              - start_square_bracket: '['
              - numeric_literal: '1'
              - comma: ','
              - numeric_literal: '2'
              - end_square_bracket: ']'
          - binary_operator:
            - pipe: '|'
            - pipe: '|'
          - typed_array_literal:
              array_type:
                keyword: ARRAY
              array_literal:
              - start_square_bracket: '['
              - numeric_literal: '3'
              - comma: ','
              - numeric_literal: '4'
              - end_square_bracket: ']'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          typed_array_literal:
            array_type:
              keyword: ARRAY
            array_literal:
            - start_square_bracket: '['
            - array_literal:
              - start_square_bracket: '['
              - quoted_literal: "'meeting'"
              - comma: ','
              - quoted_literal: "'lunch'"
              - end_square_bracket: ']'
            - comma: ','
            - array_literal:
              - start_square_bracket: '['
              - quoted_literal: "'training'"
              - comma: ','
              - quoted_literal: "'presentation'"
              - end_square_bracket: ']'
            - end_square_bracket: ']'
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: sal_emp
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: text
      - comma: ','
      - column_reference:
          naked_identifier: pay_by_quarter
      - data_type:
          keyword: integer
          start_square_bracket: '['
          end_square_bracket: ']'
      - comma: ','
      - column_reference:
          naked_identifier: schedule
      - data_type:
        - keyword: text
        - start_square_bracket: '['
        - end_square_bracket: ']'
        - start_square_bracket: '['
        - end_square_bracket: ']'
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: tictactoe
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: squares
        data_type:
        - keyword: integer
        - start_square_bracket: '['
        - expression:
            numeric_literal: '3'
        - end_square_bracket: ']'
        - start_square_bracket: '['
        - expression:
            numeric_literal: '3'
        - end_square_bracket: ']'
        end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: sal_emp
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: pay_by_quarter
        - array_accessor:
            start_square_bracket: '['
            numeric_literal: '1'
            end_square_bracket: ']'
        - comparison_operator:
            raw_comparison_operator: '='
        - numeric_literal: '10000'
        - binary_operator: OR
column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - binary_operator: OR - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - binary_operator: OR - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: sal_emp - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Bill'" - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - quoted_literal: "'training'" - comma: ',' - quoted_literal: "'presentation'" - end_square_bracket: ']' - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: sal_emp - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Carol'" - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '20000' - comma: ',' - numeric_literal: '25000' - comma: ',' - numeric_literal: '25000' - comma: ',' - numeric_literal: '25000' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - quoted_literal: "'breakfast'" - comma: ',' - quoted_literal: "'consulting'" - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sal_emp where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: schedule 
- array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' - array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '1' - end_square_bracket: ']' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sal_emp where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Bill'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: sal_emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: pay_by_quarter array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' comparison_operator: raw_comparison_operator: '=' numeric_literal: '15000' where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Bill'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: sal_emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: pay_by_quarter array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{27000,27000}'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Carol'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: array_dims function_contents: bracketed: start_bracket: ( expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: array_dims function_contents: bracketed: start_bracket: ( expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_square_bracket: ']' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' binary_operator: - pipe: '|' - pipe: '|' quoted_literal: 
"'{3, 4}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: array_position function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'sun'" - comma: ',' - quoted_literal: "'mon'" - comma: ',' - quoted_literal: "'tue'" - comma: ',' - quoted_literal: "'wed'" - comma: ',' - quoted_literal: "'thu'" - comma: ',' - quoted_literal: "'fri'" - comma: ',' - quoted_literal: "'sat'" - end_square_bracket: ']' - comma: ',' - expression: quoted_literal: "'mon'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - column_reference: naked_identifier: f1 - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: sign_indicator: '-' numeric_literal: '2' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: e1 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: f1 - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: sign_indicator: '-' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '5' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: e2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}'" casting_operator: '::' data_type: keyword: int start_square_bracket: '[' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: f1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ss - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{Hello,World}'" casting_operator: '::' data_type: data_type_identifier: _text alias_expression: alias_operator: keyword: AS naked_identifier: text_array - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'A'" - comma: ',' - quoted_literal: "'B'" - comma: ',' - quoted_literal: "'C'" - end_square_bracket: ']' casting_operator: '::' data_type: data_type_identifier: _TEXT - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: direction comparison_operator: raw_comparison_operator: '=' quoted_literal: "'forward'" - keyword: THEN - expression: column_reference: naked_identifier: unit - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END end_bracket: ) 
            binary_operator: '*'
            bracketed:
              start_bracket: (
              expression:
                function:
                  function_name:
                    function_name_identifier: MAX
                  function_contents:
                    bracketed:
                      start_bracket: (
                      expression:
                        typed_array_literal:
                          array_type:
                            keyword: ARRAY
                          array_literal:
                          - start_square_bracket: '['
                          - column_reference:
                              naked_identifier: id
                          - comma: ','
                          - column_reference:
                              naked_identifier: vertical
                          - end_square_bracket: ']'
                      end_bracket: )
              end_bracket: )
            array_accessor:
              start_square_bracket: '['
              numeric_literal: '2'
              end_square_bracket: ']'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: direction_with_vertical_change
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: a
            array_accessor:
              start_square_bracket: '['
              slice: ':'
              end_square_bracket: ']'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: b
            array_accessor:
              start_square_bracket: '['
              slice: ':'
              numeric_literal: '1'
              end_square_bracket: ']'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: c
            array_accessor:
              start_square_bracket: '['
              numeric_literal: '2'
              slice: ':'
              end_square_bracket: ']'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: d
            array_accessor:
            - start_square_bracket: '['
            - numeric_literal: '2'
            - slice: ':'
            - numeric_literal: '3'
            - end_square_bracket: ']'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: a
            array_accessor:
            - start_square_bracket: '['
            - expression:
              - numeric_literal: '1'
              - binary_operator: +
              - numeric_literal: '2'
            - slice: ':'
            - expression:
              - numeric_literal: '3'
              - binary_operator: +
              - numeric_literal: '4'
            - end_square_bracket: ']'
      - comma: ','
      - select_clause_element:
          expression:
            column_reference:
              naked_identifier: b
            array_accessor:
              start_square_bracket: '['
              expression:
              - numeric_literal: '5'
              - binary_operator: +
              - numeric_literal: '6'
              end_square_bracket: ']'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/bare_functions.sql

SELECT
    current_date AS col1,
    current_timestamp AS col2,
    current_time as col3,
    localtime as col4,
    localtimestamp as col5,
    current_role as col6,
    current_schema as col7,
    current_user as col8,
    session_user as col9,
    system_user as col10,
    user as col11
;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/bare_functions.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
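# For example, regeneration is run from the repository root as:
#
#     python test/generate_parse_fixture_yml.py
#
# Passing a dialect name (e.g. `python test/generate_parse_fixture_yml.py
# postgres`) to narrow the run is an assumption about the script's CLI;
# check its --help output if the argument is rejected.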
_hash: f1508c504e4eedd31dec9bc5b9e2213d2fceeb15d4707f8aaed123a7a8e3e67a
file:
  statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          bare_function: current_date
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: col1
      - comma: ','
      - select_clause_element:
          bare_function: current_timestamp
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: col2
      - comma: ','
      - select_clause_element:
          bare_function: current_time
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col3
      - comma: ','
      - select_clause_element:
          bare_function: localtime
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col4
      - comma: ','
      - select_clause_element:
          bare_function: localtimestamp
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col5
      - comma: ','
      - select_clause_element:
          bare_function: current_role
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col6
      - comma: ','
      - select_clause_element:
          bare_function: current_schema
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col7
      - comma: ','
      - select_clause_element:
          bare_function: current_user
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col8
      - comma: ','
      - select_clause_element:
          bare_function: session_user
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col9
      - comma: ','
      - select_clause_element:
          bare_function: system_user
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col10
      - comma: ','
      - select_clause_element:
          bare_function: user
          alias_expression:
            alias_operator:
              keyword: as
            naked_identifier: col11
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/call.sql

CALL do_db_maintenance();
CALL my_schema.do_db_maintenance();
call procedure_name();
call procedure_name('param1', 123);
call schema.procedure_name();
call schema.procedure_name('param1', 123);

sqlfluff-3.4.2/test/fixtures/dialects/postgres/call.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d31b2ffbf36a1430d753dcade994e36c24037ec99b446a7a026bacbe3d0a0caf
file:
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          function_name_identifier: do_db_maintenance
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: CALL
      function:
        function_name:
          naked_identifier: my_schema
          dot: .
          function_name_identifier: do_db_maintenance
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: call
      function:
        function_name:
          function_name_identifier: procedure_name
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: call
      function:
        function_name:
          function_name_identifier: procedure_name
        function_contents:
          bracketed:
          - start_bracket: (
          - expression:
              quoted_literal: "'param1'"
          - comma: ','
          - expression:
              numeric_literal: '123'
          - end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: call
      function:
        function_name:
          naked_identifier: schema
          dot: .
          function_name_identifier: procedure_name
        function_contents:
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    call_statement:
      keyword: call
      function:
        function_name:
          naked_identifier: schema
          dot: .
          function_name_identifier: procedure_name
        function_contents:
          bracketed:
          - start_bracket: (
          - expression:
              quoted_literal: "'param1'"
          - comma: ','
          - expression:
              numeric_literal: '123'
          - end_bracket: )
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/cast_with_whitespaces.sql

-- postgres_cast_with_whitespaces.sql
/*
Several valid queries where there is whitespace
surrounding the Postgres cast operator (::)
*/
-- query from https://github.com/sqlfluff/sqlfluff/issues/2720
SELECT amount_of_honey :: FLOAT
FROM bear_inventory;
-- should be able to support an arbitrary amount of whitespace
SELECT amount_of_honey      ::      FLOAT
FROM bear_inventory;
SELECT amount_of_honey:: FLOAT
FROM bear_inventory;
SELECT amount_of_honey ::FLOAT
FROM bear_inventory;
-- should support a wide variety of typecasts
SELECT amount_of_honey :: time
FROM bear_inventory;
SELECT amount_of_honey :: text
FROM bear_inventory;
SELECT amount_of_honey :: VARCHAR( 512 )
FROM bear_inventory;
SELECT amount_of_honey :: TIMESTAMPTZ
FROM bear_inventory;
SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE
FROM bear_inventory;
-- should support casts with arbitrary amount of whitespace in join statements
SELECT bi.amount_of_honey
FROM bear_inventory bi
LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512)
WHERE fc.favorite_cola = 'RC Cola';

sqlfluff-3.4.2/test/fixtures/dialects/postgres/cast_with_whitespaces.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9ffe4672dbafb711fcec2fbb99a631e55248b94fef39b807338c7544180ff1bc file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE 
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: bear_inventory
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: bi
          - dot: .
          - naked_identifier: amount_of_honey
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: bear_inventory
            alias_expression:
              naked_identifier: bi
          join_clause:
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: favorite_cola
              alias_expression:
                naked_identifier: fc
          - join_on_condition:
              keyword: 'ON'
              expression:
              - cast_expression:
                  column_reference:
                  - naked_identifier: fc
                  - dot: .
                  - naked_identifier: bear_id
                  casting_operator: '::'
                  data_type:
                    keyword: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        numeric_literal: '512'
                        end_bracket: )
              - comparison_operator:
                  raw_comparison_operator: '='
              - cast_expression:
                  column_reference:
                  - naked_identifier: bi
                  - dot: .
                  - naked_identifier: bear_id
                  casting_operator: '::'
                  data_type:
                    keyword: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        numeric_literal: '512'
                        end_bracket: )
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
          - naked_identifier: fc
          - dot: .
          - naked_identifier: favorite_cola
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: "'RC Cola'"
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/cluster.sql

CLUSTER employees USING employees_ind;
CLUSTER VERBOSE employees USING employees_ind;
CLUSTER employees;
CLUSTER;
CLUSTER VERBOSE;
CLUSTER index_name ON table_name;
CLUSTER public.temp_table USING idx_temp_table_ra;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/cluster.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0d16521927fc3186064bbef4aa28fc13853179831ee346d6cbc2186d1bb483a7
file:
- statement:
    cluster_statement:
    - keyword: CLUSTER
    - table_reference:
        naked_identifier: employees
    - keyword: USING
    - index_reference:
        naked_identifier: employees_ind
- statement_terminator: ;
- statement:
    cluster_statement:
    - keyword: CLUSTER
    - keyword: VERBOSE
    - table_reference:
        naked_identifier: employees
    - keyword: USING
    - index_reference:
        naked_identifier: employees_ind
- statement_terminator: ;
- statement:
    cluster_statement:
      keyword: CLUSTER
      table_reference:
        naked_identifier: employees
- statement_terminator: ;
- statement:
    cluster_statement:
      keyword: CLUSTER
- statement_terminator: ;
- statement:
    cluster_statement:
    - keyword: CLUSTER
    - keyword: VERBOSE
- statement_terminator: ;
- statement:
    cluster_statement:
    - keyword: CLUSTER
    - index_reference:
        naked_identifier: index_name
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
- statement_terminator: ;
- statement:
    cluster_statement:
    - keyword: CLUSTER
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: temp_table
    - keyword: USING
    - index_reference:
        naked_identifier: idx_temp_table_ra
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/comment_on.sql

-- This test file includes all examples from the Postgres docs,
-- but not all are implemented so some are commented out for now.
-- See https://www.postgresql.org/docs/13/sql-comment.html
COMMENT ON TABLE mytable IS 'This is my table.';
COMMENT ON TABLE mytable IS NULL;
COMMENT ON ACCESS METHOD gin IS 'GIN index access method';
COMMENT ON AGGREGATE my_aggregate (double precision) IS 'Computes sample variance';
COMMENT ON CAST (text AS int4) IS 'Allow casts from text to int4';
COMMENT ON COLLATION "fr_CA" IS 'Canadian French';
COMMENT ON COLUMN my_table.my_column IS 'Employee ID number';
COMMENT ON CONVERSION my_conv IS 'Conversion to UTF8';
COMMENT ON CONSTRAINT bar_col_cons ON bar IS 'Constrains column col';
COMMENT ON CONSTRAINT dom_col_constr ON DOMAIN dom IS 'Constrains col of domain';
COMMENT ON DATABASE my_database IS 'Development Database';
COMMENT ON DOMAIN my_domain IS 'Email Address Domain';
COMMENT ON EVENT TRIGGER abort_ddl IS 'Aborts all DDL commands';
COMMENT ON EXTENSION hstore IS 'implements the hstore data type';
COMMENT ON FOREIGN DATA WRAPPER mywrapper IS 'my foreign data wrapper';
COMMENT ON FOREIGN TABLE my_foreign_table IS 'Employee Information in other database';
COMMENT ON FUNCTION my_function (timestamp) IS 'Returns Roman Numeral';
comment on function function1 is 'comment';
comment on function function2() is 'comment';
COMMENT ON INDEX my_index IS 'Enforces uniqueness on employee ID';
COMMENT ON LANGUAGE plpython IS 'Python support for stored procedures';
--COMMENT ON LARGE OBJECT 346344 IS 'Planning document';
COMMENT ON MATERIALIZED VIEW my_matview IS 'Summary of order history';
--COMMENT ON OPERATOR ^ (text, text) IS 'Performs intersection of two texts';
--COMMENT ON OPERATOR - (NONE, integer) IS 'Unary minus';
--COMMENT ON OPERATOR CLASS int4ops USING btree IS '4 byte integer operators for btrees';
--COMMENT ON OPERATOR FAMILY integer_ops USING btree IS 'all integer operators for btrees';
COMMENT ON POLICY my_policy ON mytable IS 'Filter rows by users';
COMMENT ON PROCEDURE my_proc (integer, integer) IS 'Runs a report';
comment on procedure procedure1 is 'comment';
comment on procedure procedure2() is 'comment';
COMMENT ON PUBLICATION alltables IS 'Publishes all operations on all tables';
COMMENT ON ROLE my_role IS 'Administration group for finance tables';
COMMENT ON ROUTINE my_routine (integer, integer) IS 'Runs a routine (which is a function or procedure)';
COMMENT ON RULE my_rule ON my_table IS 'Logs updates of employee records';
COMMENT ON SCHEMA my_schema IS 'Departmental data';
COMMENT ON SEQUENCE my_sequence IS 'Used to generate primary keys';
COMMENT ON SERVER myserver IS 'my foreign server';
COMMENT ON STATISTICS my_statistics IS 'Improves planner row estimations';
COMMENT ON SUBSCRIPTION alltables IS 'Subscription for all operations on all tables';
COMMENT ON TABLE my_schema.my_table IS 'Employee Information';
COMMENT ON TABLESPACE my_tablespace IS 'Tablespace for indexes';
COMMENT ON TEXT SEARCH CONFIGURATION my_config IS 'Special word filtering';
COMMENT ON TEXT SEARCH DICTIONARY swedish IS 'Snowball stemmer for Swedish language';
COMMENT ON TEXT SEARCH PARSER my_parser IS 'Splits text into words';
COMMENT ON TEXT SEARCH TEMPLATE snowball IS 'Snowball stemmer';
--COMMENT ON TRANSFORM FOR hstore LANGUAGE plpythonu IS 'Transform between hstore and Python dict';
COMMENT ON TRIGGER my_trigger ON my_table IS 'Used for RI';
COMMENT ON TYPE complex IS 'Complex number data type';
COMMENT ON VIEW my_view IS 'View of departmental costs';

sqlfluff-3.4.2/test/fixtures/dialects/postgres/comment_on.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 00b5ada20d2830d952d308896106cf7e630812ea7427d6222fe1339d23f7b411
file:
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - keyword: IS
    - quoted_literal: "'This is my table.'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - keyword: IS
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: ACCESS
    - keyword: METHOD
    - object_reference:
        naked_identifier: gin
    - keyword: IS
    - quoted_literal: "'GIN index access method'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: AGGREGATE
    - object_reference:
        naked_identifier: my_aggregate
    - bracketed:
      - start_bracket: (
      - word: double
      - word: precision
      - end_bracket: )
    - keyword: IS
    - quoted_literal: "'Computes sample variance'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: CAST
    - bracketed:
      - start_bracket: (
      - object_reference:
          naked_identifier: text
      - keyword: AS
      - object_reference:
          naked_identifier: int4
      - end_bracket: )
    - keyword: IS
    - quoted_literal: "'Allow casts from text to int4'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: COLLATION
    - object_reference:
        quoted_identifier: '"fr_CA"'
    - keyword: IS
    - quoted_literal: "'Canadian French'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: COLUMN
    - column_reference:
      - naked_identifier: my_table
      - dot: .
- naked_identifier: my_column - keyword: IS - quoted_literal: "'Employee ID number'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONVERSION - object_reference: naked_identifier: my_conv - keyword: IS - quoted_literal: "'Conversion to UTF8'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: bar_col_cons - keyword: 'ON' - object_reference: naked_identifier: bar - keyword: IS - quoted_literal: "'Constrains column col'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: dom_col_constr - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: dom - keyword: IS - quoted_literal: "'Constrains col of domain'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: DATABASE - database_reference: naked_identifier: my_database - keyword: IS - quoted_literal: "'Development Database'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: my_domain - keyword: IS - quoted_literal: "'Email Address Domain'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: EVENT - keyword: TRIGGER - object_reference: naked_identifier: abort_ddl - keyword: IS - quoted_literal: "'Aborts all DDL commands'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: hstore - keyword: IS - quoted_literal: "'implements the hstore data type'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FOREIGN - keyword: DATA - keyword: WRAPPER - object_reference: naked_identifier: mywrapper - keyword: IS - quoted_literal: "'my foreign data wrapper'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FOREIGN - keyword: TABLE - object_reference: naked_identifier: my_foreign_table - keyword: IS - quoted_literal: "'Employee Information in other database'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: my_function - function_parameter_list: bracketed: start_bracket: ( data_type: datetime_type_identifier: keyword: timestamp end_bracket: ) - keyword: IS - quoted_literal: "'Returns Roman Numeral'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: function - function_name: function_name_identifier: function1 - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: function - function_name: function_name_identifier: function2 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: INDEX - index_reference: naked_identifier: my_index - keyword: IS - quoted_literal: "'Enforces uniqueness on employee ID'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: LANGUAGE - object_reference: naked_identifier: plpython - keyword: IS - quoted_literal: "'Python support for stored 
procedures'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: my_matview - keyword: IS - quoted_literal: "'Summary of order history'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - object_reference: naked_identifier: mytable - keyword: IS - quoted_literal: "'Filter rows by users'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PROCEDURE - object_reference: naked_identifier: my_proc - bracketed: - start_bracket: ( - word: integer - comma: ',' - word: integer - end_bracket: ) - keyword: IS - quoted_literal: "'Runs a report'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: procedure1 - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: procedure2 - bracketed: start_bracket: ( end_bracket: ) - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PUBLICATION - object_reference: naked_identifier: alltables - keyword: IS - quoted_literal: "'Publishes all operations on all tables'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: my_role - keyword: IS - quoted_literal: "'Administration group for finance tables'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ROUTINE - object_reference: naked_identifier: my_routine - bracketed: - start_bracket: ( - word: integer - comma: ',' - word: integer - end_bracket: ) - keyword: IS - quoted_literal: "'Runs a routine (which is a function or procedure)'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: RULE - object_reference: naked_identifier: my_rule - keyword: 'ON' - object_reference: naked_identifier: my_table - keyword: IS - quoted_literal: "'Logs updates of employee records'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - keyword: IS - quoted_literal: "'Departmental data'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SEQUENCE - object_reference: naked_identifier: my_sequence - keyword: IS - quoted_literal: "'Used to generate primary keys'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SERVER - object_reference: naked_identifier: myserver - keyword: IS - quoted_literal: "'my foreign server'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: STATISTICS - object_reference: naked_identifier: my_statistics - keyword: IS - quoted_literal: "'Improves planner row estimations'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SUBSCRIPTION - object_reference: naked_identifier: alltables - keyword: IS - quoted_literal: "'Subscription for all operations on all tables'" - statement_terminator: ; - statement: comment_clause: - 
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TABLE
    - table_reference:
      - naked_identifier: my_schema
      - dot: .
      - naked_identifier: my_table
    - keyword: IS
    - quoted_literal: "'Employee Information'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TABLESPACE
    - object_reference:
        naked_identifier: my_tablespace
    - keyword: IS
    - quoted_literal: "'Tablespace for indexes'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TEXT
    - keyword: SEARCH
    - keyword: CONFIGURATION
    - object_reference:
        naked_identifier: my_config
    - keyword: IS
    - quoted_literal: "'Special word filtering'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TEXT
    - keyword: SEARCH
    - keyword: DICTIONARY
    - object_reference:
        naked_identifier: swedish
    - keyword: IS
    - quoted_literal: "'Snowball stemmer for Swedish language'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TEXT
    - keyword: SEARCH
    - keyword: PARSER
    - object_reference:
        naked_identifier: my_parser
    - keyword: IS
    - quoted_literal: "'Splits text into words'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TEXT
    - keyword: SEARCH
    - keyword: TEMPLATE
    - object_reference:
        naked_identifier: snowball
    - keyword: IS
    - quoted_literal: "'Snowball stemmer'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TRIGGER
    - object_reference:
        naked_identifier: my_trigger
    - keyword: 'ON'
    - object_reference:
        naked_identifier: my_table
    - keyword: IS
    - quoted_literal: "'Used for RI'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: TYPE
    - object_reference:
        naked_identifier: complex
    - keyword: IS
    - quoted_literal: "'Complex number data type'"
- statement_terminator: ;
- statement:
    comment_clause:
    - keyword: COMMENT
    - keyword: 'ON'
    - keyword: VIEW
    - table_reference:
        naked_identifier: my_view
    - keyword: IS
    - quoted_literal: "'View of departmental costs'"
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/composite_types.sql

CREATE TYPE my_type AS (
    int_ INT4,
    bool_ BOOLEAN,
    comment_ TEXT
);

SELECT ((1, true, null)::my_type).int_;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/composite_types.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
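# To inspect the tree that a fixture like this encodes, the `sqlfluff parse`
# CLI can be run against the companion SQL file, for example:
#
#     sqlfluff parse --dialect postgres \
#         test/fixtures/dialects/postgres/composite_types.sql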
_hash: 2953cebbe0daec9dac94aafed93a8feae0f0c153bf399f8956e7fa5cc36fca69
file:
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: my_type
    - keyword: AS
    - bracketed:
      - start_bracket: (
      - word: int_
      - word: INT4
      - comma: ','
      - word: bool_
      - word: BOOLEAN
      - comma: ','
      - word: comment_
      - word: TEXT
      - end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
            bracketed:
              start_bracket: (
              expression:
                cast_expression:
                  bracketed:
                  - start_bracket: (
                  - numeric_literal: '1'
                  - comma: ','
                  - boolean_literal: 'true'
                  - comma: ','
                  - null_literal: 'null'
                  - end_bracket: )
                  casting_operator: '::'
                  data_type:
                    data_type_identifier: my_type
              end_bracket: )
            semi_structured_expression:
              dot: .
              naked_identifier: int_
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/copy.sql

-- Issue #2480
COPY (Select my_col From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER, DELIMITER '#', ENCODING 'UTF8');
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"');
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FREEZE true);
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE (col1, col2));
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE *);
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_NOT_NULL (col1, col2));
COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE false);
COPY (Select * From my_table) TO STDOUT WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE true);
COPY (Select * From my_table) TO PROGRAM '/path/to/script' WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE false);
COPY my_table(col) TO '/tmp/dump.csv';
COPY my_table TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2));
COPY my_table(col1, col2) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER true);
COPY my_table(col1, col2, col3, col4) TO PROGRAM '/path/to/script' WITH (FORMAT csv, HEADER true, FREEZE);
COPY my_table(col1, col2) TO STDOUT;
COPY my_table(col1, col2) TO STDOUT WITH (FORMAT csv, HEADER true, FREEZE false);
COPY my_table TO STDOUT WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2));
COPY my_table FROM '/tmp/dump.csv';
COPY my_table FROM STDIN;
COPY my_table FROM PROGRAM '/path/to/script';
COPY my_table(col) FROM '/tmp/dump.csv';
COPY my_table(col1, col2, col3) FROM STDIN;
COPY my_table(col1, col2) FROM PROGRAM '/path/to/script';
COPY my_table(col1, col2,col3, col4) FROM PROGRAM '/path/to/script' WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2));
COPY my_table(col1, col2,col3, col4) FROM '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE *);
COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', ENCODING 'UTF8');
COPY my_table FROM STDIN WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE true);
COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"', FORCE_QUOTE *);
COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"', FORCE_QUOTE *) WHERE col1 = 'some_value';
COPY copy_statement_bug FROM stdin WITH csv header;
COPY my_table FROM STDIN WITH;
COPY my_table FROM STDIN WITH BINARY;
COPY my_table FROM STDIN WITH DELIMITER '#';
COPY my_table FROM STDIN WITH DELIMITER AS '#';
COPY my_table FROM STDIN WITH NULL 'null';
COPY my_table FROM STDIN WITH NULL AS 'null';
COPY my_table FROM STDIN WITH CSV;
COPY my_table FROM STDIN WITH CSV QUOTE '"';
COPY my_table FROM STDIN WITH CSV QUOTE AS '"';
COPY my_table FROM STDIN WITH CSV ESCAPE '\';
COPY my_table FROM STDIN WITH CSV ESCAPE AS '\';
COPY my_table FROM STDIN WITH CSV FORCE NOT NULL col1;
COPY my_table FROM STDIN WITH CSV FORCE NOT NULL col1, col2;
COPY my_table FROM '/tmp/dump.csv' WITH BINARY;
COPY my_table FROM '/tmp/dump.csv' WITH DELIMITER '#';
COPY my_table FROM '/tmp/dump.csv' WITH DELIMITER AS '#';
COPY my_table FROM '/tmp/dump.csv' WITH NULL 'null';
COPY my_table FROM '/tmp/dump.csv' WITH NULL AS 'null';
COPY my_table FROM '/tmp/dump.csv' WITH CSV;
COPY my_table FROM '/tmp/dump.csv' WITH CSV QUOTE '"';
COPY my_table FROM '/tmp/dump.csv' WITH CSV QUOTE AS '"';
COPY my_table FROM '/tmp/dump.csv' WITH CSV ESCAPE '\';
COPY my_table FROM '/tmp/dump.csv' WITH CSV ESCAPE AS '\';
COPY (SELECT * FROM country WHERE country_name LIKE 'A%') TO '/usr1/proj/bray/sql/a_list_countries.copy';
COPY my_table(col1, col2) TO STDOUT;
COPY my_table(col2) TO STDOUT;
COPY my_table(col1, col2) TO STDOUT WITH;
COPY my_table(col1, col2) TO STDOUT WITH BINARY;
COPY my_table(col1, col2) TO STDOUT WITH DELIMITER '#';
COPY my_table(col1, col2) TO STDOUT WITH DELIMITER AS '#';
COPY my_table TO STDOUT WITH NULL 'null';
COPY my_table TO STDOUT WITH NULL AS 'null';
COPY my_table(col1) TO STDOUT WITH CSV;
COPY my_table TO STDOUT WITH CSV HEADER;
COPY my_table TO STDOUT WITH CSV QUOTE '"';
COPY my_table TO STDOUT WITH CSV QUOTE AS '"';
COPY my_table TO STDOUT WITH CSV ESCAPE '\';
COPY my_table(col1, col2) TO STDOUT WITH CSV ESCAPE AS '\';
COPY my_table(col1, col2) TO STDOUT WITH CSV FORCE QUOTE *;
COPY my_table(col1, col2) TO STDOUT WITH CSV FORCE QUOTE (col1, col2);

sqlfluff-3.4.2/test/fixtures/dialects/postgres/copy.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
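# These fixtures are exercised by the dialect parsing tests. Assuming the
# standard layout of this repository, a focused run looks like:
#
#     pytest test/dialects/dialects_test.py -k postgres
#
# The exact test path is an assumption; `pytest -k postgres` from the
# repository root works without it.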
_hash: 4b115542ab2c94e730d6366b2cd172e0e8f7802ebe34ee63239b86b3346e1e96 file: - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: column_reference: naked_identifier: my_col from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: ENCODING - quoted_literal: "'UTF8'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - 
keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NOT_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: 
my_table - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: TO - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: 
naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - end_bracket: ) - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: ENCODING - quoted_literal: "'UTF8'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - 
comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - keyword: WHERE - expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some_value'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: copy_statement_bug - keyword: FROM - keyword: stdin - keyword: WITH - keyword: csv - keyword: header - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: BINARY - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: DELIMITER - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: DELIMITER - keyword: AS - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: 'NULL' - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: QUOTE - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: QUOTE - keyword: AS - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: ESCAPE - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: ESCAPE - keyword: AS - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: 
naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: FORCE - keyword: NOT - keyword: 'NULL' - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - keyword: CSV - keyword: FORCE - keyword: NOT - keyword: 'NULL' - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: BINARY - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: DELIMITER - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: DELIMITER - keyword: AS - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: 'NULL' - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: CSV - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: CSV - keyword: QUOTE - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: CSV - keyword: QUOTE - keyword: AS - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: CSV - keyword: ESCAPE - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - keyword: CSV - keyword: ESCAPE - keyword: AS - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: country where_clause: keyword: WHERE expression: column_reference: naked_identifier: country_name keyword: LIKE quoted_literal: "'A%'" end_bracket: ) - keyword: TO - quoted_literal: "'/usr1/proj/bray/sql/a_list_countries.copy'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table 
- bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: BINARY - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: DELIMITER - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: DELIMITER - keyword: AS - quoted_literal: "'#'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: 'NULL' - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: HEADER - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: QUOTE - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: QUOTE - keyword: AS - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: ESCAPE - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: 
col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: ESCAPE - keyword: AS - quoted_literal: "'\\'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: FORCE - keyword: QUOTE - star: '*' - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - keyword: CSV - keyword: FORCE - keyword: QUOTE - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_aggregate.sql000066400000000000000000000002241503426445100256240ustar00rootroot00000000000000CREATE AGGREGATE agg_twocols(numeric, numeric) ( SFUNC = mysfunc_accum, STYPE = numeric, COMBINEFUNC = mycombine_accum, INITCOND = 0 ); sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_aggregate.yml000066400000000000000000000033311503426445100256300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 98271f8e2a97d5965c25e12d880709e5d652e39c2056e0fe56a2d36b6f64988e file: statement: create_aggregate_statement: - keyword: CREATE - keyword: AGGREGATE - object_reference: naked_identifier: agg_twocols - bracketed: - start_bracket: ( - word: numeric - comma: ',' - word: numeric - end_bracket: ) - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: SFUNC - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: mysfunc_accum - comma: ',' - data_type: data_type_identifier: STYPE - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: numeric - comma: ',' - data_type: data_type_identifier: COMBINEFUNC - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: mycombine_accum - comma: ',' - data_type: data_type_identifier: INITCOND - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '0' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_cast.sql000066400000000000000000000017161503426445100246370ustar00rootroot00000000000000CREATE CAST (int AS bool) WITH FUNCTION fname; CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname AS IMPLICIT; CREATE CAST (int AS bool) WITH FUNCTION fname(); CREATE CAST (int AS bool) WITH FUNCTION fname() AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname() AS IMPLICIT; CREATE CAST (int AS bool) WITH FUNCTION fname(bool); CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT; CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2); CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2); -- PG extension for not listing an actual function: CREATE CAST (int AS bool) WITHOUT FUNCTION; CREATE CAST (int AS bool) WITHOUT FUNCTION AS ASSIGNMENT; CREATE CAST (int AS bool) WITHOUT FUNCTION AS IMPLICIT; CREATE CAST (int AS bool) WITH INOUT; CREATE CAST (int AS bool) WITH INOUT AS ASSIGNMENT; CREATE CAST (int AS bool) WITH INOUT AS IMPLICIT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_cast.yml000066400000000000000000000175711503426445100246470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9e34d987801d78938da48d706596e4179445070e27a9b8e4a6146a28543d4c37 file: - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: bool end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . 
function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: bool - end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - comma: ',' - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - comma: ',' - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - keyword: AS - keyword: IMPLICIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_collation.sql000066400000000000000000000003631503426445100256660ustar00rootroot00000000000000CREATE COLLATION numeric (provider = icu, locale = 'en@colNumeric=yes'); CREATE COLLATION french (locale = 'fr_FR.utf8'); CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); CREATE COLLATION german FROM "de_DE"; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_collation.yml000066400000000000000000000040061503426445100256660ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6fba52e4dad257e2487a5070e2e0a6e61fc1a9c6f15f0d7e7787087fa0d28006 file: - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: numeric - bracketed: - start_bracket: ( - keyword: provider - comparison_operator: raw_comparison_operator: '=' - keyword: icu - comma: ',' - keyword: locale - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en@colNumeric=yes'" - end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: french - bracketed: start_bracket: ( keyword: locale comparison_operator: raw_comparison_operator: '=' quoted_literal: "'fr_FR.utf8'" end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: german_phonebook - bracketed: - start_bracket: ( - keyword: provider - comparison_operator: raw_comparison_operator: '=' - keyword: icu - comma: ',' - keyword: locale - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'de-u-co-phonebk'" - end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: german - keyword: FROM - object_reference: quoted_identifier: '"de_DE"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_database.sql000066400000000000000000000042621503426445100254500ustar00rootroot00000000000000CREATE DATABASE db; CREATE DATABASE db OWNER user_name; CREATE DATABASE db OWNER = user_name; CREATE DATABASE db WITH OWNER = user_name; CREATE DATABASE db ENCODING = 'UTF8'; CREATE DATABASE db TEMPLATE = template_name; CREATE DATABASE db WITH TEMPLATE = template_name; CREATE DATABASE db ENCODING 'UTF8'; CREATE DATABASE db WITH ENCODING = 'UTF8'; CREATE DATABASE db LOCALE 'en_US.UTF-8'; CREATE DATABASE db LOCALE = 'en_US.UTF-8'; CREATE DATABASE db WITH LOCALE = 'en_US.UTF-8'; CREATE DATABASE db LC_COLLATE 'en_US.UTF-8'; CREATE DATABASE db LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db WITH LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db WITH LC_CTYPE 'en_US.UTF-8' LC_COLLATE 'en_US.UTF-8' ; CREATE DATABASE db LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'; CREATE DATABASE db WITH LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'; CREATE DATABASE db WITH LC_CTYPE = 'en_US.UTF-8' LC_COLLATE = 'en_US.UTF-8'; CREATE DATABASE db TABLESPACE DEFAULT; CREATE DATABASE db TABLESPACE = DEFAULT; CREATE DATABASE db TABLESPACE new_tablespace; CREATE DATABASE db TABLESPACE = new_tablespace; CREATE DATABASE db WITH TABLESPACE = new_tablespace; CREATE DATABASE db ALLOW_CONNECTIONS true; CREATE DATABASE db ALLOW_CONNECTIONS = true; CREATE DATABASE db CONNECTION LIMIT 10; CREATE DATABASE db CONNECTION LIMIT = 10; CREATE DATABASE db IS_TEMPLATE true; CREATE DATABASE db IS_TEMPLATE = true; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true; CREATE DATABASE db WITH IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true; CREATE DATABASE db WITH IS_TEMPLATE = true 
CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LOCALE 'en_US.UTF-8' OWNER user_name; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LOCALE = 'en_US.UTF-8' OWNER user_name; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8' OWNER user_name; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_database.yml000066400000000000000000000322121503426445100254460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aefcaa60a58bff7c7fbe9aca764513bd1eb940f4e5ee0422f80dde8654641977 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: OWNER - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ENCODING - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LOCALE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LOCALE - comparison_operator: 
raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LOCALE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - keyword: DEFAULT - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - tablespace_reference: 
naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LOCALE - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - 
boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LOCALE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_dollar_quoted_function.sql000066400000000000000000000001451503426445100304430ustar00rootroot00000000000000CREATE FUNCTION foo(integer, integer) RETURNS integer AS $$ select $1 + $2; $$ LANGUAGE SQL; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_dollar_quoted_function.yml000066400000000000000000000020131503426445100304410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0187462d58927b4c36410e856006e4b9add87bd3af7d3937567adbc49fd54695 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: $$ select $1 + $2; $$ language_clause: keyword: LANGUAGE naked_identifier: SQL statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_domain.sql000066400000000000000000000003741503426445100251530ustar00rootroot00000000000000CREATE DOMAIN us_postal_code AS TEXT CHECK( VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$' ); create domain oname as text; CREATE DOMAIN mystr AS text CONSTRAINT not_empty CHECK (LENGTH(value) > 0) CONSTRAINT too_big CHECK (LENGTH(value) <= 50000); sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_domain.yml000066400000000000000000000055361503426445100251620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c174edad00205a8149520f9c9a071978bf4f60018ea6037638202e6abee9fbec file: - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: us_postal_code - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - like_operator: '~' - quoted_literal: "'^\\d{5}$'" - binary_operator: OR - column_reference: naked_identifier: VALUE - like_operator: '~' - quoted_literal: "'^\\d{5}-\\d{4}$'" end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: create - keyword: domain - object_reference: naked_identifier: oname - keyword: as - data_type: keyword: text - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: mystr - keyword: AS - data_type: keyword: text - keyword: CONSTRAINT - object_reference: naked_identifier: not_empty - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: LENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - keyword: CONSTRAINT - object_reference: naked_identifier: too_big - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: LENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '50000' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_extension.sql000066400000000000000000000007001503426445100257110ustar00rootroot00000000000000CREATE EXTENSION amazing_extension with schema schema1 VERSION '2.0.1.2' FROM '1.0'; CREATE EXTENSION IF NOT EXISTS amazing_extension with schema schema1 VERSION '1.2.3a4' FROM '1.0'; CREATE EXTENSION amazing_extension with schema schema1 VERSION version_named FROM from_named; CREATE EXTENSION IF NOT EXISTS aws_lambda CASCADE; DROP EXTENSION amazing_extension; DROP EXTENSION IF EXISTS amazing_extension; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_extension.yml000066400000000000000000000046741503426445100257310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a886644c3efff46c5ca5b1a758be2ed6010af31af1e60852590b7ea9b4cc555f file: - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: quoted_literal: "'2.0.1.2'" - keyword: FROM - version_identifier: quoted_literal: "'1.0'" - statement_terminator: ; - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - keyword: IF - keyword: NOT - keyword: EXISTS - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: quoted_literal: "'1.2.3a4'" - keyword: FROM - version_identifier: quoted_literal: "'1.0'" - statement_terminator: ; - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: naked_identifier: version_named - keyword: FROM - version_identifier: naked_identifier: from_named - statement_terminator: ; - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - keyword: IF - keyword: NOT - keyword: EXISTS - extension_reference: naked_identifier: aws_lambda - keyword: CASCADE - statement_terminator: ; - statement: drop_extension_statement: - keyword: DROP - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - statement_terminator: ; - statement: drop_extension_statement: - keyword: DROP - keyword: EXTENSION - keyword: IF - keyword: EXISTS - extension_reference: naked_identifier: amazing_extension - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_foreign_table.sql000066400000000000000000000122401503426445100264770ustar00rootroot00000000000000CREATE FOREIGN TABLE foreign_table ( simple_column integer, column_with_options char(5) OPTIONS (a 'foo', b 'bar'), column_with_collate text COLLATE "de_DE", column_with_not_null_constraint date NOT NULL, column_with_null_constraint varchar(50) NULL, column_with_check_constraint float CHECK (column_with_check_constraint > 0.0), column_with_default_constraint timestamp DEFAULT CURRENT_TIMESTAMP, column_with_generated_constraint bigint GENERATED ALWAYS AS (simple_column * 2) STORED, column_with_more_than_one_constraint int NOT NULL CHECK (column_with_more_than_one_constraint > 0), column_with_options_and_collate char(5) OPTIONS (a 'foo', b 'bar') COLLATE "es_ES", column_with_options_and_constraint char(5) OPTIONS (a 'foo', b 'bar') NOT NULL, column_with_collate_and_constraint char(5) COLLATE "de_DE" NOT NULL, column_with_options_collate_and_constraint char(5) OPTIONS (a 'foo', b 'bar') COLLATE "de_DE" NOT NULL, CHECK (simple_column > 0), CHECK (simple_column < 10) NO INHERIT, CONSTRAINT named_table_constraint CHECK (column_with_options <> ''), CONSTRAINT named_table_constraint_no_inherit CHECK (column_with_collate <> '') NO INHERIT ) SERVER a_server; CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_that_might_already_exist ( simple_column integer ) SERVER a_server; CREATE FOREIGN TABLE foreign_table_that_inherits ( simple_column integer ) INHERITS ( another_table ) SERVER a_server; CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_that_inherits_that_might_already_exist ( simple_column integer ) INHERITS ( another_table ) SERVER a_server; 
CREATE FOREIGN TABLE foreign_table_with_options ( simple_column integer ) SERVER a_server OPTIONS (c 'baz'); CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_with_options_that_might_already_exist ( simple_column integer ) SERVER a_server OPTIONS (c 'baz'); CREATE FOREIGN TABLE foreign_table_that_inherits_and_has_options ( simple_column integer ) INHERITS ( another_table ) SERVER a_server OPTIONS (c 'baz'); CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_that_inherits_and_has_options_that_might_already_exist ( simple_column integer ) INHERITS ( another_table ) SERVER a_server OPTIONS (c 'baz'); CREATE FOREIGN TABLE foreign_table_partition_in PARTITION OF another_table FOR VALUES IN ('2016-07-01', '2016-08-01') SERVER a_server; CREATE FOREIGN TABLE foreign_table_partition_from_min_to_max PARTITION OF another_table FOR VALUES FROM ('2016-07-01') TO ('2016-08-01') SERVER a_server; CREATE FOREIGN TABLE foreign_table_partition_with PARTITION OF another_table FOR VALUES WITH ( MODULUS 2, REMAINDER 0) SERVER a_server; CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_partition_in_that_might_already_exist PARTITION OF another_table FOR VALUES IN ('2016-07-01', '2016-08-01') SERVER a_server; CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_partition_from_min_to_max_that_might_already_exist PARTITION OF another_table FOR VALUES FROM ('2016-07-01') TO ('2016-08-01') SERVER a_server; CREATE FOREIGN TABLE IF NOT EXISTS foreign_table_partition_with_that_might_already_exist PARTITION OF another_table FOR VALUES WITH ( MODULUS 2, REMAINDER 0) SERVER a_server; CREATE FOREIGN TABLE foreign_table_partition_in_with_options PARTITION OF another_table FOR VALUES IN ('2016-07-01', '2016-08-01') SERVER a_server OPTIONS (foo 'bar'); CREATE FOREIGN TABLE foreign_table_partition_from_min_to_max_with_options PARTITION OF another_table FOR VALUES FROM ('2016-07-01') TO ('2016-08-01') SERVER a_server OPTIONS (foo 'bar'); CREATE FOREIGN TABLE foreign_table_partition_with_with_options PARTITION OF another_table FOR VALUES WITH ( MODULUS 2, REMAINDER 0) SERVER a_server OPTIONS (foo 'bar'); CREATE FOREIGN TABLE foreign_table_partition_in_with_columns PARTITION OF another_table ( simple_column, column_with_options WITH OPTIONS, column_with_not_null_constraint NOT NULL, column_with_null_constraint NULL, column_with_check_constraint CHECK (column_with_check_constraint > 0.0), column_with_default_constraint DEFAULT CURRENT_TIMESTAMP, column_with_generated_constraint GENERATED ALWAYS AS (simple_column * 2) STORED, column_with_more_than_one_constraint NOT NULL CHECK (column_with_more_than_one_constraint > 0), column_with_options_and_not_null_constraint WITH OPTIONS NOT NULL, CHECK (simple_column > 0), CHECK (simple_column < 10) NO INHERIT, CONSTRAINT named_table_constraint CHECK (column_with_options <> ''), CONSTRAINT named_table_constraint_no_inherit CHECK (column_with_options_and_not_null_constraint <> '') NO INHERIT ) FOR VALUES IN ('2016-07-01', '2016-08-01') SERVER a_server; CREATE FOREIGN TABLE foreign_table_partition_with_from_min_to_max_with_columns PARTITION OF another_table ( simple_column ) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01') SERVER a_server; CREATE FOREIGN TABLE foreign_table_partition_with_with_columns PARTITION OF another_table ( simple_column ) FOR VALUES WITH ( MODULUS 2, REMAINDER 0) SERVER a_server; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_foreign_table.yml000066400000000000000000000667221503426445100265170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files 
and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cdcbe24329fb7f0e321e4ad584f94a5d6e918340c17cc53d61af8dedc57a473d file: - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: simple_column - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: column_with_options - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: a - quoted_literal: "'foo'" - comma: ',' - naked_identifier_all: b - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - column_reference: naked_identifier: column_with_collate - data_type: keyword: text - keyword: COLLATE - collation_reference: quoted_identifier: '"de_DE"' - comma: ',' - column_reference: naked_identifier: column_with_not_null_constraint - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_null_constraint - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_check_constraint - data_type: keyword: float - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_check_constraint comparison_operator: raw_comparison_operator: '>' numeric_literal: '0.0' end_bracket: ) - comma: ',' - column_reference: naked_identifier: column_with_default_constraint - data_type: datetime_type_identifier: keyword: timestamp - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - column_reference: naked_identifier: column_with_generated_constraint - data_type: keyword: bigint - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column binary_operator: '*' numeric_literal: '2' end_bracket: ) - keyword: STORED - comma: ',' - column_reference: naked_identifier: column_with_more_than_one_constraint - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_more_than_one_constraint comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - column_reference: naked_identifier: column_with_options_and_collate - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: a - quoted_literal: "'foo'" - comma: ',' - naked_identifier_all: b - quoted_literal: "'bar'" - end_bracket: ) - keyword: COLLATE - collation_reference: quoted_identifier: '"es_ES"' - comma: ',' - column_reference: naked_identifier: column_with_options_and_constraint - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( 
numeric_literal: '5' end_bracket: ) - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: a - quoted_literal: "'foo'" - comma: ',' - naked_identifier_all: b - quoted_literal: "'bar'" - end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_collate_and_constraint - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: COLLATE - collation_reference: quoted_identifier: '"de_DE"' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_options_collate_and_constraint - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: a - quoted_literal: "'foo'" - comma: ',' - naked_identifier_all: b - quoted_literal: "'bar'" - end_bracket: ) - keyword: COLLATE - collation_reference: quoted_identifier: '"de_DE"' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - table_constraint: - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column comparison_operator: raw_comparison_operator: < numeric_literal: '10' end_bracket: ) - keyword: 'NO' - keyword: INHERIT - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: named_table_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_options comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "''" end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: named_table_constraint_no_inherit - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_collate comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "''" end_bracket: ) - keyword: 'NO' - keyword: INHERIT - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_that_might_already_exist - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_that_inherits - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: another_table end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - 
table_reference: naked_identifier: foreign_table_that_inherits_that_might_already_exist - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: another_table end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_with_options - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: c quoted_literal: "'baz'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_with_options_that_might_already_exist - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: c quoted_literal: "'baz'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_that_inherits_and_has_options - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: another_table end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: c quoted_literal: "'baz'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_that_inherits_and_has_options_that_might_already_exist - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column data_type: keyword: integer end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: another_table end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: c quoted_literal: "'baz'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_in - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'2016-07-01'" - comma: ',' - expression: quoted_literal: "'2016-08-01'" - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_from_min_to_max - keyword: PARTITION - keyword: OF - 
table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_with - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '2' - comma: ',' - keyword: REMAINDER - numeric_literal: '0' - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_partition_in_that_might_already_exist - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'2016-07-01'" - comma: ',' - expression: quoted_literal: "'2016-08-01'" - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_partition_from_min_to_max_that_might_already_exist - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foreign_table_partition_with_that_might_already_exist - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '2' - comma: ',' - keyword: REMAINDER - numeric_literal: '0' - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_in_with_options - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'2016-07-01'" - comma: ',' - expression: quoted_literal: "'2016-08-01'" - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: foo quoted_literal: 
"'bar'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_from_min_to_max_with_options - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: foo quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_with_with_options - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '2' - comma: ',' - keyword: REMAINDER - numeric_literal: '0' - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - keyword: OPTIONS - bracketed: start_bracket: ( naked_identifier_all: foo quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_in_with_columns - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: simple_column - comma: ',' - column_reference: naked_identifier: column_with_options - keyword: WITH - keyword: OPTIONS - comma: ',' - column_reference: naked_identifier: column_with_not_null_constraint - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_null_constraint - column_constraint_segment: keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: column_with_check_constraint - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_check_constraint comparison_operator: raw_comparison_operator: '>' numeric_literal: '0.0' end_bracket: ) - comma: ',' - column_reference: naked_identifier: column_with_default_constraint - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - column_reference: naked_identifier: column_with_generated_constraint - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column binary_operator: '*' numeric_literal: '2' end_bracket: ) - keyword: STORED - comma: ',' - column_reference: naked_identifier: column_with_more_than_one_constraint - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_more_than_one_constraint comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - column_reference: naked_identifier: column_with_options_and_not_null_constraint - keyword: WITH - keyword: OPTIONS - 
column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - table_constraint: - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: simple_column comparison_operator: raw_comparison_operator: < numeric_literal: '10' end_bracket: ) - keyword: 'NO' - keyword: INHERIT - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: named_table_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_options comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "''" end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: named_table_constraint_no_inherit - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: column_with_options_and_not_null_constraint comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "''" end_bracket: ) - keyword: 'NO' - keyword: INHERIT - end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'2016-07-01'" - comma: ',' - expression: quoted_literal: "'2016-08-01'" - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_with_from_min_to_max_with_columns - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; - statement: create_foreign_table_statement: - keyword: CREATE - keyword: FOREIGN - keyword: TABLE - table_reference: naked_identifier: foreign_table_partition_with_with_columns - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: another_table - bracketed: start_bracket: ( column_reference: naked_identifier: simple_column end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '2' - comma: ',' - keyword: REMAINDER - numeric_literal: '0' - end_bracket: ) - keyword: SERVER - server_reference: naked_identifier: a_server - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_foreign_wrapper.sql000066400000000000000000000001561503426445100270730ustar00rootroot00000000000000create foreign data wrapper stripe_wrapper handler stripe_fdw_handler validator stripe_fdw_validator; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_foreign_wrapper.yml000066400000000000000000000013311503426445100270710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43b2813d5760fc078d59ae11a7fe6bec905c4f1bfeacc37bfe302af38a9b5d02 file: statement: create_foreign_data_wrapper: - keyword: create - keyword: foreign - keyword: data - keyword: wrapper - naked_identifier: stripe_wrapper - keyword: handler - naked_identifier: stripe_fdw_handler - keyword: validator - naked_identifier: stripe_fdw_validator statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_function.sql000066400000000000000000000133651503426445100255350ustar00rootroot00000000000000-- Some more complicated Postgres function creations. CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; -- Quoted language options are deprecated but still supported CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE 'sql'; CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS ' BEGIN RETURN i + 1; END; ' LANGUAGE plpgsql VOLATILE; CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS ' BEGIN RETURN i + 1; END; ' LANGUAGE plpgsql WINDOW IMMUTABLE STABLE LEAKPROOF RETURNS NULL ON NULL INPUT EXTERNAL SECURITY DEFINER ROWS 5 SET test_param = 3; CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS 'C:\\my_file.c', 'symlink_c' LANGUAGE plpgsql WINDOW IMMUTABLE STABLE NOT LEAKPROOF CALLED ON NULL INPUT EXTERNAL SECURITY DEFINER COST 123 ROWS 5 SET test_param = 3 WITH (isStrict); CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer PARALLEL UNSAFE AS $$ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql SUPPORT my_function; CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS $$ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql; CREATE FUNCTION dup(in int, out f1 int, out f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; SELECT * FROM dup(42); CREATE TYPE dup_result AS (f1 int, f2 text); CREATE FUNCTION dup(int) RETURNS dup_result AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; SELECT * FROM dup(42); CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; CREATE FUNCTION dup(int) RETURNS TABLE("f1" int, "f2" text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; SELECT * FROM dup(42); CREATE FUNCTION check_password(uname TEXT, pass TEXT) RETURNS BOOLEAN AS $$ DECLARE passed BOOLEAN; BEGIN SELECT (pwd = $2) INTO passed FROM pwds WHERE username = $1; RETURN passed; END; $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = admin, pg_temp; BEGIN; CREATE FUNCTION check_password(uname TEXT, pass TEXT) RETURNS BOOLEAN AS $$ DECLARE passed BOOLEAN; BEGIN SELECT (pwd = $2) INTO passed FROM pwds WHERE username = $1; RETURN passed; END; $$ LANGUAGE plpgsql SECURITY DEFINER; REVOKE ALL ON FUNCTION check_password(uname TEXT, pass TEXT) FROM PUBLIC; GRANT EXECUTE ON FUNCTION check_password(uname TEXT, pass TEXT) TO admins; COMMIT; CREATE OR REPLACE FUNCTION public.setof_test() RETURNS SETOF text LANGUAGE sql STABLE STRICT AS $function$ select unnest(array['hi', 'test']) $function$ ; CREATE OR REPLACE FUNCTION public.foo(_a TEXT, _$b INT) RETURNS FLOAT AS $$ RETURN 0.0 $$ LANGUAGE plpgsql STABLE PARALLEL SAFE; 
CREATE FUNCTION _add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; CREATE FUNCTION _$add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; create function test2( x date = current_date ) returns date as $$ begin return x; end; $$; create function test3( x date default current_date ) returns date as $$ begin return x; end; $$; CREATE OR REPLACE FUNCTION data_wrapper() RETURNS SETOF data STABLE PARALLEL SAFE LEAKPROOF BEGIN ATOMIC SELECT * FROM data; END; create or replace function tz_date(timestamp with time zone, text) returns date language sql immutable strict return ($1 at time zone $2)::date; CREATE FUNCTION storage.insert_dimension (in_ordinality int, in_fieldname varchar, in_default_val varchar, in_valid_from timestamp, in_valid_until timestamp) returns storage.dimensions language sql BEGIN ATOMIC UPDATE storage.dimensions SET ordinality = ordinality + 1 WHERE ordinality >= in_ordinality; INSERT INTO storage.dimensions (ordinality, fieldname, default_val, valid_from, valid_until) VALUES (in_ordinality, in_fieldname, coalesce(in_default_val, 'notexist'), coalesce(in_valid_from, '-infinity'), coalesce(in_valid_until, 'infinity')) RETURNING *; END; CREATE OR REPLACE FUNCTION time_bucket( _time timestamp without time zone, _from timestamp without time zone, _to timestamp without time zone, _buckets integer DEFAULT 200, _offset integer DEFAULT 0 ) RETURNS timestamp without time zone IMMUTABLE PARALLEL SAFE BEGIN ATOMIC SELECT date_bin(((_to - _from) / greatest((_buckets - 1), 1)), _time, _from) + ((_to - _from) / greatest((_buckets - 1), 1)) * (_offset + 1); END; CREATE OR REPLACE FUNCTION time_bucket_limited(_time timestamp, _from timestamp, _to timestamp, _buckets int = 200) RETURNS timestamp IMMUTABLE PARALLEL SAFE BEGIN ATOMIC RETURN CASE WHEN _time <= _from THEN _from WHEN _time >= _to THEN _to ELSE DATE_BIN((_to - _from) / GREATEST(_buckets - 1, 1), _time, _from) + ((_to - _from) / GREATEST(_buckets - 1, 1)) end; END; CREATE OR REPLACE FUNCTION time_series( _from timestamp without time zone, _to timestamp without time zone, _buckets integer DEFAULT 200 ) RETURNS TABLE ("time" timestamp without time zone) IMMUTABLE PARALLEL SAFE BEGIN ATOMIC -- ATTENTION: use integer to generate series, since with timestamps there are rounding issues SELECT time_bucket(_from, _from, _to, _buckets, g.ofs - 1) FROM generate_series(0, greatest((_buckets - 1), 1)) AS g (ofs); END; create or replace function test ( _pt geography(point) ) returns numeric language sql begin atomic select st_x(_pt::geometry); end; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_function.yml000066400000000000000000001301671503426445100255370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
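# A minimal sketch of that regeneration workflow, run from the repository
# root. Only the script path comes from the header above; the editable
# install step is an assumption (any environment where the local SQLFluff
# checkout is importable will do):
#
#   pip install -e .                           # make the local checkout importable
#   python test/generate_parse_fixture_yml.py  # rewrites the fixture YML files
#
# Regeneration rewrites this file, including the `_hash` field just below,
# so hand edits here will not survive it.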
_hash: e6229c7ba24fc50cad903cc46910c65c736371d9afaa5ff0847217eca4d6d165 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE quoted_identifier: "'sql'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'\n BEGIN\n RETURN i + 1;\n END;\n'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: VOLATILE - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'\n BEGIN\n RETURN i + 1;\n END;\n'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: WINDOW - keyword: IMMUTABLE - keyword: STABLE - keyword: LEAKPROOF - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: ROWS - numeric_literal: '5' - keyword: SET - parameter: test_param - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'C:\\\\my_file.c'" - comma: ',' - quoted_literal: "'symlink_c'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: WINDOW - keyword: IMMUTABLE - keyword: STABLE - keyword: NOT - keyword: LEAKPROOF - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: COST - numeric_literal: '123' - keyword: ROWS - numeric_literal: '5' - keyword: SET - parameter: test_param - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - keyword: WITH - bracketed: start_bracket: ( parameter: isStrict end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: 
REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: PARALLEL - keyword: UNSAFE - keyword: AS - quoted_literal: "$$\n BEGIN\n RETURN i + 1;\n END;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SUPPORT - parameter: my_function - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - language_clause: keyword: LANGUAGE naked_identifier: SQL - keyword: IMMUTABLE - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "$$\n BEGIN\n RETURN i + 1;\n \ \ END;\n$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: - start_bracket: ( - keyword: in - data_type: keyword: int - comma: ',' - keyword: out - parameter: f1 - data_type: keyword: int - comma: ',' - keyword: out - parameter: f2 - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: dup function_contents: bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: dup_result - keyword: AS - bracketed: - start_bracket: ( - word: f1 - word: int - comma: ',' - word: f2 - word: text - end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: dup_result - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: 
function_name_identifier: dup function_contents: bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_reference: naked_identifier: f1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: f2 - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"f1"' - data_type: keyword: int - comma: ',' - column_reference: quoted_identifier: '"f2"' - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: dup function_contents: bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: RETURNS - data_type: keyword: BOOLEAN - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE passed BOOLEAN;\nBEGIN\n SELECT (pwd\ \ = $2) INTO passed\n FROM pwds\n WHERE username = $1;\n\ \n RETURN passed;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SECURITY - keyword: DEFINER - keyword: SET - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - parameter: admin - comma: ',' - parameter: pg_temp - statement_terminator: ; - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: RETURNS - data_type: keyword: BOOLEAN - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE passed BOOLEAN;\nBEGIN\n SELECT (pwd\ \ = $2) INTO passed\n FROM pwds\n WHERE username = $1;\n\ \n RETURN passed;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ALL - keyword: 'ON' - keyword: FUNCTION - function_name: 
function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: FROM - object_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: TO - role_reference: naked_identifier: admins - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: setof_test - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: SETOF - data_type: keyword: text - function_definition: - language_clause: keyword: LANGUAGE naked_identifier: sql - keyword: STABLE - keyword: STRICT - keyword: AS - quoted_literal: "$function$\nselect unnest(array['hi', 'test'])\n$function$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - parameter: _a - data_type: keyword: TEXT - comma: ',' - parameter: _$b - data_type: keyword: INT - end_bracket: ) - keyword: RETURNS - data_type: keyword: FLOAT - function_definition: - keyword: AS - quoted_literal: "$$\n RETURN 0.0\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: STABLE - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: _add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: _$add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: function - function_name: function_name_identifier: test2 - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: datetime_type_identifier: keyword: date comparison_operator: raw_comparison_operator: '=' expression: bare_function: current_date end_bracket: ) - keyword: returns - data_type: datetime_type_identifier: keyword: date - function_definition: keyword: as quoted_literal: "$$\n begin\n return x;\n end;\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - 
keyword: function - function_name: function_name_identifier: test3 - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: datetime_type_identifier: keyword: date keyword: default expression: bare_function: current_date end_bracket: ) - keyword: returns - data_type: datetime_type_identifier: keyword: date - function_definition: keyword: as quoted_literal: "$$\n begin\n return x;\n end;\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: data_wrapper - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: SETOF - data_type: data_type_identifier: data - function_definition: - keyword: STABLE - keyword: PARALLEL - keyword: SAFE - keyword: LEAKPROOF - keyword: BEGIN - keyword: ATOMIC - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: data - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: tz_date - function_parameter_list: bracketed: - start_bracket: ( - data_type: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone - comma: ',' - data_type: keyword: text - end_bracket: ) - keyword: returns - data_type: datetime_type_identifier: keyword: date - function_definition: - language_clause: keyword: language naked_identifier: sql - keyword: immutable - keyword: strict - keyword: return - expression: cast_expression: bracketed: start_bracket: ( expression: dollar_numeric_literal: $1 time_zone_grammar: - keyword: at - keyword: time - keyword: zone - expression: dollar_numeric_literal: $2 end_bracket: ) casting_operator: '::' data_type: datetime_type_identifier: keyword: date - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: naked_identifier: storage dot: . function_name_identifier: insert_dimension - function_parameter_list: bracketed: - start_bracket: ( - parameter: in_ordinality - data_type: keyword: int - comma: ',' - parameter: in_fieldname - data_type: keyword: varchar - comma: ',' - parameter: in_default_val - data_type: keyword: varchar - comma: ',' - parameter: in_valid_from - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - parameter: in_valid_until - data_type: datetime_type_identifier: keyword: timestamp - end_bracket: ) - keyword: returns - data_type: naked_identifier: storage dot: . data_type_identifier: dimensions - function_definition: - language_clause: keyword: language naked_identifier: sql - keyword: BEGIN - keyword: ATOMIC - update_statement: keyword: UPDATE table_reference: - naked_identifier: storage - dot: . 
- naked_identifier: dimensions set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: ordinality comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: ordinality binary_operator: + numeric_literal: '1' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ordinality - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: in_ordinality - statement_terminator: ; - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: storage - dot: . - naked_identifier: dimensions - bracketed: - start_bracket: ( - column_reference: naked_identifier: ordinality - comma: ',' - column_reference: naked_identifier: fieldname - comma: ',' - column_reference: naked_identifier: default_val - comma: ',' - column_reference: naked_identifier: valid_from - comma: ',' - column_reference: naked_identifier: valid_until - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: in_ordinality - comma: ',' - expression: column_reference: naked_identifier: in_fieldname - comma: ',' - expression: function: function_name: function_name_identifier: coalesce function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: in_default_val - comma: ',' - expression: quoted_literal: "'notexist'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: coalesce function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: in_valid_from - comma: ',' - expression: quoted_literal: "'-infinity'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: coalesce function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: in_valid_until - comma: ',' - expression: quoted_literal: "'infinity'" - end_bracket: ) - end_bracket: ) - keyword: RETURNING - star: '*' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: time_bucket - function_parameter_list: bracketed: - start_bracket: ( - parameter: _time - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - comma: ',' - parameter: _from - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - comma: ',' - parameter: _to - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - comma: ',' - parameter: _buckets - data_type: keyword: integer - keyword: DEFAULT - expression: numeric_literal: '200' - comma: ',' - parameter: _offset - data_type: keyword: integer - keyword: DEFAULT - expression: numeric_literal: '0' - end_bracket: ) - keyword: RETURNS - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - function_definition: - keyword: IMMUTABLE - keyword: PARALLEL - keyword: SAFE - keyword: BEGIN - keyword: ATOMIC - select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: date_bin function_contents: bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: bracketed: 
start_bracket: ( expression: - column_reference: naked_identifier: _to - binary_operator: '-' - column_reference: naked_identifier: _from end_bracket: ) binary_operator: / function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: _buckets binary_operator: '-' numeric_literal: '1' end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) - comma: ',' - expression: column_reference: naked_identifier: _time - comma: ',' - expression: column_reference: naked_identifier: _from - end_bracket: ) - binary_operator: + - bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: _to - binary_operator: '-' - column_reference: naked_identifier: _from end_bracket: ) binary_operator: / function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: _buckets binary_operator: '-' numeric_literal: '1' end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) - binary_operator: '*' - bracketed: start_bracket: ( expression: column_reference: naked_identifier: _offset binary_operator: + numeric_literal: '1' end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: time_bucket_limited - function_parameter_list: bracketed: - start_bracket: ( - parameter: _time - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - parameter: _from - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - parameter: _to - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - parameter: _buckets - data_type: keyword: int - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '200' - end_bracket: ) - keyword: RETURNS - data_type: datetime_type_identifier: keyword: timestamp - function_definition: - keyword: IMMUTABLE - keyword: PARALLEL - keyword: SAFE - keyword: BEGIN - keyword: ATOMIC - keyword: RETURN - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: _time - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _from - keyword: THEN - expression: column_reference: naked_identifier: _from - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: _time - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: _to - keyword: THEN - expression: column_reference: naked_identifier: _to - else_clause: keyword: ELSE expression: function: function_name: function_name_identifier: DATE_BIN function_contents: bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: _to - binary_operator: '-' - column_reference: naked_identifier: _from end_bracket: ) binary_operator: / function: function_name: function_name_identifier: GREATEST function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _buckets binary_operator: '-' numeric_literal: '1' - comma: ',' - 
expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: column_reference: naked_identifier: _time - comma: ',' - expression: column_reference: naked_identifier: _from - end_bracket: ) binary_operator: + bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: _to - binary_operator: '-' - column_reference: naked_identifier: _from end_bracket: ) binary_operator: / function: function_name: function_name_identifier: GREATEST function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _buckets binary_operator: '-' numeric_literal: '1' - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: end - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: time_series - function_parameter_list: bracketed: - start_bracket: ( - parameter: _from - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - comma: ',' - parameter: _to - data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone - comma: ',' - parameter: _buckets - data_type: keyword: integer - keyword: DEFAULT - expression: numeric_literal: '200' - end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: start_bracket: ( column_reference: quoted_identifier: '"time"' data_type: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone end_bracket: ) - function_definition: - keyword: IMMUTABLE - keyword: PARALLEL - keyword: SAFE - keyword: BEGIN - keyword: ATOMIC - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: time_bucket function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _from - comma: ',' - expression: column_reference: naked_identifier: _from - comma: ',' - expression: column_reference: naked_identifier: _to - comma: ',' - expression: column_reference: naked_identifier: _buckets - comma: ',' - expression: column_reference: - naked_identifier: g - dot: . 
- naked_identifier: ofs binary_operator: '-' numeric_literal: '1' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: generate_series function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '0' - comma: ',' - expression: function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: _buckets binary_operator: '-' numeric_literal: '1' end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: g bracketed: start_bracket: ( identifier_list: naked_identifier: ofs end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: test - function_parameter_list: bracketed: start_bracket: ( parameter: _pt data_type: wkt_geometry_type: keyword: geography bracketed: start_bracket: ( keyword: point end_bracket: ) end_bracket: ) - keyword: returns - data_type: keyword: numeric - function_definition: - language_clause: keyword: language naked_identifier: sql - keyword: begin - keyword: atomic - select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: st_x function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: _pt casting_operator: '::' data_type: data_type_identifier: geometry end_bracket: ) - statement_terminator: ; - keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_index.sql000066400000000000000000000030351503426445100250100ustar00rootroot00000000000000CREATE UNIQUE INDEX title_idx ON films (title); CREATE UNIQUE INDEX title_idx ON films (title) INCLUDE (director, rating); CREATE INDEX title_idx ON films (title) WITH (deduplicate_items = 'off'); CREATE INDEX ON films ((lower(title))); CREATE INDEX title_idx_german ON films (title COLLATE "de_DE"); CREATE INDEX title_idx_nulls_low ON films (title NULLS FIRST); CREATE INDEX title_idx_nulls_high ON films (title NULLS LAST); CREATE UNIQUE INDEX title_idx ON films (title) WITH (fillfactor = 70); CREATE INDEX gin_idx ON documents_table USING GIN (locations) WITH (fastupdate = 'off'); CREATE INDEX code_idx ON films (code) TABLESPACE indexspace; CREATE INDEX pointloc ON points USING gist (box(location,location)); CREATE INDEX CONCURRENTLY sales_quantity_index ON sales_table (quantity); CREATE INDEX super_idx ON super_table USING btree(super_column DESC); CREATE INDEX opclass_index ON schema.opclass_table (col varchar_pattern_ops); CREATE INDEX opclass_index_with_parameters ON schema.opclass_table (col varchar_pattern_ops(p1='3', p2='4')); CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) WHERE success; CREATE INDEX nulls_distinct_index ON documents_table USING GIN (locations) NULLS DISTINCT WITH (fastupdate = 'off'); CREATE INDEX nulls_not_distinct_index ON documents_table USING GIN (locations) NULLS NOT DISTINCT WITH (fastupdate = 'off'); CREATE INDEX code_idx ON films (code) TABLESPACE indexspace; CREATE INDEX CONCURRENTLY IF NOT EXISTS user_my_column_idx ON my_schema.user (my_column); 
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_index.yml000066400000000000000000000322531503426445100250160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4672198550270b7d4506c0a3189b92c6a115ac3fa72247a53943c986fd45dc80 file: - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: INCLUDE - bracketed: - start_bracket: ( - index_element: column_reference: naked_identifier: director - comma: ',' - index_element: column_reference: naked_identifier: rating - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: deduplicate_items comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: title end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_german - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: keyword: COLLATE collation_reference: quoted_identifier: '"de_DE"' end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_nulls_low - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: - keyword: NULLS - keyword: FIRST end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_nulls_high - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: - keyword: NULLS - keyword: LAST end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: 
CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: gin_idx - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: code_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: code end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: indexspace - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: pointloc - keyword: 'ON' - table_reference: naked_identifier: points - keyword: USING - index_access_method: naked_identifier: gist - bracketed: start_bracket: ( index_element: function: function_name: function_name_identifier: box function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: location - comma: ',' - expression: column_reference: naked_identifier: location - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: sales_quantity_index - keyword: 'ON' - table_reference: naked_identifier: sales_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: quantity end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: super_idx - keyword: 'ON' - table_reference: naked_identifier: super_table - keyword: USING - index_access_method: naked_identifier: btree - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: super_column index_element_options: keyword: DESC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: opclass_index - keyword: 'ON' - table_reference: - naked_identifier: schema - dot: . - naked_identifier: opclass_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: col index_element_options: operator_class_reference: naked_identifier: varchar_pattern_ops end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: opclass_index_with_parameters - keyword: 'ON' - table_reference: - naked_identifier: schema - dot: . 
- naked_identifier: opclass_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: col index_element_options: operator_class_reference: naked_identifier: varchar_pattern_ops relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: p1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'3'" - comma: ',' - relation_option: properties_naked_identifier: p2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'4'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: tests_success_constraint - keyword: 'ON' - table_reference: naked_identifier: tests - bracketed: - start_bracket: ( - index_element: column_reference: naked_identifier: subject - comma: ',' - index_element: column_reference: naked_identifier: target - end_bracket: ) - keyword: WHERE - expression: column_reference: naked_identifier: success - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: nulls_distinct_index - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: NULLS - keyword: DISTINCT - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: nulls_not_distinct_index - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: NULLS - keyword: NOT - keyword: DISTINCT - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: code_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: code end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: indexspace - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: CONCURRENTLY - keyword: IF - keyword: NOT - keyword: EXISTS - index_reference: naked_identifier: user_my_column_idx - keyword: 'ON' - table_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: user - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: my_column end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_materialized_view.sql000066400000000000000000000046051503426445100274110ustar00rootroot00000000000000CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS SELECT a FROM my_table; CREATE MATERIALIZED VIEW my_mat_view AS ( SELECT a FROM my_table ); CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table WITH NO DATA; CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table WITH DATA; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS ( SELECT a FROM my_table ); CREATE MATERIALIZED VIEW my_mat_view AS ( SELECT a, b FROM my_table WHERE y = 'value' ); CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS ( SELECT a, b FROM my_table WHERE y = 'value' ); CREATE MATERIALIZED VIEW my_mat_view AS SELECT a, b FROM my_table WHERE y = 'value'; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS SELECT a, b FROM my_table WHERE y = 'value'; CREATE MATERIALIZED VIEW my_mat_view AS SELECT a, b FROM my_table; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS SELECT a, b FROM my_table; -- SQL from issue #2039 CREATE MATERIALIZED VIEW bar AS ( SELECT col FROM my_table ) WITH NO DATA; CREATE MATERIALIZED VIEW IF NOT EXISTS bar AS ( SELECT col FROM my_table ) WITH NO DATA; CREATE MATERIALIZED VIEW my_mat_view USING heap WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d) TABLESPACE pg_default AS ( SELECT a, avg(b) AS my_avg, count(*) AS my_count FROM my_table GROUP BY grp HAVING col > 2 ) WITH DATA; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view USING heap WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d) TABLESPACE pg_default AS ( SELECT a, avg(b) AS my_avg, count(*) AS my_count FROM my_table GROUP BY grp HAVING col > 2 ) WITH DATA; CREATE MATERIALIZED VIEW my_mat_view TABLESPACE pg_default AS SELECT table_1.field_1, table_1.field_2 FROM table_1 UNION SELECT table_2.field_1, table_2.field_2 FROM table_2 ORDER BY field_1, field_2 WITH DATA; CREATE MATERIALIZED VIEW my_mat_view WITH (left.right) AS SELECT a FROM my_table; CREATE MATERIALIZED VIEW my_mat_view WITH (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) AS SELECT a FROM my_table; CREATE OR REPLACE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_materialized_view.yml000066400000000000000000000567631503426445100274270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9ced3217608b0057fd622848b8cf35f25cd4b3c5ad64af0f64e6114cbcf03189 file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: 
keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: 
naked_identifier: bar - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: USING - parameter: heap - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: prop_a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: prob_b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some_value'" - comma: ',' - relation_option: properties_naked_identifier: prop_c comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - relation_option: properties_naked_identifier: prop_d - end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_avg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: grp having_clause: keyword: HAVING expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '2' end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: USING - parameter: heap - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: prop_a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: prob_b comparison_operator: raw_comparison_operator: '=' quoted_literal: 
"'some_value'" - comma: ',' - relation_option: properties_naked_identifier: prop_c comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - relation_option: properties_naked_identifier: prop_d - end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_avg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: grp having_clause: keyword: HAVING expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '2' end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: field_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 - set_operator: keyword: UNION - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: field_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_2 - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field_1 - comma: ',' - column_reference: naked_identifier: field_2 - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: left - dot: . 
- properties_naked_identifier: right end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_operator.sql000066400000000000000000000005731503426445100255400ustar00rootroot00000000000000CREATE OPERATOR >=( LEFTARG = semantic_version, RIGHTARG = semantic_version, PROCEDURE = semantic_version_ge, COMMUTATOR = <= ); CREATE OPERATOR === ( LEFTARG = box, RIGHTARG = box, FUNCTION = area_equal_function, COMMUTATOR = ===, NEGATOR = !==, RESTRICT = area_restriction_function, JOIN = area_join_function, HASHES, MERGES ); sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_operator.yml000066400000000000000000000055431503426445100255440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: af864a956638301b5fa8963592619d134b96ecc672ce013ee911fa0e8fc96a41 file: - statement: create_operator_statement: - keyword: CREATE - keyword: OPERATOR - commutator: '>' - commutator: '=' - bracketed: - start_bracket: ( - keyword: LEFTARG - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: semantic_version - comma: ',' - keyword: RIGHTARG - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: semantic_version - comma: ',' - keyword: PROCEDURE - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: semantic_version_ge - comma: ',' - keyword: COMMUTATOR - comparison_operator: raw_comparison_operator: '=' - commutator: < - commutator: '=' - end_bracket: ) - statement_terminator: ; - statement: create_operator_statement: - keyword: CREATE - keyword: OPERATOR - commutator: '=' - commutator: '=' - commutator: '=' - bracketed: - start_bracket: ( - keyword: LEFTARG - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: box - comma: ',' - keyword: RIGHTARG - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: box - comma: ',' - keyword: FUNCTION - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: area_equal_function - comma: ',' - keyword: COMMUTATOR - comparison_operator: raw_comparison_operator: '=' - commutator: '=' - commutator: '=' - commutator: '=' - comma: ',' - keyword: NEGATOR - comparison_operator: raw_comparison_operator: '=' - negator: '!' - negator: '=' - negator: '=' - comma: ',' - keyword: RESTRICT - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: area_restriction_function - comma: ',' - keyword: JOIN - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: area_join_function - comma: ',' - keyword: HASHES - comma: ',' - keyword: MERGES - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_policy.sql000066400000000000000000000011611503426445100251760ustar00rootroot00000000000000CREATE POLICY account_managers ON accounts TO current_user; CREATE POLICY account_managers ON sch.accounts AS permissive FOR ALL TO managers; CREATE POLICY account_managers ON accounts TO public, session_user; CREATE POLICY account_managers ON accounts WITH CHECK ( NOT accounts_is_excluded_full_name(full_name) ); CREATE POLICY emp_rls_policy ON employee FOR all TO public USING (ename=current_setting('rls.ename')); CREATE POLICY account_managers ON accounts WITH CHECK ( col > 10 ); CREATE POLICY account_managers ON accounts USING (username = current_user); sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_policy.yml000066400000000000000000000103441503426445100252030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
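# For example, after adding or changing a statement in create_policy.sql,
# this fixture (including the _hash value below) would be refreshed by
# running, from the repository root:
#
#   python test/generate_parse_fixture_yml.py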
_hash: 0bc9db2c03f65dc23689d69f3df2d39b398ce48bc4d63bd0dcd51491e8896b52 file: - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - object_reference: naked_identifier: current_user - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: - naked_identifier: sch - dot: . - naked_identifier: accounts - keyword: AS - keyword: permissive - keyword: FOR - keyword: ALL - keyword: TO - object_reference: naked_identifier: managers - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - object_reference: naked_identifier: public - comma: ',' - keyword: session_user - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: keyword: NOT function: function_name: function_name_identifier: accounts_is_excluded_full_name function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: full_name end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: emp_rls_policy - keyword: 'ON' - table_reference: naked_identifier: employee - keyword: FOR - keyword: all - keyword: TO - object_reference: naked_identifier: public - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: ename comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: current_setting function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'rls.ename'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: create_policy_statement: - keyword: CREATE - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: username comparison_operator: raw_comparison_operator: '=' bare_function: current_user end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_procedure.sql000066400000000000000000000006001503426445100256640ustar00rootroot00000000000000CREATE OR REPLACE PROCEDURE create_account ( _account_uuid UUID ) AS $$ BEGIN RETURN; END; $$ LANGUAGE plpgsql; CREATE PROCEDURE insert_data(a integer, b integer) LANGUAGE SQL AS $$ INSERT INTO tbl VALUES (a); INSERT INTO tbl VALUES (b); $$; CREATE PROCEDURE abc.cdf() LANGUAGE sql BEGIN ATOMIC WITH tbl2 AS ( SELECT a.apple FROM tbl1 a ) SELECT t.apple FROM 
tbl2 t ; END; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_procedure.yml000066400000000000000000000071511503426445100256760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c27dee8201bd74584feb07a8e9d3d0c9b16aef3bf9bb6b5a196a061db5428821 file: - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: create_account - function_parameter_list: bracketed: start_bracket: ( parameter: _account_uuid data_type: keyword: UUID end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$\nBEGIN\n RETURN;\nEND;\n$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - parameter: a - data_type: keyword: integer - comma: ',' - parameter: b - data_type: keyword: integer - end_bracket: ) - function_definition: language_clause: keyword: LANGUAGE naked_identifier: SQL keyword: AS quoted_literal: "$$\nINSERT INTO tbl VALUES (a);\nINSERT INTO tbl VALUES (b);\n\ $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: naked_identifier: abc dot: . function_name_identifier: cdf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - function_definition: - language_clause: keyword: LANGUAGE naked_identifier: sql - keyword: BEGIN - keyword: ATOMIC - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: apple from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: naked_identifier: a end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: apple from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 alias_expression: naked_identifier: t - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_publication.sql000066400000000000000000000030721503426445100262130ustar00rootroot00000000000000CREATE PUBLICATION abc; CREATE PUBLICATION abc FOR ALL TABLES; CREATE PUBLICATION abc FOR TABLE def; CREATE PUBLICATION abc FOR TABLE def, sch.ghi; CREATE PUBLICATION abc FOR TABLE def, TABLE sch.ghi; CREATE PUBLICATION abc FOR TABLE def*; CREATE PUBLICATION abc FOR TABLE a, TABLE aa, ab, ac, TABLE ONLY b, TABLE c*, TABLE ca*, cb*, TABLE ONLY (d), TABLE e (col1), TABLE f (col2, col3), TABLE g* (col4, col5), TABLE h WHERE (col6 > col7), TABLE i (col8, col9) WHERE (col10 > col11), TABLES IN SCHEMA j, TABLES IN SCHEMA k, TABLES IN SCHEMA CURRENT_SCHEMA, l, m, TABLES IN SCHEMA n, o, p; CREATE PUBLICATION abc FOR TABLE a, b WITH (publish = 'insert,update', publish_via_partition_root = TRUE); CREATE PUBLICATION abc FOR TABLE a, b WITH (publish_via_partition_root = TRUE); CREATE PUBLICATION abc FOR TABLE a, b WITH (publish = 'insert,update'); CREATE PUBLICATION abc WITH (publish = 'insert,update'); -- examples from https://www.postgresql.org/docs/15/sql-createpublication.html CREATE PUBLICATION mypublication FOR TABLE users, departments; CREATE PUBLICATION active_departments FOR TABLE departments WHERE (active IS TRUE); CREATE PUBLICATION alltables FOR ALL TABLES; CREATE PUBLICATION insert_only FOR TABLE mydata WITH (publish = 'insert'); CREATE PUBLICATION production_publication FOR TABLE users, departments, TABLES IN SCHEMA production; CREATE PUBLICATION sales_publication FOR TABLES IN SCHEMA marketing, sales; CREATE PUBLICATION users_filtered FOR TABLE users (user_id, firstname); sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_publication.yml000066400000000000000000000335421503426445100262220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6e1270cb4f018e96c5d840c29e7574e725d6959c44388f68cee92557afded9f0 file: - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: def - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: def - comma: ',' - publication_table: table_reference: - naked_identifier: sch - dot: . 
- naked_identifier: ghi - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: def - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: - naked_identifier: sch - dot: . - naked_identifier: ghi - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: def star: '*' - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: a - comma: ',' - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: aa - comma: ',' - publication_table: table_reference: naked_identifier: ab - comma: ',' - publication_table: table_reference: naked_identifier: ac - comma: ',' - publication_objects: keyword: TABLE publication_table: keyword: ONLY table_reference: naked_identifier: b - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: c star: '*' - comma: ',' - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: ca star: '*' - comma: ',' - publication_table: table_reference: naked_identifier: cb star: '*' - comma: ',' - publication_objects: keyword: TABLE publication_table: keyword: ONLY bracketed: start_bracket: ( table_reference: naked_identifier: d end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: e bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: f bracketed: - start_bracket: ( - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: g star: '*' bracketed: - start_bracket: ( - column_reference: naked_identifier: col4 - comma: ',' - column_reference: naked_identifier: col5 - end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: h keyword: WHERE bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col6 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: col7 end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: - table_reference: naked_identifier: i - bracketed: - start_bracket: ( - column_reference: naked_identifier: col8 - comma: ',' - column_reference: naked_identifier: col9 - end_bracket: ) - keyword: WHERE - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col10 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: col11 end_bracket: ) - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: j - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - 
schema_reference: naked_identifier: k - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - keyword: CURRENT_SCHEMA - comma: ',' - schema_reference: naked_identifier: l - comma: ',' - schema_reference: naked_identifier: m - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: n - comma: ',' - schema_reference: naked_identifier: o - comma: ',' - schema_reference: naked_identifier: p - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: a - comma: ',' - publication_table: table_reference: naked_identifier: b - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert,update'" - comma: ',' - definition_parameter: properties_naked_identifier: publish_via_partition_root comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: a - comma: ',' - publication_table: table_reference: naked_identifier: b - keyword: WITH - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish_via_partition_root comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: a - comma: ',' - publication_table: table_reference: naked_identifier: b - keyword: WITH - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert,update'" end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: WITH - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert,update'" end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: mypublication - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users - comma: ',' - publication_table: table_reference: naked_identifier: departments - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: active_departments - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: departments keyword: WHERE bracketed: start_bracket: ( expression: column_reference: naked_identifier: active keyword: IS boolean_literal: 'TRUE' 
end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: alltables - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: insert_only - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: mydata - keyword: WITH - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert'" end_bracket: ) - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: production_publication - keyword: FOR - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users - comma: ',' - publication_table: table_reference: naked_identifier: departments - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: production - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: sales_publication - keyword: FOR - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: marketing - comma: ',' - schema_reference: naked_identifier: sales - statement_terminator: ; - statement: create_publication_statement: - keyword: CREATE - keyword: PUBLICATION - publication_reference: naked_identifier: users_filtered - keyword: FOR - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: users bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: firstname - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_role.sql000066400000000000000000000015511503426445100246430ustar00rootroot00000000000000CREATE USER foo_role WITH SUPERUSER NOLOGIN REPLICATION IN GROUP foo_group; CREATE USER foo_role; CREATE USER frank WITH CONNECTION LIMIT 1; CREATE USER frank WITH IN ROLE frank; CREATE USER frank WITH SUPERUSER CREATEDB CREATEROLE; CREATE USER frank WITH INHERIT LOGIN REPLICATION BYPASSRLS; CREATE USER davide WITH PASSWORD 'jw8s0F4'; CREATE USER miriam WITH LOGIN PASSWORD 'jw8s0F4' VALID UNTIL '2005-01-01'; CREATE USER test WITH ENCRYPTED PASSWORD 'test'; CREATE ROLE foo_role WITH SUPERUSER NOLOGIN REPLICATION IN GROUP foo_group; CREATE ROLE foo_role; CREATE ROLE frank WITH CONNECTION LIMIT 1; CREATE ROLE frank WITH IN ROLE frank; CREATE ROLE frank WITH SUPERUSER CREATEDB CREATEROLE; CREATE ROLE frank WITH INHERIT LOGIN REPLICATION BYPASSRLS; CREATE ROLE davide WITH PASSWORD 'jw8s0F4'; CREATE ROLE miriam WITH LOGIN PASSWORD 'jw8s0F4' VALID UNTIL '2005-01-01'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_role.yml000066400000000000000000000116011503426445100246420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 162c81a447b5a349749a15d774311ec18efbe5efe1ad589c14563c12bb115ecf file: - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: foo_role - keyword: WITH - keyword: SUPERUSER - keyword: NOLOGIN - keyword: REPLICATION - keyword: IN - keyword: GROUP - role_reference: naked_identifier: foo_group - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: foo_role - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: frank - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '1' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: frank - keyword: WITH - keyword: IN - keyword: ROLE - role_reference: naked_identifier: frank - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: frank - keyword: WITH - keyword: SUPERUSER - keyword: CREATEDB - keyword: CREATEROLE - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: frank - keyword: WITH - keyword: INHERIT - keyword: LOGIN - keyword: REPLICATION - keyword: BYPASSRLS - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: davide - keyword: WITH - keyword: PASSWORD - quoted_literal: "'jw8s0F4'" - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: miriam - keyword: WITH - keyword: LOGIN - keyword: PASSWORD - quoted_literal: "'jw8s0F4'" - keyword: VALID - keyword: UNTIL - quoted_literal: "'2005-01-01'" - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: test - keyword: WITH - keyword: ENCRYPTED - keyword: PASSWORD - quoted_literal: "'test'" - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: foo_role - keyword: WITH - keyword: SUPERUSER - keyword: NOLOGIN - keyword: REPLICATION - keyword: IN - keyword: GROUP - role_reference: naked_identifier: foo_group - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: foo_role - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: frank - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '1' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: frank - keyword: WITH - keyword: IN - keyword: ROLE - role_reference: naked_identifier: frank - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: frank - keyword: WITH - keyword: SUPERUSER - keyword: CREATEDB - keyword: CREATEROLE - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: frank - keyword: WITH - keyword: INHERIT - keyword: LOGIN - keyword: REPLICATION - keyword: BYPASSRLS - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: 
naked_identifier: davide - keyword: WITH - keyword: PASSWORD - quoted_literal: "'jw8s0F4'" - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: miriam - keyword: WITH - keyword: LOGIN - keyword: PASSWORD - quoted_literal: "'jw8s0F4'" - keyword: VALID - keyword: UNTIL - quoted_literal: "'2005-01-01'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_schema.sql000066400000000000000000000003451503426445100251420ustar00rootroot00000000000000CREATE SCHEMA asdf; CREATE SCHEMA IF NOT EXISTS asdf; CREATE SCHEMA asdf AUTHORIZATION bob; CREATE SCHEMA AUTHORIZATION bob; CREATE SCHEMA IF NOT EXISTS asdf AUTHORIZATION bob; CREATE SCHEMA IF NOT EXISTS AUTHORIZATION bob; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_schema.yml000066400000000000000000000033731503426445100251500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fb7e7b5ffc233fb6fb4275a3a828a7ec400f50bc68a49014a36be30461ccd0c7 file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: asdf - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: asdf - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: asdf - keyword: AUTHORIZATION - role_reference: naked_identifier: bob - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: bob - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: asdf - keyword: AUTHORIZATION - role_reference: naked_identifier: bob - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: AUTHORIZATION - role_reference: naked_identifier: bob - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_sequence.sql000066400000000000000000000007541503426445100255160ustar00rootroot00000000000000CREATE SEQUENCE foo; CREATE SEQUENCE foo AS integer; CREATE SEQUENCE foo INCREMENT BY 3; CREATE SEQUENCE foo MINVALUE 5 NO MAXVALUE; CREATE SEQUENCE foo NO MINVALUE MAXVALUE 12; CREATE SEQUENCE foo INCREMENT 5 START WITH 8 CACHE 4; CREATE SEQUENCE foo NO CYCLE; CREATE SEQUENCE foo OWNED BY NONE; CREATE SEQUENCE foo OWNED BY my_table.my_column; CREATE TEMP SEQUENCE IF NOT EXISTS foo; CREATE TEMPORARY SEQUENCE foo; CREATE SEQUENCE derp INCREMENT BY -5 MINVALUE +50 MAXVALUE -5;; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_sequence.yml000066400000000000000000000102251503426445100255120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a85996eb46a2024fb4d0ea8adbb883763b1842165a6ccde742dd7eb24de494b3
file:
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: AS
        data_type:
          keyword: integer
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: INCREMENT
      - keyword: BY
      - numeric_literal: '3'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: MINVALUE
        numeric_literal: '5'
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MAXVALUE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MINVALUE
    - create_sequence_options_segment:
        keyword: MAXVALUE
        numeric_literal: '12'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: INCREMENT
        numeric_literal: '5'
    - create_sequence_options_segment:
      - keyword: START
      - keyword: WITH
      - numeric_literal: '8'
    - create_sequence_options_segment:
        keyword: CACHE
        numeric_literal: '4'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: CYCLE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - keyword: NONE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - column_reference:
        - naked_identifier: my_table
        - dot: .
        - naked_identifier: my_column
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: TEMP
    - keyword: SEQUENCE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: TEMPORARY
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: derp
    - create_sequence_options_segment:
      - keyword: INCREMENT
      - keyword: BY
      - sign_indicator: '-'
      - numeric_literal: '5'
    - create_sequence_options_segment:
        keyword: MINVALUE
        sign_indicator: +
        numeric_literal: '50'
    - create_sequence_options_segment:
        keyword: MAXVALUE
        sign_indicator: '-'
        numeric_literal: '5'
- statement_terminator: ;
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_server.sql000066400000000000000000000004631503426445100252110ustar00rootroot00000000000000
CREATE SERVER test FOREIGN DATA WRAPPER postgres_fdw;
CREATE SERVER IF NOT EXISTS test FOREIGN DATA WRAPPER oracle_fdw;
CREATE SERVER test TYPE 'test' VERSION '1.0' FOREIGN DATA WRAPPER postgres_fdw;
CREATE SERVER test FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'foo', dbname 'foodb', port '5432');
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_server.yml000066400000000000000000000040631503426445100252130ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7754ff4e3a7fbc34a35af1df6c1417baac7d74ca5e946b8d99cd7d6dff991a4b
file:
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: oracle_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: TYPE
    - quoted_literal: "'test'"
    - keyword: VERSION
    - version_identifier:
        quoted_literal: "'1.0'"
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - naked_identifier_all: host
      - quoted_literal: "'foo'"
      - comma: ','
      - naked_identifier_all: dbname
      - quoted_literal: "'foodb'"
      - comma: ','
      - naked_identifier_all: port
      - quoted_literal: "'5432'"
      - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_statistics.sql000066400000000000000000000004311503426445100260700ustar00rootroot00000000000000
CREATE STATISTICS s3 (ndistinct) ON date_trunc('month', a), date_trunc('day', a) FROM t3;
CREATE STATISTICS my_statistic (dependencies) ON foo, bar FROM baz;
CREATE STATISTICS IF NOT EXISTS s3 (ndistinct, mcv, dependencies) ON date_trunc('month', a), date_trunc('day', a) FROM t3;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_statistics.yml000066400000000000000000000065661503426445100261050ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 246fe75508ab66a1f76e7d1699f023b674c072ffb372bb09bb733a27565958e6
file:
- statement:
    create_statistics_statement:
    - keyword: CREATE
    - keyword: STATISTICS
    - statistics_reference:
        naked_identifier: s3
    - bracketed:
        start_bracket: (
        keyword: ndistinct
        end_bracket: )
    - keyword: 'ON'
    - expression:
        function:
          function_name:
            function_name_identifier: date_trunc
          function_contents:
            bracketed:
            - start_bracket: (
            - expression:
                quoted_literal: "'month'"
            - comma: ','
            - expression:
                column_reference:
                  naked_identifier: a
            - end_bracket: )
    - comma: ','
    - expression:
        function:
          function_name:
            function_name_identifier: date_trunc
          function_contents:
            bracketed:
            - start_bracket: (
            - expression:
                quoted_literal: "'day'"
            - comma: ','
            - expression:
                column_reference:
                  naked_identifier: a
            - end_bracket: )
    - keyword: FROM
    - table_reference:
        naked_identifier: t3
- statement_terminator: ;
- statement:
    create_statistics_statement:
    - keyword: CREATE
    - keyword: STATISTICS
    - statistics_reference:
        naked_identifier: my_statistic
    - bracketed:
        start_bracket: (
        keyword: dependencies
        end_bracket: )
    - keyword: 'ON'
    - column_reference:
        naked_identifier: foo
    - comma: ','
    - column_reference:
        naked_identifier: bar
    - keyword: FROM
    - table_reference:
        naked_identifier: baz
- statement_terminator: ;
- statement:
    create_statistics_statement:
    - keyword: CREATE
    - keyword: STATISTICS
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - statistics_reference:
        naked_identifier: s3
    - bracketed:
      - start_bracket: (
      - keyword: ndistinct
      - comma: ','
      - keyword: mcv
      - comma: ','
      - keyword: dependencies
      - end_bracket: )
    - keyword: 'ON'
    - expression:
        function:
          function_name:
            function_name_identifier: date_trunc
          function_contents:
            bracketed:
            - start_bracket: (
            - expression:
                quoted_literal: "'month'"
            - comma: ','
            - expression:
                column_reference:
                  naked_identifier: a
            - end_bracket: )
    - comma: ','
    - expression:
        function:
          function_name:
            function_name_identifier: date_trunc
          function_contents:
            bracketed:
            - start_bracket: (
            - expression:
                quoted_literal: "'day'"
            - comma: ','
            - expression:
                column_reference:
                  naked_identifier: a
            - end_bracket: )
    - keyword: FROM
    - table_reference:
        naked_identifier: t3
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_subscription.sql000066400000000000000000000010101503426445100264230ustar00rootroot00000000000000
CREATE SUBSCRIPTION my_subscription
CONNECTION 'publishers_uri'
PUBLICATION my_publication
WITH (
    binary = true,
    copy_data = true,
    create_slot = true,
    run_as_owner = false,
    slot_name = 'my_slot_name',
    streaming = 'parallel'
);

CREATE SUBSCRIPTION mysub
CONNECTION 'host=192.168.1.50 port=5432 user=foo dbname=foodb'
PUBLICATION mypublication, insert_only;

CREATE SUBSCRIPTION mysub
CONNECTION 'host=192.168.1.50 port=5432 user=foo dbname=foodb'
PUBLICATION insert_only
WITH (enabled = false);
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_subscription.yml000066400000000000000000000063441503426445100264330ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a75275444ffaddd17351f61933b8914d8fb837cc111032d2ee00bb2d40920a86
file:
- statement:
    create_subscription:
    - keyword: CREATE
    - keyword: SUBSCRIPTION
    - subscription_reference:
        naked_identifier: my_subscription
    - keyword: CONNECTION
    - quoted_literal: "'publishers_uri'"
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: my_publication
    - keyword: WITH
    - definition_parameters:
        bracketed:
        - start_bracket: (
        - definition_parameter:
            properties_naked_identifier: binary
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'true'
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: copy_data
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'true'
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: create_slot
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'true'
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: run_as_owner
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'false'
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: slot_name
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'my_slot_name'"
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: streaming
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'parallel'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_subscription:
    - keyword: CREATE
    - keyword: SUBSCRIPTION
    - subscription_reference:
        naked_identifier: mysub
    - keyword: CONNECTION
    - quoted_literal: "'host=192.168.1.50 port=5432 user=foo dbname=foodb'"
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: mypublication
    - comma: ','
    - publication_reference:
        naked_identifier: insert_only
- statement_terminator: ;
- statement:
    create_subscription:
    - keyword: CREATE
    - keyword: SUBSCRIPTION
    - subscription_reference:
        naked_identifier: mysub
    - keyword: CONNECTION
    - quoted_literal: "'host=192.168.1.50 port=5432 user=foo dbname=foodb'"
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: insert_only
    - keyword: WITH
    - definition_parameters:
        bracketed:
          start_bracket: (
          definition_parameter:
            properties_naked_identifier: enabled
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'false'
          end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_table.sql000066400000000000000000000233721503426445100247760ustar00rootroot00000000000000
-- Test qualifying datatype with schema
CREATE TABLE counters (
    my_type public.MY_TYPE
);

--CREATE TABLE films (
--    code char(5) CONSTRAINT firstkey PRIMARY KEY,
--    title varchar(40) NOT NULL,
--    did integer NOT NULL,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute
--);

CREATE TABLE distributors (
    did integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
    name varchar(40) NOT NULL CHECK (name <> '')
);

CREATE TABLE array_int (
    vector int[][]
);

--CREATE TABLE films (
--    code char(5),
--    title varchar(40),
--    did integer,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute,
--    CONSTRAINT production UNIQUE(date_prod)
--);

CREATE TABLE distributors (
    did integer CHECK (did > 100),
    name varchar(40),
    long_varying char varying(100)
);

CREATE TABLE distributors (
    did integer,
    name varchar(40),
    CONSTRAINT con1 CHECK (did > 100 AND name <> '')
);

--CREATE TABLE films (
--    code char(5),
--    title varchar(40),
--    did integer,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute,
--    CONSTRAINT code_title PRIMARY KEY(code,title)
--);
CREATE TABLE distributors (
    did integer,
    name varchar(40),
    PRIMARY KEY(did)
);

CREATE TABLE distributors (
    did integer PRIMARY KEY,
    name varchar(40)
);

CREATE TABLE distributors (
    name varchar(40) DEFAULT 'Luso Films',
    did integer DEFAULT nextval('distributors_serial'),
    modtime timestamp DEFAULT current_timestamp
);

CREATE TABLE distributors (
    did integer CONSTRAINT no_null NOT NULL,
    name varchar(40) NOT NULL
);

CREATE TABLE distributors (
    did integer,
    name varchar(40) UNIQUE
);

CREATE TABLE distributors (
    did integer,
    name varchar(40),
    UNIQUE(name)
);

CREATE TABLE distributors (
    did integer,
    name varchar(40),
    UNIQUE(name) WITH (fillfactor=70)
) WITH (fillfactor=70);

--CREATE TABLE circles (
--    c circle,
--    EXCLUDE USING gist (c WITH &&)
--);

CREATE TABLE cinemas (
    id serial,
    name text,
    location text
) TABLESPACE diskvol1;

CREATE TYPE employee_type AS (name text, salary numeric);

CREATE TABLE employees OF employee_type (
    PRIMARY KEY (name),
    salary WITH OPTIONS DEFAULT 1000
);

CREATE TABLE measurement (
    logdate date not null,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (logdate);

CREATE TABLE measurement_year_month (
    logdate date not null,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (EXTRACT(YEAR FROM logdate), EXTRACT(MONTH FROM logdate));

CREATE TABLE cities (
    city_id bigserial not null,
    name text not null,
    population bigint
) PARTITION BY LIST (left(lower(name), 1));

CREATE TABLE orders (
    order_id bigint not null,
    cust_id bigint not null,
    status text
) PARTITION BY HASH (order_id);

CREATE TABLE measurement_y2016m07 PARTITION OF measurement (
    unitsales DEFAULT 0
) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01');

CREATE TABLE measurement_ym_older PARTITION OF measurement_year_month
FOR VALUES FROM (MINVALUE, MINVALUE) TO (2016, 11);

CREATE TABLE measurement_ym_y2016m11 PARTITION OF measurement_year_month
FOR VALUES FROM (2016, 11) TO (2016, 12);

CREATE TABLE measurement_ym_y2016m12 PARTITION OF measurement_year_month
FOR VALUES FROM (2016, 12) TO (2017, 01);

CREATE TABLE measurement_ym_y2017m01 PARTITION OF measurement_year_month
FOR VALUES FROM (2017, 01) TO (2017, 02);

CREATE TABLE cities_ab PARTITION OF cities (
    CONSTRAINT city_id_nonzero CHECK (city_id != 0)
) FOR VALUES IN ('a', 'b');

CREATE TABLE cities_ab PARTITION OF cities (
    CONSTRAINT city_id_nonzero CHECK (city_id != 0)
) FOR VALUES IN ('a', 'b') PARTITION BY RANGE (population);

CREATE TABLE cities_ab_10000_to_100000 PARTITION OF cities_ab
FOR VALUES FROM (10000) TO (100000);

CREATE TABLE orders_p1 PARTITION OF orders
FOR VALUES WITH (MODULUS 4, REMAINDER 0);

CREATE TABLE orders_p2 PARTITION OF orders
FOR VALUES WITH (MODULUS 4, REMAINDER 1);

CREATE TABLE orders_p3 PARTITION OF orders
FOR VALUES WITH (MODULUS 4, REMAINDER 2);

CREATE TABLE orders_p4 PARTITION OF orders
FOR VALUES WITH (MODULUS 4, REMAINDER 3);

CREATE TABLE cities_partdef PARTITION OF cities DEFAULT;

CREATE UNLOGGED TABLE staging (
    event_type INTEGER
    , event_time TIMESTAMP
    , user_email VARCHAR
    , phone_number VARCHAR
    , processing_date DATE
    , PRIMARY KEY (event_type, event_time, user_email, phone_number, processing_date)
);

CREATE TABLE measurement (
    city_id int NOT NULL,
    logdate date NOT NULL,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (logdate);

CREATE TABLE public.public (
    id serial NOT NULL,
    name text NOT NULL,
    group_name text NULL,
    cluster_id int8 NULL,
    date_created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
    date_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
    operation_id int4 NOT NULL DEFAULT '-1'::integer
);

CREATE TABLE main.test_table (
"col1" character varying(40) NOT NULL, "col2" double precision ); CREATE TABLE groups ( group_id INTEGER PRIMARY KEY generated BY DEFAULT AS IDENTITY ); CREATE TABLE users ( user_id INTEGER PRIMARY KEY generated BY DEFAULT AS IDENTITY, group_id INTEGER REFERENCES groups (group_id) ON DELETE CASCADE, domain_id INTEGER REFERENCES groups (group_id) ON UPDATE RESTRICT, other_id INTEGER REFERENCES groups (group_id) MATCH SIMPLE ); CREATE TABLE orders ( id bigint NOT NULL DEFAULT NEXTVAL('orders_id_seq'::regclass), constraint_collate_constraints text UNIQUE COLLATE numeric NOT NULL PRIMARY KEY, constraints_collate text NOT NULL UNIQUE COLLATE numeric, collate_constraints text COLLATE numeric NOT NULL UNIQUE, nulls_distinct text UNIQUE NULLS DISTINCT, nulls_not_distinct text UNIQUE NULLS NOT DISTINCT, everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace COLLATE numeric ); CREATE TABLE primary_key_options ( everything int PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL ); -- Use non-reserved `usage` word as a table identifier CREATE TABLE IF NOT EXISTS quotas.usage(foo int); -- Use non-reserved `usage` word as a column identifier CREATE TABLE IF NOT EXISTS quotas.my_table(usage int); -- NOT NULL both before and after a default constraint CREATE TABLE with_constraints1 ( col_1 boolean NOT NULL DEFAULT false ); CREATE TABLE with_constraints2 ( col_1 boolean DEFAULT false NOT NULL ); -- default constraint expression CREATE TABLE with_constraints3 ( col_1 int DEFAULT (1 + 2) * (3 + 4) NOT NULL ); CREATE TABLE with_constraints33 ( col_1 int DEFAULT 1 + 2 * 3 + 4 NOT NULL ); CREATE TABLE with_constraints4 ( col_1 int DEFAULT (1 + 2 * 3 + 4) NOT NULL ); CREATE TABLE with_constraints5 ( col_1 bool DEFAULT (1 NOT IN (3, 4)) NOT NULL ); CREATE TABLE with_constraints6 ( col_1 bool NOT NULL DEFAULT (5 NOT IN (5, 6)) ); CREATE TABLE test_with_storage_param ( col_1 boolean ) WITH (autovacuum_enabled=true); CREATE TABLE test_with_storage_params ( col_1 boolean ) WITH (autovacuum_enabled=true, vacuum_truncate=false); CREATE TABLE tbl ( -- All forms of character data types listed at: -- https://www.postgresql.org/docs/current/datatype-character.html col_char_varying_unlimited character varying, col_char_varying_limited character varying(50), col_varchar_unlimited varchar, col_varchar_limited varchar(50), col_character_default character, col_character_specified character(50), col_char_default char, col_char_specified character(50), col_text text, -- some types you'll find in pg_catalog col_system_char "char", -- this is NOT the same as unquoted char col_name name ); -- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints -- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage CREATE TABLE reservation ( during tsrange, EXCLUDE USING gist (during WITH &&) ); CREATE TABLE room_reservation ( room text, during tsrange, EXCLUDE USING gist (room WITH =, during WITH &&) ); -- all the gnarly options: not every option is valid, but this will parse successfully on PG 15. 
CREATE TABLE no_using (
    field text,
    EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT
);

CREATE TABLE many_options (
    field text,
    EXCLUDE USING gist (
        one WITH =,
        nulls_opclass nulls WITH =,
        nulls_last NULLS LAST WITH =,
        two COLLATE "en-US" opclass (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) ASC NULLS FIRST WITH =,
        (two + 5) WITH =,
        myfunc(a, b) WITH =,
        myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =,
        only_opclass opclass WITH =,
        desc_order DESC WITH =
    )
    INCLUDE (a, b)
    WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC)
    USING INDEX TABLESPACE tblspc
    WHERE (field != 'def')
    DEFERRABLE INITIALLY DEFERRED
);

CREATE TABLE example_table () INHERITS (parent_table);

CREATE TABLE IF NOT EXISTS table2(
    col1 int,
    col2 int NOT NULL,
    col3 int,
    FOREIGN KEY (col1, col2) REFERENCES table1 (col1, col2) ON DELETE SET NULL (col1)
);

CREATE TABLE IF NOT EXISTS table2(
    col1 int,
    col2 int NOT NULL,
    col3 int,
    FOREIGN KEY (col1, col2) REFERENCES table1 (col1, col2) ON DELETE SET DEFAULT (col1)
);

CREATE TABLE myschema.user (
    user_id bigint PRIMARY KEY,
    name varchar(40)
);

CREATE TABLE my_table (
    interval bigint
);
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_table.yml000066400000000000000000002100421503426445100247700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 35d9fd35988a7ef7adf8b121860a91b2b7e45eceaf4ccc223800d9136828a9af file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: counters - bracketed: start_bracket: ( column_reference: naked_identifier: my_type data_type: naked_identifier: public dot: .
data_type_identifier: MY_TYPE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: name comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "''" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: array_int - bracketed: start_bracket: ( column_reference: naked_identifier: vector data_type: - keyword: int - start_square_bracket: '[' - end_square_bracket: ']' - start_square_bracket: '[' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: did comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' end_bracket: ) - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - column_reference: naked_identifier: long_varying - data_type: - keyword: char - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: con1 - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: did - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '100' - binary_operator: AND - column_reference: naked_identifier: name - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - quoted_literal: "''" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_constraint: - 
keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: did end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: keyword: DEFAULT quoted_literal: "'Luso Films'" - comma: ',' - column_reference: naked_identifier: did - data_type: keyword: integer - column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: nextval function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'distributors_serial'" end_bracket: ) - comma: ',' - column_reference: naked_identifier: modtime - data_type: datetime_type_identifier: keyword: timestamp - column_constraint_segment: keyword: DEFAULT bare_function: current_timestamp - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: no_null - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: keyword: UNIQUE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: name end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: distributors - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - data_type: keyword: integer - comma: ',' - column_reference: 
naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: name end_bracket: ) index_parameters: keyword: WITH definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cinemas - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: serial - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: location - data_type: keyword: text - end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: diskvol1 - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: employee_type - keyword: AS - bracketed: - start_bracket: ( - word: name - word: text - comma: ',' - word: salary - word: numeric - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: employees - keyword: OF - parameter: employee_type - bracketed: - start_bracket: ( - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: name end_bracket: ) - comma: ',' - column_reference: naked_identifier: salary - keyword: WITH - keyword: OPTIONS - column_constraint_segment: keyword: DEFAULT numeric_literal: '1000' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement - bracketed: - start_bracket: ( - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: peaktemp - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: unitsales - data_type: keyword: int - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: logdate end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_year_month - bracketed: - start_bracket: ( - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: peaktemp - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: unitsales - data_type: keyword: int - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: - start_bracket: ( - function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: YEAR keyword: FROM expression: column_reference: naked_identifier: logdate 
end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MONTH keyword: FROM expression: column_reference: naked_identifier: logdate end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cities - bracketed: - start_bracket: ( - column_reference: naked_identifier: city_id - data_type: keyword: bigserial - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: text - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: population - data_type: keyword: bigint - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: LIST - bracketed: start_bracket: ( function: function_name: function_name_identifier: left function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: name end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_id - data_type: keyword: bigint - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: cust_id - data_type: keyword: bigint - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_reference: naked_identifier: status - data_type: keyword: text - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: order_id end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_y2016m07 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: measurement - bracketed: start_bracket: ( column_reference: naked_identifier: unitsales column_constraint_segment: keyword: DEFAULT numeric_literal: '0' end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: quoted_literal: "'2016-07-01'" end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: quoted_literal: "'2016-08-01'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_ym_older - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: measurement_year_month - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: MINVALUE - comma: ',' - expression: column_reference: naked_identifier: MINVALUE - end_bracket: ) - keyword: TO - bracketed: - start_bracket: ( - expression: numeric_literal: '2016' - comma: ',' - expression: numeric_literal: '11' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_ym_y2016m11 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: measurement_year_month - 
keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: - start_bracket: ( - expression: numeric_literal: '2016' - comma: ',' - expression: numeric_literal: '11' - end_bracket: ) - keyword: TO - bracketed: - start_bracket: ( - expression: numeric_literal: '2016' - comma: ',' - expression: numeric_literal: '12' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_ym_y2016m12 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: measurement_year_month - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: - start_bracket: ( - expression: numeric_literal: '2016' - comma: ',' - expression: numeric_literal: '12' - end_bracket: ) - keyword: TO - bracketed: - start_bracket: ( - expression: numeric_literal: '2017' - comma: ',' - expression: numeric_literal: '01' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement_ym_y2017m01 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: measurement_year_month - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: - start_bracket: ( - expression: numeric_literal: '2017' - comma: ',' - expression: numeric_literal: '01' - end_bracket: ) - keyword: TO - bracketed: - start_bracket: ( - expression: numeric_literal: '2017' - comma: ',' - expression: numeric_literal: '02' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cities_ab - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: cities - bracketed: start_bracket: ( table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: city_id_nonzero - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: city_id comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cities_ab - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: cities - bracketed: start_bracket: ( table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: city_id_nonzero - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: city_id comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) end_bracket: ) - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: IN bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: population end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cities_ab_10000_to_100000 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: cities_ab - keyword: FOR - keyword: VALUES - partition_bound_spec: - keyword: FROM - bracketed: start_bracket: ( expression: numeric_literal: '10000' end_bracket: ) - keyword: TO - bracketed: start_bracket: ( expression: numeric_literal: '100000' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_p1 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: orders - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '4' - comma: ',' - keyword: REMAINDER - numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_p2 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: orders - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '4' - comma: ',' - keyword: REMAINDER - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_p3 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: orders - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '4' - comma: ',' - keyword: REMAINDER - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_p4 - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: orders - keyword: FOR - keyword: VALUES - partition_bound_spec: keyword: WITH bracketed: - start_bracket: ( - keyword: MODULUS - numeric_literal: '4' - comma: ',' - keyword: REMAINDER - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cities_partdef - keyword: PARTITION - keyword: OF - table_reference: naked_identifier: cities - keyword: DEFAULT - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: UNLOGGED - keyword: TABLE - table_reference: naked_identifier: staging - bracketed: - start_bracket: ( - column_reference: naked_identifier: event_type - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: event_time - data_type: datetime_type_identifier: keyword: TIMESTAMP - comma: ',' - column_reference: naked_identifier: user_email - data_type: keyword: VARCHAR - comma: ',' - column_reference: naked_identifier: phone_number - data_type: keyword: VARCHAR - comma: ',' - column_reference: 
naked_identifier: processing_date - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: event_type - comma: ',' - column_reference: naked_identifier: event_time - comma: ',' - column_reference: naked_identifier: user_email - comma: ',' - column_reference: naked_identifier: phone_number - comma: ',' - column_reference: naked_identifier: processing_date - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: measurement - bracketed: - start_bracket: ( - column_reference: naked_identifier: city_id - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: logdate - data_type: datetime_type_identifier: keyword: date - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: peaktemp - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: unitsales - data_type: keyword: int - end_bracket: ) - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: logdate end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: public - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: serial - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: text - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: group_name - data_type: keyword: text - column_constraint_segment: keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: cluster_id - data_type: keyword: int8 - column_constraint_segment: keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: date_created - data_type: datetime_type_identifier: keyword: timestamp - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - column_reference: naked_identifier: date_updated - data_type: datetime_type_identifier: keyword: timestamp - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - column_reference: naked_identifier: operation_id - data_type: keyword: int4 - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT cast_expression: quoted_literal: "'-1'" casting_operator: '::' data_type: keyword: integer - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: main - dot: . 
- naked_identifier: test_table - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"col1"' - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '"col2"' - data_type: - keyword: double - keyword: precision - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: groups - bracketed: - start_bracket: ( - column_reference: naked_identifier: group_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: generated - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: generated - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - comma: ',' - column_reference: naked_identifier: group_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - comma: ',' - column_reference: naked_identifier: domain_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: RESTRICT - comma: ',' - column_reference: naked_identifier: other_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: MATCH - keyword: SIMPLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: bigint - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: NEXTVAL function_contents: bracketed: start_bracket: ( expression: cast_expression: quoted_literal: "'orders_id_seq'" casting_operator: '::' data_type: data_type_identifier: regclass end_bracket: ) - comma: ',' - column_reference: naked_identifier: constraint_collate_constraints - data_type: keyword: text - column_constraint_segment: keyword: UNIQUE - keyword: COLLATE - collation_reference: naked_identifier: numeric - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: constraints_collate - data_type: keyword: text - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - keyword: COLLATE - collation_reference: naked_identifier: numeric - comma: ',' - 
column_reference: naked_identifier: collate_constraints - data_type: keyword: text - keyword: COLLATE - collation_reference: naked_identifier: numeric - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: nulls_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - comma: ',' - column_reference: naked_identifier: nulls_not_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: NOT - keyword: DISTINCT - comma: ',' - column_reference: naked_identifier: everything - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspace - keyword: COLLATE - collation_reference: naked_identifier: numeric - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: primary_key_options - bracketed: - start_bracket: ( - column_reference: naked_identifier: everything - data_type: keyword: int - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspace - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: quotas - dot: . - naked_identifier: usage - bracketed: start_bracket: ( column_reference: naked_identifier: foo data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: quotas - dot: . 
- naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: usage data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: boolean - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: boolean - column_constraint_segment: keyword: DEFAULT boolean_literal: 'false' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints3 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: - bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - binary_operator: '*' - bracketed: start_bracket: ( expression: - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints33 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints4 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints5 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: bool - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - keyword: NOT - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_bracket: ) end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints6 - bracketed: - 
start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: bool - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '5' - keyword: NOT - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_with_storage_param - bracketed: start_bracket: ( column_reference: naked_identifier: col_1 data_type: keyword: boolean end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: autovacuum_enabled comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_with_storage_params - bracketed: start_bracket: ( column_reference: naked_identifier: col_1 data_type: keyword: boolean end_bracket: ) - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: autovacuum_enabled comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' - comma: ',' - relation_option: properties_naked_identifier: vacuum_truncate comparison_operator: raw_comparison_operator: '=' boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tbl - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_char_varying_unlimited - data_type: - keyword: character - keyword: varying - comma: ',' - column_reference: naked_identifier: col_char_varying_limited - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_varchar_unlimited - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: col_varchar_limited - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_character_default - data_type: keyword: character - comma: ',' - column_reference: naked_identifier: col_character_specified - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_char_default - data_type: keyword: char - comma: ',' - column_reference: naked_identifier: col_char_specified - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_text - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: col_system_char - data_type: quoted_identifier: '"char"' - comma: ',' - column_reference: naked_identifier: col_name - data_type: data_type_identifier: name - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: reservation - bracketed: start_bracket: ( column_reference: naked_identifier: during data_type: keyword: tsrange comma: ',' table_constraint: - 
keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: room_reservation - bracketed: - start_bracket: ( - column_reference: naked_identifier: room - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: during - data_type: keyword: tsrange - comma: ',' - table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: room keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: no_using - bracketed: start_bracket: ( column_reference: naked_identifier: field data_type: keyword: text comma: ',' table_constraint: - keyword: EXCLUDE - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: field keyword: WITH comparison_operator: raw_comparison_operator: '=' end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - keyword: 'NO' - keyword: INHERIT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: many_options - bracketed: start_bracket: ( column_reference: naked_identifier: field data_type: keyword: text comma: ',' table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: one keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_opclass index_element_options: operator_class_reference: naked_identifier: nulls keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_last index_element_options: - keyword: NULLS - keyword: LAST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: two index_element_options: - keyword: COLLATE - collation_reference: quoted_identifier: '"en-US"' - operator_class_reference: naked_identifier: opclass - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . 
- properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: ASC - keyword: NULLS - keyword: FIRST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: bracketed: start_bracket: ( expression: column_reference: naked_identifier: two binary_operator: + numeric_literal: '5' end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc_opclass function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) index_element_options: operator_class_reference: naked_identifier: fop relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: only_opclass index_element_options: operator_class_reference: naked_identifier: opclass keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: desc_order index_element_options: keyword: DESC keyword: WITH comparison_operator: raw_comparison_operator: '=' - end_bracket: ) - index_parameters: - keyword: INCLUDE - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: idx_num comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - definition_parameter: properties_naked_identifier: idx_str comparison_operator: raw_comparison_operator: '=' quoted_literal: "'idx_value'" - comma: ',' - definition_parameter: - properties_naked_identifier: idx_kw - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: DESC - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspc - keyword: WHERE - bracketed: start_bracket: ( expression: column_reference: naked_identifier: field comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' quoted_literal: "'def'" end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: start_bracket: ( end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: parent_table end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: int - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: 'NULL' - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: int - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: int - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: DEFAULT - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . 
- naked_identifier: user - bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - data_type: keyword: bigint - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: interval data_type: keyword: bigint end_bracket: ) - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_table_as.sql
CREATE TEMP TABLE t1 AS (
    SELECT something FROM t2
);

CREATE TEMP TABLE t1 AS
SELECT something FROM t2
;

CREATE TEMPORARY TABLE t1 AS
SELECT something FROM t2
;

CREATE TABLE t1 AS (
    SELECT something FROM t2
);

CREATE TABLE t1 AS
SELECT something FROM t2
;

CREATE TABLE IF NOT EXISTS t1 AS
SELECT something FROM t2
;

CREATE TABLE t1 ON COMMIT DELETE ROWS AS
SELECT something FROM t2
;

CREATE TABLE t1 ON COMMIT PRESERVE ROWS AS
SELECT something FROM t2
;

CREATE TABLE t1 ON COMMIT DROP AS
SELECT something FROM t2
;

CREATE TABLE t1 AS (
    SELECT something FROM t2
) WITH NO DATA
;

CREATE TABLE t1 AS
SELECT something FROM t2
WITH NO DATA
;

CREATE TABLE t1 AS (
    SELECT something FROM t2
) WITH DATA
;

CREATE TABLE t1 AS
SELECT something FROM t2
WITH DATA
;

CREATE UNLOGGED TABLE t1 AS
SELECT something FROM t2
;

CREATE GLOBAL TEMP TABLE t1 AS
SELECT something FROM t2
;

CREATE LOCAL TEMP TABLE t1 AS
SELECT something FROM t2
;

CREATE TABLE t1 USING method AS
SELECT something FROM t2
;

CREATE TABLE t1 WITHOUT OIDS AS
SELECT something FROM t2
;

CREATE TABLE t1 (c1, c2, c3) AS
VALUES ('val1', 'val2', 'val3'), ('val4', 'val5', 'val6')
;

CREATE TABLE t1 AS
TABLE t2
;

CREATE TABLE t1 AS
EXECUTE func()
;

CREATE TABLE t1 TABLESPACE ts AS
SELECT something FROM t2
;

CREATE TABLE t1 WITH (val=70) AS
SELECT something FROM t2
;

create temp table t1 with (autovacuum_enabled = true, toast_tuple_target = 123, vacuum_index_cleanup = false) as
select
    column_1
    , column_2
    , column_3
from tablename;

create temp table a_new_table with (appendoptimized = true, compresstype = zstd) as
select
    column_1
    , column_2
    , column_3
from schema.tablename
group by 1, 2, 3;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_table_as.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dffcd1a729553604d7ad2e9972c6527e81b9bd6610353e235fa29ba8867b9e2d file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: DROP - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: UNLOGGED - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: LOCAL - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: 
something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: USING - parameter: method - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: WITHOUT - keyword: OIDS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: AS - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'val1'" - comma: ',' - expression: quoted_literal: "'val2'" - comma: ',' - expression: quoted_literal: "'val3'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'val4'" - comma: ',' - expression: quoted_literal: "'val5'" - comma: ',' - expression: quoted_literal: "'val6'" - end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - keyword: TABLE - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - keyword: EXECUTE - function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: TABLESPACE - tablespace_reference: naked_identifier: ts - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: WITH - bracketed: start_bracket: ( parameter: val comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: temp - keyword: table - table_reference: naked_identifier: t1 - keyword: with - bracketed: - start_bracket: ( - parameter: autovacuum_enabled - 
comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: toast_tuple_target - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '123' - comma: ',' - parameter: vacuum_index_cleanup - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tablename - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: temp - keyword: table - table_reference: naked_identifier: a_new_table - keyword: with - bracketed: - start_bracket: ( - parameter: appendoptimized - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: compresstype - comparison_operator: raw_comparison_operator: '=' - naked_identifier: zstd - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: tablename groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_trigger.sql
CREATE TRIGGER foo AFTER INSERT ON bar EXECUTE FUNCTION proc(args);

CREATE TRIGGER foo BEFORE INSERT on bar EXECUTE FUNCTION proc(args);

CREATE TRIGGER foo AFTER UPDATE OF bar, baz ON bar EXECUTE FUNCTION proc(args);

CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY DEFERRED FOR EACH STATEMENT EXECUTE FUNCTION proc(args);

CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY DEFERRED FOR EACH STATEMENT EXECUTE FUNCTION schema.proc(args);

CREATE TRIGGER foo BEFORE INSERT ON bar WHEN (a=b) EXECUTE FUNCTION proc(args);

CREATE OR REPLACE CONSTRAINT TRIGGER foo BEFORE INSERT ON bar EXECUTE FUNCTION proc(args);

CREATE TRIGGER foo BEFORE INSERT ON bar REFERENCING OLD TABLE as old_table NEW TABLE AS new_table EXECUTE PROCEDURE proc(args);

CREATE TRIGGER check_update BEFORE INSERT OR UPDATE ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();

CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance, transactions ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();

CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance, transactions OR TRUNCATE ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();

CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();

CREATE TRIGGER check_update BEFORE UPDATE ON accounts FOR EACH ROW WHEN (OLD.balance IS DISTINCT FROM NEW.balance) EXECUTE FUNCTION check_account_update();

--CREATE TRIGGER log_update
-- AFTER UPDATE ON accounts
-- FOR EACH ROW
-- WHEN (OLD.* IS DISTINCT FROM NEW.*)
-- EXECUTE FUNCTION log_account_update();

CREATE TRIGGER view_insert INSTEAD OF INSERT ON my_view FOR EACH ROW EXECUTE FUNCTION view_insert_row();

CREATE TRIGGER transfer_insert AFTER INSERT ON transfer REFERENCING NEW TABLE AS inserted FOR EACH STATEMENT EXECUTE FUNCTION check_transfer_balances_to_zero();

CREATE TRIGGER paired_items_update AFTER UPDATE ON paired_items REFERENCING NEW TABLE AS newtab OLD TABLE AS oldtab FOR EACH ROW EXECUTE FUNCTION check_matching_pairs();

CREATE TRIGGER log_update AFTER UPDATE ON accounts FOR EACH ROW WHEN (OLD.* IS DISTINCT FROM NEW.*) EXECUTE FUNCTION log_account_update();
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_trigger.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 56480788d573c81c4ae61f3a8b80adf517b9075a2f420a82aaef1ee3889e0602
file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'on' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - keyword: FOR - keyword: EACH - keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz -
keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - keyword: FOR - keyword: EACH - keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: naked_identifier: schema dot: . function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CONSTRAINT - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: REFERENCING - keyword: OLD - keyword: TABLE - keyword: as - table_reference: naked_identifier: old_table - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: new_table - keyword: EXECUTE - keyword: PROCEDURE - function: function_name: function_name_identifier: proc function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: INSERT - keyword: OR - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - comma: ',' - column_reference: naked_identifier: transactions - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - comma: ',' - 
column_reference: naked_identifier: transactions - keyword: OR - keyword: TRUNCATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: OLD - dot: . - naked_identifier: balance - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: balance end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: view_insert - keyword: INSTEAD - keyword: OF - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: my_view - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: view_insert_row function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: transfer_insert - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: transfer - keyword: REFERENCING - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: inserted - keyword: FOR - keyword: EACH - keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_transfer_balances_to_zero function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: paired_items_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: paired_items - keyword: REFERENCING - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: newtab - keyword: OLD - keyword: TABLE - keyword: AS - table_reference: naked_identifier: oldtab - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_matching_pairs function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: 
log_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: WHEN - bracketed: start_bracket: ( expression: - naked_identifier: OLD - dot: . - star: '*' - keyword: IS - keyword: DISTINCT - keyword: FROM - naked_identifier: NEW - dot: . - star: '*' end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: log_account_update function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_type.sql
-- https://www.postgresql.org/docs/current/sql-createtype.html
CREATE TYPE foo;
CREATE TYPE bar AS ENUM ();
CREATE TYPE bar AS ENUM ('foo', 'bar');
CREATE TYPE foobar AS RANGE (SUBTYPE = FLOAT);
CREATE TYPE barbar AS (INPUT = foo, OUTPUT = bar);
CREATE TYPE foofoo AS (foo varchar collate utf8);
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_type.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6b3e6afb2ee17a7d0c7b9a419102e42e652d0d4c57d13d0172c7bcc342b4416b
file:
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: bar
    - keyword: AS
    - keyword: ENUM
    - bracketed:
        start_bracket: (
        end_bracket: )
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: bar
    - keyword: AS
    - keyword: ENUM
    - bracketed:
      - start_bracket: (
      - single_quote: "'foo'"
      - comma: ','
      - single_quote: "'bar'"
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: foobar
    - keyword: AS
    - keyword: RANGE
    - bracketed:
      - start_bracket: (
      - word: SUBTYPE
      - equals: '='
      - word: FLOAT
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: barbar
    - keyword: AS
    - bracketed:
      - start_bracket: (
      - word: INPUT
      - equals: '='
      - word: foo
      - comma: ','
      - word: OUTPUT
      - equals: '='
      - word: bar
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: foofoo
    - keyword: AS
    - bracketed:
      - start_bracket: (
      - word: foo
      - word: varchar
      - word: collate
      - word: utf8
      - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_user_mapping.sql
CREATE USER MAPPING FOR bob SERVER foo OPTIONS (user 'bob', password 'secret');

CREATE USER MAPPING IF NOT EXISTS FOR PUBLIC SERVER foo;

CREATE USER MAPPING IF NOT EXISTS FOR CURRENT_USER SERVER foo OPTIONS (user 'bob', password 'secret', option 'value');
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_user_mapping.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e4b7bf32c9556794f7bc41d9292ef43e4b0804378e9bcb069042835912b4795e
file:
- statement:
    create_user_mapping_statement:
    - keyword: CREATE
    - keyword: USER
    - keyword: MAPPING
    - keyword: FOR
    - naked_identifier: bob
    - keyword: SERVER
    - server_reference:
        naked_identifier: foo
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - naked_identifier_all: user
      - quoted_literal: "'bob'"
      - comma: ','
      - naked_identifier_all: password
      - quoted_literal: "'secret'"
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_user_mapping_statement:
    - keyword: CREATE
    - keyword: USER
    - keyword: MAPPING
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - keyword: FOR
    - naked_identifier: PUBLIC
    - keyword: SERVER
    - server_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_user_mapping_statement:
    - keyword: CREATE
    - keyword: USER
    - keyword: MAPPING
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - keyword: FOR
    - naked_identifier: CURRENT_USER
    - keyword: SERVER
    - server_reference:
        naked_identifier: foo
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - naked_identifier_all: user
      - quoted_literal: "'bob'"
      - comma: ','
      - naked_identifier_all: password
      - quoted_literal: "'secret'"
      - comma: ','
      - naked_identifier_all: option
      - quoted_literal: "'value'"
      - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_view.sql
CREATE VIEW vista AS SELECT 'Hello World';

CREATE OR REPLACE VIEW vista AS SELECT 'Hello World';

CREATE VIEW vista AS SELECT text 'Hello World' AS hello;

CREATE TEMP VIEW vista AS SELECT text 'Hello World' AS hello;

CREATE TEMPORARY VIEW vista AS SELECT text 'Hello World' AS hello;

CREATE VIEW comedies AS SELECT * FROM films WHERE kind = 'Comedy';

CREATE VIEW pg_comedies AS VALUES (1, 'one'), (2, 'two'), (3, 'three') WITH LOCAL CHECK OPTION;

CREATE VIEW pg_comedies AS SELECT * FROM comedies WHERE classification = 'PG' WITH CASCADED CHECK OPTION;

create view foo with (security_invoker) as select 1;

create view foo with (security_barrier) as select 1;

create view foo with (security_invoker=BOOLEAN) as select 1;

create view foo with (security_barrier=BOOLEAN) as select 1;

create view foo with (check_option=local) as select * from OTHER_VIEW;

create view foo with (check_option=cascaded) as select * from OTHER_VIEW;

create view foo with (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) as select 1;

create view foo as select * from OTHER_VIEW with local check option;

create view foo as select * from OTHER_VIEW with cascaded check option;

CREATE OR REPLACE RECURSIVE VIEW "grouping_node" (
    "node_id",
    "ancestors",
    "category_id",
    "path",
    "path_nodes"
) AS
SELECT
    "group_id" AS "node_id",
    ARRAY[]::INTEGER[] AS "ancestors",
    "category_id",
    ARRAY["name"]::text[] AS "path",
    ARRAY["group_id"]::INTEGER[] AS "path_nodes"
FROM "grouping_managementgroup"
WHERE "parent_id" IS NULL
UNION ALL
SELECT
    "group_id",
    "ancestors" || "parent_id",
    "grouping_node"."category_id",
    "path" || "name"::text,
    "path_nodes" || "group_id"
FROM "grouping_managementgroup", "grouping_node"
WHERE "parent_id" = "node_id";

-- use of collation as non-reserved keyword
create view foo
as select col1 as collation from OTHER_VIEW; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_view.yml000066400000000000000000000472301503426445100246620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4de1412edb707d1fbe44f774c60d2763dc5c476ec6f59a7460a302202a30e426 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: alias_operator: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: alias_operator: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: alias_operator: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: comedies - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: films where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Comedy'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: pg_comedies - keyword: AS - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) - with_check_option: - keyword: WITH - keyword: LOCAL - keyword: CHECK - keyword: OPTION - statement_terminator: ; - 
statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: pg_comedies - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: comedies where_clause: keyword: WHERE expression: column_reference: naked_identifier: classification comparison_operator: raw_comparison_operator: '=' quoted_literal: "'PG'" - with_check_option: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: security_invoker end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: security_barrier end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: security_invoker - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: BOOLEAN end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: security_barrier - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: BOOLEAN end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: check_option - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: local end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: check_option - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: cascaded end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: 
from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - with_check_option: - keyword: with - keyword: local - keyword: check - keyword: option - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - with_check_option: - keyword: with - keyword: cascaded - keyword: check - keyword: option - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: RECURSIVE - keyword: VIEW - table_reference: quoted_identifier: '"grouping_node"' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"node_id"' - comma: ',' - column_reference: quoted_identifier: '"ancestors"' - comma: ',' - column_reference: quoted_identifier: '"category_id"' - comma: ',' - column_reference: quoted_identifier: '"path"' - comma: ',' - column_reference: quoted_identifier: '"path_nodes"' - end_bracket: ) - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '"group_id"' alias_expression: alias_operator: keyword: AS quoted_identifier: '"node_id"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' casting_operator: '::' data_type: keyword: INTEGER start_square_bracket: '[' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS 
quoted_identifier: '"ancestors"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"category_id"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: quoted_identifier: '"name"' end_square_bracket: ']' casting_operator: '::' data_type: keyword: text start_square_bracket: '[' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS quoted_identifier: '"path"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: quoted_identifier: '"group_id"' end_square_bracket: ']' casting_operator: '::' data_type: keyword: INTEGER start_square_bracket: '[' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS quoted_identifier: '"path_nodes"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_managementgroup"' where_clause: keyword: WHERE expression: column_reference: quoted_identifier: '"parent_id"' keyword: IS null_literal: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '"group_id"' - comma: ',' - select_clause_element: expression: - column_reference: quoted_identifier: '"ancestors"' - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"parent_id"' - comma: ',' - select_clause_element: column_reference: - quoted_identifier: '"grouping_node"' - dot: . - quoted_identifier: '"category_id"' - comma: ',' - select_clause_element: expression: column_reference: quoted_identifier: '"path"' binary_operator: - pipe: '|' - pipe: '|' cast_expression: column_reference: quoted_identifier: '"name"' casting_operator: '::' data_type: keyword: text - comma: ',' - select_clause_element: expression: - column_reference: quoted_identifier: '"path_nodes"' - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"group_id"' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_managementgroup"' - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_node"' where_clause: keyword: WHERE expression: - column_reference: quoted_identifier: '"parent_id"' - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"node_id"' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: col1 alias_expression: alias_operator: keyword: as naked_identifier: collation from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_zero_argument_function.sql000066400000000000000000000001241503426445100304630ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION a() RETURNS integer AS $$ SELECT 1; $$ LANGUAGE SQL; 
sqlfluff-3.4.2/test/fixtures/dialects/postgres/create_zero_argument_function.yml000066400000000000000000000016731503426445100304770ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 16714c14ab92e8d16180df1b9b8947ff24806a84991014788994ceba5835921f
file:
  statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: FUNCTION
    - function_name:
        function_name_identifier: a
    - function_parameter_list:
        bracketed:
          start_bracket: (
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        keyword: integer
    - function_definition:
        keyword: AS
        quoted_literal: "$$\n    SELECT 1;\n$$"
        language_clause:
          keyword: LANGUAGE
          naked_identifier: SQL
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/cte_in_materialized_view.sql000066400000000000000000000010211503426445100273760ustar00rootroot00000000000000
CREATE MATERIALIZED VIEW public.mv_sales
TABLESPACE pg_default
AS
WITH regional_sales AS (
    SELECT
        region,
        SUM(amount) AS total_sales
    FROM orders
    GROUP BY region
),

top_regions AS (
    SELECT region
    FROM regional_sales
    WHERE total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales)
)

SELECT
    region,
    product,
    SUM(quantity) AS product_units,
    SUM(amount) AS product_sales
FROM orders
WHERE region IN (SELECT region FROM top_regions)
GROUP BY region, product
WITH DATA;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/cte_in_materialized_view.yml000066400000000000000000000161211503426445100274050ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9472e9bac40732b6a255fe5128021c6228ce3e866ac8074a2a8c2aafedfeb070
file:
  statement:
    create_materialized_view_statement:
    - keyword: CREATE
    - keyword: MATERIALIZED
    - keyword: VIEW
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: mv_sales
    - keyword: TABLESPACE
    - tablespace_reference:
        naked_identifier: pg_default
    - keyword: AS
    - with_compound_statement:
      - keyword: WITH
      - common_table_expression:
          naked_identifier: regional_sales
          keyword: AS
          bracketed:
            start_bracket: (
            select_statement:
              select_clause:
              - keyword: SELECT
              - select_clause_element:
                  column_reference:
                    naked_identifier: region
              - comma: ','
              - select_clause_element:
                  function:
                    function_name:
                      function_name_identifier: SUM
                    function_contents:
                      bracketed:
                        start_bracket: (
                        expression:
                          column_reference:
                            naked_identifier: amount
                        end_bracket: )
                  alias_expression:
                    alias_operator:
                      keyword: AS
                    naked_identifier: total_sales
              from_clause:
                keyword: FROM
                from_expression:
                  from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: orders
              groupby_clause:
              - keyword: GROUP
              - keyword: BY
              - column_reference:
                  naked_identifier: region
            end_bracket: )
      - comma: ','
      - common_table_expression:
          naked_identifier: top_regions
          keyword: AS
          bracketed:
            start_bracket: (
            select_statement:
              select_clause:
                keyword: SELECT
                select_clause_element:
                  column_reference:
                    naked_identifier: region
              from_clause:
                keyword: FROM
                from_expression:
                  from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: regional_sales
              where_clause:
                keyword: WHERE
                expression:
                  column_reference:
                    naked_identifier: total_sales
                  comparison_operator:
                    raw_comparison_operator: '>'
                  bracketed:
                    start_bracket: (
                    expression:
                      select_statement:
                        select_clause:
                          keyword: SELECT
                          select_clause_element:
                            expression:
                              function:
                                function_name:
                                  function_name_identifier: SUM
                                function_contents:
                                  bracketed:
                                    start_bracket: (
                                    expression:
                                      column_reference:
                                        naked_identifier: total_sales
                                    end_bracket: )
                              binary_operator: /
                              numeric_literal: '10'
                        from_clause:
                          keyword: FROM
                          from_expression:
                            from_expression_element:
                              table_expression:
                                table_reference:
                                  naked_identifier: regional_sales
                    end_bracket: )
            end_bracket: )
      - select_statement:
          select_clause:
          - keyword: SELECT
          - select_clause_element:
              column_reference:
                naked_identifier: region
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: product
          - comma: ','
          - select_clause_element:
              function:
                function_name:
                  function_name_identifier: SUM
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: quantity
                    end_bracket: )
              alias_expression:
                alias_operator:
                  keyword: AS
                naked_identifier: product_units
          - comma: ','
          - select_clause_element:
              function:
                function_name:
                  function_name_identifier: SUM
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: amount
                    end_bracket: )
              alias_expression:
                alias_operator:
                  keyword: AS
                naked_identifier: product_sales
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: orders
          where_clause:
            keyword: WHERE
            expression:
              column_reference:
                naked_identifier: region
              keyword: IN
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      column_reference:
                        naked_identifier: region
                  from_clause:
                    keyword: FROM
                    from_expression:
                      from_expression_element:
                        table_expression:
                          table_reference:
                            naked_identifier: top_regions
                end_bracket: )
          groupby_clause:
          - keyword: GROUP
          - keyword: BY
          - column_reference:
              naked_identifier: region
          - comma: ','
          - column_reference:
              naked_identifier: product
      - with_data_clause:
        - keyword: WITH
        - keyword: DATA
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/datatypes.sql000066400000000000000000000042101503426445100243500ustar00rootroot00000000000000
create table a(
    a smallint,
    b integer,
    ba int2,
    bb int4,
    bc int8,
    bd int,
    c bigint,
    d real,
    e double precision,
    f smallserial,
    g serial,
    ga serial2,
    gb serial4,
    gc serial8,
    h bigserial
);

create table b(
    a float,
    b float(24),
    c float4,
    e float8
);

create table c(
    a numeric,
    aa decimal,
    b numeric(7),
    ba decimal(7),
    c numeric(7,2),
    ca decimal(7,2)
);

create table d(
    a money
);

create table e(
    a char,
    b char(7),
    c character,
    d character(5),
    e character varying,
    f character varying(8),
    g varchar(9),
    h varchar,
    i text
);

create table f(
    a bytea
);

create table g(
    a date,
    b interval(4),
    c time(4),
    d time(4) with time zone,
    e time(4) without time zone,
    f timestamp(4),
    g timestamp(4) with time zone,
    h timestamp(4) without time zone,
    i timetz,
    j timetz(4),
    k timestamptz,
    l timestamptz(4)
);

create table h(
    a boolean,
    b bool
);

create table i(
    a point,
    b line,
    c lseg,
    d box,
    e path,
    f polygon,
    g circle
);

create table j(
    a cidr,
    b inet,
    c macaddr,
    d macaddr8
);

create table k(
    a bit,
    b bit(3),
    c bit varying,
    d bit varying(5)
);

create table l(
    a pg_lsn
);

create table l(
    a tsvector,
    b tsquery
);

create table m(
    a uuid
);

create table n(
    a xml
);

create table o(
    a json,
    b jsonb
);

create table p(
    a integer[],
    b float[][],
    c char[1],
    d jsonb[3][5],
    e money ARRAY,
    f money ARRAY[7]
);

-- user defined data types
CREATE TYPE bar AS ENUM ('foo', 'bar');
create table q(
    a bar
);

-- data type with schema
create type public.c AS ENUM ('foo', 'bar');
create table r(
    a public.c
);

-- DATETIME is a valid datatype, but is not a date_time_identifier; it is only
-- potentially a user-defined type (i.e. a data_type_identifier).
CREATE TABLE a (
    b DATE,
    c DATETIME
);

-- from https://github.com/sqlfluff/sqlfluff/issues/2649
SELECT b::DATETIME FROM a;
SELECT b, c::DATE FROM a;

create table test (
    situation bpchar(1) null default 'A'::bpchar
);
sqlfluff-3.4.2/test/fixtures/dialects/postgres/datatypes.yml000066400000000000000000000531111503426445100243560ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7ece17f5ffc617f174ce2ca9d1a0dc541cbac65db272aba6f895573e6cb6586d file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: ba - data_type: keyword: int2 - comma: ',' - column_reference: naked_identifier: bb - data_type: keyword: int4 - comma: ',' - column_reference: naked_identifier: bc - data_type: keyword: int8 - comma: ',' - column_reference: naked_identifier: bd - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: bigint - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: real - comma: ',' - column_reference: naked_identifier: e - data_type: - keyword: double - keyword: precision - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: smallserial - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: serial - comma: ',' - column_reference: naked_identifier: ga - data_type: keyword: serial2 - comma: ',' - column_reference: naked_identifier: gb - data_type: keyword: serial4 - comma: ',' - column_reference: naked_identifier: gc - data_type: keyword: serial8 - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: bigserial - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: b - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: float - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: float bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '24' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: float4 - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: float8 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: c - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: numeric - comma: ',' - column_reference: naked_identifier: aa - data_type: keyword: decimal - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: numeric bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: ba - data_type: keyword: decimal bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '7' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: ca - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '7' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: d - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: money end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: e 
- bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: char - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: character - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: e - data_type: - keyword: character - keyword: varying - comma: ',' - column_reference: naked_identifier: f - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' end_bracket: ) - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '9' end_bracket: ) - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: i - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: f - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: bytea end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: g - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: b - data_type: datetime_type_identifier: keyword: interval bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: datetime_type_identifier: keyword: time bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: d - data_type: datetime_type_identifier: - keyword: time - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: with - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: e - data_type: datetime_type_identifier: - keyword: time - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: without - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: f - data_type: datetime_type_identifier: keyword: timestamp bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: g - data_type: datetime_type_identifier: - keyword: timestamp - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: with - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: h - data_type: datetime_type_identifier: - keyword: timestamp - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: without - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: i - data_type: datetime_type_identifier: keyword: timetz - comma: ',' - column_reference: naked_identifier: j - data_type: datetime_type_identifier: keyword: timetz bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: 
k - data_type: datetime_type_identifier: keyword: timestamptz - comma: ',' - column_reference: naked_identifier: l - data_type: datetime_type_identifier: keyword: timestamptz bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: h - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: boolean - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: i - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: point - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: line - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: lseg - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: box - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: path - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: polygon - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: circle - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: j - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: cidr - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: inet - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: macaddr - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: macaddr8 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: k - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: bit - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: bit bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: - keyword: bit - keyword: varying - comma: ',' - column_reference: naked_identifier: d - data_type: - keyword: bit - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: l - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: pg_lsn end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: l - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: tsvector - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: tsquery - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: m - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: uuid end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: n - bracketed: start_bracket: ( column_reference: 
naked_identifier: a data_type: keyword: xml end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: o - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: json - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: jsonb - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: p - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: integer start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: b - data_type: - keyword: float - start_square_bracket: '[' - end_square_bracket: ']' - start_square_bracket: '[' - end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: char start_square_bracket: '[' expression: numeric_literal: '1' end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: d - data_type: - keyword: jsonb - start_square_bracket: '[' - expression: numeric_literal: '3' - end_square_bracket: ']' - start_square_bracket: '[' - expression: numeric_literal: '5' - end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: money array_type: keyword: ARRAY - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: money sized_array_type: array_type: keyword: ARRAY array_accessor: start_square_bracket: '[' numeric_literal: '7' end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: bar - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - single_quote: "'foo'" - comma: ',' - single_quote: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: q - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: data_type_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: create - keyword: type - object_reference: - naked_identifier: public - dot: . - naked_identifier: c - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - single_quote: "'foo'" - comma: ',' - single_quote: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: r - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: naked_identifier: public dot: . 
data_type_identifier: c end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_reference: naked_identifier: b - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: c - data_type: data_type_identifier: DATETIME - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: data_type_identifier: DATETIME from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: keyword: DATE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: situation - data_type: keyword: bpchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - column_constraint_segment: keyword: 'null' - column_constraint_segment: keyword: default cast_expression: quoted_literal: "'A'" casting_operator: '::' data_type: keyword: bpchar - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/datetime_units.sql000066400000000000000000000034201503426445100253720ustar00rootroot00000000000000SELECT t1.field, EXTRACT(CENTURY FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DECADE FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DOW FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DOY FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ISODOW FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ISOYEAR FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MICROSECONDS FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MILLENNIUM FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MILLISECONDS FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE_HOUR FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE_MINUTE FROM t1.sometime) AS a FROM t1; SELECT t1.field, DATE '2001-09-28' + 7 + t1.data AS a FROM t1; SELECT t1.field, NOW() + INTERVAL '1 HOUR' + t1.data AS a FROM t1; SELECT t1.field, t1.data + TIME '03:00' AS a FROM t1; SELECT t1.field, INTERVAL '1 DAY' + INTERVAL '1 HOUR 2 MINUTES' + t1.data AS a FROM t1; SELECT t1.field, TIMESTAMP '2001-09-28 01:00' + INTERVAL '23 HOURS' + t1.data AS a FROM t1; SELECT t1.field, TIME '01:00' + INTERVAL '3S' + t1.data AS a FROM t1; SELECT t1.field, - INTERVAL '23 HOURS' + t1.data AS a FROM t1; SELECT t1.field, INTERVAL '1 HOUR' / 1.5 + t1.data AS a FROM t1; SELECT t1.field, INTERVAL '1' HOUR * 3.5 + t1.data AS a FROM t1; SELECT t1.field, TIMESTAMP WITH TIME ZONE '2005-04-02 12:00:00-07' + INTERVAL '24 HOURS' + t1.data AS a FROM t1; 
sqlfluff-3.4.2/test/fixtures/dialects/postgres/datetime_units.yml000066400000000000000000000607321503426445100254050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 867ab4cd4f7555cfb4a6efffb82fc4a1283f45d4c4c36668aa5a999045ba3288 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: CENTURY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DECADE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: DOY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: EPOCH keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ISODOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ISOYEAR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MICROSECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MILLENNIUM keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: MILLISECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: TIMEZONE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: TIMEZONE_HOUR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: TIMEZONE_MINUTE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2001-09-28'" - binary_operator: + - numeric_literal: '7' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'1 HOUR'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data binary_operator: + datetime_literal: datetime_type_identifier: keyword: TIME quoted_literal: "'03:00'" alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'1 DAY'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'1 HOUR 2 MINUTES'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: TIMESTAMP quoted_literal: "'2001-09-28 01:00'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'23 HOURS'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: TIME quoted_literal: "'01:00'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'3S'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: sign_indicator: '-' datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'23 HOURS'" binary_operator: + column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'1 HOUR'" - binary_operator: / - numeric_literal: '1.5' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: HOUR - binary_operator: '*' - numeric_literal: '3.5' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - datetime_literal: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE quoted_literal: "'2005-04-02 12:00:00-07'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'24 HOURS'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/deallocate.sql000066400000000000000000000001511503426445100244470ustar00rootroot00000000000000DEALLOCATE statement_name; DEALLOCATE PREPARE statement_name; DEALLOCATE ALL; DEALLOCATE PREPARE ALL; sqlfluff-3.4.2/test/fixtures/dialects/postgres/deallocate.yml000066400000000000000000000017141503426445100244570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
# To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7f1bc49cfff212f68642836703f13cb5ac7b3fff79da28248451eecfb8016274
file:
- statement:
    deallocate_statement:
      keyword: DEALLOCATE
      object_reference:
        naked_identifier: statement_name
- statement_terminator: ;
- statement:
    deallocate_statement:
    - keyword: DEALLOCATE
    - keyword: PREPARE
    - object_reference:
        naked_identifier: statement_name
- statement_terminator: ;
- statement:
    deallocate_statement:
    - keyword: DEALLOCATE
    - keyword: ALL
- statement_terminator: ;
- statement:
    deallocate_statement:
    - keyword: DEALLOCATE
    - keyword: PREPARE
    - keyword: ALL
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/postgres/delete.sql000066400000000000000000000030361503426445100236210ustar00rootroot00000000000000
DELETE FROM films;
DELETE FROM ONLY films;
DELETE FROM films *;
DELETE FROM films AS f;
DELETE FROM films f;

DELETE FROM films USING producers
WHERE producer_id = producers.id AND producers.name = 'foo';

DELETE FROM films AS f USING producers AS p
WHERE f.producer_id = p.id AND p.name = 'foo';

DELETE FROM films AS f USING producers AS p, actors AS a
WHERE f.producer_id = p.id AND p.name = 'foo'
AND f.actor_id = a.id AND a.name = 'joe cool';

DELETE FROM films f USING producers p
WHERE f.producer_id = p.id AND p.name = 'foo';

DELETE FROM films f USING producers p, actors a
WHERE f.producer_id = p.id AND p.name = 'foo'
AND f.actor_id = a.id AND a.name = 'joe cool';

DELETE FROM tasks WHERE CURRENT OF c_tasks;

DELETE FROM films WHERE kind <> 'Musical';

DELETE FROM tasks WHERE status = 'DONE' RETURNING *;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id as a_id;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id a_id;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id, producer_id;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id as a_id, producer_id as p_id;
DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id a_id, producer_id p_id;

WITH test as (select foo from bar)
DELETE FROM films;

WITH RECURSIVE t(n) AS (
    VALUES (1)
    UNION ALL
    SELECT n+1 FROM t WHERE n < 100
)
DELETE FROM films;

DELETE FROM foo
USING baz
LEFT JOIN bar ON baz.bar_id = bar.id
WHERE foo.id = bar.foo_id
AND baz.id = 'c';
sqlfluff-3.4.2/test/fixtures/dialects/postgres/delete.yml000066400000000000000000000406521503426445100236260ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand.
_hash: 22cb784e9c27d8bfa7926e3509a3f79e05b85abd94fcb69b6d4f4ef4f76e16f6 file: - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - keyword: ONLY - table_reference: naked_identifier: films - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - star: '*' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: alias_operator: keyword: AS naked_identifier: f - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - keyword: USING - table_expression: table_reference: naked_identifier: producers - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: producers - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: producers - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: alias_operator: keyword: AS naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: alias_operator: keyword: AS naked_identifier: p - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: alias_operator: keyword: AS naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: alias_operator: keyword: AS naked_identifier: p - comma: ',' - table_expression: table_reference: naked_identifier: actors - alias_expression: alias_operator: keyword: AS naked_identifier: a - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - binary_operator: AND - column_reference: - naked_identifier: f - dot: . - naked_identifier: actor_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'joe cool'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: naked_identifier: p - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: naked_identifier: p - comma: ',' - table_expression: table_reference: naked_identifier: actors - alias_expression: naked_identifier: a - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - binary_operator: AND - column_reference: - naked_identifier: f - dot: . - naked_identifier: actor_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'joe cool'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - keyword: WHERE - keyword: CURRENT - keyword: OF - object_reference: naked_identifier: c_tasks - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "'Musical'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - star: '*' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: alias_operator: keyword: as naked_identifier: a_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: naked_identifier: a_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: alias_operator: keyword: as naked_identifier: a_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - alias_expression: alias_operator: keyword: as naked_identifier: p_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' 
quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: naked_identifier: a_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - alias_expression: naked_identifier: p_id - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: test keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: n end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) set_operator: - keyword: UNION - keyword: ALL select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: n binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: foo - keyword: USING - table_expression: table_reference: naked_identifier: baz - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: bar - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: baz - dot: . - naked_identifier: bar_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bar - dot: . - naked_identifier: id - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: foo - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bar - dot: . - naked_identifier: foo_id - binary_operator: AND - column_reference: - naked_identifier: baz - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'c'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/discard.sql000066400000000000000000000001201503426445100237570ustar00rootroot00000000000000DISCARD ALL; DISCARD PLANS; DISCARD SEQUENCES; DISCARD TEMPORARY; DISCARD TEMP; sqlfluff-3.4.2/test/fixtures/dialects/postgres/discard.yml000066400000000000000000000016411503426445100237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 00eaafbc98c8ec708bf86bddd93c306cd4f60e77c39d8b9d01d8e7aa70b90d7f file: - statement: discard_statement: - keyword: DISCARD - keyword: ALL - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: PLANS - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: SEQUENCES - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: TEMPORARY - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: TEMP - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/do.sql000066400000000000000000000032311503426445100227560ustar00rootroot00000000000000-- postgres_do.sql /* Postgres DO statements (https://www.postgresql.org/docs/14/sql-do.html). */ -- From Issue #2018 (https://github.com/sqlfluff/sqlfluff/issues/2018) DO $$DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$; -- can put language before code block DO LANGUAGE plpgsql $$ DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$; -- can put language after code block DO $$ DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$ LANGUAGE plpgsql; -- code block can be any string literal DO E' DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = \'VIEW\' AND table_schema = \'public\' LOOP EXECUTE \'GRANT ALL ON \' || quote_ident(r.table_schema) || \'.\' || quote_ident(r.table_name) || \' TO webuser\'; END LOOP; END'; DO 'DECLARE r record;'; DO U&'\0441\043B\043E\043D'; DO 'SELECT foo' 'bar'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/do.yml000066400000000000000000000054221503426445100227640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f60058102cfdbfadc25b26e6235e09246c06828ad39beb4f0d79aec4618fac69 file: - statement: do_statement: keyword: DO quoted_literal: "$$DECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" - statement_terminator: ; - statement: do_statement: keyword: DO language_clause: keyword: LANGUAGE naked_identifier: plpgsql quoted_literal: "$$\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' 
|| quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "$$\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "E'\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = \\'VIEW\\' AND table_schema = \\'public\\'\n LOOP\n EXECUTE\ \ \\'GRANT ALL ON \\' || quote_ident(r.table_schema) || \\'.\\' || quote_ident(r.table_name)\ \ || \\' TO webuser\\';\n END LOOP;\nEND'" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "'DECLARE r record;'" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "U&'\\0441\\043B\\043E\\043D'" - statement_terminator: ; - statement: do_statement: - keyword: DO - quoted_literal: "'SELECT foo'" - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_aggregate.sql000066400000000000000000000002161503426445100253260ustar00rootroot00000000000000DROP AGGREGATE myavg(integer), myavg(bigint); DROP AGGREGATE myrank(VARIADIC "any" ORDER BY VARIADIC "any"); DROP AGGREGATE myavg(integer); sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_aggregate.yml000066400000000000000000000031511503426445100253310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
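# Note below that plain signatures such as myavg(integer) parse into a
# function_parameter_list, while the VARIADIC "any" ORDER BY VARIADIC "any"
# signature of myrank is captured as raw word/double_quote segments inside a
# generic bracketed node.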
_hash: e379ce62be0a5953110b5fd48cbfa49fbe176a85ced651cdb7afa0c4bc03474a file: - statement: drop_aggregate_statement: - keyword: DROP - keyword: AGGREGATE - object_reference: naked_identifier: myavg - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - comma: ',' - object_reference: naked_identifier: myavg - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: bigint end_bracket: ) - statement_terminator: ; - statement: drop_aggregate_statement: - keyword: DROP - keyword: AGGREGATE - object_reference: naked_identifier: myrank - bracketed: - start_bracket: ( - word: VARIADIC - double_quote: '"any"' - word: ORDER - word: BY - word: VARIADIC - double_quote: '"any"' - end_bracket: ) - statement_terminator: ; - statement: drop_aggregate_statement: - keyword: DROP - keyword: AGGREGATE - object_reference: naked_identifier: myavg - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_cast.sql000066400000000000000000000005071503426445100243350ustar00rootroot00000000000000-- ANSI SQL: DROP CAST (int AS bool); DROP CAST (int AS bool) RESTRICT; DROP CAST (int AS bool) CASCADE; DROP CAST (udt_1 AS udt_2); DROP CAST (sch.udt_1 AS sch.udt_2); -- Additional PG extensions: DROP CAST IF EXISTS (int AS bool); DROP CAST IF EXISTS (int AS bool) RESTRICT; DROP CAST IF EXISTS (int AS bool) CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_cast.yml000066400000000000000000000056141503426445100243430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 797c59758187f686eabaaa295a9e2b392158e11ab37a934556cf024cf468e083 file: - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . 
data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_database.sql000066400000000000000000000002651503426445100251500ustar00rootroot00000000000000DROP DATABASE db; DROP DATABASE db (FORCE); DROP DATABASE db WITH (FORCE); DROP DATABASE IF EXISTS db; DROP DATABASE IF EXISTS db (FORCE); DROP DATABASE IF EXISTS db WITH (FORCE); sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_database.yml000066400000000000000000000035641503426445100251570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d40f9773665452c0ebe432f0dcd4dac3bb66eb3a31b67101aa130727475cb543 file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - keyword: WITH - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_domain.sql000066400000000000000000000000211503426445100246410ustar00rootroot00000000000000DROP DOMAIN box; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_domain.yml000066400000000000000000000010541503426445100246520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
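# A single fixture: the minimal DROP DOMAIN form, with one object_reference
# and no IF EXISTS / CASCADE / RESTRICT modifiers.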
_hash: 38570fd1de11ce681838552aac7a502c8163cd496475a4639714b51b0b736a48 file: statement: drop_domain_statement: - keyword: DROP - keyword: DOMAIN - object_reference: naked_identifier: box statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_foreign_table.sql000066400000000000000000000002771503426445100262070ustar00rootroot00000000000000drop foreign table test; drop foreign table films, distributors; drop foreign table if exists test; drop foreign table if exists test cascade; drop foreign table if exists test restrict; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_foreign_table.yml000066400000000000000000000030741503426445100262070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b58c740f9dd8eb9ce00bb6f086f3f93eed52e1bb59d2a6220b85db2ebb351871 file: - statement: drop_foreign_table_statement: - keyword: drop - keyword: foreign - keyword: table - table_reference: naked_identifier: test - statement_terminator: ; - statement: drop_foreign_table_statement: - keyword: drop - keyword: foreign - keyword: table - table_reference: naked_identifier: films - comma: ',' - table_reference: naked_identifier: distributors - statement_terminator: ; - statement: drop_foreign_table_statement: - keyword: drop - keyword: foreign - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: test - statement_terminator: ; - statement: drop_foreign_table_statement: - keyword: drop - keyword: foreign - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: test - keyword: cascade - statement_terminator: ; - statement: drop_foreign_table_statement: - keyword: drop - keyword: foreign - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: test - keyword: restrict - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_function.sql000066400000000000000000000004171503426445100252300ustar00rootroot00000000000000DROP FUNCTION sqrt (integer); DROP FUNCTION sqrt (integer), sqrt (bigint); DROP FUNCTION update_employee_salaries; DROP FUNCTION update_employee_salaries (); DROP FUNCTION IF EXISTS foo (IN my_var integer, VARIADIC my_var_2 text); DROP FUNCTION IF EXISTS f_name CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_function.yml000066400000000000000000000047011503426445100252320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
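# The fixtures below exercise DROP FUNCTION with typed signatures, a list of
# several functions, a bare name, an empty parameter list, IF EXISTS with
# IN/VARIADIC parameter modes, and a trailing CASCADE.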
_hash: c558b7e99c57564a274aa6d3dde08cbc81b55855a124d4df7cc3531f991db2ed file: - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - comma: ',' - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: bigint end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: update_employee_salaries - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: update_employee_salaries - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - keyword: IN - parameter: my_var - data_type: keyword: integer - comma: ',' - keyword: VARIADIC - parameter: my_var_2 - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: f_name - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_index.sql000066400000000000000000000011031503426445100245030ustar00rootroot00000000000000DROP INDEX abc; DROP INDEX "abc"; DROP INDEX IF EXISTS abc; DROP INDEX abc, "def", ghi; DROP INDEX IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP INDEX abc CASCADE; DROP INDEX abc, def, ghi CASCADE; DROP INDEX IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP INDEX abc RESTRICT; DROP INDEX abc, def, ghi RESTRICT; -- Test CONCURRENTLY DROP INDEX CONCURRENTLY abc; DROP INDEX CONCURRENTLY IF EXISTS abc; DROP INDEX CONCURRENTLY abc, def; DROP INDEX CONCURRENTLY IF EXISTS abc, def; DROP INDEX CONCURRENTLY abc, def CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_index.yml000066400000000000000000000101231503426445100245070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
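# Coverage below: single and comma-separated index names (naked and quoted),
# IF EXISTS, trailing CASCADE/RESTRICT, and the CONCURRENTLY variants.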
_hash: a8bfaf3bfd8572b31cd437d25ca9449d13350a5be6765b90320af9bf1047a214 file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: quoted_identifier: '"def"' - comma: ',' - index_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_materialized_view.sql000066400000000000000000000007561503426445100271150ustar00rootroot00000000000000DROP MATERIALIZED VIEW bar; DROP MATERIALIZED VIEW foo, bar; DROP MATERIALIZED VIEW bar CASCADE; DROP MATERIALIZED VIEW foo, bar CASCADE; DROP MATERIALIZED VIEW bar RESTRICT; DROP 
MATERIALIZED VIEW foo, bar RESTRICT; DROP MATERIALIZED VIEW IF EXISTS bar; DROP MATERIALIZED VIEW IF EXISTS foo, bar; DROP MATERIALIZED VIEW IF EXISTS bar CASCADE; DROP MATERIALIZED VIEW IF EXISTS foo, bar CASCADE; DROP MATERIALIZED VIEW IF EXISTS bar RESTRICT; DROP MATERIALIZED VIEW IF EXISTS foo, bar RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_materialized_view.yml000066400000000000000000000070421503426445100271120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 363142dc482ee6645284249f7955af56d4238c12b8fac7bdae1b10e5419e0588 file: - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: RESTRICT - 
statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_owned.sql000066400000000000000000000005011503426445100245110ustar00rootroot00000000000000DROP OWNED BY bob; DROP OWNED BY bob, alice; DROP OWNED BY CURRENT_ROLE; DROP OWNED BY CURRENT_USER; DROP OWNED BY SESSION_USER; DROP OWNED BY bob, CURRENT_ROLE, alice, CURRENT_USER, ted; DROP OWNED BY bob CASCADE; DROP OWNED BY bob RESTRICT; DROP OWNED BY bob, alice CASCADE; DROP OWNED BY bob, alice RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_owned.yml000066400000000000000000000050731503426445100245240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f30947e3b468c099a73640d9924d061e48b1c3df0bfbdbef07c4f22810c3ca42 file: - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: CURRENT_ROLE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: CURRENT_USER - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: SESSION_USER - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - keyword: CURRENT_ROLE - comma: ',' - role_reference: naked_identifier: alice - comma: ',' - keyword: CURRENT_USER - comma: ',' - role_reference: naked_identifier: ted - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: CASCADE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: RESTRICT - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - keyword: CASCADE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_policy.sql000066400000000000000000000004001503426445100246720ustar00rootroot00000000000000DROP POLICY account_managers ON accounts; DROP POLICY IF EXISTS account_managers ON accounts; DROP POLICY account_managers ON accounts CASCADE; DROP POLICY account_managers ON accounts RESTRICT; DROP POLICY IF EXISTS account_managers ON accounts RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_policy.yml000066400000000000000000000034221503426445100247030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1933526cad5db1a8339d06b992d0fb310d39f4bb001a60484304c512585d481b file: - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: CASCADE - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RESTRICT - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_procedure.sql000066400000000000000000000005671503426445100254010ustar00rootroot00000000000000DROP PROCEDURE do_db_maintenance(); drop procedure insert_actor; drop procedure insert_actor(varchar); drop procedure insert_actor(varchar, varchar); drop procedure delete_actor, update_actor; drop procedure delete_actor, update_actor CASCADE; drop procedure delete_actor(in id varchar); drop procedure insert_actor(varchar, varchar), insert_actor2(varchar, varchar); sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_procedure.yml000066400000000000000000000062671503426445100254060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
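# Coverage below: DROP PROCEDURE with empty, single- and multi-argument
# parameter lists, bare names, comma-separated procedure lists, a trailing
# CASCADE, and an explicit `in` parameter mode.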
_hash: 9412d5773862ec85a0ca1370edef125b7c70bf59d502b73cf9419c88241353e6 file: - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - function_name: function_name_identifier: do_db_maintenance - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: varchar end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - comma: ',' - function_name: function_name_identifier: update_actor - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - comma: ',' - function_name: function_name_identifier: update_actor - keyword: CASCADE - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - function_parameter_list: bracketed: start_bracket: ( keyword: in parameter: id data_type: keyword: varchar end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - comma: ',' - function_name: function_name_identifier: insert_actor2 - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_publication.sql000066400000000000000000000012431503426445100257120ustar00rootroot00000000000000-- Test no trailing keyword with combinations of: -- * IF EXISTS -- * One publication vs multiple publications. DROP PUBLICATION abc; DROP PUBLICATION "abc"; DROP PUBLICATION IF EXISTS abc; DROP PUBLICATION abc, "def", ghi; DROP PUBLICATION IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP PUBLICATION abc CASCADE; DROP PUBLICATION IF EXISTS abc CASCADE; DROP PUBLICATION abc, def, ghi CASCADE; DROP PUBLICATION IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP PUBLICATION abc RESTRICT; DROP PUBLICATION IF EXISTS abc RESTRICT; DROP PUBLICATION abc, def, ghi RESTRICT; DROP PUBLICATION IF EXISTS abc, def, ghi RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_publication.yml000066400000000000000000000077151503426445100257260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41225e922471bc8b5fb87ee6f2ac4199f7c7a5f29a52dd5dee1d99fdd1e16485 file: - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: quoted_identifier: '"def"' - comma: ',' - publication_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_sequence.sql000066400000000000000000000002141503426445100252060ustar00rootroot00000000000000DROP SEQUENCE foo; DROP SEQUENCE foo.foo; DROP SEQUENCE IF EXISTS foo; DROP SEQUENCE IF EXISTS foo CASCADE; DROP SEQUENCE foo RESTRICT; 
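The fixture pairs in this directory all follow one pattern: each .sql file holds the statements under test, and the neighbouring .yml file records the parse tree SQLFluff produced for them, guarded by the _hash field described in each YAML header. A minimal sketch of that round trip follows — an illustration only, assuming SQLFluff is installed and the commands are run from the repository root; the explicit --dialect flag is an assumption here, since the dialect test harness normally infers the dialect from the fixture's directory name.

# Illustrative sketch of the fixture round trip; assumptions noted below.
import subprocess

# Regenerate every .yml parse fixture (including its _hash) from the paired
# .sql source. This command is quoted verbatim from the header of each
# generated YAML file in this section.
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)

# Print the parse tree for one fixture via the standard `sqlfluff parse` CLI
# command. Passing --dialect explicitly is an assumption (the test harness
# derives it from the `postgres` directory name instead).
subprocess.run(
    [
        "sqlfluff",
        "parse",
        "test/fixtures/dialects/postgres/drop_sequence.sql",
        "--dialect",
        "postgres",
    ],
    check=True,
)

If the regenerated YAML differs from what is checked in, the recorded _hash no longer matches and the fixture tests flag the file as stale — which is how the "do not edit by hand" rule in each header is enforced.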
sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_sequence.yml000066400000000000000000000025761503426445100252250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f19977ccbaad4b1cac3168bab6bc93b927fc4d2b68926f3abd0f25258bc1c93 file: - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: foo - dot: . - naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - keyword: CASCADE - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_statistics.sql000066400000000000000000000002261503426445100255730ustar00rootroot00000000000000DROP STATISTICS IF EXISTS accounting.users_uid_creation, public.grants_user_role; DROP STATISTICS foo CASCADE; DROP STATISTICS bar RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_statistics.yml000066400000000000000000000022661503426445100256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 690375d87b58153ff6eec04564ca6300d1c8e0c8e627604318c783d36dc6d5be file: - statement: drop_statistics_statement: - keyword: DROP - keyword: STATISTICS - keyword: IF - keyword: EXISTS - statistics_reference: - naked_identifier: accounting - dot: . - naked_identifier: users_uid_creation - comma: ',' - statistics_reference: - naked_identifier: public - dot: . - naked_identifier: grants_user_role - statement_terminator: ; - statement: drop_statistics_statement: - keyword: DROP - keyword: STATISTICS - statistics_reference: naked_identifier: foo - keyword: CASCADE - statement_terminator: ; - statement: drop_statistics_statement: - keyword: DROP - keyword: STATISTICS - statistics_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_subscription.sql000066400000000000000000000001771503426445100261320ustar00rootroot00000000000000DROP SUBSCRIPTION mysub; DROP SUBSCRIPTION IF EXISTS mysub; DROP SUBSCRIPTION mysub CASCADE; DROP SUBSCRIPTION mysub RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_subscription.yml000066400000000000000000000022211503426445100261240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df0b3a9defe6bff083854bcd1255936ee682af04350326c7f0ce4fccbbebaf37 file: - statement: drop_subscription: - keyword: DROP - keyword: SUBSCRIPTION - subscription_reference: naked_identifier: mysub - statement_terminator: ; - statement: drop_subscription: - keyword: DROP - keyword: SUBSCRIPTION - keyword: IF - keyword: EXISTS - subscription_reference: naked_identifier: mysub - statement_terminator: ; - statement: drop_subscription: - keyword: DROP - keyword: SUBSCRIPTION - subscription_reference: naked_identifier: mysub - keyword: CASCADE - statement_terminator: ; - statement: drop_subscription: - keyword: DROP - keyword: SUBSCRIPTION - subscription_reference: naked_identifier: mysub - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_trigger.sql000066400000000000000000000002631503426445100250450ustar00rootroot00000000000000DROP TRIGGER foo ON bar; DROP TRIGGER IF EXISTS foo ON bar; DROP TRIGGER foo ON bar CASCADE; DROP TRIGGER IF EXISTS foo ON bar RESTRICT; DROP TRIGGER if_dist_exists ON films; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_trigger.yml000066400000000000000000000032121503426445100250440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df94df5aa4e31d1c17cd308ad44d4b6c3080f149b07591753693654209d4e6f9 file: - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: if_dist_exists - keyword: 'ON' - table_reference: naked_identifier: films - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_type.sql000066400000000000000000000001571503426445100243650ustar00rootroot00000000000000DROP TYPE foo; DROP TYPE IF EXISTS foo; DROP TYPE foo, bar; DROP TYPE foo CASCADE; DROP TYPE foo RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_type.yml000066400000000000000000000024541503426445100243710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
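# Note below that DROP TYPE targets parse as data_type nodes carrying a
# data_type_identifier rather than as plain object references; IF EXISTS,
# multiple types, CASCADE and RESTRICT are all covered.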
_hash: 0ad296a4db75013a8f4ca8ff393984f2190041a834f478fc1f9ec2f7ead8c44b file: - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - data_type: data_type_identifier: foo - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - comma: ',' - data_type: data_type_identifier: bar - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_view.sql000066400000000000000000000007251503426445100243570ustar00rootroot00000000000000DROP VIEW abc; DROP VIEW "abc"; DROP VIEW IF EXISTS abc; DROP VIEW abc, "def", ghi; DROP VIEW IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP VIEW abc CASCADE; DROP VIEW IF EXISTS abc CASCADE; DROP VIEW abc, def, ghi CASCADE; DROP VIEW IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP VIEW abc RESTRICT; DROP VIEW IF EXISTS abc RESTRICT; DROP VIEW abc, def, ghi RESTRICT; DROP VIEW IF EXISTS abc, def, ghi RESTRICT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/drop_view.yml000066400000000000000000000072011503426445100243550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
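# Coverage below: naked and quoted view names, IF EXISTS, comma-separated
# view lists, and trailing CASCADE/RESTRICT in every combination from
# drop_view.sql.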
_hash: 36b7a96e8518c113ef9d614a8b704ce02aa976d1b69949a8e9f68c287ded1420 file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: quoted_identifier: '"def"' - comma: ',' - table_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/escape.sql000066400000000000000000000001301503426445100236070ustar00rootroot00000000000000SELECT E'\''; SELECT E''''; SELECT E'''\''; SELECT E'\\\''''; SELECT E' \\ '' \\'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/escape.yml000066400000000000000000000023741503426445100236250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
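# The literals below show how E'...' escape strings round-trip into YAML:
# backslashes are doubled, embedded single quotes stay doubled, and the
# final multi-line literal keeps its newlines as \n escapes.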
_hash: 0715ffc2128b2231adee743a4a824ee3eb3ae38c95574386180f541feb95cdc9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'''\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\\\\\''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\n\n\\\\\n''\n\\\\'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/execute.sql000066400000000000000000000001311503426445100240120ustar00rootroot00000000000000EXECUTE statement_name; EXECUTE statement_name (1); EXECUTE statement_name (1, 'foo'); sqlfluff-3.4.2/test/fixtures/dialects/postgres/execute.yml000066400000000000000000000021751503426445100240260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d589e9c9eacb22e8d048d8d63422f89d12f83e18b1b909378c12ef2110b26834 file: - statement: execute_statement: keyword: EXECUTE object_reference: naked_identifier: statement_name - statement_terminator: ; - statement: execute_statement: keyword: EXECUTE object_reference: naked_identifier: statement_name bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: execute_statement: keyword: EXECUTE object_reference: naked_identifier: statement_name bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'foo'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/explain.sql000066400000000000000000000014001503426445100240100ustar00rootroot00000000000000explain ( analyze true, analyse true, verbose true, costs true, settings true, buffers true, wal true, timing true, summary true, format xml ) select 1; explain ( analyze false, analyse false, verbose false, costs false, settings false, buffers false, wal false, timing false, summary false, format xml ) select 1; explain ( analyze, analyse, verbose, costs, settings, buffers, wal, timing, summary, format xml ) select 1; explain analyze verbose select 1; explain analyse verbose select 1; explain analyze select 1; explain analyse select 1; explain (format text) select 1; explain (format json) select 1; explain (format yaml) select 1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/explain.yml000066400000000000000000000137201503426445100240220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
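# Coverage below: every EXPLAIN option spelled with explicit true/false
# booleans, the same options bare, both analyze/analyse spellings, and the
# FORMAT xml/text/json/yaml choices.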
_hash: b43be4e2d4d82bc59868e3fea7954af36498598340cdc0d885be35d2da8ecbf5 file: - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze boolean_literal: 'true' - comma: ',' - explain_option: keyword: analyse boolean_literal: 'true' - comma: ',' - explain_option: keyword: verbose boolean_literal: 'true' - comma: ',' - explain_option: keyword: costs boolean_literal: 'true' - comma: ',' - explain_option: keyword: settings boolean_literal: 'true' - comma: ',' - explain_option: keyword: buffers boolean_literal: 'true' - comma: ',' - explain_option: keyword: wal boolean_literal: 'true' - comma: ',' - explain_option: keyword: timing boolean_literal: 'true' - comma: ',' - explain_option: keyword: summary boolean_literal: 'true' - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze boolean_literal: 'false' - comma: ',' - explain_option: keyword: analyse boolean_literal: 'false' - comma: ',' - explain_option: keyword: verbose boolean_literal: 'false' - comma: ',' - explain_option: keyword: costs boolean_literal: 'false' - comma: ',' - explain_option: keyword: settings boolean_literal: 'false' - comma: ',' - explain_option: keyword: buffers boolean_literal: 'false' - comma: ',' - explain_option: keyword: wal boolean_literal: 'false' - comma: ',' - explain_option: keyword: timing boolean_literal: 'false' - comma: ',' - explain_option: keyword: summary boolean_literal: 'false' - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze - comma: ',' - explain_option: keyword: analyse - comma: ',' - explain_option: keyword: verbose - comma: ',' - explain_option: keyword: costs - comma: ',' - explain_option: keyword: settings - comma: ',' - explain_option: keyword: buffers - comma: ',' - explain_option: keyword: wal - comma: ',' - explain_option: keyword: timing - comma: ',' - explain_option: keyword: summary - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyze - keyword: verbose - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyse - keyword: verbose - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyze - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyse - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: text end_bracket: ) 
select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: json end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: yaml end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/filter.sql000066400000000000000000000001321503426445100236360ustar00rootroot00000000000000SELECT COUNT(*) FILTER (WHERE c_expires > CURRENT_TIMESTAMP) AS c_active FROM t_test; sqlfluff-3.4.2/test/fixtures/dialects/postgres/filter.yml000066400000000000000000000027011503426445100236440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5fb85303598d9837f8c369f9115123c276fe17f3d0bd54a1535380bec0c2b1a1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: c_expires comparison_operator: raw_comparison_operator: '>' bare_function: CURRENT_TIMESTAMP end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: c_active from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_test statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/full_text_search.sql000066400000000000000000000016371503426445100257170ustar00rootroot00000000000000SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery; SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector; SELECT to_tsvector('fat cats ate fat rats') @@ to_tsquery('fat & rat'); SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat'); SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat'); SELECT to_tsvector('error is not fatal') @@ to_tsquery('fatal <-> error'); SELECT phraseto_tsquery('cats ate rats'); SELECT phraseto_tsquery('the cats ate the rats'); SELECT 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector; SELECT 'fat | rat'::tsquery && 'cat'::tsquery; SELECT 'fat | rat'::tsquery || 'cat'::tsquery; SELECT to_tsquery('fat') <-> to_tsquery('rat'); SELECT 'cat'::tsquery @> 'cat & rat'::tsquery; SELECT 'cat'::tsquery <@ 'cat & rat'::tsquery; SELECT 'cat'::tsquery <@ '!cat & rat'::tsquery; SELECT !! 'cat'::tsquery; sqlfluff-3.4.2/test/fixtures/dialects/postgres/full_text_search.yml000066400000000000000000000232501503426445100257140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8bb9420dd29df72d6d1872592484ba21c305359c1dfe2b9b2582454aafa7cbc7 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'a fat cat sat on a mat and ate a fat rat'" casting_operator: '::' data_type: keyword: tsvector - binary_operator: '@@' - cast_expression: quoted_literal: "'cat & rat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'fat & cow'" casting_operator: '::' data_type: keyword: tsquery - binary_operator: '@@' - cast_expression: quoted_literal: "'a fat cat sat on a mat and ate a fat rat'" casting_operator: '::' data_type: keyword: tsvector - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: to_tsvector function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fat cats ate fat rats'" end_bracket: ) - binary_operator: '@@' - function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fat & rat'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'fat cats ate fat rats'" casting_operator: '::' data_type: keyword: tsvector binary_operator: '@@' function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fat & rat'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'fat cats ate fat rats'" casting_operator: '::' data_type: keyword: tsvector binary_operator: '@@' function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fat & rat'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: to_tsvector function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'error is not fatal'" end_bracket: ) - binary_operator: '@@' - function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fatal <-> error'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: phraseto_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'cats ate rats'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: phraseto_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'the cats ate the rats'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'a:1 b:2'" casting_operator: '::' data_type: 
keyword: tsvector - binary_operator: - pipe: '|' - pipe: '|' - cast_expression: quoted_literal: "'c:1 d:2 b:3'" casting_operator: '::' data_type: keyword: tsvector - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'fat | rat'" casting_operator: '::' data_type: keyword: tsquery - comparison_operator: - ampersand: '&' - ampersand: '&' - cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'fat | rat'" casting_operator: '::' data_type: keyword: tsquery - binary_operator: - pipe: '|' - pipe: '|' - cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'fat'" end_bracket: ) - binary_operator: <-> - function: function_name: function_name_identifier: to_tsquery function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'rat'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - binary_operator: '@>' - cast_expression: quoted_literal: "'cat & rat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - binary_operator: <@ - cast_expression: quoted_literal: "'cat & rat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - binary_operator: <@ - cast_expression: quoted_literal: "'!cat & rat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: full_text_search_operator: '!!' 
cast_expression: quoted_literal: "'cat'" casting_operator: '::' data_type: keyword: tsquery - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/grant_privileges.sql000066400000000000000000000026741503426445100257320ustar00rootroot00000000000000GRANT USAGE ON SCHEMA api TO web_anon; GRANT EXECUTE ON FUNCTION api.test TO web_anon; GRANT web_anon TO my_user; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO app; GRANT TRIGGER ON ALL TABLES IN SCHEMA my_schema TO app; GRANT USAGE ON DOMAIN my_domain TO my_user; GRANT USAGE ON FOREIGN DATA WRAPPER my_fdw TO my_user; GRANT USAGE ON FOREIGN SERVER fs TO my_user; GRANT EXECUTE ON PROCEDURE fn TO my_user; GRANT EXECUTE ON ROUTINE fn TO my_user; GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA my_schema TO my_user; GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA my_schema TO my_user; GRANT EXECUTE ON ALL ROUTINES IN SCHEMA my_schema TO my_user; GRANT USAGE ON LANGUAGE my_lang TO my_user; GRANT SELECT ON LARGE OBJECT 564182 TO my_user; GRANT ALL ON TABLESPACE my_tblspc TO my_user; GRANT USAGE ON TYPE my_type TO my_role; GRANT my_user TO my_group WITH ADMIN OPTION GRANTED BY CURRENT_USER; GRANT my_user TO my_group GRANTED BY SESSION_USER; GRANT my_user TO my_group WITH ADMIN OPTION GRANTED BY my_new_role; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO xyz; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO my_user, my_other_user WITH GRANT OPTION; GRANT SELECT ON abc TO xyz, mno; GRANT EXECUTE ON ALL ROUTINES IN SCHEMA my_schema TO my_user, my_other_user; GRANT EXECUTE ON FUNCTION test(a IN TEXT, b OUT TEXT, INOUT c TEXT, VARIADIC d TEXT) TO my_user; GRANT EXECUTE ON FUNCTION test(a TEXT, b my_table.my_column%type) TO my_user; sqlfluff-3.4.2/test/fixtures/dialects/postgres/grant_privileges.yml000066400000000000000000000230301503426445100257210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aebe7c883fd1accaed1dc7de8d30eb1df5fa6972af9ec015f498c09b50c9a0b9 file: - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: api - keyword: TO - role_reference: naked_identifier: web_anon - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - object_reference: - naked_identifier: api - dot: . 
- naked_identifier: test - keyword: TO - role_reference: naked_identifier: web_anon - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: web_anon - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: app - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: TRIGGER - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: app - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: my_domain - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FOREIGN - keyword: DATA - keyword: WRAPPER - object_reference: naked_identifier: my_fdw - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FOREIGN - keyword: SERVER - object_reference: naked_identifier: fs - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: PROCEDURE - object_reference: naked_identifier: fn - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ROUTINE - object_reference: naked_identifier: fn - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: FUNCTIONS - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: PROCEDURES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: ROUTINES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: LANGUAGE - object_reference: naked_identifier: my_lang - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: LARGE - keyword: OBJECT - numeric_literal: '564182' - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: TABLESPACE - object_reference: naked_identifier: my_tblspc - keyword: TO - 
role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: TYPE - object_reference: naked_identifier: my_type - keyword: TO - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: WITH - keyword: ADMIN - keyword: OPTION - keyword: GRANTED - keyword: BY - keyword: CURRENT_USER - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: GRANTED - keyword: BY - keyword: SESSION_USER - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: WITH - keyword: ADMIN - keyword: OPTION - keyword: GRANTED - keyword: BY - object_reference: naked_identifier: my_new_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: xyz - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: my_user - comma: ',' - role_reference: naked_identifier: my_other_user - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: abc - keyword: TO - role_reference: naked_identifier: xyz - comma: ',' - role_reference: naked_identifier: mno - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: ROUTINES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - comma: ',' - role_reference: naked_identifier: my_other_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: test - function_parameter_list: bracketed: - start_bracket: ( - parameter: a - keyword: IN - data_type: keyword: TEXT - comma: ',' - parameter: b - keyword: OUT - data_type: keyword: TEXT - comma: ',' - keyword: INOUT - parameter: c - data_type: keyword: TEXT - comma: ',' - keyword: VARIADIC - parameter: d - data_type: keyword: TEXT - end_bracket: ) - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: test - function_parameter_list: bracketed: - start_bracket: ( - parameter: a - data_type: keyword: TEXT - comma: ',' - parameter: b - column_type_reference: column_reference: - naked_identifier: my_table - dot: . 
- naked_identifier: my_column binary_operator: '%' keyword: type - end_bracket: ) - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/group_by.sql000066400000000000000000000015241503426445100242050ustar00rootroot00000000000000SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY GROUPING SETS ( (region), (city), (region, city), () ); SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY ROLLUP ( (region), (city) ); SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY CUBE ( (region), (city) ); sqlfluff-3.4.2/test/fixtures/dialects/postgres/group_by.yml000066400000000000000000000310001503426445100241770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 18124d129b5b153297e8f27e9a86e7f1c039154b996e15fdb7b50b47d9fedf99 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - 
keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: region - comma: ',' - column_reference: naked_identifier: city - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: ROLLUP bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: 
function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: CUBE bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/import_foreign_schema.sql000066400000000000000000000003731503426445100267230ustar00rootroot00000000000000IMPORT FOREIGN SCHEMA foreign_films FROM SERVER film_server INTO films; IMPORT FOREIGN SCHEMA "TEST" FROM SERVER test_server INTO test; IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors) FROM SERVER film_server INTO films; sqlfluff-3.4.2/test/fixtures/dialects/postgres/import_foreign_schema.yml000066400000000000000000000033261503426445100267260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 07e21c879bb73b81e693b06dd837baeed944a5fb81ca3ea5230e7466117479e9 file: - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: naked_identifier: foreign_films - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: film_server - keyword: INTO - schema_reference: naked_identifier: films - statement_terminator: ; - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: quoted_identifier: '"TEST"' - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: test_server - keyword: INTO - schema_reference: naked_identifier: test - statement_terminator: ; - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: naked_identifier: foreign_films - keyword: LIMIT - keyword: TO - bracketed: - start_bracket: ( - naked_identifier_all: actors - comma: ',' - naked_identifier_all: directors - end_bracket: ) - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: film_server - keyword: INTO - schema_reference: naked_identifier: films - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/insert.sql000066400000000000000000000042331503426445100236630ustar00rootroot00000000000000INSERT INTO foo (bar) VALUES(current_timestamp); INSERT INTO foo (bar, baz) VALUES(1, 2), (3, 4); INSERT INTO foo (bar, baz) VALUES(1 + 1, 2), (3, 4); INSERT INTO foo (bar) VALUES(DEFAULT); INSERT INTO distributors AS d (did, dname) VALUES (8, 'Anvil Distribution'); INSERT INTO test (id, col1) OVERRIDING SYSTEM VALUE VALUES (1, 'val'); INSERT INTO test (id, col1) OVERRIDING USER VALUE VALUES (1, 'val'); INSERT INTO foo (bar) DEFAULT VALUES; INSERT INTO films SELECT * FROM tmp_films WHERE date_prod < '2004-05-07'; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING *; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING bar; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING bar AS some_alias; INSERT INTO foo (bar, baz) VALUES(1, 2) RETURNING bar, baz; INSERT INTO foo (bar, baz) VALUES(1, 2) RETURNING bar AS alias1, baz AS alias2; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET baz = EXCLUDED.baz; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO NOTHING; INSERT INTO foo AS f (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET baz = EXCLUDED.baz || ' (formerly ' || f.baz || ')' WHERE f.zipcode != '21201'; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT ON CONSTRAINT foo_pkey DO NOTHING; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) WHERE is_active DO NOTHING; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET (baz) = (SELECT baz FROM foobar WHERE bar = 1); INSERT INTO megatable (megacolumn) SELECT * FROM ( VALUES ( 'megavalue' ) ) AS tmp (megacolumn) WHERE NOT EXISTS ( SELECT FROM megatable AS mt WHERE mt.megacolumn = tmp.megacolumn ) ON CONFLICT DO NOTHING; INSERT INTO abc (foo, bar) SELECT foo, bar FROM baz RETURNING quux ; INSERT INTO tbl_a ( val1 , val2 ) SELECT val1 , val2 FROM tbl_2 ON CONFLICT ( val1 , COALESCE(val2, '') ) DO NOTHING; INSERT INTO prompt_variants ( test, test2 ) SELECT test, test2 RETURNING test, test2; INSERT INTO baz (state, state_changed_at, instance_id) SELECT 1, 2, 3 ON CONFLICT (instance_id) DO UPDATE SET state = foo, state_changed_at = bar; 
sqlfluff-3.4.2/test/fixtures/dialects/postgres/insert.yml000066400000000000000000000610111503426445100236620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 114ebc036df2cf8cec886322564883072a43209ebd6e31cd69d7edafb4037acd file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: distributors - alias_expression: alias_operator: keyword: AS naked_identifier: d - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - comma: ',' - column_reference: naked_identifier: dname - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '8' - comma: ',' - expression: quoted_literal: "'Anvil Distribution'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - keyword: OVERRIDING - keyword: SYSTEM - keyword: VALUE - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'val'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - 
bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - keyword: OVERRIDING - keyword: USER - keyword: VALUE - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'val'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: films - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_films where_clause: keyword: WHERE expression: column_reference: naked_identifier: date_prod comparison_operator: raw_comparison_operator: < quoted_literal: "'2004-05-07'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - star: '*' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - alias_expression: alias_operator: keyword: AS naked_identifier: some_alias - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - comma: ',' - expression: column_reference: naked_identifier: baz - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - 
alias_expression: alias_operator: keyword: AS naked_identifier: alias1 - comma: ',' - expression: column_reference: naked_identifier: baz - alias_expression: alias_operator: keyword: AS naked_identifier: alias2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: EXCLUDED - dot: . - naked_identifier: baz - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - alias_expression: alias_operator: keyword: AS naked_identifier: f - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - expression: - column_reference: - naked_identifier: EXCLUDED - dot: . - naked_identifier: baz - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' (formerly '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: f - dot: . - naked_identifier: baz - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" - keyword: WHERE - expression: column_reference: - naked_identifier: f - dot: . - naked_identifier: zipcode comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' quoted_literal: "'21201'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: - keyword: 'ON' - keyword: CONSTRAINT - parameter: foo_pkey - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) keyword: WHERE expression: column_reference: naked_identifier: is_active - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - bracketed: start_bracket: ( column_reference: naked_identifier: baz end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foobar where_clause: keyword: WHERE expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: megatable - bracketed: start_bracket: ( column_reference: naked_identifier: megacolumn end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'megavalue'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tmp bracketed: start_bracket: ( identifier_list: naked_identifier: megacolumn end_bracket: ) where_clause: keyword: WHERE expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT from_clause: keyword: FROM 
from_expression: from_expression_element: table_expression: table_reference: naked_identifier: megatable alias_expression: alias_operator: keyword: AS naked_identifier: mt where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: mt - dot: . - naked_identifier: megacolumn - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tmp - dot: . - naked_identifier: megacolumn end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: abc - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comma: ',' - column_reference: naked_identifier: bar - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz - keyword: RETURNING - expression: column_reference: naked_identifier: quux - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl_a - bracketed: - start_bracket: ( - column_reference: naked_identifier: val1 - comma: ',' - column_reference: naked_identifier: val2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: val1 - comma: ',' - select_clause_element: column_reference: naked_identifier: val2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_2 - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: val1 comma: ',' function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val2 - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) end_bracket: ) - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: prompt_variants - bracketed: - start_bracket: ( - column_reference: naked_identifier: test - comma: ',' - column_reference: naked_identifier: test2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: test - comma: ',' - select_clause_element: column_reference: naked_identifier: test2 - keyword: RETURNING - expression: column_reference: naked_identifier: test - comma: ',' - expression: column_reference: naked_identifier: test2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: baz - bracketed: - start_bracket: ( - column_reference: naked_identifier: state - comma: ',' - column_reference: naked_identifier: state_changed_at - comma: ',' - column_reference: naked_identifier: instance_id - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' - comma: ',' - select_clause_element: numeric_literal: '3' - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( 
column_reference: naked_identifier: instance_id end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: foo - comma: ',' - column_reference: naked_identifier: state_changed_at - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/is_json.sql000066400000000000000000000032571503426445100240300ustar00rootroot00000000000000-- PostgreSQL 16 IS JSON syntax tests -- Based on: https://www.postgresql.org/docs/16/functions-json.html#FUNCTIONS-SQLJSON-MISC -- Basic IS JSON syntax SELECT '{}' IS JSON; SELECT '[]' IS JSON; SELECT '"test"' IS JSON; SELECT 'invalid' IS JSON; -- IS NOT JSON syntax SELECT 'invalid' IS NOT JSON; SELECT '{}' IS NOT JSON; -- IS JSON with type specification SELECT '{}' IS JSON OBJECT; SELECT '[]' IS JSON ARRAY; SELECT '"test"' IS JSON SCALAR; SELECT '{"a": 1}' IS JSON VALUE; -- IS NOT JSON with type specification SELECT '"test"' IS NOT JSON OBJECT; SELECT '{}' IS NOT JSON ARRAY; SELECT '[]' IS NOT JSON SCALAR; SELECT 'invalid' IS NOT JSON VALUE; -- IS JSON with UNIQUE KEYS SELECT '{"a": 1, "b": 2}' IS JSON WITH UNIQUE KEYS; SELECT '{"a": 1, "a": 2}' IS JSON WITH UNIQUE KEYS; SELECT '{"a": 1, "b": 2}' IS JSON WITHOUT UNIQUE KEYS; -- IS JSON with type and unique keys SELECT '{"a": 1, "b": 2}' IS JSON OBJECT WITH UNIQUE KEYS; SELECT '{"a": 1, "a": 2}' IS JSON OBJECT WITH UNIQUE KEYS; SELECT '{"a": 1, "b": 2}' IS JSON OBJECT WITHOUT UNIQUE KEYS; -- IS NOT JSON with type and unique keys SELECT '[]' IS NOT JSON OBJECT WITH UNIQUE KEYS; SELECT '[1, 2]' IS NOT JSON OBJECT WITHOUT UNIQUE KEYS; -- Complex expressions with IS JSON SELECT col1 IS JSON, col2 IS NOT JSON ARRAY FROM table1; SELECT CASE WHEN data IS JSON OBJECT THEN 'valid' ELSE 'invalid' END FROM table1; -- IS JSON in WHERE clauses SELECT * FROM table1 WHERE data IS JSON; SELECT * FROM table1 WHERE config IS NOT JSON OBJECT; SELECT * FROM table1 WHERE metadata IS JSON WITH UNIQUE KEYS; -- IS JSON with column expressions SELECT (column_name::text) IS JSON FROM table1; SELECT COALESCE(data, '{}') IS JSON OBJECT FROM table1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/is_json.yml000066400000000000000000000300651503426445100240270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bf62473fa3cda540f26cc43c6685cca05769a1c7c06dc9d927603ffffe31b930 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{}'" - keyword: IS - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'[]'" - keyword: IS - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'\"test\"'" - keyword: IS - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'invalid'" - keyword: IS - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'invalid'" - keyword: IS - keyword: NOT - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{}'" - keyword: IS - keyword: NOT - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{}'" - keyword: IS - keyword: JSON - keyword: OBJECT - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'[]'" - keyword: IS - keyword: JSON - keyword: ARRAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'\"test\"'" - keyword: IS - keyword: JSON - keyword: SCALAR - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1}'" - keyword: IS - keyword: JSON - keyword: VALUE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'\"test\"'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: OBJECT - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{}'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: ARRAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'[]'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: SCALAR - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'invalid'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: VALUE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"b\": 2}'" - keyword: IS - keyword: JSON - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"a\": 2}'" - keyword: IS - keyword: JSON - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"b\": 2}'" - keyword: IS - keyword: JSON - keyword: WITHOUT - keyword: UNIQUE - keyword: 
KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"b\": 2}'" - keyword: IS - keyword: JSON - keyword: OBJECT - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"a\": 2}'" - keyword: IS - keyword: JSON - keyword: OBJECT - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\": 1, \"b\": 2}'" - keyword: IS - keyword: JSON - keyword: OBJECT - keyword: WITHOUT - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'[]'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: OBJECT - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'[1, 2]'" - keyword: IS - keyword: NOT - keyword: JSON - keyword: OBJECT - keyword: WITHOUT - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - column_reference: naked_identifier: col1 - keyword: IS - keyword: JSON - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: col2 - keyword: IS - keyword: NOT - keyword: JSON - keyword: ARRAY from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: data - keyword: IS - keyword: JSON - keyword: OBJECT - keyword: THEN - expression: quoted_literal: "'valid'" - else_clause: keyword: ELSE expression: quoted_literal: "'invalid'" - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: data - keyword: IS - keyword: JSON - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: config - keyword: IS - keyword: NOT - keyword: JSON - keyword: OBJECT - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: 
- column_reference: naked_identifier: metadata - keyword: IS - keyword: JSON - keyword: WITH - keyword: UNIQUE - keyword: KEYS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: column_name casting_operator: '::' data_type: keyword: text end_bracket: ) - keyword: IS - keyword: JSON from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: data - comma: ',' - expression: quoted_literal: "'{}'" - end_bracket: ) - keyword: IS - keyword: JSON - keyword: OBJECT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/is_unknown.sql000066400000000000000000000000651503426445100245500ustar00rootroot00000000000000SELECT TRUE IS UNKNOWN; SELECT TRUE IS NOT UNKNOWN; sqlfluff-3.4.2/test/fixtures/dialects/postgres/is_unknown.yml000066400000000000000000000016351503426445100245560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a5302f4fd36f38a88561efc3cfe73b4ac9b7f36af37162454f5e7976afcb7275 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'TRUE' keyword: IS null_literal: UNKNOWN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'TRUE' - keyword: IS - keyword: NOT - null_literal: UNKNOWN - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_lateral.sql000066400000000000000000000015051503426445100250210ustar00rootroot00000000000000-- Postgres should work with standard joins select tbl1.id from tbl1 join tbl2 on tbl1.id = tbl2.id; -- ... but also with lateral joins select tbl1.id from tbl1 join lateral (SELECT * FROM tbl2) AS foo ON tbl1.id = foo.id; -- ... and mixed ones as well! select tbl1.id from tbl1 full outer join lateral (SELECT * FROM tbl2) AS tbl2 on tbl1.id = tbl2.id cross join tbl3 left join lateral (SELECT * FROM tbl4) AS tbl4 on tbl1.id = tbl4.id; -- lateral with comma cross join syntax SELECT X.NUM, D.id FROM tbl1 AS D, LATERAL (values (0), (1)) AS X (NUM); -- lateral with function SELECT m.name AS mname, pname FROM manufacturers m, LATERAL get_product_names(m.id) pname; SELECT m.name AS mname, pname FROM manufacturers m LEFT JOIN LATERAL get_product_names(m.id) pname ON true; SELECT X.NUM FROM LATERAL (values (0), (1)) AS X (NUM); sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_lateral.yml000066400000000000000000000310341503426445100250230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae546a1a74bf0964dd8d94303015f5337dcffdbf2ceba9cd83cef14fdefbb1f1 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: tbl2 join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: keyword: join from_expression_element: keyword: lateral table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: foo join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: foo - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - join_clause: - keyword: full - keyword: outer - keyword: join - from_expression_element: keyword: lateral table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: id - join_clause: - keyword: cross - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl3 - join_clause: - keyword: left - keyword: join - from_expression_element: keyword: lateral table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl4 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl4 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl4 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: X - dot: . - naked_identifier: NUM - comma: ',' - select_clause_element: column_reference: - naked_identifier: D - dot: . - naked_identifier: id from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: alias_operator: keyword: AS naked_identifier: D - comma: ',' - from_expression: from_expression_element: keyword: LATERAL bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: X bracketed: start_bracket: ( identifier_list: naked_identifier: NUM end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: m - dot: . - naked_identifier: name alias_expression: alias_operator: keyword: AS naked_identifier: mname - comma: ',' - select_clause_element: column_reference: naked_identifier: pname from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: manufacturers alias_expression: naked_identifier: m - comma: ',' - from_expression: from_expression_element: keyword: LATERAL table_expression: function: function_name: function_name_identifier: get_product_names function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: m - dot: . - naked_identifier: id end_bracket: ) alias_expression: naked_identifier: pname - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: m - dot: . - naked_identifier: name alias_expression: alias_operator: keyword: AS naked_identifier: mname - comma: ',' - select_clause_element: column_reference: naked_identifier: pname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: manufacturers alias_expression: naked_identifier: m join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: keyword: LATERAL table_expression: function: function_name: function_name_identifier: get_product_names function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: m - dot: . 
- naked_identifier: id end_bracket: ) alias_expression: naked_identifier: pname - join_on_condition: keyword: 'ON' expression: boolean_literal: 'true' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: X - dot: . - naked_identifier: NUM from_clause: keyword: FROM from_expression: from_expression_element: keyword: LATERAL bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: X bracketed: start_bracket: ( identifier_list: naked_identifier: NUM end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_no_space.sql000066400000000000000000000002161503426445100251620ustar00rootroot00000000000000-- Not missing space before ON SELECT * FROM "my_table2" INNER JOIN "my_database"."my_schema"."my_table"ON ("my_table2".foo = "my_table".foo) sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_no_space.yml000066400000000000000000000033451503426445100251720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 035e5deaa786c505125b234894eff38e45deeaebc72d8dd2b29c345d09871420 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"my_table2"' join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - quoted_identifier: '"my_database"' - dot: . - quoted_identifier: '"my_schema"' - dot: . - quoted_identifier: '"my_table"' - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: quoted_identifier: '"my_table2"' dot: . naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"my_table"' dot: . 
naked_identifier: foo end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_types.sql000066400000000000000000000027421503426445100245450ustar00rootroot00000000000000-- inner join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee INNER JOIN department ON employee.deptno = department.deptno; -- left join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT OUTER JOIN department ON employee.deptno = department.deptno; -- right join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT OUTER JOIN department ON employee.deptno = department.deptno; -- full join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL OUTER JOIN department ON employee.deptno = department.deptno; -- cross join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee CROSS JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee, department; sqlfluff-3.4.2/test/fixtures/dialects/postgres/join_types.yml000066400000000000000000000343251503426445100245510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 90e19f9063ac31d3d2dba6ebfbd6c2ddaceb381a3fc409787f58bb4f83ad59a2 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . 
- naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/json_operators.sql000066400000000000000000000024321503426445100254250ustar00rootroot00000000000000-- SQL from issue #2033 SELECT COALESCE(doc#>>'{fields}','') AS field FROM mytable WHERE doc ->> 'some_field' = 'some_value'; -- Get JSON array element (indexed from zero, negative integers count from the end) SELECT '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json->2; -- Get JSON object field by key SELECT '{"a": {"b":"foo"}}'::json->'a'; -- Get JSON array element as text SELECT '[1,2,3]'::json->>2; -- Get JSON object field as text SELECT '{"a":1,"b":2}'::json->>'b'; -- Get JSON object at the specified path SELECT '{"a": {"b":{"c": "foo"}}}'::json#>'{a,b}'; -- Get JSON object at the specified path as text SELECT '{"a":[1,2,3],"b":[4,5,6]}'::json#>>'{a,2}'; SELECT '{"a":1, "b":2}'::jsonb @> '{"b":2}'::jsonb, '{"b":2}'::jsonb <@ '{"a":1, "b":2}'::jsonb, '{"a":1, "b":2}'::jsonb ? 'b', '{"a":1, "b":2, "c":3}'::jsonb ?| array['b', 'd'], '["a", "b", "c"]'::jsonb ?& array['a', 'b'], '["a", {"b":1}]'::jsonb #- '{1,b}', '{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? (@ > 2)', '{"a":[1,2,3,4,5]}'::jsonb @@ '$.a[*] > 2'; SELECT json_object('code' VALUE 'P123', 'title': 'Jaws'); SELECT json_object( 'word': CASE WHEN str = '1' THEN 'One' WHEN str = '2' THEN 'Two' ELSE 'Unknown' END ) AS json_column FROM first_table; sqlfluff-3.4.2/test/fixtures/dialects/postgres/json_operators.yml000066400000000000000000000240521503426445100254310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bae80b2997fbfc750c5f908409d1fa0ed27921d4f42da06d4ec8b3e61deed46a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: doc binary_operator: '#>>' quoted_literal: "'{fields}'" - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: field from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable where_clause: keyword: WHERE expression: - column_reference: naked_identifier: doc - binary_operator: ->> - quoted_literal: "'some_field'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[{\"a\":\"foo\"},{\"b\":\"bar\"},{\"c\":\"baz\"}]'" casting_operator: '::' data_type: keyword: json binary_operator: -> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\": {\"b\":\"foo\"}}'" casting_operator: '::' data_type: keyword: json binary_operator: -> quoted_literal: "'a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[1,2,3]'" casting_operator: '::' data_type: keyword: json binary_operator: ->> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":1,\"b\":2}'" casting_operator: '::' data_type: keyword: json binary_operator: ->> quoted_literal: "'b'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\": {\"b\":{\"c\": \"foo\"}}}'" casting_operator: '::' data_type: keyword: json binary_operator: '#>' quoted_literal: "'{a,b}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":[1,2,3],\"b\":[4,5,6]}'" casting_operator: '::' data_type: keyword: json binary_operator: '#>>' quoted_literal: "'{a,2}'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - cast_expression: quoted_literal: "'{\"a\":1, \"b\":2}'" casting_operator: '::' data_type: keyword: jsonb - binary_operator: '@>' - cast_expression: quoted_literal: "'{\"b\":2}'" casting_operator: '::' data_type: keyword: jsonb - comma: ',' - select_clause_element: expression: - cast_expression: quoted_literal: "'{\"b\":2}'" casting_operator: '::' data_type: keyword: jsonb - binary_operator: <@ - cast_expression: quoted_literal: "'{\"a\":1, \"b\":2}'" casting_operator: '::' data_type: keyword: jsonb - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":1, \"b\":2}'" casting_operator: '::' data_type: keyword: jsonb binary_operator: '?' 
quoted_literal: "'b'" - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":1, \"b\":2, \"c\":3}'" casting_operator: '::' data_type: keyword: jsonb binary_operator: ?| typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'d'" - end_square_bracket: ']' - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'[\"a\", \"b\", \"c\"]'" casting_operator: '::' data_type: keyword: jsonb binary_operator: ?& typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - end_square_bracket: ']' - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'[\"a\", {\"b\":1}]'" casting_operator: '::' data_type: keyword: jsonb binary_operator: '#-' quoted_literal: "'{1,b}'" - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":[1,2,3,4,5]}'" casting_operator: '::' data_type: keyword: jsonb binary_operator: '@?' quoted_literal: "'$.a[*] ? (@ > 2)'" - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":[1,2,3,4,5]}'" casting_operator: '::' data_type: keyword: jsonb binary_operator: '@@' quoted_literal: "'$.a[*] > 2'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: json_object function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'code'" - keyword: VALUE - expression: quoted_literal: "'P123'" - comma: ',' - expression: quoted_literal: "'title'" - colon: ':' - expression: quoted_literal: "'Jaws'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: json_object function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'word'" - colon: ':' - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: str comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" - keyword: THEN - expression: quoted_literal: "'One'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: str comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2'" - keyword: THEN - expression: quoted_literal: "'Two'" - else_clause: keyword: ELSE expression: quoted_literal: "'Unknown'" - keyword: END - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: json_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: first_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/limit_clause.sql000066400000000000000000000003211503426445100250230ustar00rootroot00000000000000SELECT col_a FROM test_table LIMIT 2 * 5 * 10 OFFSET (5 + 10); SELECT col_a FROM test_table LIMIT (10 / 10) OFFSET 10 - 5; SELECT col_a FROM test_table LIMIT 100; SELECT col_a FROM test_table LIMIT ALL; sqlfluff-3.4.2/test/fixtures/dialects/postgres/limit_clause.yml000066400000000000000000000056751503426445100250460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c001065984213084c6197e3e7c67ef167e71f952c7aff6a88f0a4be14228046d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '5' - binary_operator: '*' - numeric_literal: '10' - keyword: OFFSET - expression: bracketed: start_bracket: ( expression: - numeric_literal: '5' - binary_operator: + - numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - bracketed: start_bracket: ( expression: - numeric_literal: '10' - binary_operator: / - numeric_literal: '10' end_bracket: ) - keyword: OFFSET - expression: - numeric_literal: '10' - binary_operator: '-' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: keyword: LIMIT numeric_literal: '100' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - keyword: ALL - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/load.sql000066400000000000000000000000711503426445100232720ustar00rootroot00000000000000LOAD 'funzioniGDB.so'; LOAD '/some/path/funzioniGDB.so'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/load.yml000066400000000000000000000012051503426445100232740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
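The fixture headers above describe the regeneration workflow for these YAML files; the nested record they serialize is also reachable directly from Python. A minimal sketch (an editorial illustration, not a repository file), assuming the simple API's parse() returns the same record structure these fixtures mirror:

import sqlfluff

# Parse one of the LIMIT-clause statements exercised above with the
# postgres dialect; the result is a nested record of segment names to
# raw values, the same shape serialized in the fixture YAML.
record = sqlfluff.parse(
    "SELECT col_a FROM test_table LIMIT ALL;", dialect="postgres"
)
print(record)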
_hash: a94deec8a1b961eb911df56dbfaba996a3b6669603cad435e7ec13091bbabdb2 file: - statement: load_statement: keyword: LOAD quoted_literal: "'funzioniGDB.so'" - statement_terminator: ; - statement: load_statement: keyword: LOAD quoted_literal: "'/some/path/funzioniGDB.so'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/lock_table.sql000066400000000000000000000003061503426445100244530ustar00rootroot00000000000000LOCK TABLE films IN SHARE MODE; LOCK TABLE films IN SHARE ROW EXCLUSIVE MODE; LOCK TABLE team IN ACCESS EXCLUSIVE MODE; lock table stud1 IN SHARE UPDATE EXCLUSIVE MODE; LOCK TABLE crontable NOWAIT; sqlfluff-3.4.2/test/fixtures/dialects/postgres/lock_table.yml000066400000000000000000000030511503426445100244550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0b634bc3cad1afb1344957fa51eeb1e7115d94e2c30635877c25bbed99daad50 file: - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: films - keyword: IN - keyword: SHARE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: films - keyword: IN - keyword: SHARE - keyword: ROW - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: team - keyword: IN - keyword: ACCESS - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: lock - keyword: table - table_reference: naked_identifier: stud1 - keyword: IN - keyword: SHARE - keyword: UPDATE - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: crontable - keyword: NOWAIT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/meta_commands_query_buffer.sql000066400000000000000000000010701503426445100277400ustar00rootroot00000000000000SELECT format('create index on my_table(%I)', attname) FROM pg_attribute WHERE attrelid = 'my_table'::regclass AND attnum > 0 ORDER BY attnum \gexec SELECT 'hello' AS var1, 10 AS var2 \gset SELECT 'hello' AS var1, 10 AS var2 \gset result_ SELECT EXISTS(SELECT 1 FROM customer WHERE customer_id = 123) as is_customer, EXISTS(SELECT 1 FROM employee WHERE employee_id = 456) as is_employee \gset SELECT 'hello' AS my_psql_var \gset SELECT :'my_psql_var'; SELECT relname, relkind FROM pg_class LIMIT 1 \gset SELECT i FROM generate_series(1,2) i \gset prefix sqlfluff-3.4.2/test/fixtures/dialects/postgres/meta_commands_query_buffer.yml000066400000000000000000000172451503426445100277550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
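The statements fixtured in this stretch — LOAD, the LOCK TABLE modes, and the psql \gset/\gexec meta-commands — are Postgres-specific, so dialect selection is what these tests pin down. A short sketch (an editorial illustration, not a repository file) showing the effect through the simple linting API; the expectation that the default ansi dialect rejects LOCK TABLE is an assumption based on these fixtures living under dialects/postgres:

import sqlfluff

sql = "LOCK TABLE films IN SHARE MODE;"
# With the postgres dialect this parses, so only style violations
# (if any) come back.
print(sqlfluff.lint(sql, dialect="postgres"))
# Under the default ansi dialect the same string would be expected to
# return parsing violations instead, since LOCK TABLE is dialect grammar.
print(sqlfluff.lint(sql))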
_hash: dd92c9922ca49c12d367a27af59354a4b304093756bd24c4a391acfbb9e97b4d file: - statement: meta_command_statement: - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: format function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'create index on my_table(%I)'" - comma: ',' - expression: column_reference: naked_identifier: attname - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_attribute where_clause: keyword: WHERE expression: - column_reference: naked_identifier: attrelid - comparison_operator: raw_comparison_operator: '=' - cast_expression: quoted_literal: "'my_table'" casting_operator: '::' data_type: data_type_identifier: regclass - binary_operator: AND - column_reference: naked_identifier: attnum - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: attnum - meta_command: \gexec - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'hello'" alias_expression: alias_operator: keyword: AS naked_identifier: var1 - comma: ',' - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: var2 - meta_command: \gset - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'hello'" alias_expression: alias_operator: keyword: AS naked_identifier: var1 - comma: ',' - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: var2 - meta_command: \gset result_ - select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: EXISTS function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer where_clause: keyword: WHERE expression: column_reference: naked_identifier: customer_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: is_customer - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXISTS function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '456' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: is_employee - meta_command: \gset - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'hello'" alias_expression: alias_operator: keyword: AS naked_identifier: my_psql_var - meta_command: \gset - select_statement: select_clause: keyword: SELECT select_clause_element: psql_variable: colon: ':' quoted_literal: "'my_psql_var'" - statement_terminator: ; - statement: meta_command_statement: - select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: relname - comma: ',' - select_clause_element: column_reference: naked_identifier: relkind from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_class limit_clause: keyword: LIMIT numeric_literal: '1' - meta_command: \gset - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: i from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: generate_series function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: i - meta_command: \gset prefix sqlfluff-3.4.2/test/fixtures/dialects/postgres/notifications.sql000066400000000000000000000003701503426445100252260ustar00rootroot00000000000000LISTEN virtual; NOTIFY virtual; UNLISTEN virtual; LISTEN "virtual listener"; NOTIFY "virtual listener"; UNLISTEN "virtual listener"; LISTEN listener_a; LISTEN listener_b; NOTIFY listener_a, 'payload_a'; NOTIFY listener_b, 'payload_b'; UNLISTEN * sqlfluff-3.4.2/test/fixtures/dialects/postgres/notifications.yml000066400000000000000000000033751503426445100252400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc80629f888da31240579b304db060aa7eb4355012ee4b1315d4b00e471361c3 file: - statement: listen_statement: keyword: LISTEN naked_identifier: virtual - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: virtual - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN naked_identifier: virtual - statement_terminator: ; - statement: listen_statement: keyword: LISTEN quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: listen_statement: keyword: LISTEN naked_identifier: listener_a - statement_terminator: ; - statement: listen_statement: keyword: LISTEN naked_identifier: listener_b - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: listener_a comma: ',' quoted_literal: "'payload_a'" - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: listener_b comma: ',' quoted_literal: "'payload_b'" - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN star: '*' sqlfluff-3.4.2/test/fixtures/dialects/postgres/null_filters.sql000066400000000000000000000006421503426445100250610ustar00rootroot00000000000000-- Check nullability tests with standard and non-standard syntax SELECT nullable_field IS NULL as standard_is_null, nullable_field ISNULL as non_standard_is_null, nullable_field IS NOT NULL as standard_not_null, nullable_field NOTNULL as non_standard_not_null FROM t_test WHERE nullable_field IS NULL OR nullable_field ISNULL OR nullable_field IS NOT NULL OR nullable_field NOTNULL 
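null_filters.sql above deliberately pairs the standard predicates (IS NULL, IS NOT NULL) with their non-standard Postgres spellings (ISNULL, NOTNULL). A quick way to confirm all four spellings parse under the postgres dialect — a sketch for illustration, not a repository file; the exact exception class raised on a parse failure is deliberately not assumed:

import sqlfluff

for predicate in ("IS NULL", "ISNULL", "IS NOT NULL", "NOTNULL"):
    statement = f"SELECT nullable_field {predicate} AS flag FROM t_test;"
    try:
        sqlfluff.parse(statement, dialect="postgres")
        print(predicate, "-> parsed")
    except Exception as err:  # parse failures raise; class name not assumed
        print(predicate, "-> failed:", err)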
sqlfluff-3.4.2/test/fixtures/dialects/postgres/null_filters.yml000066400000000000000000000050531503426445100250640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c65e8649bb622ca0c092ee6651dac408914b2bbe39213b74fc320dc94e45ca5e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: IS null_literal: 'NULL' alias_expression: alias_operator: keyword: as naked_identifier: standard_is_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: ISNULL alias_expression: alias_operator: keyword: as naked_identifier: non_standard_is_null - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' alias_expression: alias_operator: keyword: as naked_identifier: standard_not_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: NOTNULL alias_expression: alias_operator: keyword: as naked_identifier: non_standard_not_null from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_test where_clause: keyword: WHERE expression: - column_reference: naked_identifier: nullable_field - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: ISNULL - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: NOTNULL sqlfluff-3.4.2/test/fixtures/dialects/postgres/overlaps.sql000066400000000000000000000023231503426445100242100ustar00rootroot00000000000000-- with DATE select start_date, end_date from test_overlaps where (start_date, end_date) overlaps (DATE '2023-02-15', DATE '2023-03-15'); select start_date, end_date from test_overlaps where (start_date, end_date) overlaps ('2023-02-15', '2023-03-15'); SELECT t1.start_date, t1.end_date FROM test_overlaps1 AS t1 LEFT JOIN test_overlaps2 AS t2 WHERE (t1.start_date, t1.end_date) OVERLAPS (t2.start_date, t2.end_date); SELECT start_date, end_date FROM test_overlaps WHERE (start_date, end_date) OVERLAPS ('2023-12-30T00:00:00'::TIMESTAMP, '2024-01-14T13:01:39.884877'::TIMESTAMP); SELECT start_date, end_date FROM test_overlaps WHERE ('2023-12-30T00:00:00'::TIMESTAMP, '2024-01-14T13:01:39.884877'::TIMESTAMP) OVERLAPS (start_date, end_date); SELECT start_date, end_date FROM test_overlaps WHERE (start_date, end_date) OVERLAPS (DATE '2023-12-30', INTERVAL '2 HOURS'); SELECT start_date, end_date FROM test_overlaps WHERE (DATE '2023-12-30', DATE '2024-01-14') OVERLAPS (start_date, end_date); SELECT start_date_1, start_date_2, end_date FROM test_overlaps WHERE (DATE '2023-12-30', DATE '2024-01-14') OVERLAPS (GREATEST(start_date_1, start_date_2), end_date); sqlfluff-3.4.2/test/fixtures/dialects/postgres/overlaps.yml000066400000000000000000000303371503426445100242200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5d88e89fe2bdfbe86ef2ae5fb1319b78df369450453177510a2bd77364cb0d9b file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: where expression: overlaps_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - keyword: overlaps - bracketed: - start_bracket: ( - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-02-15'" - comma: ',' - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-03-15'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: where expression: overlaps_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - keyword: overlaps - bracketed: - start_bracket: ( - datetime_literal: quoted_literal: "'2023-02-15'" - comma: ',' - datetime_literal: quoted_literal: "'2023-03-15'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps1 alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps2 alias_expression: alias_operator: keyword: AS naked_identifier: t2 where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: start_date - comma: ',' - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: end_date - end_bracket: ) - keyword: OVERLAPS - bracketed: - start_bracket: ( - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: start_date - comma: ',' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: end_date - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - keyword: OVERLAPS - bracketed: - start_bracket: ( - cast_expression: quoted_literal: "'2023-12-30T00:00:00'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - comma: ',' - cast_expression: quoted_literal: "'2024-01-14T13:01:39.884877'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - cast_expression: quoted_literal: "'2023-12-30T00:00:00'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - comma: ',' - cast_expression: quoted_literal: "'2024-01-14T13:01:39.884877'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - end_bracket: ) - keyword: OVERLAPS - bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - keyword: OVERLAPS - bracketed: - start_bracket: ( - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-12-30'" - comma: ',' - datetime_literal: datetime_type_identifier: keyword: INTERVAL quoted_literal: "'2 HOURS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-12-30'" - comma: ',' - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2024-01-14'" - end_bracket: ) - keyword: OVERLAPS - bracketed: - 
start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: start_date_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: start_date_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: WHERE expression: overlaps_clause: - bracketed: - start_bracket: ( - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-12-30'" - comma: ',' - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2024-01-14'" - end_bracket: ) - keyword: OVERLAPS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GREATEST function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: start_date_1 - comma: ',' - expression: column_reference: naked_identifier: start_date_2 - end_bracket: ) comma: ',' column_reference: naked_identifier: end_date end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/pattern_match_expressions.sql000066400000000000000000000030231503426445100276460ustar00rootroot00000000000000-- postgres_pattern_match_expressions.sql /* examples of pattern match expressions ( https://www.postgresql.org/docs/14/functions-matching.html ) that are supported in postgres. */ -- LIKE/ILIKE expressions supported SELECT * FROM animals WHERE family LIKE '%ursidae%'; SELECT * FROM animals WHERE family NOT LIKE '%ursidae%'; SELECT * FROM animals WHERE genus ILIKE '%ursus%'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%'; SELECT * FROM animals WHERE family LIKE '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%' ESCAPE '\\'; SELECT COALESCE(family LIKE '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- SIMILAR TO expressions supported SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE family NOT SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE genus SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%' ESCAPE '\\'; SELECT COALESCE(family SIMILAR TO '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- From https://github.com/sqlfluff/sqlfluff/issues/2722 WITH cleaned_bear_financial_branch AS ( SELECT branch_id, TO_NUMBER(CASE WHEN honey_numerical_code SIMILAR TO '[0-9]{0,7}.?[0-9]{0,2}' THEN honey_numerical_code ELSE NULL END, '24601') AS honey_numerical_code FROM bear_financial_branch ) SELECT branch_id FROM cleaned_bear_financial_branch LIMIT 10; sqlfluff-3.4.2/test/fixtures/dialects/postgres/pattern_match_expressions.yml000066400000000000000000000316531503426445100276620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
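Besides the flat record shape stored in these fixtures, the same parse tree is available as segment objects through the core API, which is convenient for eyeballing how a construct such as SIMILAR TO nests. A sketch (an editorial illustration, not a repository file), assuming only the long-standing core entry points:

from sqlfluff.core import Linter

linter = Linter(dialect="postgres")
parsed = linter.parse_string(
    "SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%';"
)
# stringify() renders the indented tree, much like `sqlfluff parse`
# does on the command line.
print(parsed.tree.stringify())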
_hash: 2531f4c07b8ef215e826ec7909f923aceef841e9a0e692bb8979860564dc3f05 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: family keyword: LIKE quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: LIKE - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: genus keyword: ILIKE quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cleaned_bear_financial_branch keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: branch_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TO_NUMBER function_contents: bracketed: - start_bracket: ( - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: honey_numerical_code - keyword: SIMILAR - keyword: TO - quoted_literal: "'[0-9]{0,7}.?[0-9]{0,2}'" - keyword: THEN - expression: column_reference: naked_identifier: honey_numerical_code - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - comma: ',' - expression: quoted_literal: "'24601'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: honey_numerical_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_financial_branch end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: branch_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cleaned_bear_financial_branch limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/pgvector.sql

CREATE TABLE search (
    embedding VECTOR(1536)
);

sqlfluff-3.4.2/test/fixtures/dialects/postgres/pgvector.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1bdf39db0361b096fda47dee34fa19fb470230e4f6ce2ca34c5328df47d9405f
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: search
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: embedding
        data_type:
          keyword: VECTOR
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '1536'
              end_bracket: )
        end_bracket: )
  statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/position.sql

select
    u.user_id,
    u.user_email,
    p.product_id
from user_tb as u
inner join product_tb as p
    on u.user_id = p.user_id
    and position('@domain' in u.user_email) = 0;

SELECT position('#' IN tbl.col_a -> 'b') AS hash_pos FROM tbl;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/position.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 55497236ead9cc6fe63cbfe5bb0272662188d899997a7b6a5237f236496f275b
file:
- statement:
    select_statement:
      select_clause:
      - keyword: select
      - select_clause_element:
          column_reference:
          - naked_identifier: u
          - dot: .
          - naked_identifier: user_id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: u
          - dot: .
          - naked_identifier: user_email
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: p
          - dot: .
          - naked_identifier: product_id
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: user_tb
            alias_expression:
              alias_operator:
                keyword: as
              naked_identifier: u
          join_clause:
          - keyword: inner
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: product_tb
              alias_expression:
                alias_operator:
                  keyword: as
                naked_identifier: p
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: u
                - dot: .
                - naked_identifier: user_id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: p
                - dot: .
                - naked_identifier: user_id
              - binary_operator: and
              - function:
                  function_name:
                    function_name_identifier: position
                  function_contents:
                    bracketed:
                      start_bracket: (
                      quoted_literal: "'@domain'"
                      keyword: in
                      column_reference:
                      - naked_identifier: u
                      - dot: .
                      - naked_identifier: user_email
                      end_bracket: )
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '0'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: position
            function_contents:
              bracketed:
                start_bracket: (
                quoted_literal: "'#'"
                keyword: IN
                expression:
                  column_reference:
                  - naked_identifier: tbl
                  - dot: .
                  - naked_identifier: col_a
                  binary_operator: ->
                  quoted_literal: "'b'"
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: hash_pos
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgis.sql

CREATE TABLE public.foo (
    quadkey TEXT,
    my_geometry_column GEOMETRY (GEOMETRY, 4326),
    my_point POINT(0 0),
    my_linestring LINESTRING(0 0, 1 1, 2 1, 2 2),
    my_simple_polygon POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)),
    my_complex_polygon POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1)),
    my_geometry_collection GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))),
    my_3d_linestring LINESTRINGZ (0 0 0,1 0 0,1 1 2),
    my_geography_column GEOGRAPHY(GEOGRAPHY, 6679),
    my_4d_point POINTZM(1, 1, 1, 1),
    my_multicurve MULTICURVE( (0 0, 5 5), CIRCULARSTRING(4 0, 4 4, 8 4) ),
    my_tin TIN( ((0 0 0, 0 0 1, 0 1 0, 0 0 0)), ((0 0 0, 0 1 0, 1 1 0, 0 0 0)) ),
    my_triangle TRIANGLE ((0 0, 0 9, 9 0, 0 0)),
    my_polyhedral_surface POLYHEDRALSURFACE(
        ((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),
        ((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),
        ((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),
        ((1 1 0, 1 1 1, 1 0 1, 1 0 0, 1 1 0)),
        ((0 1 0, 0 1 1, 1 1 1, 1 1 0, 0 1 0)),
        ((0 0 1, 1 0 1, 1 1 1, 0 1 1, 0 0 1))
    ),
    my_3d_geometry_collection GEOMETRYCOLLECTIONM( POINTM(2 3 9), LINESTRINGM(2 3 4, 3 4 5) ),
    my_curve_polygon CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1)),
    my_multisurface MULTISURFACE(CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1)),((10 10, 14 12, 11 10, 10 10),(11 11, 11.5 11, 11 11.5, 11 11))),
    my_circularstring CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),
    PRIMARY KEY (quadkey)
);

sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgis.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c10308e95f79279d0b35450b6a45cea25fea6d3f47481659ccb09343bc63dca2 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: quadkey - data_type: keyword: TEXT - comma: ',' - column_reference: naked_identifier: my_geometry_column - data_type: wkt_geometry_type: keyword: GEOMETRY bracketed: start_bracket: ( keyword: GEOMETRY comma: ',' numeric_literal: '4326' end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_point - data_type: wkt_geometry_type: keyword: POINT bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_linestring - data_type: wkt_geometry_type: keyword: LINESTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_simple_polygon - data_type: wkt_geometry_type: keyword: POLYGON bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_complex_polygon - data_type: wkt_geometry_type: keyword: POLYGON bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '10' - numeric_literal: '0' - comma: ',' - numeric_literal: '10' - numeric_literal: '10' - comma: ',' - numeric_literal: '0' - numeric_literal: '10' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '2' - comma: ',' - numeric_literal: '2' - numeric_literal: '2' - comma: ',' - numeric_literal: '2' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_geometry_collection - data_type: wkt_geometry_type: keyword: GEOMETRYCOLLECTION bracketed: - start_bracket: ( - wkt_geometry_type: keyword: POINT bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '0' - end_bracket: ) - comma: ',' - wkt_geometry_type: keyword: POLYGON bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_3d_linestring - data_type: wkt_geometry_type: keyword: LINESTRINGZ 
bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_geography_column - data_type: wkt_geometry_type: keyword: GEOGRAPHY bracketed: start_bracket: ( keyword: GEOGRAPHY comma: ',' numeric_literal: '6679' end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_4d_point - data_type: wkt_geometry_type: keyword: POINTZM bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_multicurve - data_type: wkt_geometry_type: keyword: MULTICURVE bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '5' - numeric_literal: '5' - end_bracket: ) comma: ',' wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '8' - numeric_literal: '4' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_tin - data_type: wkt_geometry_type: keyword: TIN bracketed: - start_bracket: ( - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_triangle - data_type: wkt_geometry_type: keyword: TRIANGLE bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '9' - comma: ',' - numeric_literal: '9' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_polyhedral_surface - data_type: wkt_geometry_type: keyword: POLYHEDRALSURFACE bracketed: - start_bracket: ( - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - 
numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_3d_geometry_collection - data_type: wkt_geometry_type: keyword: GEOMETRYCOLLECTIONM bracketed: - start_bracket: ( - wkt_geometry_type: keyword: POINTM bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '3' - numeric_literal: '9' - end_bracket: ) - comma: ',' - wkt_geometry_type: keyword: LINESTRINGM bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '3' - numeric_literal: '4' - comma: ',' - numeric_literal: '3' - numeric_literal: '4' - numeric_literal: '5' - end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_curve_polygon - data_type: wkt_geometry_type: keyword: CURVEPOLYGON bracketed: start_bracket: ( wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) comma: ',' bracketed: - 
start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_multisurface - data_type: wkt_geometry_type: keyword: MULTISURFACE bracketed: start_bracket: ( wkt_geometry_type: keyword: CURVEPOLYGON bracketed: start_bracket: ( wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) comma: ',' bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) end_bracket: ) comma: ',' bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - numeric_literal: '10' - numeric_literal: '10' - comma: ',' - numeric_literal: '14' - numeric_literal: '12' - comma: ',' - numeric_literal: '11' - numeric_literal: '10' - comma: ',' - numeric_literal: '10' - numeric_literal: '10' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - numeric_literal: '11' - comma: ',' - numeric_literal: '11.5' - numeric_literal: '11' - comma: ',' - numeric_literal: '11' - numeric_literal: '11.5' - comma: ',' - numeric_literal: '11' - numeric_literal: '11' - end_bracket: ) - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_circularstring - data_type: wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: quadkey end_bracket: ) - end_bracket: ) statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgres_pgvector_operators.sql

SELECT * FROM items ORDER BY embedding <-> '[3,1,2]' LIMIT 5;
SELECT * FROM items ORDER BY embedding <=> '[3,1,2]' LIMIT 5;
SELECT * FROM items ORDER BY embedding <+> '[3,1,2]' LIMIT 5;
SELECT * FROM items ORDER BY embedding <#> '[3,1,2]' LIMIT 5;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgres_pgvector_operators.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ff4b9cc4d6ad46a17cfd8362cd5fd59e5c157569814a59a3c74732f876c2041a
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: items
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - expression:
          column_reference:
            naked_identifier: embedding
          binary_operator: <->
          quoted_literal: "'[3,1,2]'"
      limit_clause:
        keyword: LIMIT
        numeric_literal: '5'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: items
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - expression:
          column_reference:
            naked_identifier: embedding
          binary_operator: <=>
          quoted_literal: "'[3,1,2]'"
      limit_clause:
        keyword: LIMIT
        numeric_literal: '5'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: items
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - expression:
          column_reference:
            naked_identifier: embedding
          binary_operator: <+>
          quoted_literal: "'[3,1,2]'"
      limit_clause:
        keyword: LIMIT
        numeric_literal: '5'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: items
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - expression:
          column_reference:
            naked_identifier: embedding
          binary_operator: <#>
          quoted_literal: "'[3,1,2]'"
      limit_clause:
        keyword: LIMIT
        numeric_literal: '5'
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgres_postgis_operators.sql

SELECT tbl1.column1, tbl2.column1, tbl1.column2 && tbl2.column2 AS overlap
FROM ( VALUES
        (1, 'LINESTRING(0 0, 3 3)'::geometry),
        (2, 'LINESTRING(0 1, 0 5)'::geometry)) AS tbl1,
    ( VALUES
        (3, 'LINESTRING(1 2, 4 6)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1,
    tbl1.column2 &&& tbl2.column2 AS overlaps_3d,
    tbl1.column2 && tbl2.column2 AS overlaps_2d
FROM ( VALUES
        (1, 'LINESTRING Z(0 0 1, 3 3 2)'::geometry),
        (2, 'LINESTRING Z(1 2 0, 0 5 -1)'::geometry)) AS tbl1,
    ( VALUES
        (3, 'LINESTRING Z(1 2 1, 4 6 1)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 &< tbl2.column2 AS overleft
FROM ( VALUES
        (1, 'LINESTRING(1 2, 4 6)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING(0 0, 3 3)'::geometry),
        (3, 'LINESTRING(0 1, 0 5)'::geometry),
        (4, 'LINESTRING(6 0, 6 1)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 &<| tbl2.column2 AS overbelow
FROM ( VALUES
        (1, 'LINESTRING(6 0, 6 4)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING(0 0, 3 3)'::geometry),
        (3, 'LINESTRING(0 1, 0 5)'::geometry),
        (4, 'LINESTRING(1 2, 4 6)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 &> tbl2.column2 AS overright
FROM ( VALUES
        (1, 'LINESTRING(1 2, 4 6)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING(0 0, 3 3)'::geometry),
        (3, 'LINESTRING(0 1, 0 5)'::geometry),
        (4, 'LINESTRING(6 0, 6 1)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 << tbl2.column2 AS strict_left
FROM ( VALUES
        (1, 'LINESTRING (1 2, 1 5)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (0 0, 4 3)'::geometry),
        (3, 'LINESTRING (6 0, 6 5)'::geometry),
        (4, 'LINESTRING (2 2, 5 6)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 <<| tbl2.column2 AS below
FROM ( VALUES
        (1, 'LINESTRING (0 0, 4 3)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (1 4, 1 7)'::geometry),
        (3, 'LINESTRING (6 1, 6 5)'::geometry),
        (4, 'LINESTRING (2 3, 5 6)'::geometry)) AS tbl2;

SELECT 'LINESTRING(0 0, 0 1, 1 0)'::geometry = 'LINESTRING(1 1, 0 0)'::geometry;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 >> tbl2.column2 AS strict_right
FROM ( VALUES
        (1, 'LINESTRING (2 3, 5 6)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (1 4, 1 7)'::geometry),
        (3, 'LINESTRING (6 1, 6 5)'::geometry),
        (4, 'LINESTRING (0 0, 4 3)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 @ tbl2.column2 AS contained
FROM ( VALUES
        (1, 'LINESTRING (1 1, 3 3)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (0 0, 4 4)'::geometry),
        (3, 'LINESTRING (2 2, 4 4)'::geometry),
        (4, 'LINESTRING (1 1, 3 3)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 |&> tbl2.column2 AS overabove
FROM ( VALUES
        (1, 'LINESTRING(6 0, 6 4)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING(0 0, 3 3)'::geometry),
        (3, 'LINESTRING(0 1, 0 5)'::geometry),
        (4, 'LINESTRING(1 2, 4 6)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 |>> tbl2.column2 AS above
FROM ( VALUES
        (1, 'LINESTRING (1 4, 1 7)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (0 0, 4 2)'::geometry),
        (3, 'LINESTRING (6 1, 6 5)'::geometry),
        (4, 'LINESTRING (2 3, 5 6)'::geometry)) AS tbl2;

SELECT tbl1.column1, tbl2.column1, tbl1.column2 ~ tbl2.column2 AS contains
FROM ( VALUES
        (1, 'LINESTRING (0 0, 3 3)'::geometry)) AS tbl1,
    ( VALUES
        (2, 'LINESTRING (0 0, 4 4)'::geometry),
        (3, 'LINESTRING (1 1, 2 2)'::geometry),
        (4, 'LINESTRING (0 0, 3 3)'::geometry)) AS tbl2;

select 'LINESTRING(0 0, 1 1)'::geometry ~= 'LINESTRING(0 1, 1 0)'::geometry as equality;

SELECT st_distance(geom, 'SRID=3005;POINT(1011102 450541)'::geometry) as d,edabbr, vaabbr
FROM va2005
ORDER BY geom <-> 'SRID=3005;POINT(1011102 450541)'::geometry limit 10;

SELECT track_id, dist FROM (
    SELECT track_id, ST_DistanceCPA(tr,:qt) dist
    FROM trajectories
    ORDER BY tr |=| :qt
    LIMIT 5
) foo;

SELECT * FROM (
    SELECT b.tlid, b.mtfcc,
        b.geom <#> ST_GeomFromText('LINESTRING(746149 2948672,745954 2948576,
            745787 2948499,745740 2948468,745712 2948438,
            745690 2948384,745677 2948319)',2249) As b_dist,
        ST_Distance(b.geom, ST_GeomFromText('LINESTRING(746149 2948672,745954 2948576,
            745787 2948499,745740 2948468,745712 2948438,
            745690 2948384,745677 2948319)',2249)) As act_dist
    FROM bos_roads As b
    ORDER BY b_dist, b.tlid
    LIMIT 100) As foo
ORDER BY act_dist, tlid
LIMIT 10;

WITH index_query AS (
    SELECT ST_Distance(geom, 'SRID=3005;POINT(1011102 450541)'::geometry) as d,edabbr, vaabbr
    FROM va2005
    ORDER BY geom <<->> 'SRID=3005;POINT(1011102 450541)'::geometry LIMIT 100)
SELECT * FROM index_query ORDER BY d limit 10;

WITH index_query AS (
    SELECT ST_Distance(geom, 'SRID=3005;POINT(1011102 450541)'::geometry) as d,edabbr, vaabbr
    FROM va2005
    ORDER BY geom <<#>> 'SRID=3005;POINT(1011102 450541)'::geometry LIMIT 100)
SELECT * FROM index_query ORDER BY d limit 10;
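The `.yml` fixture that follows records the parse tree SQLFluff produces for the statements above, which is why its header warns against hand-editing and points at `test/generate_parse_fixture_yml.py`. As a rough illustration of that relationship -- a minimal sketch, not part of the fixture suite -- the snippet below parses one of the PostGIS operator statements with SQLFluff's Python API and prints the tree. `sqlfluff.core.Linter`, `parse_string`, and `stringify` are real SQLFluff APIs, though the text dump they produce is formatted differently from the YAML fixtures.

# A minimal sketch, assuming only that SQLFluff itself is installed
# (pip install sqlfluff). It parses one statement from the fixture above
# with the "postgres" dialect and prints the parse tree; the neighbouring
# auto-generated .yml file stores the same structure (plus a _hash
# integrity field) in YAML form.
from sqlfluff.core import Linter

SQL = (
    "SELECT 'LINESTRING(0 0, 0 1, 1 0)'::geometry"
    " = 'LINESTRING(1 1, 0 0)'::geometry;\n"
)

linter = Linter(dialect="postgres")
parsed = linter.parse_string(SQL)

# parsed.tree is the root segment of the parse tree (None if parsing
# failed outright); stringify() renders it as an indented text dump.
if parsed.tree:
    print(parsed.tree.stringify())

On the command line, `sqlfluff parse --dialect postgres <file.sql>` produces a comparable dump, and `python test/generate_parse_fixture_yml.py` (referenced in the fixture headers) regenerates the YAML files and their hashes.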
sqlfluff-3.4.2/test/fixtures/dialects/postgres/postgres_postgis_operators.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 12f503c0d1bc6b05ad3d58352ef3abc3ce719bb0670ba5dc109facbc7cf15f40
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - comparison_operator: - ampersand: '&' - ampersand: '&' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overlap from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 1, 0 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(1 2, 4 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: '&&&' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overlaps_3d - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - comparison_operator: - ampersand: '&' - ampersand: '&' - column_reference: - naked_identifier: tbl2 - dot: .
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overlaps_2d from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING Z(0 0 1, 3 3 2)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING Z(1 2 0, 0 5 -1)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING Z(1 2 1, 4 6 1)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - comparison_operator: ampersand: '&' raw_comparison_operator: < - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overleft from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(1 2, 4 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 1, 0 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(6 0, 6 1)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . 
- naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: '&<|' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overbelow from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(6 0, 6 4)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 1, 0 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(1 2, 4 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - comparison_operator: ampersand: '&' raw_comparison_operator: '>' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overright from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(1 2, 4 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 1, 0 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(6 0, 6 1)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: - raw_comparison_operator: < - raw_comparison_operator: < - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: strict_left from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 2, 1 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (6 0, 6 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (2 2, 5 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: <<| - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: below from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 4, 1 7)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (6 1, 6 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (2 3, 5 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - cast_expression: quoted_literal: "'LINESTRING(0 0, 0 1, 1 0)'" casting_operator: '::' data_type: data_type_identifier: geometry - comparison_operator: raw_comparison_operator: '=' - cast_expression: quoted_literal: "'LINESTRING(1 1, 0 0)'" casting_operator: '::' data_type: data_type_identifier: geometry - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '>' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: strict_right from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (2 3, 5 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 4, 1 7)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (6 1, 6 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: '@' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: contained from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 1, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 4)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (2 2, 4 4)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 1, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: '|&>' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: overabove from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(6 0, 6 4)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(0 1, 0 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING(1 2, 4 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - binary_operator: '|>>' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: above from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 4, 1 7)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 2)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (6 1, 6 5)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (2 3, 5 6)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: column1 - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: column2 - like_operator: '~' - column_reference: - naked_identifier: tbl2 - dot: . 
- naked_identifier: column2 alias_expression: alias_operator: keyword: AS naked_identifier: contains from_clause: - keyword: FROM - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl1 - comma: ',' - from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 4 4)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (1 1, 2 2)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: cast_expression: quoted_literal: "'LINESTRING (0 0, 3 3)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: tbl2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - cast_expression: quoted_literal: "'LINESTRING(0 0, 1 1)'" casting_operator: '::' data_type: data_type_identifier: geometry - binary_operator: ~= - cast_expression: quoted_literal: "'LINESTRING(0 1, 1 0)'" casting_operator: '::' data_type: data_type_identifier: geometry alias_expression: alias_operator: keyword: as naked_identifier: equality - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: st_distance function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: geom - comma: ',' - expression: cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: d - comma: ',' - select_clause_element: column_reference: naked_identifier: edabbr - comma: ',' - select_clause_element: column_reference: naked_identifier: vaabbr from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: va2005 orderby_clause: - keyword: ORDER - keyword: BY - expression: column_reference: naked_identifier: geom binary_operator: <-> cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: track_id - comma: ',' - select_clause_element: column_reference: naked_identifier: dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: track_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ST_DistanceCPA function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: tr - comma: ',' - expression: psql_variable: colon: ':' parameter: qt - end_bracket: ) alias_expression: naked_identifier: dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trajectories orderby_clause: - keyword: ORDER - keyword: BY - expression: column_reference: naked_identifier: tr binary_operator: '|=|' psql_variable: colon: ':' parameter: qt limit_clause: keyword: LIMIT numeric_literal: '5' end_bracket: ) alias_expression: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: b - dot: . - naked_identifier: tlid - comma: ',' - select_clause_element: column_reference: - naked_identifier: b - dot: . - naked_identifier: mtfcc - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: geom binary_operator: <#> function: function_name: function_name_identifier: ST_GeomFromText function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'LINESTRING(746149 2948672,745954\ \ 2948576,\n\t\t745787 2948499,745740 2948468,745712\ \ 2948438,\n\t\t745690 2948384,745677 2948319)'" - comma: ',' - expression: numeric_literal: '2249' - end_bracket: ) alias_expression: alias_operator: keyword: As naked_identifier: b_dist - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ST_Distance function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: geom - comma: ',' - expression: function: function_name: function_name_identifier: ST_GeomFromText function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'LINESTRING(746149 2948672,745954\ \ 2948576,\n\t\t745787 2948499,745740 2948468,745712\ \ 2948438,\n\t\t745690 2948384,745677 2948319)'" - comma: ',' - expression: numeric_literal: '2249' - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: As naked_identifier: act_dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bos_roads alias_expression: alias_operator: keyword: As naked_identifier: b orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b_dist - comma: ',' - column_reference: - naked_identifier: b - dot: . 
- naked_identifier: tlid limit_clause: keyword: LIMIT numeric_literal: '100' end_bracket: ) alias_expression: alias_operator: keyword: As naked_identifier: foo orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: act_dist - comma: ',' - column_reference: naked_identifier: tlid limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: index_query keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ST_Distance function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: geom - comma: ',' - expression: cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: d - comma: ',' - select_clause_element: column_reference: naked_identifier: edabbr - comma: ',' - select_clause_element: column_reference: naked_identifier: vaabbr from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: va2005 orderby_clause: - keyword: ORDER - keyword: BY - expression: column_reference: naked_identifier: geom binary_operator: <<->> cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry limit_clause: keyword: LIMIT numeric_literal: '100' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: index_query orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: index_query keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ST_Distance function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: geom - comma: ',' - expression: cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: d - comma: ',' - select_clause_element: column_reference: naked_identifier: edabbr - comma: ',' - select_clause_element: column_reference: naked_identifier: vaabbr from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: va2005 orderby_clause: - keyword: ORDER - keyword: BY - expression: column_reference: naked_identifier: geom binary_operator: <<#>> cast_expression: quoted_literal: "'SRID=3005;POINT(1011102 450541)'" casting_operator: '::' data_type: data_type_identifier: geometry limit_clause: keyword: LIMIT numeric_literal: '100' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: index_query orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/prepare.sql000066400000000000000000000012531503426445100240140ustar00rootroot00000000000000PREPARE select_statement AS SELECT * FROM table1; PREPARE insert_statement AS INSERT INTO table1 (col1, col2) VALUES (1, 'foo'); PREPARE update_statement AS UPDATE table1 SET col2 = 'bar' WHERE col1 = 1; PREPARE delete_statement AS DELETE FROM table1 WHERE col1 = 1; PREPARE values_statement AS VALUES (1, 'foo'); PREPARE merge_statement AS MERGE INTO table1 USING table2 ON (table1.col1 = table2.col1) WHEN MATCHED THEN UPDATE SET col1 = table2.col1, col2 = table2.col2 WHEN NOT MATCHED THEN INSERT (col1, col2) VALUES (table2.col1, table2.col2); PREPARE parametrized_statement_1 (int) AS VALUES ($1); PREPARE parametrized_statement_2 (int, character(3)) AS VALUES ($1, $2); sqlfluff-3.4.2/test/fixtures/dialects/postgres/prepare.yml000066400000000000000000000167161503426445100240300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55149acf09f79971ad136af4650e63a8702c044e016f5fcabc016918eaba013f file: - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: select_statement - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: insert_statement - keyword: AS - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'foo'" - end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: update_statement - keyword: AS - update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: delete_statement - keyword: AS - delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: values_statement - keyword: AS - 
values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'foo'" - end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: merge_statement - keyword: AS - merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: table1 - keyword: USING - table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col1 end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col1 - comma: ',' - set_clause: - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col2 merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col1 - comma: ',' - expression: column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: parametrized_statement_1 - bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: AS - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: dollar_numeric_literal: $1 end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: parametrized_statement_2 - bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - end_bracket: ) - keyword: AS - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: dollar_numeric_literal: $1 - comma: ',' - expression: dollar_numeric_literal: $2 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/psql_meta_command.sql000066400000000000000000000003371503426445100260430ustar00rootroot00000000000000\echo "thing" \echo "thing" \x \\ \echo "thing" SELECT 1; SELECT 2; SELECT 1 + 3; SELECT 1; \echo "thing" \\ SELECT 1; \echo "thing" \echo "thing2" \prompt 'Region (1 - quebec, 2 - east, 3 - west): ' region_number sqlfluff-3.4.2/test/fixtures/dialects/postgres/psql_meta_command.yml000066400000000000000000000024521503426445100260450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 885466d038482d3e004642c24f43593a6aeedf01dca7978598694841001be7ec file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/psql_variable.sql000066400000000000000000000016271503426445100252070ustar00rootroot00000000000000\prompt 'From Member #: ' m1 \prompt 'To Member #: ' m2 \prompt 'Charge Account #: ' a SELECT 'from' AS direction, users.email, rona_mms_charge_accounts.account_number FROM memberships JOIN users ON users.id = memberships.user_id LEFT OUTER JOIN rona_mms_charge_accounts ON users.id = rona_mms_charge_accounts.customer_id WHERE memberships.code = (:m1)::text AND rona_mms_charge_accounts.account_number = lpad((:a)::text, 10, '0'); \prompt 'From Member #: ' m1 \prompt 'To Member #: ' m2 \prompt 'Charge Account #: ' a SELECT 'from' AS direction, users.email, rona_mms_charge_accounts.account_number FROM memberships JOIN users ON users.id = memberships.user_id LEFT OUTER JOIN rona_mms_charge_accounts ON users.id = rona_mms_charge_accounts.customer_id WHERE memberships.code = :'m1' AND rona_mms_charge_accounts.account_number = lpad(:'a', 10, '0'); sqlfluff-3.4.2/test/fixtures/dialects/postgres/psql_variable.yml000066400000000000000000000170261503426445100252110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c720fe8f98cdb0a19156a0c598ca590f1acc80bc7e6c9d6fa120243035cf06a1 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'from'" alias_expression: alias_operator: keyword: AS naked_identifier: direction - comma: ',' - select_clause_element: column_reference: - naked_identifier: users - dot: . - naked_identifier: email - comma: ',' - select_clause_element: column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: memberships - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: users join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: memberships - dot: . 
- naked_identifier: user_id - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: rona_mms_charge_accounts - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: code - comparison_operator: raw_comparison_operator: '=' - cast_expression: bracketed: start_bracket: ( expression: psql_variable: colon: ':' parameter: m1 end_bracket: ) casting_operator: '::' data_type: keyword: text - binary_operator: AND - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: lpad function_contents: bracketed: - start_bracket: ( - expression: cast_expression: bracketed: start_bracket: ( expression: psql_variable: colon: ':' parameter: a end_bracket: ) casting_operator: '::' data_type: keyword: text - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'0'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'from'" alias_expression: alias_operator: keyword: AS naked_identifier: direction - comma: ',' - select_clause_element: column_reference: - naked_identifier: users - dot: . - naked_identifier: email - comma: ',' - select_clause_element: column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: memberships - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: users join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: user_id - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: rona_mms_charge_accounts - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: code - comparison_operator: raw_comparison_operator: '=' - psql_variable: colon: ':' quoted_literal: "'m1'" - binary_operator: AND - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . 
- naked_identifier: account_number - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: lpad function_contents: bracketed: - start_bracket: ( - expression: psql_variable: colon: ':' quoted_literal: "'a'" - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'0'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/range_operators.sql000066400000000000000000000013331503426445100255470ustar00rootroot00000000000000SELECT word.*, paragraph.id AS paragraph_id FROM word INNER JOIN paragraph ON paragraph.page_id = word.page_id WHERE word.character_range @> paragraph.character_range AND word.character_range <@ paragraph.character_range AND word.character_range && paragraph.character_range AND word.character_range << paragraph.character_range AND word.character_range >> paragraph.character_range AND word.character_range &> paragraph.character_range AND word.character_range &< paragraph.character_range AND word.character_range -|- paragraph.character_range AND word.character_range + paragraph.character_range AND word.character_range * paragraph.character_range AND word.character_range - paragraph.character_range sqlfluff-3.4.2/test/fixtures/dialects/postgres/range_operators.yml000066400000000000000000000133241503426445100255540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43d2e161162d7698b1567f285ff0ab3bd5e5931c178f9de52588726a8a35b007 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: word dot: . star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: id alias_expression: alias_operator: keyword: AS naked_identifier: paragraph_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: word join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: paragraph - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: page_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: word - dot: . - naked_identifier: page_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '@>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: <@ - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: - ampersand: '&' - ampersand: '&' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . 
- naked_identifier: character_range - binary_operator: - raw_comparison_operator: < - raw_comparison_operator: < - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: ampersand: '&' raw_comparison_operator: '>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: ampersand: '&' raw_comparison_operator: < - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: - binary_operator: '-' - pipe: '|' - binary_operator: '-' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: + - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '*' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '-' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range sqlfluff-3.4.2/test/fixtures/dialects/postgres/reassign_owned.sql000066400000000000000000000004231503426445100253630ustar00rootroot00000000000000REASSIGN OWNED BY bob TO alice; REASSIGN OWNED BY bob, ted TO alice; REASSIGN OWNED BY bob, CURRENT_ROLE, ted, CURRENT_USER, sam, SESSION_USER TO alice; REASSIGN OWNED BY bob TO CURRENT_ROLE; REASSIGN OWNED BY bob TO CURRENT_USER; REASSIGN OWNED BY bob TO SESSION_USER; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reassign_owned.yml000066400000000000000000000042221503426445100253660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7924caa50d4473f8535da52f0a3b8091a85c2cebb11fe03765ba868e31d034c6 file: - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: ted - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - keyword: CURRENT_ROLE - comma: ',' - role_reference: naked_identifier: ted - comma: ',' - keyword: CURRENT_USER - comma: ',' - role_reference: naked_identifier: sam - comma: ',' - keyword: SESSION_USER - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: SESSION_USER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/refresh_materialized_view.sql000066400000000000000000000002401503426445100275730ustar00rootroot00000000000000REFRESH MATERIALIZED VIEW bar; REFRESH MATERIALIZED VIEW CONCURRENTLY bar; REFRESH MATERIALIZED VIEW bar WITH DATA; REFRESH MATERIALIZED VIEW bar WITH NO DATA; sqlfluff-3.4.2/test/fixtures/dialects/postgres/refresh_materialized_view.yml000066400000000000000000000025641503426445100276100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 011617b04ac29d2c81f9dde19306c07200d628114014dbb490659d79ed70013c file: - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - keyword: CONCURRENTLY - table_reference: naked_identifier: bar - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reindex.sql000066400000000000000000000003521503426445100240130ustar00rootroot00000000000000REINDEX INDEX my_index; REINDEX TABLE my_table; REINDEX DATABASE broken_db; REINDEX TABLE CONCURRENTLY my_broken_table; REINDEX (CONCURRENTLY FALSE) SYSTEM mydb; REINDEX (TABLESPACE my_tablespace, VERBOSE TRUE) SCHEMA my_schema; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reindex.yml000066400000000000000000000034601503426445100240200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 37b97d6e332473f78947f58784c940e24a02c75218973d039b289b1c103668e5 file: - statement: reindex_statement_segment: - keyword: REINDEX - keyword: INDEX - index_reference: naked_identifier: my_index - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: DATABASE - database_reference: naked_identifier: broken_db - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: TABLE - keyword: CONCURRENTLY - table_reference: naked_identifier: my_broken_table - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - bracketed: start_bracket: ( keyword: CONCURRENTLY boolean_literal: 'FALSE' end_bracket: ) - keyword: SYSTEM - database_reference: naked_identifier: mydb - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - bracketed: - start_bracket: ( - keyword: TABLESPACE - tablespace_reference: naked_identifier: my_tablespace - comma: ',' - keyword: VERBOSE - boolean_literal: 'TRUE' - end_bracket: ) - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reset.sql000066400000000000000000000000471503426445100235000ustar00rootroot00000000000000RESET timezone; RESET ALL; RESET ROLE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reset.yml000066400000000000000000000012761503426445100235070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0addcc750a516055a2f60b81c5fb1476adfddf3f5c7ddbcf08ed68ea56d9872 file: - statement: reset_statement: keyword: RESET parameter: timezone - statement_terminator: ; - statement: reset_statement: - keyword: RESET - keyword: ALL - statement_terminator: ; - statement: reset_statement: - keyword: RESET - keyword: ROLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reset_session_authorization.sql000066400000000000000000000000351503426445100302200ustar00rootroot00000000000000RESET SESSION AUTHORIZATION; sqlfluff-3.4.2/test/fixtures/dialects/postgres/reset_session_authorization.yml000066400000000000000000000010451503426445100302240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 70cf88dfcd15d10c92876b5c1aa9b10c2e5fc14e4996af5b4f1edf0c74dc7358 file: statement: reset_session_authorization_statement: - keyword: RESET - keyword: SESSION - keyword: AUTHORIZATION statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/revoke.sql000066400000000000000000000000411503426445100236430ustar00rootroot00000000000000REVOKE lc_anonymous FROM lc_api; sqlfluff-3.4.2/test/fixtures/dialects/postgres/revoke.yml000066400000000000000000000011511503426445100236500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4e1d6e503ec62e4d3225c85fac6b6450391572150b70aa3cad19f8f64ab2a795 file: statement: access_statement: - keyword: REVOKE - object_reference: naked_identifier: lc_anonymous - keyword: FROM - object_reference: naked_identifier: lc_api statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/security_label.sql000066400000000000000000000007471503426445100253730ustar00rootroot00000000000000SECURITY LABEL FOR selinux ON TABLE mytable IS 'system_u:object_r:sepgsql_table_t:s0'; SECURITY LABEL FOR selinux ON TABLE mytable IS NULL; SECURITY LABEL ON FUNCTION show_credit(int) IS 'system_u:object_r:sepgsql_trusted_proc_exec_t:s0'; SECURITY LABEL ON COLUMN customer.credit IS 'system_u:object_r:sepgsql_secret_table_t:s0'; SECURITY LABEL FOR anon ON ROLE skynet IS 'MASKED'; SECURITY LABEL FOR anon ON COLUMN customer.first_name IS 'MASKED WITH FUNCTION anon.dummy_first_name()'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/security_label.yml000066400000000000000000000051111503426445100253630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f3133cc68a9911fec64385aed744780a68309de60782f7108f4e25cf18940133 file: - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: FOR - object_reference: naked_identifier: selinux - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: mytable - keyword: IS - quoted_literal: "'system_u:object_r:sepgsql_table_t:s0'" - statement_terminator: ; - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: FOR - object_reference: naked_identifier: selinux - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: mytable - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: show_credit - bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: IS - quoted_literal: "'system_u:object_r:sepgsql_trusted_proc_exec_t:s0'" - statement_terminator: ; - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: customer - dot: . - naked_identifier: credit - keyword: IS - quoted_literal: "'system_u:object_r:sepgsql_secret_table_t:s0'" - statement_terminator: ; - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: FOR - object_reference: naked_identifier: anon - keyword: 'ON' - keyword: ROLE - role_reference: naked_identifier: skynet - keyword: IS - quoted_literal: "'MASKED'" - statement_terminator: ; - statement: security_label_statement: - keyword: SECURITY - keyword: LABEL - keyword: FOR - object_reference: naked_identifier: anon - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: customer - dot: . 
- naked_identifier: first_name - keyword: IS - quoted_literal: "'MASKED WITH FUNCTION anon.dummy_first_name()'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select.sql000066400000000000000000000056121503426445100236400ustar00rootroot00000000000000SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '1 day'; -- DATEADD is not a function in postgres so this should parse day as column name SELECT DATEADD(day, -2, current_date); SELECT timestamptz '2013-07-01 12:00:00' - timestamptz '2013-03-01 12:00:00'; SELECT 1.0::int; SELECT '2015-10-24 16:38:46'::TIMESTAMP; SELECT '2015-10-24 16:38:46'::TIMESTAMP AT TIME ZONE 'UTC'; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITH TIME ZONE; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITH TIME ZONE AT TIME ZONE 'UTC'; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITHOUT TIME ZONE; SELECT '2015-10-24 16:38:46'::TIMESTAMPTZ; SELECT '2015-10-24 16:38:46'::TIMESTAMPTZ AT TIME ZONE 'UTC'; -- Some more example from https://database.guide/how-at-time-zone-works-in-postgresql/ SELECT timestamp with time zone '2025-11-20 00:00:00+00' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp with time zone '2025-11-20 00:00:00'; SELECT timestamp without time zone '2025-11-20 00:00:00' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp without time zone '2025-11-20 00:00:00+12' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp without time zone '2025-11-20 00:00:00+12'; SELECT time with time zone '00:00:00+00' AT TIME ZONE 'Africa/Cairo'; SELECT time without time zone '00:00:00' AT TIME ZONE 'Africa/Cairo'; SELECT c_timestamp AT TIME ZONE 'Africa/Cairo' FROM t_table; SELECT (c_timestamp AT TIME ZONE 'Africa/Cairo')::time FROM t_table; SELECT a::double precision FROM my_table; SELECT schema1.table1.columna, t.col2 FROM schema1.table1 CROSS JOIN LATERAL somefunc(tb.columnb) as t(col1 text, col2 bool); SELECT a COLLATE "de_DE" < b FROM test1; SELECT a < ('foo' COLLATE "fr_FR") FROM test1; SELECT a < b COLLATE "de_DE" FROM test1; SELECT a COLLATE "de_DE" < b FROM test1; SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; -- Select elements are optional in Postgres SELECT FROM test1; -- keywords can be used as column names without quotes if qualified select id, start, periods.end from periods; SELECT concat_lower_or_upper('Hello', 'World', true); SELECT concat_lower_or_upper(a => 'Hello', b => 'World'); SELECT concat_lower_or_upper('Hello', 'World', uppercase => true); -- row-level locks can be used in Selects SELECT * FROM mytable FOR UPDATE; SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = 5; SELECT col1, col2 FROM mytable1 JOIN mytable2 ON col1 = col2 ORDER BY sync_time ASC LIMIT 1 FOR SHARE OF mytable1, mytable2 SKIP LOCKED ; Select * from foo TABLESAMPLE SYSTEM (10); Select * from foo TABLESAMPLE BERNOULLI (10); -- use of dollar quote in query SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = $1; SELECT i + $1 INTO j from foo; SELECT 1 /* hi hi /* foo */ ho ho */ AS bar; -- escape double quotes SELECT """t".col1 FROM tbl1 AS """t"; SELECT film_id, title FROM film ORDER BY title FETCH FIRST 10 ROW ONLY; SELECT foo FROM bar LIMIT 1 FOR UPDATE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select.yml000066400000000000000000000713231503426445100236440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a3033206a24c97d12ac7670f4e1ae4970096485fbcc630ee1be74cbca3d520a6 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2005-04-02 12:00:00-07'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: interval quoted_literal: "'1 day'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: day - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - comma: ',' - expression: bare_function: current_date - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: timestamptz quoted_literal: "'2013-07-01 12:00:00'" - binary_operator: '-' - datetime_literal: datetime_type_identifier: keyword: timestamptz quoted_literal: "'2013-03-01 12:00:00'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: numeric_literal: '1.0' casting_operator: '::' data_type: keyword: int - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ - statement_terminator: ; - statement: select_statement: select_clause: keyword: 
SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+12'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+12'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: time - keyword: with - keyword: time - keyword: zone quoted_literal: "'00:00:00+00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: time - keyword: without - keyword: time - keyword: zone quoted_literal: "'00:00:00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: c_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" end_bracket: ) casting_operator: '::' data_type: 
datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: a casting_operator: '::' data_type: - keyword: double - keyword: precision from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - dot: . - naked_identifier: columna - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: keyword: LATERAL table_expression: function: function_name: function_name_identifier: somefunc function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: tb - dot: . - naked_identifier: columnb end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: - start_bracket: ( - parameter: col1 - data_type: keyword: text - comma: ',' - parameter: col2 - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: quoted_literal: "'foo'" keyword: COLLATE column_reference: quoted_identifier: '"fr_FR"' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: 
wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 orderby_clause: - keyword: ORDER - keyword: BY - expression: - column_reference: naked_identifier: a - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: b - keyword: COLLATE - column_reference: quoted_identifier: '"fr_FR"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: start - comma: ',' - select_clause_element: column_reference: naked_identifier: periods dot: . naked_identifier_all: end from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: periods - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Hello'" - comma: ',' - expression: quoted_literal: "'World'" - comma: ',' - expression: boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper function_contents: bracketed: - start_bracket: ( - named_argument: naked_identifier: a right_arrow: => expression: quoted_literal: "'Hello'" - comma: ',' - named_argument: naked_identifier: b right_arrow: => expression: quoted_literal: "'World'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Hello'" - comma: ',' - expression: quoted_literal: "'World'" - comma: ',' - named_argument: naked_identifier: uppercase right_arrow: => expression: boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE end_bracket: ) alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - 
statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: mytable2 join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: sync_time - keyword: ASC limit_clause: keyword: LIMIT numeric_literal: '1' for_clause: - keyword: FOR - keyword: SHARE - keyword: OF - table_reference: naked_identifier: mytable1 - comma: ',' - table_reference: naked_identifier: mytable2 - keyword: SKIP - keyword: LOCKED - statement_terminator: ; - statement: select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE end_bracket: ) alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' dollar_numeric_literal: $1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: i binary_operator: + dollar_numeric_literal: $1 into_clause: keyword: INTO table_reference: naked_identifier: j from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: bar - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '"""t"' dot: . 
naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 alias_expression: alias_operator: keyword: AS quoted_identifier: '"""t"' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: film_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: film orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: title fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '10' - keyword: ROW - keyword: ONLY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar limit_clause: keyword: LIMIT numeric_literal: '1' for_clause: - keyword: FOR - keyword: UPDATE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_case_cast.sql000066400000000000000000000005541503426445100256450ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::text as mycol from table1; select col0, case when col1 then col2 else col3 end::int::float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as text) as mycol from table1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_case_cast.yml000066400000000000000000000110251503426445100256420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5a930eac26dd0fbcf04a016f843bdc75228fa4ef211f09dd661469477f957005 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end casting_operator: '::' data_type: keyword: text alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - casting_operator: '::' - data_type: keyword: int - casting_operator: '::' - data_type: keyword: float alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: keyword: text end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_frame_clause.sql000066400000000000000000000011301503426445100263350ustar00rootroot00000000000000select venuestate, venueseats, venuename, first_value(venuename ignore nulls) over(partition by venuestate order by venueseats desc rows between unbounded preceding and unbounded following) as col_name from table_name; SELECT rank () OVER (ORDER BY my_column RANGE BETWEEN 12 FOLLOWING AND CURRENT ROW EXCLUDE NO OTHERS); SELECT rank () OVER (ORDER BY my_column GROUPS UNBOUNDED PRECEDING EXCLUDE GROUP); SELECT rank () OVER (ORDER BY my_column RANGE BETWEEN INTERVAL '1 YEAR - 1 DAYS' PRECEDING AND INTERVAL '15 DAYS' PRECEDING); sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_frame_clause.yml000066400000000000000000000131051503426445100263440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fc11622f2ead1ad581ae0729d6d0e9e42f0a750ecab773c992504e0418010f7e file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: venuestate - comma: ',' - select_clause_element: column_reference: naked_identifier: venueseats - comma: ',' - select_clause_element: column_reference: naked_identifier: venuename - comma: ',' - select_clause_element: function: function_name: function_name_identifier: first_value function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: venuename - keyword: ignore - keyword: nulls - end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: venuestate orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: venueseats - keyword: desc frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: col_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: RANGE - keyword: BETWEEN - numeric_literal: '12' - keyword: FOLLOWING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: 'NO' - keyword: OTHERS end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: GROUPS - keyword: UNBOUNDED - keyword: PRECEDING - keyword: EXCLUDE - keyword: GROUP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: INTERVAL - quoted_literal: "'1 YEAR - 1 DAYS'" - keyword: PRECEDING - keyword: AND - keyword: INTERVAL - quoted_literal: "'15 DAYS'" - keyword: PRECEDING end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_into.sql000066400000000000000000000002051503426445100246620ustar00rootroot00000000000000SELECT foo, bar INTO baz FROM qux; SELECT * INTO TEMP TABLE baz; SELECT * INTO TEMPORARY baz; SELECT * INTO UNLOGGED baz; 
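Each dialect fixture here is a pair: a `.sql` file holding example statements (such as the `SELECT ... INTO` variants above) and a `.yml` file recording the parse tree SQLFluff is expected to produce for them. For readers who want to see that tree outside the test suite, the following is a minimal sketch using SQLFluff's Python API; `sqlfluff.parse` is part of the library's simple API, but the precise shape of its return value is not guaranteed by anything in this archive, so treat the sketch as illustrative only.

```python
# A minimal sketch, not part of the fixture suite: parse one of the
# fixture SQL files above with the postgres dialect and print the tree.
# Assumes SQLFluff is installed and the script is run from the
# repository root so the fixture path resolves.
from pathlib import Path

import sqlfluff

sql = Path("test/fixtures/dialects/postgres/select_into.sql").read_text()

# sqlfluff.parse raises if the statements cannot be parsed cleanly. The
# exact return shape can vary between SQLFluff versions, so this sketch
# only prints it rather than asserting any structure.
parsed = sqlfluff.parse(sql, dialect="postgres")
print(parsed)
```

The command-line equivalent, `sqlfluff parse --dialect postgres test/fixtures/dialects/postgres/select_into.sql`, prints the same tree in an indented text form that lines up closely with the nesting recorded in these `.yml` files.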
sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_into.yml000066400000000000000000000040261503426445100246710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 39721c9c2ae192671c8136c9efac4feeb59675570b953e2893832ed3be408259 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar into_clause: keyword: INTO table_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: qux - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: baz - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: TEMPORARY - table_reference: naked_identifier: baz - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: UNLOGGED - table_reference: naked_identifier: baz - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_natural_join.sql000066400000000000000000000006171503426445100264050ustar00rootroot00000000000000SELECT * FROM table1 NATURAL JOIN table2; SELECT * FROM table1 NATURAL INNER JOIN table2; SELECT * FROM table1 NATURAL LEFT JOIN table2; SELECT * FROM table1 NATURAL LEFT OUTER JOIN table2; SELECT * FROM table1 NATURAL RIGHT JOIN table2; SELECT * FROM table1 NATURAL RIGHT OUTER JOIN table2; SELECT * FROM table1 NATURAL FULL JOIN table2; SELECT * FROM table1 NATURAL FULL OUTER JOIN table2; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_natural_join.yml000066400000000000000000000133541503426445100264110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 83e3312a773ee3e3dee22bd7341b922507c1aaee51ef41c02c3ea414ef6b8c82 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - 
from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_offset.sql000066400000000000000000000002171503426445100252020ustar00rootroot00000000000000SELECT * FROM test OFFSET 10; SELECT * FROM test LIMIT 20 OFFSET 10; SELECT 1 FROM course_permissions AS cp WHERE cp.course_id = 4 OFFSET 1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_offset.yml000066400000000000000000000044121503426445100252050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00ac34f67f6be5a6e5e60821205465c2816e3c6b7a07fcdd2aae51d33268f4a3 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test offset_clause: keyword: OFFSET numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test limit_clause: - keyword: LIMIT - numeric_literal: '20' - keyword: OFFSET - numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: course_permissions alias_expression: alias_operator: keyword: AS naked_identifier: cp where_clause: keyword: WHERE expression: column_reference: - naked_identifier: cp - dot: . - naked_identifier: course_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '4' offset_clause: keyword: OFFSET numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_ordered_nested_sets.sql000066400000000000000000000002551503426445100277420ustar00rootroot00000000000000( SELECT * FROM tbl1 EXCEPT SELECT * FROM tbl2 ) UNION ALL ( SELECT * FROM tbl2 EXCEPT SELECT * FROM tbl1 ORDER BY column_1 ) ORDER BY column_2; sqlfluff-3.4.2/test/fixtures/dialects/postgres/select_ordered_nested_sets.yml000066400000000000000000000056171503426445100277530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2110b2cab8e4479d4f37f01644f6f085c055cf3c5eabdf9a29a0f040a409148e file: statement: set_expression: - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: column_1 end_bracket: ) - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: column_2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set.sql000066400000000000000000000007271503426445100231560ustar00rootroot00000000000000SET LOCAL search_path = DEFAULT; SET search_path TO my_schema, public; SET datestyle TO postgres, dmy; SET SESSION datestyle TO postgres, 'dmy'; SET value = on, off, auto; SET value = TRUE, FALSE; SET TIME ZONE 'PST8PDT'; SET TIME ZONE 'Europe/Rome'; SET TIME ZONE LOCAL; SET TIME ZONE DEFAULT; SET SCHEMA 'my_schema'; SET SCHEMA 'public'; SET ROLE my_role; SET ROLE "my role"; SET ROLE NONE; SET LOCAL search_path = schema_name; SET LOCAL search_path = "schema_name"; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set.yml000066400000000000000000000066611503426445100231630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d35761777a5185b9bf00add5e5b946bbc548c41091b4209b93708e54e7512e23 file: - statement: set_statement: - keyword: SET - keyword: LOCAL - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: search_path - keyword: TO - naked_identifier: my_schema - comma: ',' - naked_identifier: public - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: datestyle - keyword: TO - naked_identifier: postgres - comma: ',' - naked_identifier: dmy - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - parameter: datestyle - keyword: TO - naked_identifier: postgres - comma: ',' - quoted_literal: "'dmy'" - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: value - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'on' - comma: ',' - naked_identifier: 'off' - comma: ',' - naked_identifier: auto - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: value - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - boolean_literal: 'FALSE' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'PST8PDT'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'Europe/Rome'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: LOCAL - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: DEFAULT - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SCHEMA - quoted_literal: "'my_schema'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SCHEMA - quoted_literal: "'public'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - role_reference: quoted_identifier: '"my role"' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - keyword: NONE - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: LOCAL - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - naked_identifier: schema_name - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: LOCAL - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"schema_name"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set_constraints.sql000066400000000000000000000001401503426445100255720ustar00rootroot00000000000000SET CONSTRAINTS ALL DEFERRED; SET CONSTRAINTS ALL IMMEDIATE; SET CONSTRAINTS pk1, uk1 DEFERRED; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set_constraints.yml000066400000000000000000000017171503426445100256070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1cb1d5a11418fdecbc7aaaa7a31ade943e8451c64485127d021989724498618d file: - statement: set_constraint_statement: - keyword: SET - keyword: CONSTRAINTS - keyword: ALL - keyword: DEFERRED - statement_terminator: ; - statement: set_constraint_statement: - keyword: SET - keyword: CONSTRAINTS - keyword: ALL - keyword: IMMEDIATE - statement_terminator: ; - statement: set_constraint_statement: - keyword: SET - keyword: CONSTRAINTS - object_reference: naked_identifier: pk1 - comma: ',' - object_reference: naked_identifier: uk1 - keyword: DEFERRED - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set_session_authorization.sql000066400000000000000000000002771503426445100277010ustar00rootroot00000000000000SET SESSION AUTHORIZATION DEFAULT; SET SESSION AUTHORIZATION my_user; SET SESSION AUTHORIZATION "my_user"; SET SESSION SESSION AUTHORIZATION my_user; SET LOCAL SESSION AUTHORIZATION my_user; sqlfluff-3.4.2/test/fixtures/dialects/postgres/set_session_authorization.yml000066400000000000000000000026451503426445100277040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f1d836fc549955a47cb8b2824bb7638d433cdf99163b3cc3ee584e68f0405e13 file: - statement: set_session_authorization_statement: - keyword: SET - keyword: SESSION - keyword: AUTHORIZATION - keyword: DEFAULT - statement_terminator: ; - statement: set_session_authorization_statement: - keyword: SET - keyword: SESSION - keyword: AUTHORIZATION - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: set_session_authorization_statement: - keyword: SET - keyword: SESSION - keyword: AUTHORIZATION - role_reference: quoted_identifier: '"my_user"' - statement_terminator: ; - statement: set_session_authorization_statement: - keyword: SET - keyword: SESSION - keyword: SESSION - keyword: AUTHORIZATION - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: set_session_authorization_statement: - keyword: SET - keyword: LOCAL - keyword: SESSION - keyword: AUTHORIZATION - role_reference: naked_identifier: my_user - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/show.sql000066400000000000000000000002031503426445100233300ustar00rootroot00000000000000SHOW ALL; SHOW IS_SUPERUSER; SHOW LC_COLLATE; SHOW LC_CTYPE; SHOW SERVER_ENCODING; SHOW SERVER_VERSION; SHOW DateStyle; SHOW geqo; sqlfluff-3.4.2/test/fixtures/dialects/postgres/show.yml000066400000000000000000000023211503426445100233350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 89c3af38d108f08ef402d62a68dcb33565452cfc594949cf2d6938ce1799f0df file: - statement: show_statement: - keyword: SHOW - keyword: ALL - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: IS_SUPERUSER - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: LC_COLLATE - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: LC_CTYPE - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SERVER_ENCODING - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SERVER_VERSION - statement_terminator: ; - statement: show_statement: keyword: SHOW parameter: DateStyle - statement_terminator: ; - statement: show_statement: keyword: SHOW parameter: geqo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/single_quote.sql000066400000000000000000000013571503426445100250610ustar00rootroot00000000000000SELECT ''; SELECT ''''; SELECT ' '; SELECT '''aaa'''; SELECT ' '' '; SELECT '\'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' -- some comment 'bar'; COMMENT ON TABLE "some_table" IS E'' 'This is a valid comment style' '\n\n' 'which is escaped'; SELECT e'da' --this is a comment 'ta'; SELECT e'value of newline here: ' 'space ' 'no' 'space'; SELECT U&'' 'd\0061t\+000061' ' which has unicode', U&'d!0061t!+000061' ' which has unicode' UESCAPE '!', u&'d!0061t!+000061 which has unicode' uescape '!'; SELECT b'' '000' '010' '101'; SELECT x'1234' 'abcd' 'dead' 'beEF'; SELECT e'two ' 'line', E'can have single quotes escaped this way: \' ', e'but the second line' 'requires it like this '' \n\n'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/single_quote.yml000066400000000000000000000112361503426445100250600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3fbc15dc7b966f59b1658bb6597bc67a7827225de6c24f681cac0ffb128fce84 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: quoted_identifier: '"some_table"' - keyword: IS - quoted_literal: "E''" - quoted_literal: "'This is a valid comment style'" - quoted_literal: "'\\n\\n'" - quoted_literal: "'which is escaped'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "e'da'" - quoted_literal: "'ta'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "e'value of newline here:\n'" - quoted_literal: "'space '" - quoted_literal: "'no'" - quoted_literal: "'space'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: - quoted_literal: "U&''" - quoted_literal: "'d\\0061t\\+000061'" - quoted_literal: "' which has unicode'" - comma: ',' - select_clause_element: - quoted_literal: "U&'d!0061t!+000061'" - quoted_literal: "' which has unicode'" - keyword: UESCAPE - unicode_escape_value: "'!'" - comma: ',' - select_clause_element: quoted_literal: "u&'d!0061t!+000061 which has unicode' uescape '!'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "b''" - quoted_literal: "'000'" - quoted_literal: "'010'" - quoted_literal: "'101'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "x'1234'" - quoted_literal: "'abcd'" - quoted_literal: "'dead'" - quoted_literal: "'beEF'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: - quoted_literal: "e'two '" - quoted_literal: "'line'" - comma: ',' - select_clause_element: quoted_literal: "E'can have single quotes escaped this way: \\' '" - comma: ',' - select_clause_element: - quoted_literal: "e'but the second line'" - quoted_literal: "'requires it 
like this '' \\n\\n'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/string_normalization.sql000066400000000000000000000042511503426445100266330ustar00rootroot00000000000000SELECT (test_column IS NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NFC NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NFD NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NFKC NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NFKD NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NOT NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NOT NFC NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NOT NFD NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NOT NFKC NORMALIZED) AS is_normalized FROM test_table; SELECT (test_column IS NOT NFKD NORMALIZED) AS is_normalized FROM test_table; CREATE DOMAIN text_default AS TEXT CHECK (VALUE IS NORMALIZED); CREATE DOMAIN text_nfc AS TEXT CHECK (VALUE IS NFC NORMALIZED); CREATE DOMAIN text_nfd AS TEXT CHECK (VALUE IS NFD NORMALIZED); CREATE DOMAIN text_nfkc AS TEXT CHECK (VALUE IS NFKC NORMALIZED); CREATE DOMAIN text_nfkd AS TEXT CHECK (VALUE IS NFKD NORMALIZED); CREATE DOMAIN text_default AS TEXT CHECK (VALUE IS NOT normalized); CREATE DOMAIN text_nfc AS TEXT CHECK (VALUE IS NOT NFC NORMALIZED); CREATE DOMAIN text_nfd AS TEXT CHECK (VALUE IS NOT NFD NORMALIZED); CREATE DOMAIN text_nfkc AS TEXT CHECK (VALUE IS NOT NFKC NORMALIZED); CREATE DOMAIN text_nfkd AS TEXT CHECK (VALUE IS NOT NFKD NORMALIZED); create table test_table ( test_column text primary key, CONSTRAINT default_constraint CHECK (test_column IS NORMALIZED), CONSTRAINT nfc_constraint CHECK (test_column IS NFC NORMALIZED), CONSTRAINT nfd_constraint CHECK (test_column IS NFD NORMALIZED), CONSTRAINT nfkc_constraint CHECK (test_column IS NFKC NORMALIZED), CONSTRAINT nfkd_constraint CHECK (test_column IS NFKD NORMALIZED), CONSTRAINT not_default_constraint CHECK (test_column IS NOT NORMALIZED), CONSTRAINT not_nfc_constraint CHECK (test_column IS NOT NFC NORMALIZED), CONSTRAINT not_nfd_constraint CHECK (test_column IS NOT NFD NORMALIZED), CONSTRAINT not_nfkc_constraint CHECK (test_column IS NOT NFKC NORMALIZED), CONSTRAINT not_nfkd_constraint CHECK (test_column IS NOT NFKD NORMALIZED) ); sqlfluff-3.4.2/test/fixtures/dialects/postgres/string_normalization.yml000066400000000000000000000436721503426445100266470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b58d916717cfc4567815879734201dde1d3db66ab05b0d721c080243b3e54a47 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFC - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFD - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFKC - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFKD - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFC - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFD - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFKC - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFKD - keyword: NORMALIZED end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_default - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfc - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NFC - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfd - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NFD - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfkc - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NFKC - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfkd - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NFKD - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_default - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( 
expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NOT - keyword: normalized end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfc - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NOT - keyword: NFC - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfd - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NOT - keyword: NFD - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfkc - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NOT - keyword: NFKC - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: text_nfkd - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - keyword: IS - keyword: NOT - keyword: NFKD - keyword: NORMALIZED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: test_column - data_type: keyword: text - column_constraint_segment: - keyword: primary - keyword: key - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: default_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: nfc_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFC - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: nfd_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFD - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: nfkc_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFKC - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: nfkd_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NFKD - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: not_default_constraint - keyword: CHECK - bracketed: start_bracket: ( 
expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: not_nfc_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFC - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: not_nfd_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFD - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: not_nfkc_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFKC - keyword: NORMALIZED end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: not_nfkd_constraint - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: test_column - keyword: IS - keyword: NOT - keyword: NFKD - keyword: NORMALIZED end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/subquery.sql000066400000000000000000000002731503426445100242360ustar00rootroot00000000000000INSERT INTO target_table (target_column) SELECT table1.column1 FROM table1 INNER JOIN ( SELECT table2.join_column FROM table2 ) AS temp3 ON table1.join_column = temp3.join_column sqlfluff-3.4.2/test/fixtures/dialects/postgres/subquery.yml000066400000000000000000000051661503426445100242460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aff16529f248261edab60ac321e09ce7ee0346048a6b61470a6057e9b8d2c858 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: target_table - bracketed: start_bracket: ( column_reference: naked_identifier: target_column end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table2 - dot: . - naked_identifier: join_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: temp3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: join_column - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: temp3 - dot: . 
- naked_identifier: join_column sqlfluff-3.4.2/test/fixtures/dialects/postgres/substring.sql000066400000000000000000000003641503426445100244000ustar00rootroot00000000000000select * FROM substring('Thomas' from 2 for 3); select * FROM substring('Thomas' from 3); select * FROM substring('Thomas' for 2); select * FROM substring('Thomas' similar '%#"o_a#"_' escape '#'); select * FROM substring('Thomas' from '...$'); sqlfluff-3.4.2/test/fixtures/dialects/postgres/substring.yml000066400000000000000000000111161503426445100243770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 207c99a9018776d497e9b71b264c3250bc1d00e82f0eb44e197a5a7270ecfa4e file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Thomas'" - keyword: from - expression: numeric_literal: '2' - keyword: for - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Thomas'" - keyword: from - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Thomas'" - keyword: for - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Thomas'" - expression: data_type: data_type_identifier: similar quoted_literal: "'%#\"o_a#\"_'" - expression: data_type: data_type_identifier: escape quoted_literal: "'#'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: substring function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Thomas'" - keyword: from - expression: quoted_literal: "'...$'" - end_bracket: ) - statement_terminator: ; 
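Each dialect feature in this directory is exercised by a hand-written .sql fixture paired with a machine-generated .yml parse tree, as the header comment in every .yml file notes. After adding or altering a .sql fixture, the trees are regenerated rather than edited by hand. A minimal sketch of that step (the trailing dialect filter is an assumption; the script may also run with no arguments):

    python test/generate_parse_fixture_yml.py postgres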
sqlfluff-3.4.2/test/fixtures/dialects/postgres/table_functions.sql000066400000000000000000000010151503426445100255310ustar00rootroot00000000000000select * from unnest(array['123', '456']); select * from unnest(array['123', '456']) as a(val, row_num); select * from unnest(array['123', '456']) with ordinality; select * from unnest(array['123', '456']) with ordinality as a(val, row_num); SELECT * FROM table_1 WHERE utc_activity_start_dttm + make_interval(mins := activity_dur_mnt) BETWEEN '2024-01-07T00:00:00'::timestamp AND '2024-01-14T23:59:59.999999'::timestamp; SELECT ARRAY(SELECT UNNEST(list_field_1) INTERSECT SELECT UNNEST(list_field_2)) FROM table_1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/table_functions.yml000066400000000000000000000211301503426445100255330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f202736a579cc354661f68b7f36dbbb6c8698728d78ea05574158df015c0001 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: a bracketed: start_bracket: ( identifier_list: - naked_identifier: val - comma: ',' - naked_identifier: row_num end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: - function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - keyword: with - keyword: ordinality - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: - function: function_name: function_name_identifier: unnest function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: 
'[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - keyword: with - keyword: ordinality alias_expression: alias_operator: keyword: as naked_identifier: a bracketed: start_bracket: ( identifier_list: - naked_identifier: val - comma: ',' - naked_identifier: row_num end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: utc_activity_start_dttm - binary_operator: + - function: function_name: function_name_identifier: make_interval function_contents: bracketed: start_bracket: ( named_argument: naked_identifier: mins assignment_operator: := expression: column_reference: naked_identifier: activity_dur_mnt end_bracket: ) - keyword: BETWEEN - cast_expression: quoted_literal: "'2024-01-07T00:00:00'" casting_operator: '::' data_type: datetime_type_identifier: keyword: timestamp - keyword: AND - cast_expression: quoted_literal: "'2024-01-14T23:59:59.999999'" casting_operator: '::' data_type: datetime_type_identifier: keyword: timestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: list_field_1 end_bracket: ) - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: list_field_2 end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/truncate.sql000066400000000000000000000007651503426445100242120ustar00rootroot00000000000000TRUNCATE bigtable; TRUNCATE some_schema.bigtable; TRUNCATE TABLE bigtable; TRUNCATE ONLY bigtable; TRUNCATE TABLE ONLY bigtable; TRUNCATE bigtable *; TRUNCATE TABLE bigtable *; TRUNCATE bigtable, fattable; TRUNCATE TABLE bigtable, fattable; TRUNCATE ONLY bigtable, fattable *; TRUNCATE bigtable RESTART IDENTITY; TRUNCATE bigtable CONTINUE IDENTITY; TRUNCATE bigtable CASCADE; TRUNCATE bigtable RESTRICT; TRUNCATE TABLE ONLY bigtable, fattable *, ONLY slimtable CONTINUE IDENTITY CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/postgres/truncate.yml000066400000000000000000000065341503426445100242140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
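# The source truncate.sql exercises bare TRUNCATE, the optional TABLE and ONLY
# keywords, a trailing * per table, multi-table lists, RESTART/CONTINUE
# IDENTITY, and CASCADE/RESTRICT, including one statement combining several
# of these forms.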
_hash: 5b3af45b738a10f8b39d63c9d068e8931c6b259b24bda06e49e1797314c99d7f file: - statement: truncate_table: keyword: TRUNCATE table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: keyword: TRUNCATE table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: ONLY - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - keyword: ONLY - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: keyword: TRUNCATE table_reference: naked_identifier: bigtable star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: ONLY - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: RESTART - keyword: IDENTITY - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: CONTINUE - keyword: IDENTITY - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: CASCADE - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: RESTRICT - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - keyword: ONLY - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - star: '*' - comma: ',' - keyword: ONLY - table_reference: naked_identifier: slimtable - keyword: CONTINUE - keyword: IDENTITY - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/unicode_double_quote.sql000066400000000000000000000001151503426445100265470ustar00rootroot00000000000000SELECT U&"a"; SELECT U&"aaaa" UESCAPE '!'; SELECT U&"aaaa" UESCAPE '!'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/unicode_double_quote.yml000066400000000000000000000020241503426445100265520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
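# The three statements in unicode_double_quote.sql differ only in whitespace:
# each U&"..." identifier parses as a single quoted_identifier, and any
# following UESCAPE '...' clause is captured inside that identifier's raw.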
_hash: d1f2c582ee465ffa166e90ef5a38705ca5eab3b80f6551cab438aff8656c4003 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: U&"a" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: "U&\"aaaa\" UESCAPE '!'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: "U&\"aaaa\"\n\n UESCAPE\n '!'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/unicode_single_quote.sql000066400000000000000000000002601503426445100265570ustar00rootroot00000000000000SELECT U&''; SELECT U&' '; SELECT U&''''; SELECT U&'aaa'''; SELECT U&' '' '; SELECT U&'' UESCAPE '!'; SELECT U&'asdf' UESCAPE 'P'; SELECT U&' somestuff ' UESCAPE '?'; sqlfluff-3.4.2/test/fixtures/dialects/postgres/unicode_single_quote.yml000066400000000000000000000034441503426445100265700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 279511c1fbe07646c0350990c6cc6b2cf7cb55d86f062574d6165ffe687dbc9c file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'' UESCAPE '!'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'asdf' UESCAPE 'P'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\nsomestuff\n'\nUESCAPE\n\n\n'?'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/update_table.sql000066400000000000000000000027011503426445100250060ustar00rootroot00000000000000UPDATE films SET kind = 'Dramatic' WHERE kind = 'Drama'; UPDATE weather SET temp_lo = temp_lo+1, temp_hi = temp_lo+15, prcp = DEFAULT WHERE city = 'San Francisco' AND date = '2003-07-03'; UPDATE weather SET temp_lo = temp_lo+1, temp_hi = temp_lo+15, prcp = DEFAULT WHERE city = 'San Francisco' AND date = '2003-07-03' RETURNING temp_lo, temp_hi, prcp; UPDATE weather SET (temp_lo, temp_hi, prcp) = (temp_lo+1, temp_lo+15, DEFAULT) WHERE city = 'San Francisco' AND date = '2003-07-03'; UPDATE employees SET sales_count = sales_count + 1 FROM accounts WHERE accounts.name = 'Acme Corporation' AND employees.id = accounts.sales_person; UPDATE employees SET sales_count = sales_count + 1 WHERE id = (SELECT sales_person FROM accounts WHERE name = 'Acme Corporation'); UPDATE accounts SET (contact_first_name, 
contact_last_name) = (SELECT first_name, last_name FROM salesmen WHERE salesmen.id = accounts.sales_id); UPDATE accounts SET contact_first_name = first_name, contact_last_name = last_name FROM salesmen WHERE salesmen.id = accounts.sales_id; UPDATE summary s SET (sum_x, sum_y, avg_x, avg_y) = (SELECT sum(x), sum(y), avg(x), avg(y) FROM data d WHERE d.group_id = s.group_id); UPDATE films SET kind = 'Dramatic' WHERE CURRENT OF c_films; UPDATE my_table SET my_column = "SQLFluff rules!" RETURNING my_column; UPDATE employees SET deleted_at = NOW() WHERE uuid = $1 RETURNING short_name AS employee_name; sqlfluff-3.4.2/test/fixtures/dialects/postgres/update_table.yml000066400000000000000000000417351503426445100250220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ca3ab8c1066b6ea04881273aa70b7a64bac115fac1efc171e9528ae1f4ea4b6b file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: films set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Dramatic'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Drama'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: weather set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: temp_lo comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: temp_hi comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - set_clause: column_reference: naked_identifier: prcp comparison_operator: raw_comparison_operator: '=' keyword: DEFAULT where_clause: keyword: WHERE expression: - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: weather - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: temp_lo comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: temp_hi comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - set_clause: column_reference: naked_identifier: prcp comparison_operator: raw_comparison_operator: '=' keyword: DEFAULT - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: 
raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - keyword: RETURNING - expression: column_reference: naked_identifier: temp_lo - comma: ',' - expression: column_reference: naked_identifier: temp_hi - comma: ',' - expression: column_reference: naked_identifier: prcp - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: weather set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: temp_lo - comma: ',' - column_reference: naked_identifier: temp_hi - comma: ',' - column_reference: naked_identifier: prcp - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - keyword: DEFAULT - end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: sales_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: sales_count binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: accounts where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: accounts - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Acme Corporation'" - binary_operator: AND - column_reference: - naked_identifier: employees - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . 
- naked_identifier: sales_person - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: sales_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: sales_count binary_operator: + numeric_literal: '1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sales_person from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: accounts where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Acme Corporation'" end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: accounts set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: contact_first_name - comma: ',' - column_reference: naked_identifier: contact_last_name - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: salesmen where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: salesmen - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . - naked_identifier: sales_id end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: accounts set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: contact_first_name - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: first_name - comma: ',' - set_clause: - column_reference: naked_identifier: contact_last_name - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: salesmen where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: salesmen - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . 
- naked_identifier: sales_id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: summary alias_expression: naked_identifier: s set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: sum_x - comma: ',' - column_reference: naked_identifier: sum_y - comma: ',' - column_reference: naked_identifier: avg_x - comma: ',' - column_reference: naked_identifier: avg_y - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: data alias_expression: naked_identifier: d where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: group_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: group_id end_bracket: ) - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: films - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Dramatic'" - keyword: WHERE - keyword: CURRENT - keyword: OF - object_reference: naked_identifier: c_films - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: my_table - set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: my_column - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"SQLFluff rules!"' - keyword: RETURNING - expression: column_reference: naked_identifier: my_column - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: employees - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: deleted_at comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: naked_identifier: uuid comparison_operator: raw_comparison_operator: '=' dollar_numeric_literal: $1 - keyword: RETURNING - expression: column_reference: naked_identifier: short_name - alias_expression: alias_operator: keyword: AS naked_identifier: employee_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/vacuum.sql000066400000000000000000000013221503426445100236530ustar00rootroot00000000000000-- Old-style vacuum commands VACUUM; VACUUM FULL; VACUUM FREEZE; VACUUM VERBOSE; VACUUM ANALYZE; VACUUM ANALYSE; VACUUM FULL FREEZE VERBOSE ANALYSE; VACUUM tbl; VACUUM tbl1, tbl2; VACUUM FULL FREEZE VERBOSE ANALYSE tbl1, tbl2; VACUUM FULL tbl1 (col1, col2), tbl2; VACUUM FULL tbl1 (col1), tbl2 (col1, col2); -- New-style vacuum commands VACUUM (FULL); VACUUM (FULL, FREEZE) tbl1; VACUUM (FULL, FREEZE) tbl1 (col1, col2), tbl2 (col3); VACUUM (FULL TRUE, FREEZE); VACUUM ( FULL TRUE, FREEZE FALSE, VERBOSE, ANALYZE, ANALYSE, DISABLE_PAGE_SKIPPING, SKIP_LOCKED, INDEX_CLEANUP on, PROCESS_TOAST, TRUNCATE, PARALLEL 70 ); VACUUM (INDEX_CLEANUP off); VACUUM (INDEX_CLEANUP auto); sqlfluff-3.4.2/test/fixtures/dialects/postgres/vacuum.yml000066400000000000000000000123241503426445100236610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
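# vacuum.sql covers both syntaxes: the old-style option keywords (FULL,
# FREEZE, VERBOSE, ANALYZE/ANALYSE) with optional table and column lists, and
# the new-style parenthesised option list, including boolean option values
# (FULL TRUE), INDEX_CLEANUP on/off/auto, and PARALLEL with an integer
# argument (PARALLEL 70).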
_hash: 617238e8c1decb78501479b84b05ee00fae0aca517bd0d5d3d0b112a6dcd7f29 file: - statement: vacuum_statement: keyword: VACUUM - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FREEZE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: VERBOSE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: ANALYZE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: ANALYSE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - keyword: FREEZE - keyword: VERBOSE - keyword: ANALYSE - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM table_reference: naked_identifier: tbl - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - table_reference: naked_identifier: tbl1 - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - keyword: FREEZE - keyword: VERBOSE - keyword: ANALYSE - table_reference: naked_identifier: tbl1 - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - table_reference: naked_identifier: tbl1 - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: FULL end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - comma: ',' - keyword: FREEZE - end_bracket: ) table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - bracketed: - start_bracket: ( - keyword: FULL - comma: ',' - keyword: FREEZE - end_bracket: ) - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - bracketed: start_bracket: ( column_reference: naked_identifier: col3 end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - boolean_literal: 'TRUE' - comma: ',' - keyword: FREEZE - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - boolean_literal: 'TRUE' - comma: ',' - keyword: FREEZE - boolean_literal: 'FALSE' - comma: ',' - keyword: VERBOSE - comma: ',' - keyword: ANALYZE - comma: ',' - keyword: ANALYSE - comma: ',' - keyword: DISABLE_PAGE_SKIPPING - comma: ',' - keyword: SKIP_LOCKED - comma: ',' - keyword: INDEX_CLEANUP - naked_identifier: 'on' - comma: ',' - keyword: PROCESS_TOAST - comma: ',' - 
keyword: TRUNCATE - comma: ',' - keyword: PARALLEL - numeric_literal: '70' - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: INDEX_CLEANUP naked_identifier: 'off' end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: INDEX_CLEANUP naked_identifier: auto end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/values.sql000066400000000000000000000004511503426445100236540ustar00rootroot00000000000000values (1, 2); VALUES (1+1, 2); values (1+1, 2::TEXT); values (1, 2), (3, 4); values (1, 2), (3, 4), (greatest(5, 6), least(7, 8)); values (1, 2), (3, 4) limit 1; values (1, 2), (3, 4) limit 1 offset 1; values (1, 2), (3, 4) order by 1 desc; values (1, 2), (3, 4) order by 1 desc limit 1; sqlfluff-3.4.2/test/fixtures/dialects/postgres/values.yml000066400000000000000000000130021503426445100236520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c9ac4865633124d729147c4c432dca1e9250cdf5c660d16cea30ac395ec6b017 file: - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: cast_expression: numeric_literal: '2' casting_operator: '::' data_type: keyword: TEXT - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: least function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - 
comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - limit_clause: keyword: limit numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - limit_clause: - keyword: limit - numeric_literal: '1' - keyword: offset - numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: desc - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: desc - limit_clause: keyword: limit numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/values_alias.sql000066400000000000000000000000721503426445100250240ustar00rootroot00000000000000select * from ( values (1, 2), (3, 4) ) as t(c1, c2); sqlfluff-3.4.2/test/fixtures/dialects/postgres/values_alias.yml000066400000000000000000000034641503426445100250360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
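# A single statement: a parenthesised VALUES list used as a table expression
# in FROM, aliased with a column list, i.e. (values (1, 2), (3, 4)) as t(c1, c2).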
_hash: b2fa00ab5629e7faa4eab24c597b89ddf3cceced0d8201da2067b3dc9907cf5a file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/values_in_subquery.sql000066400000000000000000000003061503426445100263000ustar00rootroot00000000000000WITH t (col_1, col_2) AS ( VALUES ('08RIX0', 0.435::NUMERIC(4, 3)) ) SELECT * FROM t; SELECT * FROM ( VALUES (1) ) AS t(c1); SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2); sqlfluff-3.4.2/test/fixtures/dialects/postgres/values_in_subquery.yml000066400000000000000000000104711503426445100263060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 01ee90f3a749aefbcf9c0cac6019ffa8026911a6d2cb2c00a92275933b67c112 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: col_1 - comma: ',' - naked_identifier: col_2 end_bracket: ) keyword: AS bracketed: start_bracket: ( values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'08RIX0'" - comma: ',' - expression: cast_expression: numeric_literal: '0.435' casting_operator: '::' data_type: keyword: NUMERIC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '3' - end_bracket: ) - end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: 
numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/variadic.sql000066400000000000000000000017761503426445100241520ustar00rootroot00000000000000CREATE FUNCTION mleast(VARIADIC arr numeric[]) RETURNS numeric AS $$ SELECT min($1[i]) FROM generate_subscripts($1, 1) g(i); $$ LANGUAGE SQL; SELECT mleast(VARIADIC ARRAY[10, -1, 5, 4.4]); SELECT mleast(VARIADIC ARRAY[]::numeric[]); SELECT mleast(VARIADIC arr := ARRAY[10, -1, 5, 4.4]); CREATE FUNCTION anyleast (VARIADIC anyarray) RETURNS anyelement AS $$ SELECT min($1[i]) FROM generate_subscripts($1, 1) g(i); $$ LANGUAGE SQL; CREATE FUNCTION concat_values(text, VARIADIC anyarray) RETURNS text AS $$ SELECT array_to_string($2, $1); $$ LANGUAGE SQL; SELECT my_function(other_function( VARIADIC ARRAY_REMOVE(ARRAY[ a.value1, b.value2, c.value3 ], NULL) )) FROM a FULL OUTER JOIN b USING (id) FULL OUTER JOIN c USING (id); SELECT json_extract_path_text(t.col::json, VARIADIC ARRAY['foo'::text]) FROM t; SELECT my_function(VARIADIC ARRAY[ CASE WHEN x > 0 THEN x ELSE 0 END, y + z, 'literal' ]); SELECT my_function(VARIADIC ARRAY(SELECT value FROM table1)); sqlfluff-3.4.2/test/fixtures/dialects/postgres/variadic.yml000066400000000000000000000325771503426445100241570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb65cc3c9b09e1bfd50bb9cff407fffcaf933dd3e2e6def51b4cfc580b874f9c file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: mleast - function_parameter_list: bracketed: start_bracket: ( keyword: VARIADIC parameter: arr data_type: keyword: numeric start_square_bracket: '[' end_square_bracket: ']' end_bracket: ) - keyword: RETURNS - data_type: keyword: numeric - function_definition: keyword: AS quoted_literal: "$$\n SELECT min($1[i]) FROM generate_subscripts($1, 1)\ \ g(i);\n$$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: mleast function_contents: bracketed: start_bracket: ( expression: typed_array_literal: keyword: VARIADIC array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '4.4' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: mleast function_contents: bracketed: start_bracket: ( expression: cast_expression: typed_array_literal: keyword: VARIADIC array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' casting_operator: '::' data_type: keyword: numeric start_square_bracket: '[' end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: mleast function_contents: bracketed: start_bracket: ( expression: typed_array_literal: keyword: VARIADIC naked_identifier: arr assignment_operator: := array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '4.4' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: anyleast - function_parameter_list: bracketed: start_bracket: ( keyword: VARIADIC data_type: data_type_identifier: anyarray end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: anyelement - function_definition: keyword: AS quoted_literal: "$$\n SELECT min($1[i]) FROM generate_subscripts($1, 1)\ \ g(i);\n$$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: concat_values - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - keyword: VARIADIC - data_type: data_type_identifier: anyarray - end_bracket: ) - keyword: RETURNS - data_type: keyword: text - function_definition: keyword: AS quoted_literal: "$$\n SELECT array_to_string($2, $1);\n$$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( expression: 
function: function_name: function_name_identifier: other_function function_contents: bracketed: start_bracket: ( keyword: VARIADIC expression: function: function_name: function_name_identifier: ARRAY_REMOVE function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - column_reference: - naked_identifier: a - dot: . - naked_identifier: value1 - comma: ',' - column_reference: - naked_identifier: b - dot: . - naked_identifier: value2 - comma: ',' - column_reference: - naked_identifier: c - dot: . - naked_identifier: value3 - end_square_bracket: ']' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: a - join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) - join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: c - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: json_extract_path_text function_contents: bracketed: - start_bracket: ( - expression: cast_expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col casting_operator: '::' data_type: keyword: json - comma: ',' - expression: typed_array_literal: keyword: VARIADIC array_type: keyword: ARRAY array_literal: start_square_bracket: '[' expression: cast_expression: quoted_literal: "'foo'" casting_operator: '::' data_type: keyword: text end_square_bracket: ']' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( expression: typed_array_literal: keyword: VARIADIC array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - keyword: THEN - expression: column_reference: naked_identifier: x - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END - comma: ',' - expression: - column_reference: naked_identifier: y - binary_operator: + - column_reference: naked_identifier: z - comma: ',' - quoted_literal: "'literal'" - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( keyword: VARIADIC expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/with.sql000066400000000000000000000025451503426445100233360ustar00rootroot00000000000000WITH w AS MATERIALIZED ( SELECT * FROM other_table ) SELECT * FROM w AS w1 JOIN w AS w2 ON w1.key = w2.ref WHERE w2.key = 123; WITH w AS NOT MATERIALIZED ( SELECT * FROM big_table ) SELECT * FROM w AS w1 JOIN w AS w2 ON w1.key = w2.ref WHERE w2.key = 123; WITH RECURSIVE search_tree(id, link, data) AS ( SELECT t.id, t.link, t.data FROM tree t UNION ALL SELECT t.id, t.link, t.data FROM tree t, search_tree st WHERE t.id = st.link ) SEARCH DEPTH FIRST BY id SET ordercol SELECT * FROM search_tree ORDER BY ordercol; WITH RECURSIVE search_tree(id, link, data) AS ( SELECT t.id, t.link, t.data FROM tree t UNION ALL SELECT t.id, t.link, t.data FROM tree t, search_tree st WHERE t.id = st.link ) SEARCH BREADTH FIRST BY id SET ordercol SELECT * FROM search_tree ORDER BY ordercol; WITH RECURSIVE search_graph(id, link, data, depth) AS ( SELECT g.id, g.link, g.data, 1 FROM graph g UNION ALL SELECT g.id, g.link, g.data, sg.depth + 1 FROM graph g, search_graph sg WHERE g.id = sg.link ) CYCLE id SET is_cycle USING path SELECT * FROM search_graph; -- test that DML queries are also selectable WITH tbl AS ( INSERT INTO a VALUES (5) RETURNING * ) SELECT * FROM tbl; WITH tbl AS ( UPDATE a SET b = 5 RETURNING * ) SELECT * FROM tbl; WITH tbl AS ( DELETE FROM a RETURNING * ) SELECT * FROM tbl; sqlfluff-3.4.2/test/fixtures/dialects/postgres/with.yml000066400000000000000000000520661503426445100233430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 06cad93f8e6fb93125dc10f8f809051b85c28673b3ba5b57cd991ff0786a373a file: - statement: with_compound_statement: keyword: WITH common_table_expression: - naked_identifier: w - keyword: AS - keyword: MATERIALIZED - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: w1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: w2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: w1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: w2 - dot: . - naked_identifier: ref where_clause: keyword: WHERE expression: column_reference: - naked_identifier: w2 - dot: . 
- naked_identifier: key comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: - naked_identifier: w - keyword: AS - keyword: NOT - keyword: MATERIALIZED - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: big_table end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: w1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: w2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: w1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: w2 - dot: . - naked_identifier: ref where_clause: keyword: WHERE expression: column_reference: - naked_identifier: w2 - dot: . - naked_identifier: key comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_tree - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree alias_expression: naked_identifier: st where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: st - dot: . 
- naked_identifier: link end_bracket: ) - keyword: SEARCH - keyword: DEPTH - keyword: FIRST - keyword: BY - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: ordercol - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ordercol - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_tree - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree alias_expression: naked_identifier: st where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: st - dot: . 
- naked_identifier: link end_bracket: ) - keyword: SEARCH - keyword: BREADTH - keyword: FIRST - keyword: BY - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: ordercol - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ordercol - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_graph - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data - comma: ',' - naked_identifier: depth end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: data - comma: ',' - select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: graph alias_expression: naked_identifier: g - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: data - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: sg - dot: . - naked_identifier: depth binary_operator: + numeric_literal: '1' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: graph alias_expression: naked_identifier: g - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_graph alias_expression: naked_identifier: sg where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sg - dot: . 
- naked_identifier: link end_bracket: ) - keyword: CYCLE - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: is_cycle - keyword: USING - column_reference: naked_identifier: path - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_graph - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: a - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( update_statement: - keyword: UPDATE - table_reference: naked_identifier: a - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: a - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/postgres/within_group.sql000066400000000000000000000002001503426445100250630ustar00rootroot00000000000000-- Postgres style WITHIN GROUP window functions SELECT ARRAY_AGG(o_orderkey) WITHIN GROUP (ORDER BY o_orderkey ASC) FROM orders sqlfluff-3.4.2/test/fixtures/dialects/postgres/within_group.yml000066400000000000000000000026251503426445100251020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 670133e38a4a4d1acd2b8a743c51545c422cb6cb968368bbc1a64b9110b9887c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o_orderkey - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders sqlfluff-3.4.2/test/fixtures/dialects/redshift/000077500000000000000000000000001503426445100215765ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/redshift/.sqlfluff000066400000000000000000000000361503426445100234200ustar00rootroot00000000000000[sqlfluff] dialect = redshift sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_datashare.sql000066400000000000000000000007221503426445100254430ustar00rootroot00000000000000ALTER DATASHARE salesshare SET PUBLICACCESSIBLE FALSE; ALTER DATASHARE salesshare SET INCLUDENEW = TRUE FOR SCHEMA public; ALTER DATASHARE salesshare ADD TABLE public.tbl1; ALTER DATASHARE salesshare ADD TABLE public.tbl1, public.tbl2; ALTER DATASHARE salesshare ADD SCHEMA public; ALTER DATASHARE salesshare ADD FUNCTION public.fn1, public.fn2; ALTER DATASHARE salesshare ADD ALL TABLES IN SCHEMA public; ALTER DATASHARE salesshare REMOVE TABLE public.tbl1; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_datashare.yml000066400000000000000000000062161503426445100254510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b1acf41db31b9f7890e2887ba06c4c546817087234a30c10f9474eb2df487d0 file: - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: SET - keyword: PUBLICACCESSIBLE - boolean_literal: 'FALSE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: SET - keyword: INCLUDENEW - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: FOR - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - comma: ',' - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: tbl2 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn1 - comma: ',' - function_name: naked_identifier: public dot: . function_name_identifier: fn2 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: REMOVE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_default_privileges.sql000066400000000000000000000003341503426445100273630ustar00rootroot00000000000000ALTER DEFAULT PRIVILEGES FOR USER the_user IN SCHEMA the_schema GRANT EXECUTE ON PROCEDURES TO the_other_user; ALTER DEFAULT PRIVILEGES FOR USER the_user IN SCHEMA the_schema GRANT EXECUTE ON ROUTINES TO the_other_user; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_default_privileges.yml000066400000000000000000000034641503426445100273740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f4eba93c6a11b233fad3114a515737c0e48856a894636163fe46be9f6c82c520 file: - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: the_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: the_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: PROCEDURES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: the_other_user - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: the_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: the_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: the_other_user - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_group.sql000066400000000000000000000005161503426445100246440ustar00rootroot00000000000000alter group admin_group add user dwuser; alter group admin_group add user dwuser1, dwuser2; alter group admin_group drop user dwuser; alter group admin_group drop user dwuser1, dwuser2; alter group admin_group rename to administrators; alter group admin_group add user "test.user"; alter group "admin_group" add user "test.user"; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_group.yml000066400000000000000000000045021503426445100246450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dea9d3bfed3f28f59a598e1495b52b9a2051aa4609ca86239a161077c46b3f33 file: - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: naked_identifier: dwuser - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: naked_identifier: dwuser1 - comma: ',' - object_reference: naked_identifier: dwuser2 - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: drop - keyword: user - object_reference: naked_identifier: dwuser - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: drop - keyword: user - object_reference: naked_identifier: dwuser1 - comma: ',' - object_reference: naked_identifier: dwuser2 - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: rename - keyword: to - object_reference: naked_identifier: administrators - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: quoted_identifier: '"test.user"' - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: quoted_identifier: '"admin_group"' - keyword: add - keyword: user - object_reference: quoted_identifier: '"test.user"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_procedure.sql000066400000000000000000000004511503426445100254760ustar00rootroot00000000000000ALTER PROCEDURE first_quarter_revenue(volume INOUT bigint, at_price IN numeric, result OUT int) RENAME TO quarterly_revenue; ALTER PROCEDURE first_quarter_revenue(bigint, numeric) RENAME TO quarterly_revenue; ALTER PROCEDURE quarterly_revenue(volume bigint, at_price numeric) OWNER TO etl_user; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_procedure.yml000066400000000000000000000042421503426445100255020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ca8018c5a2ffeed0b69802d6765871e436128425ebe00598e3ad114e71b9e227 file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: first_quarter_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: volume - keyword: INOUT - data_type: keyword: bigint - comma: ',' - parameter: at_price - keyword: IN - data_type: keyword: numeric - comma: ',' - parameter: result - keyword: OUT - data_type: keyword: int - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: quarterly_revenue - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: first_quarter_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: bigint - comma: ',' - data_type: keyword: numeric - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: quarterly_revenue - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: quarterly_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: volume - data_type: keyword: bigint - comma: ',' - parameter: at_price - data_type: keyword: numeric - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: etl_user - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_schema.sql000066400000000000000000000002311503426445100247420ustar00rootroot00000000000000ALTER SCHEMA schema1 RENAME TO schema2; ALTER SCHEMA schema1 OWNER TO new_owner; ALTER SCHEMA schema1 QUOTA 50 GB; ALTER SCHEMA schema1 QUOTA UNLIMITED; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_schema.yml000066400000000000000000000025361503426445100247560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fd0a3f56eabf13406394006718db8bdeb0d09fe9ad3bfdbb476619984fe809b8 file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: OWNER - keyword: TO - role_reference: naked_identifier: new_owner - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: QUOTA - numeric_literal: '50' - keyword: GB - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_table.sql000066400000000000000000000030561503426445100246010ustar00rootroot00000000000000ALTER TABLE example_table ADD CONSTRAINT example_name PRIMARY KEY (example_sk); alter table users rename to users_bkup; alter table venue owner to dwuser; alter table vdate owner to vuser; alter table venue rename column venueseats to venuesize; alter table category drop constraint category_pkey; alter table event alter column eventname type varchar(300); create table t1(c0 int encode lzo, c1 bigint encode zstd, c2 varchar(16) encode lzo, c3 varchar(32) encode zstd); alter table t1 alter column c0 encode az64; alter table t1 alter column c1 encode az64; alter table t1 alter column c2 encode bytedict; alter table t1 alter column c3 encode runlength; alter table inventory alter diststyle key distkey inv_warehouse_sk; alter table inventory alter distkey inv_item_sk; alter table inventory alter diststyle all; alter table t1 alter sortkey(c0, c1); alter table t1 alter sortkey none; alter table t1 alter sortkey(c0, c1); alter table t1 alter encode auto; alter table t2 alter column c0 encode lzo; ALTER TABLE the_schema.the_table ADD COLUMN the_timestamp TIMESTAMP; ALTER TABLE the_schema.the_table ADD COLUMN the_boolean BOOLEAN DEFAULT FALSE; alter table users add column feedback_score int default NULL; alter table users drop column feedback_score; alter table users drop column feedback_score cascade; ALTER TABLE the_schema.the_table APPEND FROM the_schema.the_temp_table IGNOREEXTRA FILLTARGET; ALTER TABLE the_schema.the_table APPEND FROM the_schema.the_temp_table; ALTER TABLE the_schema.the_table SET LOCATION 's3://bucket/folder/'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_table.yml000066400000000000000000000265471503426445100246150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: de2269d68c7f114d0429b086bb24aaa20fa9bacbf4497d5291eb34615dc88365 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: example_table - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: example_name - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: example_sk end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: rename - keyword: to - parameter: users_bkup - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: venue - alter_table_action_segment: - keyword: owner - keyword: to - parameter: dwuser - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: vdate - alter_table_action_segment: - keyword: owner - keyword: to - parameter: vuser - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: venue - keyword: rename - keyword: column - column_reference: naked_identifier: venueseats - keyword: to - column_reference: naked_identifier: venuesize - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: category - alter_table_action_segment: - keyword: drop - keyword: constraint - parameter: category_pkey - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: event - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: eventname - keyword: type - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '300' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - data_type: keyword: int - column_attribute_segment: - keyword: encode - keyword: lzo - comma: ',' - column_reference: naked_identifier: c1 - data_type: keyword: bigint - column_attribute_segment: - keyword: encode - keyword: zstd - comma: ',' - column_reference: naked_identifier: c2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16' end_bracket: ) - column_attribute_segment: - keyword: encode - keyword: lzo - comma: ',' - column_reference: naked_identifier: c3 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) - column_attribute_segment: - keyword: encode - keyword: zstd - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c0 - keyword: encode - keyword: az64 - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c1 - keyword: encode - keyword: az64 - statement_terminator: ; - statement: 
alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c2 - keyword: encode - keyword: bytedict - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c3 - keyword: encode - keyword: runlength - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: diststyle - keyword: key - keyword: distkey - column_reference: naked_identifier: inv_warehouse_sk - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: distkey - column_reference: naked_identifier: inv_item_sk - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: diststyle - keyword: all - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - comma: ',' - column_reference: naked_identifier: c1 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - keyword: none - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - comma: ',' - column_reference: naked_identifier: c1 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: encode - keyword: auto - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t2 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c0 - keyword: encode - keyword: lzo - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_table - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: the_timestamp - data_type: datetime_type_identifier: keyword: TIMESTAMP - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . 
- naked_identifier: the_table - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: the_boolean - data_type: keyword: BOOLEAN - keyword: DEFAULT - expression: boolean_literal: 'FALSE' - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: add - keyword: column - column_reference: naked_identifier: feedback_score - data_type: keyword: int - keyword: default - expression: null_literal: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: drop - keyword: column - column_reference: naked_identifier: feedback_score - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: drop - keyword: column - column_reference: naked_identifier: feedback_score - keyword: cascade - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_table - alter_table_action_segment: - keyword: APPEND - keyword: FROM - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_temp_table - keyword: IGNOREEXTRA - keyword: FILLTARGET - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_table - alter_table_action_segment: - keyword: APPEND - keyword: FROM - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_temp_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . 
- naked_identifier: the_table - alter_table_action_segment: - keyword: SET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_user.sql000066400000000000000000000032531503426445100244670ustar00rootroot00000000000000alter user admin createdb; alter user admin with createdb; alter user admin nocreatedb; alter user admin with nocreatedb; alter user "dbuser" reset var; alter user "dbuser" with reset var; alter user admin createuser; alter user admin with createuser; alter user admin nocreateuser; alter user admin with nocreateuser; alter user admin syslog access restricted; alter user admin with syslog access restricted; alter user admin syslog access unrestricted; alter user admin with syslog access unrestricted; alter user iam_superuser password 'mdA51234567890123456780123456789012'; alter user iam_superuser with password 'mdA51234567890123456780123456789012'; alter user iam_superuser password DISABLE; alter user iam_superuser with password DISABLE; alter user admin password 'adminPass9' valid until '2017-12-31 23:59'; alter user admin with password 'adminPass9' valid until '2017-12-31 23:59'; alter user admin rename to sysadmin; alter user admin with rename to sysadmin; alter user admin connection limit 10; alter user admin with connection limit 10; alter user admin connection limit unlimited; alter user admin with connection limit unlimited; alter user dbuser session timeout 300; alter user dbuser with session timeout 300; alter user dbuser reset session timeout; alter user dbuser with reset session timeout; alter user dbuser set var to 100; alter user dbuser with set var to 100; alter user dbuser set var = 'hi'; alter user dbuser with set var = 'hi'; alter user dbuser set var to default; alter user dbuser with set var to default; alter user dbuser set var = default; alter user dbuser with set var = default; alter user dbuser reset var; alter user dbuser with reset var; sqlfluff-3.4.2/test/fixtures/dialects/redshift/alter_user.yml000066400000000000000000000240721503426445100244730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 66d1f2a9a9ab34850220e4911cce2c553d39bf1d970baac751a52d9fe72b8e50 file: - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: createdb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: createdb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: nocreatedb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: nocreatedb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: quoted_identifier: '"dbuser"' - keyword: reset - parameter: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: quoted_identifier: '"dbuser"' - keyword: with - keyword: reset - object_reference: naked_identifier: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: createuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: createuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: nocreateuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: nocreateuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: syslog - keyword: access - keyword: restricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: syslog - keyword: access - keyword: restricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: syslog - keyword: access - keyword: unrestricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: syslog - keyword: access - keyword: unrestricted - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: password - quoted_literal: "'mdA51234567890123456780123456789012'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: with - keyword: password - quoted_literal: "'mdA51234567890123456780123456789012'" - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: password - keyword: DISABLE - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: with - keyword: password - keyword: DISABLE - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: 
naked_identifier: admin - keyword: password - quoted_literal: "'adminPass9'" - keyword: valid - keyword: until - quoted_literal: "'2017-12-31 23:59'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: password - quoted_literal: "'adminPass9'" - keyword: valid - keyword: until - quoted_literal: "'2017-12-31 23:59'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: rename - keyword: to - role_reference: naked_identifier: sysadmin - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: rename - keyword: to - object_reference: naked_identifier: sysadmin - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: connection - keyword: limit - numeric_literal: '10' - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: connection - keyword: limit - numeric_literal: '10' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: connection - keyword: limit - keyword: unlimited - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: connection - keyword: limit - keyword: unlimited - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: session - keyword: timeout - numeric_literal: '300' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: session - keyword: timeout - numeric_literal: '300' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: reset - keyword: session - keyword: timeout - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: reset - keyword: session - keyword: timeout - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - keyword: to - numeric_literal: '100' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - keyword: to - numeric_literal: '100' - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hi'" - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hi'" - statement_terminator: ; - statement: 
alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - keyword: to - keyword: default - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - keyword: to - keyword: default - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - comparison_operator: raw_comparison_operator: '=' - keyword: default - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - comparison_operator: raw_comparison_operator: '=' - keyword: default - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: reset - parameter: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: reset - object_reference: naked_identifier: var - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/analyze_compression.sql000066400000000000000000000002311503426445100263770ustar00rootroot00000000000000analyze compression; analyze compression listing; analyse compression sales(qtysold, commission, saletime); analyse compression sales comprows 10000; sqlfluff-3.4.2/test/fixtures/dialects/redshift/analyze_compression.yml000066400000000000000000000026231503426445100264100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d0afed803f6f284e6ce947638cc96551cfb53fd8f8506989cf0f815f91da0ce5 file: - statement: analyze_statement: keyword: analyze table_reference: naked_identifier: compression - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyze - keyword: compression - table_reference: naked_identifier: listing - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyse - keyword: compression - table_reference: naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: qtysold - comma: ',' - column_reference: naked_identifier: commission - comma: ',' - column_reference: naked_identifier: saletime - end_bracket: ) - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyse - keyword: compression - table_reference: naked_identifier: sales - keyword: comprows - numeric_literal: '10000' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/approximate_functions.sql000066400000000000000000000005351503426445100267430ustar00rootroot00000000000000select top 10 date.caldate, count(totalprice), sum(totalprice), approximate percentile_disc(0.5) within group (order by totalprice) from listing join date on listing.dateid = date.dateid group by date.caldate; select approximate count(distinct pricepaid) from sales; select count(distinct pricepaid) from sales; select approximate(foo) from bar; sqlfluff-3.4.2/test/fixtures/dialects/redshift/approximate_functions.yml000066400000000000000000000123411503426445100267430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99c58441cd45d86be2cfaed552cbe8bfacaa56086500955319aec6d3c2ca9214 file: - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: keyword: top numeric_literal: '10' - select_clause_element: column_reference: - naked_identifier: date - dot: . - naked_identifier: caldate - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) - comma: ',' - select_clause_element: function: keyword: approximate function_name: function_name_identifier: percentile_disc function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: totalprice end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: listing join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: date join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: listing - dot: . - naked_identifier: dateid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: date - dot: . 
- naked_identifier: dateid groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: date - dot: . - naked_identifier: caldate - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: keyword: approximate function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: pricepaid end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: pricepaid end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: approximate function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/array_unnest.sql000066400000000000000000000014311503426445100250300ustar00rootroot00000000000000WITH example_data AS ( SELECT 10 AS shop_id , json_parse('[1, 2]') AS inventory UNION ALL SELECT 20 AS shop_id , json_parse('[3, 4, 5]') AS inventory UNION ALL SELECT 30 AS shop_id , json_parse('[6, 7, 8, 9]') AS inventory ) SELECT shop_id , value , index FROM example_data ed, ed.inventory AS value AT index; SELECT c_name, orders.o_orderkey AS orderkey, index AS orderkey_index FROM customer_orders_lineitem c, c.c_orders AS orders AT index ORDER BY orderkey_index; -- can extract the correlated values from multiple arrays using the index variable SELECT value_a::BIGINT, array_b[idx]::VARCHAR AS value_b, array_c[MOD(idx, 3) + 1]::FLOAT8 AS value_c FROM mytable t, t.array_a AS value_a AT idx; sqlfluff-3.4.2/test/fixtures/dialects/redshift/array_unnest.yml000066400000000000000000000215451503426445100250420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e093f4f5e0c82351e8b64bb61bb4b4291c51f04d1d195f0a4981eaaa7d2eeaf8 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: example_data keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'[1, 2]'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '20' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'[3, 4, 5]'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '30' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'[6, 7, 8, 9]'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: shop_id - comma: ',' - select_clause_element: column_reference: naked_identifier: value - comma: ',' - select_clause_element: column_reference: naked_identifier: index from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_data alias_expression: naked_identifier: ed - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: ed - dot: . - naked_identifier: inventory - keyword: AS - naked_identifier: value - keyword: AT - naked_identifier: index - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: o_orderkey alias_expression: alias_operator: keyword: AS naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: index alias_expression: alias_operator: keyword: AS naked_identifier: orderkey_index from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: c - dot: . 
- naked_identifier: c_orders - keyword: AS - naked_identifier: orders - keyword: AT - naked_identifier: index orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderkey_index - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value_a casting_operator: '::' data_type: keyword: BIGINT - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: array_b array_accessor: start_square_bracket: '[' expression: column_reference: naked_identifier: idx end_square_bracket: ']' casting_operator: '::' data_type: keyword: VARCHAR alias_expression: alias_operator: keyword: AS naked_identifier: value_b - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: array_c array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: MOD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: idx - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) binary_operator: + numeric_literal: '1' end_square_bracket: ']' casting_operator: '::' data_type: keyword: FLOAT8 alias_expression: alias_operator: keyword: AS naked_identifier: value_c from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: t - dot: . - naked_identifier: array_a - keyword: AS - naked_identifier: value_a - keyword: AT - naked_identifier: idx - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/attach_rls_policy.sql000066400000000000000000000002331503426445100260200ustar00rootroot00000000000000ATTACH RLS POLICY policy_concerts ON tickit_category_redshift TO ROLE analyst, ROLE dbadmin; ATTACH RLS POLICY policy_name ON TABLE table_name TO PUBLIC; sqlfluff-3.4.2/test/fixtures/dialects/redshift/attach_rls_policy.yml000066400000000000000000000023371503426445100260310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 132626be2d39562d6e274d1fc81e4129e591519f73f638a62e5cfb1d6bb87a15 file: - statement: manage_rls_policy_statement: - keyword: ATTACH - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_concerts - keyword: 'ON' - table_reference: naked_identifier: tickit_category_redshift - keyword: TO - keyword: ROLE - role_reference: naked_identifier: analyst - comma: ',' - keyword: ROLE - role_reference: naked_identifier: dbadmin - statement_terminator: ; - statement: manage_rls_policy_statement: - keyword: ATTACH - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_name - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: TO - role_reference: naked_identifier: PUBLIC - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/call.sql000066400000000000000000000002001503426445100232220ustar00rootroot00000000000000CALL test_proc(); CALL test_proc(pg_last_query_id()); CALL outer_proc(5); call test_sp1(3,'book'); call test_sp2(2,'2019'); sqlfluff-3.4.2/test/fixtures/dialects/redshift/call.yml000066400000000000000000000044661503426445100232460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e9bd9e837749144da56d0437c753b8ee23bd8983fb5a80912de445473c7148f1 file: - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: test_proc function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: test_proc function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: pg_last_query_id function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: outer_proc function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: test_sp1 function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'book'" - end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: test_sp2 function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'2019'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/cast_conversion.sql000066400000000000000000000011161503426445100255150ustar00rootroot00000000000000select cast(col1 as integer) from tbl1; select convert(integer, col1) from tbl1; select col1::integer from tbl1; select cast(col1 as timestamptz) from tbl1; select convert(timestamptz, col1) from tbl1; select col1::timestamptz from tbl1; select cast(col1 as decimal(38, 2)) from tbl1; select convert(decimal(38, 2), col1) from tbl1; select col1::decimal(38, 2) from tbl1; select cast(col1 as interval) from tbl1; select convert(interval, col1) from tbl1; select col1::interval from tbl1; select cast(pg_namespace.nspname 
as information_schema.sql_identifier) from pg_namespace; sqlfluff-3.4.2/test/fixtures/dialects/redshift/cast_conversion.yml000066400000000000000000000242411503426445100255230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 926a4ef4cfeaf9d86cd3f01610698554d82783517b5c4f36eb6a7049d34d8b3d file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: integer end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: keyword: integer comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: integer from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: datetime_type_identifier: keyword: timestamptz end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: datetime_type_identifier: keyword: timestamptz comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: datetime_type_identifier: keyword: timestamptz from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - 
numeric_literal: '2' - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: interval end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: keyword: interval comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: interval from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: pg_namespace - dot: . - naked_identifier: nspname keyword: as data_type: naked_identifier: information_schema dot: . 
data_type_identifier: sql_identifier end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_namespace - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/cast_with_whitespaces.sql000066400000000000000000000021601503426445100267020ustar00rootroot00000000000000-- redshift_cast_with_whitespaces.sql /* Several valid queries where there is whitespace surrounding the Redshift cast operator (::) */ -- query from https://github.com/sqlfluff/sqlfluff/issues/2720 SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: text FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with an arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/cast_with_whitespaces.yml000066400000000000000000000201111503426445100267010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 9ffe4672dbafb711fcec2fbb99a631e55248b94fef39b807338c7544180ff1bc file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/close.sql000066400000000000000000000000151503426445100234200ustar00rootroot00000000000000close curs1; sqlfluff-3.4.2/test/fixtures/dialects/redshift/close.yml000066400000000000000000000010231503426445100234220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ea4035c9363b77eb1d272c3d92125790f2a32576a1bca03b66ebe37f00e7653e file: statement: close_statement: keyword: close object_reference: naked_identifier: curs1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/copy.sql000066400000000000000000000064431503426445100233000ustar00rootroot00000000000000-- Retrieved from https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html copy favoritemovies from 'dynamodb://Movies' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' readratio 50; copy listing from 's3://mybucket/data/listing/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; copy sales from 'emr://j-SAMPLE2B500FC/myoutput/part-*' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '\t' lzop; copy sales from 'emr://j-SAMPLE2B500FC/myoutput/json/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' JSON 's3://mybucket/jsonpaths.txt'; copy category from 's3://mybucket/custdata' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; copy customer from 's3://mybucket/cust.manifest' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest; copy listing from 's3://mybucket/data/listings/parquet/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' format as parquet; copy event from 's3://mybucket/data/allevents_pipe.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' removequotes emptyasnull blanksasnull maxerror 5 delimiter '|' timeformat 'YYYY-MM-DD HH:MI:SS'; copy venue from 's3://mybucket/data/venue_fw.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'; copy category from 's3://mybucket/data/category_csv.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' csv; copy category from 's3://mybucket/data/category_csv.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' csv quote as '%'; copy venue from 's3://mybucket/data/venue.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' explicit_ids; copy time from 's3://mybucket/data/timerows.gz' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' gzip delimiter '|'; copy timestamp1 from 's3://mybucket/data/time.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' timeformat 'YYYY-MM-DD HH:MI:SS'; copy venue_new(venueid, venuename, venuecity, venuestate) from 's3://mybucket/data/venue_noseats.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '|'; copy redshiftinfo from 's3://mybucket/data/redshiftinfo.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '|' escape; copy category from 's3://mybucket/category_object_auto.json' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' json 'auto'; copy category from 's3://mybucket/category_auto-ignorecase.avro' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' format as avro 'auto ignorecase'; COPY norway_natural FROM 's3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp' FORMAT SHAPEFILE CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY norway_water FROM 's3://bucket_name/shapefiles/norway/gis_osm_water_a_free_1.shp' FORMAT SHAPEFILE SIMPLIFY AUTO 1.1E-05 MAXERROR 2 CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY norway_natural_order(wkb_geometry, osm_id, code, fclass, name) FROM 's3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp' FORMAT SHAPEFILE CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY some_table FROM 
's3://some_bucket/some/path' IAM_ROLE 'some_iam_role' FORMAT AS CSV TRUNCATECOLUMNS IGNOREHEADER 1 ACCEPTINVCHARS ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/copy.yml000066400000000000000000000252511503426445100233000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a189ac9b55ee6d03fec76f09b113dd37bd80d8ba3cd7f2a36358ab7377b94429 file: - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: favoritemovies - keyword: from - quoted_literal: "'dynamodb://Movies'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: readratio - numeric_literal: '50' - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: listing - keyword: from - quoted_literal: "'s3://mybucket/data/listing/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: sales - keyword: from - quoted_literal: "'emr://j-SAMPLE2B500FC/myoutput/part-*'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'\\t'" - keyword: lzop - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: sales - keyword: from - quoted_literal: "'emr://j-SAMPLE2B500FC/myoutput/json/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: JSON quoted_literal: "'s3://mybucket/jsonpaths.txt'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/custdata'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: customer - keyword: from - quoted_literal: "'s3://mybucket/cust.manifest'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: listing - keyword: from - quoted_literal: "'s3://mybucket/data/listings/parquet/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: format - keyword: as - keyword: parquet - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: event - keyword: from - quoted_literal: "'s3://mybucket/data/allevents_pipe.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: removequotes - keyword: emptyasnull - keyword: blanksasnull - keyword: maxerror - numeric_literal: '5' - keyword: delimiter - quoted_literal: "'|'" - keyword: timeformat - quoted_literal: "'YYYY-MM-DD HH:MI:SS'" - statement_terminator: ; - statement: copy_statement: - 
keyword: copy - table_reference: naked_identifier: venue - keyword: from - quoted_literal: "'s3://mybucket/data/venue_fw.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: fixedwidth - quoted_literal: "'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/data/category_csv.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: csv - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/data/category_csv.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: csv - keyword: quote - keyword: as - quoted_literal: "'%'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: venue - keyword: from - quoted_literal: "'s3://mybucket/data/venue.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: explicit_ids - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: time - keyword: from - quoted_literal: "'s3://mybucket/data/timerows.gz'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: gzip - keyword: delimiter - quoted_literal: "'|'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: timestamp1 - keyword: from - quoted_literal: "'s3://mybucket/data/time.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: timeformat - quoted_literal: "'YYYY-MM-DD HH:MI:SS'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: venue_new - bracketed: - start_bracket: ( - column_reference: naked_identifier: venueid - comma: ',' - column_reference: naked_identifier: venuename - comma: ',' - column_reference: naked_identifier: venuecity - comma: ',' - column_reference: naked_identifier: venuestate - end_bracket: ) - keyword: from - quoted_literal: "'s3://mybucket/data/venue_noseats.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'|'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: redshiftinfo - keyword: from - quoted_literal: "'s3://mybucket/data/redshiftinfo.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'|'" - keyword: escape - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/category_object_auto.json'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: json quoted_literal: "'auto'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: 
naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/category_auto-ignorecase.avro'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: format - keyword: as - keyword: avro - quoted_literal: "'auto ignorecase'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_natural - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_water - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_water_a_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - keyword: SIMPLIFY - keyword: AUTO - numeric_literal: '1.1E-05' - keyword: MAXERROR - numeric_literal: '2' - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_natural_order - bracketed: - start_bracket: ( - column_reference: naked_identifier: wkb_geometry - comma: ',' - column_reference: naked_identifier: osm_id - comma: ',' - column_reference: naked_identifier: code - comma: ',' - column_reference: naked_identifier: fclass - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: some_table - keyword: FROM - quoted_literal: "'s3://some_bucket/some/path'" - authorization_segment: keyword: IAM_ROLE quoted_literal: "'some_iam_role'" - data_format_segment: - keyword: FORMAT - keyword: AS - keyword: CSV - keyword: TRUNCATECOLUMNS - keyword: IGNOREHEADER - numeric_literal: '1' - keyword: ACCEPTINVCHARS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_datashare.sql000066400000000000000000000003001503426445100255670ustar00rootroot00000000000000CREATE DATASHARE salesshare; CREATE DATASHARE demoshare SET PUBLICACCESSIBLE = TRUE; CREATE DATASHARE demoshare PUBLICACCESSIBLE = FALSE; CREATE DATASHARE demoshare PUBLICACCESSIBLE FALSE; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_datashare.yml000066400000000000000000000026121503426445100256010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
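# A hedged aside: the tree recorded below can also be produced programmatically
# through sqlfluff's simple Python API. "sqlfluff.parse" and its "dialect"
# argument are public API; what you do with the returned parse record is up to
# you, and the input statement reused here is simply the first one from the
# create_datashare.sql fixture above:
#
#     import sqlfluff
#
#     # Raises an error if the statement fails to parse under this dialect.
#     parsed = sqlfluff.parse("CREATE DATASHARE salesshare;", dialect="redshift")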
_hash: cdec9c9737e6edf34ea910601dd0cc0ee6a5568d3af81aa4361f92a84074eeeb file: - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: salesshare - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: SET - keyword: PUBLICACCESSIBLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'TRUE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: PUBLICACCESSIBLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'FALSE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: PUBLICACCESSIBLE - keyword: 'FALSE' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_function.sql000066400000000000000000000007071503426445100273750ustar00rootroot00000000000000CREATE EXTERNAL FUNCTION exfunc_sum(INT,INT) RETURNS INT STABLE LAMBDA 'lambda_sum' IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'; CREATE OR REPLACE EXTERNAL FUNCTION exfunc_upper() RETURNS varchar STABLE LAMBDA 'exfunc_sleep_3' IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test' RETRY_TIMEOUT 0; CREATE OR REPLACE EXTERNAL FUNCTION exfunc_foo(varchar) RETURNS int IMMUTABLE SAGEMAKER 'some_endpoint_name' IAM_ROLE default; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_function.yml000066400000000000000000000042561503426445100274020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d7ab0b2349a61c980c662f461b80832d9ce9a0053d5901f4781e5d10894d1b39 file: - statement: create_external_function_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_sum - bracketed: - start_bracket: ( - data_type: keyword: INT - comma: ',' - data_type: keyword: INT - end_bracket: ) - keyword: RETURNS - data_type: keyword: INT - keyword: STABLE - keyword: LAMBDA - quoted_literal: "'lambda_sum'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_upper - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: varchar - keyword: STABLE - keyword: LAMBDA - quoted_literal: "'exfunc_sleep_3'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'" - keyword: RETRY_TIMEOUT - numeric_literal: '0' - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_foo - bracketed: start_bracket: ( data_type: keyword: varchar end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: IMMUTABLE - keyword: SAGEMAKER - quoted_literal: "'some_endpoint_name'" - keyword: IAM_ROLE - keyword: default - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_schema.sql000066400000000000000000000051201503426445100270020ustar00rootroot00000000000000create external schema spectrum_schema from data catalog database 'sampledb' region 'us-west-2' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; create external schema spectrum_schema from data catalog database 'spectrum_db' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole' create external database if not exists; create external schema hive_schema from hive metastore database 'hive_db' uri '172.10.10.10' port 99 iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; create external schema spectrum_schema from data catalog database 'spectrum_db' iam_role 'arn:aws:iam::123456789012:role/myRedshiftRole,arn:aws:iam::123456789012:role/myS3Role' catalog_role 'arn:aws:iam::123456789012:role/myAthenaRole' create external database if not exists; CREATE EXTERNAL SCHEMA IF NOT EXISTS myRedshiftSchema FROM POSTGRES DATABASE 'my_aurora_db' SCHEMA 'my_aurora_schema' URI 'endpoint to aurora hostname' PORT 5432 IAM_ROLE 'arn:aws:iam::123456789012:role/MyAuroraRole' SECRET_ARN 'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'; CREATE EXTERNAL SCHEMA sales_schema FROM REDSHIFT DATABASE 'sales_db' SCHEMA 'public'; CREATE EXTERNAL SCHEMA IF NOT EXISTS myRedshiftSchema FROM MYSQL DATABASE 'my_aurora_db' URI 'endpoint to aurora hostname' IAM_ROLE 'arn:aws:iam::123456789012:role/MyAuroraRole' SECRET_ARN 'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'; create external schema spectrum_schema from data catalog database 'sampledb' region 'us-west-2' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; create external schema my_schema from kafka iam_role 'arn:aws:iam::012345678901:role/my_role' authentication iam uri 
'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9098,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9098'; create external schema my_schema from kafka authentication none uri 'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9092,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9092'; create external schema my_schema from kafka iam_role 'arn:aws:iam::012345678901:role/my_role' authentication mtls uri 'lkc-2v531.domz6wj0p.us-west-1.aws.confluent.cloud:9092' authentication_arn 'arn:aws:acm:region:444455556666:certificate/certificate_ID'; create external schema my_schema from kafka iam_role 'arn:aws:iam::012345678901:role/my_role' authentication mtls uri 'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9094,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9094' secret_arn 'arn:aws:secretsmanager:us-east-1:012345678910:secret:myMTLSSecret'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_schema.yml000066400000000000000000000162121503426445100270100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3d5fda65dae918fce7c5488005253c63e3b356c8a2dbd727f5be5817f17cbd9 file: - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'sampledb'" - keyword: region - quoted_literal: "'us-west-2'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'spectrum_db'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - keyword: create - keyword: external - keyword: database - keyword: if - keyword: not - keyword: exists - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: hive_schema - keyword: from - keyword: hive - keyword: metastore - keyword: database - quoted_literal: "'hive_db'" - keyword: uri - quoted_literal: "'172.10.10.10'" - keyword: port - numeric_literal: '99' - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'spectrum_db'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/myRedshiftRole,arn:aws:iam::123456789012:role/myS3Role'" - keyword: catalog_role - quoted_literal: "'arn:aws:iam::123456789012:role/myAthenaRole'" - keyword: create - keyword: external - keyword: database - keyword: if - keyword: not - keyword: exists - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - keyword: IF - 
keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: myRedshiftSchema - keyword: FROM - keyword: POSTGRES - keyword: DATABASE - quoted_literal: "'my_aurora_db'" - keyword: SCHEMA - quoted_literal: "'my_aurora_schema'" - keyword: URI - quoted_literal: "'endpoint to aurora hostname'" - keyword: PORT - numeric_literal: '5432' - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/MyAuroraRole'" - keyword: SECRET_ARN - quoted_literal: "'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - schema_reference: naked_identifier: sales_schema - keyword: FROM - keyword: REDSHIFT - keyword: DATABASE - quoted_literal: "'sales_db'" - keyword: SCHEMA - quoted_literal: "'public'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: myRedshiftSchema - keyword: FROM - keyword: MYSQL - keyword: DATABASE - quoted_literal: "'my_aurora_db'" - keyword: URI - quoted_literal: "'endpoint to aurora hostname'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/MyAuroraRole'" - keyword: SECRET_ARN - quoted_literal: "'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'sampledb'" - keyword: region - quoted_literal: "'us-west-2'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: from - keyword: kafka - keyword: iam_role - quoted_literal: "'arn:aws:iam::012345678901:role/my_role'" - keyword: authentication - keyword: iam - keyword: uri - quoted_literal: "'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9098,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9098'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: from - keyword: kafka - keyword: authentication - keyword: none - keyword: uri - quoted_literal: "'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9092,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9092'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: from - keyword: kafka - keyword: iam_role - quoted_literal: "'arn:aws:iam::012345678901:role/my_role'" - keyword: authentication - keyword: mtls - keyword: uri - quoted_literal: "'lkc-2v531.domz6wj0p.us-west-1.aws.confluent.cloud:9092'" - keyword: authentication_arn - quoted_literal: "'arn:aws:acm:region:444455556666:certificate/certificate_ID'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: from - keyword: kafka - 
keyword: iam_role - quoted_literal: "'arn:aws:iam::012345678901:role/my_role'" - keyword: authentication - keyword: mtls - keyword: uri - quoted_literal: "'b-1.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9094,b-2.myTestCluster.123z8u.c2.kafka.us-west-1.amazonaws.com:9094'" - keyword: secret_arn - quoted_literal: "'arn:aws:secretsmanager:us-east-1:012345678910:secret:myMTLSSecret'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_table.sql000066400000000000000000000051671503426445100266440ustar00rootroot00000000000000CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER ) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS ORC LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS AVRO LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS TEXTFILE LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) PARTITIONED BY (col3 integer) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) PARTITIONED BY (col3 INTEGER, col4 INTEGER) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER ) STORED AS PARQUET LOCATION 's3://bucket/folder' TABLE PROPERTIES ('some_property1'='some_value1', 'some_property2'='some_value2') ; create external table spectrum.sales( salesid integer, saledate date, qtysold smallint, pricepaid decimal(8,2), saletime timestamp) row format delimited fields terminated by '\t' stored as textfile location 's3://awssampledbuswest2/tickit/spectrum/sales/' table properties ('numRows'='170000'); create external table spectrum.cloudtrail_json ( event_version int, event_id bigint, event_time timestamp, event_type varchar(10), recipientaccountid bigint) row format serde 'org.openx.data.jsonserde.JsonSerDe' with serdeproperties ( 'dots.in.keys' = 'true', 'mapping.requesttime' = 'requesttimestamp' ) stored as textfile location 's3://mybucket/json/cloudtrail'; CREATE EXTERNAL TABLE schema_spectrum_uddh.soccer_league ( league_rank smallint, club_name varchar(15), league_spi decimal(6,2), league_nspi smallint ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n\l' stored as textfile LOCATION 's3://spectrum-uddh/league/' table properties ('skip.header.line.count'='1'); CREATE EXTERNAL TABLE tbl1 (col1 int, col2 varchar(10)) ROW FORMAT SERDE 'com.amazon.ionhiveserde.IonHiveSerDe' STORED AS INPUTFORMAT 'com.amazon.ionhiveserde.formats.IonInputFormat' OUTPUTFORMAT 'com.amazon.ionhiveserde.formats.IonOutputFormat' LOCATION 's3://s3-bucket/prefix'; CREATE EXTERNAL TABLE spectrum.partitioned_lineitem PARTITIONED BY (l_shipdate, l_shipmode) STORED AS parquet LOCATION 'S3://mybucket/cetas/partitioned_lineitem/' AS SELECT 1; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_table.yml000066400000000000000000000336631503426445100266500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f643ba6ad3080f5938fee497cd07cb46e2eea57410e3989259a6b7d27f458f7 file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: INTEGER end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: AVRO - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: TEXTFILE - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col3 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col3 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col4 - data_type: keyword: INTEGER - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: INTEGER end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - keyword: TABLE - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'some_property1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value1'" - comma: ',' - quoted_literal: "'some_property2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value2'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: salesid - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: saledate - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: qtysold - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: pricepaid - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '8' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: saletime - data_type: datetime_type_identifier: keyword: timestamp - end_bracket: ) - keyword: row - keyword: format - keyword: delimited - row_format_delimited_segment: - keyword: fields - keyword: terminated - keyword: by - quoted_literal: "'\\t'" - keyword: stored - keyword: as - keyword: textfile - keyword: location - quoted_literal: "'s3://awssampledbuswest2/tickit/spectrum/sales/'" - keyword: table - keyword: properties - bracketed: - start_bracket: ( - quoted_literal: "'numRows'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'170000'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: spectrum - dot: . - naked_identifier: cloudtrail_json - bracketed: - start_bracket: ( - column_reference: naked_identifier: event_version - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: event_id - data_type: keyword: bigint - comma: ',' - column_reference: naked_identifier: event_time - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - column_reference: naked_identifier: event_type - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: recipientaccountid - data_type: keyword: bigint - end_bracket: ) - keyword: row - keyword: format - keyword: serde - quoted_literal: "'org.openx.data.jsonserde.JsonSerDe'" - keyword: with - keyword: serdeproperties - bracketed: - start_bracket: ( - quoted_literal: "'dots.in.keys'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'true'" - comma: ',' - quoted_literal: "'mapping.requesttime'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'requesttimestamp'" - end_bracket: ) - keyword: stored - keyword: as - keyword: textfile - keyword: location - quoted_literal: "'s3://mybucket/json/cloudtrail'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: schema_spectrum_uddh - dot: . 
- naked_identifier: soccer_league - bracketed: - start_bracket: ( - column_reference: naked_identifier: league_rank - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: club_name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - comma: ',' - column_reference: naked_identifier: league_spi - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '6' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: league_nspi - data_type: keyword: smallint - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n\\l'" - keyword: stored - keyword: as - keyword: textfile - keyword: LOCATION - quoted_literal: "'s3://spectrum-uddh/league/'" - keyword: table - keyword: properties - bracketed: - start_bracket: ( - quoted_literal: "'skip.header.line.count'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'com.amazon.ionhiveserde.IonHiveSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'com.amazon.ionhiveserde.formats.IonInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'com.amazon.ionhiveserde.formats.IonOutputFormat'" - keyword: LOCATION - quoted_literal: "'s3://s3-bucket/prefix'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: partitioned_lineitem - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: l_shipdate - comma: ',' - column_reference: naked_identifier: l_shipmode - end_bracket: ) - keyword: STORED - keyword: AS - keyword: parquet - keyword: LOCATION - quoted_literal: "'S3://mybucket/cetas/partitioned_lineitem/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_table_as.sql000066400000000000000000000044501503426445100273210ustar00rootroot00000000000000CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) ROW FORMAT DELIMITED LINES TERMINATED BY '\007' STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\007' STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' TABLE PROPERTIES ('some_property1'='some_value1', 'some_property2'='some_value2') AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE spectrum.partitioned_lineitem PARTITIONED BY (l_shipdate date, l_shipmode varchar(24)) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n\l' STORED AS textfile LOCATION 'S3://mybucket/cetas/partitioned_lineitem/' AS SELECT l_orderkey, l_shipmode, l_shipdate, l_partkey FROM local_table; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_external_table_as.yml000066400000000000000000000371341503426445100273300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
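# A usage note for spot-checking fixtures like this one: "sqlfluff parse" and
# its "--dialect" flag are real CLI surface, though the exact formatting of
# the printed tree is not reproduced here:
#
#     sqlfluff parse --dialect redshift \
#         test/fixtures/dialects/redshift/create_external_table_as.sql
#
# prints the parse tree that this YAML encodes, which is handy when a fixture
# diff looks surprising.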
_hash: 762ab854ca910e4d7691d14d0c772a1a0f5545e8a9815e4bbce5f1e3b8720bf1 file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\007'" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\007'" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: TABLE - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'some_property1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value1'" - comma: ',' - quoted_literal: "'some_property2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value2'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: partitioned_lineitem - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: l_shipdate - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: l_shipmode - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '24' end_bracket: ) - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n\\l'" - keyword: STORED - keyword: AS - keyword: textfile - keyword: LOCATION - quoted_literal: "'S3://mybucket/cetas/partitioned_lineitem/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: l_orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipmode - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_partkey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: local_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_function.sql000066400000000000000000000003651503426445100254730ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION public.iif( condition BOOLEAN , true_result ANYELEMENT , false_result ANYELEMENT) RETURNS ANYELEMENT STABLE AS $$ if condition: return true_result return false_result $$ LANGUAGE plpythonu; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_function.yml000066400000000000000000000025741503426445100255010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6bfe60aac773580df9cb9b10feae284a254229e90369885d79d7f030aa47006c file: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: iif - function_parameter_list: bracketed: - start_bracket: ( - parameter: condition - data_type: keyword: BOOLEAN - comma: ',' - parameter: true_result - data_type: keyword: ANYELEMENT - comma: ',' - parameter: false_result - data_type: keyword: ANYELEMENT - end_bracket: ) - keyword: RETURNS - data_type: keyword: ANYELEMENT - function_definition: - keyword: STABLE - keyword: AS - quoted_literal: "$$\n if condition:\n return true_result\n return\ \ false_result\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpythonu statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_group.sql000066400000000000000000000003531503426445100247770ustar00rootroot00000000000000create group admin_group; create group "admin_group"; create group admin_group user admin1; create group admin_group with user admin1; create group admin_group user admin1, admin2; create group admin_group with user admin1, admin2; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_group.yml000066400000000000000000000035131503426445100250020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efa69dfefc61d1b9b842bb77160caadfe278edadf91a158c444f01a1e4c0e16a file: - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: quoted_identifier: '"admin_group"' - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: user - object_reference: naked_identifier: admin1 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: with - keyword: user - object_reference: naked_identifier: admin1 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: user - object_reference: naked_identifier: admin1 - comma: ',' - object_reference: naked_identifier: admin2 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: with - keyword: user - object_reference: naked_identifier: admin1 - comma: ',' - object_reference: naked_identifier: admin2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_library.sql000066400000000000000000000032351503426445100253110ustar00rootroot00000000000000create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' credentials 'aws_iam_role=arn:aws:iam::123456789:role/role_name' region as 'us-east-1'; create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' region as 'us-east-1' credentials 'aws_iam_role=arn:aws:iam::123456789:role/role_name'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' with credentials as 'aws_iam_role=arn:aws:iam::123456789:role/role_name' region as 'us-east-1'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' credentials as 'aws_access_key_id=;aws_secret_access_key=;token=' region as 'us-east-1'; create or 
replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' with credentials 'aws_access_key_id=;aws_secret_access_key=;token=' region as 'us-east-1'; create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' iam_role 'aws_iam_role=arn:aws:iam::123456789:role/role_name'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' ACCESS_KEY_ID '' SECRET_ACCESS_KEY ''; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' ACCESS_KEY_ID '' SECRET_ACCESS_KEY '' SESSION_TOKEN '' region 'us-east-1'; create library lib1 language plpythonu from 'https://example.com/packages/lib1.0.3.zip'; create or replace library lib1 language plpythonu from 'https://example.com/packages/lib1.0.3.zip'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_library.yml000066400000000000000000000126061503426445100253150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f24b1e4e956475c5150d571f7d8e10636ec7026aaffb08098a08e3ed0eaf1f14 file: - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: keyword: credentials quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - authorization_segment: keyword: credentials quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: with - keyword: credentials - keyword: as - quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: credentials - keyword: as - quoted_literal: "'aws_access_key_id=;aws_secret_access_key=;token='" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: with - keyword: credentials - quoted_literal: "'aws_access_key_id=;aws_secret_access_key=;token='" - 
keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: keyword: iam_role quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: ACCESS_KEY_ID - quoted_literal: "''" - keyword: SECRET_ACCESS_KEY - quoted_literal: "''" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: ACCESS_KEY_ID - quoted_literal: "''" - keyword: SECRET_ACCESS_KEY - quoted_literal: "''" - keyword: SESSION_TOKEN - quoted_literal: "''" - keyword: region - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'https://example.com/packages/lib1.0.3.zip'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'https://example.com/packages/lib1.0.3.zip'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_materialized_view.sql000066400000000000000000000012111503426445100273410ustar00rootroot00000000000000create materialized view mat_view_example backup yes auto refresh no as select col1 from example_table; CREATE MATERIALIZED VIEW tickets_mv AS select catgroup, sum(qtysold) as sold from category c, event e, sales s where c.catid = e.catid and e.eventid = s.eventid group by catgroup; CREATE MATERIALIZED VIEW mv_sales_vw as select salesid, qtysold, pricepaid, commission, saletime from public.sales union all select salesid, qtysold, pricepaid, commission, saletime from spectrum.sales ; CREATE MATERIALIZED VIEW mv_baseball DISTSTYLE ALL AUTO REFRESH YES AS SELECT ball AS baseball FROM baseball_table; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_materialized_view.yml000066400000000000000000000155421503426445100273570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
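# A note on reading the tree below: option values like YES and NO appear
# quoted ('yes', 'no') purely because unquoted yes/no would be read as YAML
# booleans; the underlying tokens are plain SQL keywords.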
_hash: 827b0fd13132cbb06243f9c709d9e457adb2a796df460d25b929889fababf931 file: - statement: create_materialized_view_statement: - keyword: create - keyword: materialized - keyword: view - table_reference: naked_identifier: mat_view_example - keyword: backup - keyword: 'yes' - keyword: auto - keyword: refresh - keyword: 'no' - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: tickets_mv - keyword: AS - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: catgroup - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: qtysold end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sold from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: category alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: event alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales alias_expression: naked_identifier: s where_clause: keyword: where expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: catid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: catid - binary_operator: and - column_reference: - naked_identifier: e - dot: . - naked_identifier: eventid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: eventid groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: catgroup - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: mv_sales_vw - keyword: as - set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: qtysold - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: column_reference: naked_identifier: commission - comma: ',' - select_clause_element: column_reference: naked_identifier: saletime from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . 
- naked_identifier: sales - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: qtysold - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: column_reference: naked_identifier: commission - comma: ',' - select_clause_element: column_reference: naked_identifier: saletime from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: spectrum - dot: . - naked_identifier: sales - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: mv_baseball - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: AUTO - keyword: REFRESH - keyword: 'YES' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ball alias_expression: alias_operator: keyword: AS naked_identifier: baseball from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baseball_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_model.sql000066400000000000000000000024611503426445100247450ustar00rootroot00000000000000CREATE MODEL abalone_xgboost_multi_predict_age FROM ( SELECT length_val, diameter, height, whole_weight, shucked_weight, viscera_weight, shell_weight, rings FROM abalone_xgb WHERE record_number < 2500 ) TARGET rings FUNCTION ml_fn_abalone_xgboost_multi_predict_age IAM_ROLE 'arn:aws:iam::XXXXXXXXXXXX:role/Redshift-ML' AUTO OFF MODEL_TYPE XGBOOST OBJECTIVE 'multi:softmax' PREPROCESSORS 'none' HYPERPARAMETERS DEFAULT EXCEPT (NUM_ROUND '100', NUM_CLASS '30') SETTINGS (S3_BUCKET 'bucket'); CREATE MODEL customer_churn FROM 'training-job-customer-churn-v4' FUNCTION customer_churn_predict (varchar, int, float, float) RETURNS int IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-ML' SETTINGS (S3_BUCKET 'bucket'); CREATE MODEL remote_customer_churn FUNCTION remote_fn_customer_churn_predict (varchar, int, float, float) RETURNS int SAGEMAKER 'customer-churn-endpoint' IAM_ROLE 'arn:aws:iam::0123456789012:role/Redshift-ML'; CREATE MODEL customers_clusters FROM customers FUNCTION customers_cluster IAM_ROLE 'iam-role-arn' AUTO OFF MODEL_TYPE KMEANS PREPROCESSORS '[ { "ColumnSet": [ "*" ], "Transformers": [ "NumericPassthrough" ] } ]' HYPERPARAMETERS DEFAULT EXCEPT ( K '5' ) SETTINGS (S3_BUCKET 'bucket'); sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_model.yml000066400000000000000000000135641503426445100247550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
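# A note on the tree below (an observation from this fixture, not a documented
# guarantee): most option values parse as typed segments, but the contents of
# the HYPERPARAMETERS (...) lists are captured loosely as raw `word` and
# `single_quote` tokens (e.g. NUM_ROUND '100'), rather than as keywords and
# quoted_literal segments.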
_hash: 8c2946b048370a168deda05b624717dd76ea3e5da3abfdc0f7c34cb61cfabfed file: - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: abalone_xgboost_multi_predict_age - keyword: FROM - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: length_val - comma: ',' - select_clause_element: column_reference: naked_identifier: diameter - comma: ',' - select_clause_element: column_reference: naked_identifier: height - comma: ',' - select_clause_element: column_reference: naked_identifier: whole_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: shucked_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: viscera_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: shell_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: rings from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: abalone_xgb where_clause: keyword: WHERE expression: column_reference: naked_identifier: record_number comparison_operator: raw_comparison_operator: < numeric_literal: '2500' end_bracket: ) - keyword: TARGET - column_reference: naked_identifier: rings - keyword: FUNCTION - object_reference: naked_identifier: ml_fn_abalone_xgboost_multi_predict_age - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::XXXXXXXXXXXX:role/Redshift-ML'" - keyword: AUTO - keyword: 'OFF' - keyword: MODEL_TYPE - keyword: XGBOOST - keyword: OBJECTIVE - quoted_literal: "'multi:softmax'" - keyword: PREPROCESSORS - quoted_literal: "'none'" - keyword: HYPERPARAMETERS - keyword: DEFAULT - keyword: EXCEPT - bracketed: - start_bracket: ( - word: NUM_ROUND - single_quote: "'100'" - comma: ',' - word: NUM_CLASS - single_quote: "'30'" - end_bracket: ) - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: customer_churn - keyword: FROM - quoted_literal: "'training-job-customer-churn-v4'" - keyword: FUNCTION - object_reference: naked_identifier: customer_churn_predict - bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: float - comma: ',' - data_type: keyword: float - end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-ML'" - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: remote_customer_churn - keyword: FUNCTION - object_reference: naked_identifier: remote_fn_customer_churn_predict - bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: float - comma: ',' - data_type: keyword: float - end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: SAGEMAKER - quoted_literal: "'customer-churn-endpoint'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::0123456789012:role/Redshift-ML'" - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: 
naked_identifier: customers_clusters - keyword: FROM - object_reference: naked_identifier: customers - keyword: FUNCTION - object_reference: naked_identifier: customers_cluster - keyword: IAM_ROLE - quoted_literal: "'iam-role-arn'" - keyword: AUTO - keyword: 'OFF' - keyword: MODEL_TYPE - keyword: KMEANS - keyword: PREPROCESSORS - quoted_literal: "'[\n {\n \"ColumnSet\": [ \"*\" ],\n \"Transformers\"\ : [ \"NumericPassthrough\" ]\n }\n]'" - keyword: HYPERPARAMETERS - keyword: DEFAULT - keyword: EXCEPT - bracketed: start_bracket: ( word: K single_quote: "'5'" end_bracket: ) - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_procedure.sql000066400000000000000000000015201503426445100256300ustar00rootroot00000000000000CREATE OR REPLACE PROCEDURE test_sp1(f1 int, f2 varchar(20)) AS $$ DECLARE min_val int; BEGIN DROP TABLE IF EXISTS tmp_tbl; CREATE TEMP TABLE tmp_tbl(id int); INSERT INTO tmp_tbl values (f1),(10001),(10002); SELECT INTO min_val MIN(id) FROM tmp_tbl; RAISE INFO 'min_val = %, f2 = %', min_val, f2; END; $$ LANGUAGE plpgsql SECURITY INVOKER; CREATE OR REPLACE PROCEDURE test_sp2(f1 IN int, f2 INOUT varchar(256), out_var OUT varchar(256)) AS $$ DECLARE loop_var int; BEGIN IF f1 is null OR f2 is null THEN RAISE EXCEPTION 'input cannot be null'; END IF; DROP TABLE if exists my_etl; CREATE TEMP TABLE my_etl(a int, b varchar); FOR loop_var IN 1..f1 LOOP insert into my_etl values (loop_var, f2); f2 := f2 || '+' || f2; END LOOP; SELECT INTO out_var count(*) from my_etl; END; $$ LANGUAGE plpgsql; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_procedure.yml000066400000000000000000000061321503426445100256360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
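# As the parse tree below shows, the dollar-quoted procedure bodies
# ($$ ... $$) survive as single quoted_literal tokens, so the plpgsql
# statements inside them are not broken down into further segments (a
# property of this fixture's output, not a documented guarantee).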
_hash: 0e20869fcec0f6fe55c3878cff6ee41a18f240da24684251b572a59c7522478b file: - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: test_sp1 - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: f1 - data_type: keyword: int - comma: ',' - parameter: f2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE\n min_val int;\nBEGIN\n DROP TABLE IF EXISTS\ \ tmp_tbl;\n CREATE TEMP TABLE tmp_tbl(id int);\n INSERT INTO tmp_tbl\ \ values (f1),(10001),(10002);\n SELECT INTO min_val MIN(id) FROM tmp_tbl;\n\ \ RAISE INFO 'min_val = %, f2 = %', min_val, f2;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: test_sp2 - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: f1 - keyword: IN - data_type: keyword: int - comma: ',' - parameter: f2 - keyword: INOUT - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '256' end_bracket: ) - comma: ',' - parameter: out_var - keyword: OUT - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '256' end_bracket: ) - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$\nDECLARE\n loop_var int;\nBEGIN\n IF f1 is null OR f2\ \ is null THEN\n RAISE EXCEPTION 'input cannot be null';\n END IF;\n\ \ DROP TABLE if exists my_etl;\n CREATE TEMP TABLE my_etl(a int, b varchar);\n\ \ FOR loop_var IN 1..f1 LOOP\n insert into my_etl values (loop_var,\ \ f2);\n f2 := f2 || '+' || f2;\n END LOOP;\n SELECT INTO out_var\ \ count(*) from my_etl;\nEND;\n$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_rls_policy.sql000066400000000000000000000003361503426445100260230ustar00rootroot00000000000000CREATE RLS POLICY policy_concerts WITH (catgroup VARCHAR(10)) USING (catgroup = 'Concerts'); CREATE RLS POLICY policy_name WITH (foo VARCHAR(10), bar DECIMAL(10, 2)) AS relation_alias USING (bar >= 12 AND foo = 'user1'); sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_rls_policy.yml000066400000000000000000000052011503426445100260210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d5992076e54d3bc15cf54e4af1ec8468d24bb9616c68c394de8b58f31036df4e file: - statement: create_rls_policy_statement: - keyword: CREATE - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_concerts - keyword: WITH - bracketed: start_bracket: ( column_reference: naked_identifier: catgroup data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: catgroup comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Concerts'" end_bracket: ) - statement_terminator: ; - statement: create_rls_policy_statement: - keyword: CREATE - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_name - keyword: WITH - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: bar - data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - keyword: AS - alias_expression: naked_identifier: relation_alias - keyword: USING - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: bar - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '12' - binary_operator: AND - column_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'user1'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_schema.sql000066400000000000000000000015331503426445100251040ustar00rootroot00000000000000CREATE SCHEMA s1; CREATE SCHEMA IF NOT EXISTS s1; CREATE SCHEMA s1 AUTHORIZATION dwuser; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA UNLIMITED; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA UNLIMITED; CREATE SCHEMA AUTHORIZATION dwuser; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA UNLIMITED; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_schema.yml000066400000000000000000000122541503426445100251100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
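# As the tree below illustrates, QUOTA clauses parse as a numeric_literal
# plus a unit keyword (MB, GB, or TB), including fractional sizes such as
# QUOTA 0.1 TB, while QUOTA UNLIMITED parses as two bare keywords.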
_hash: 5884da012d93c06f3600f3449b67610e88369ece73a415609623e7b8d6781281 file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: 
create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_table.sql000066400000000000000000000057551503426445100247450ustar00rootroot00000000000000CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) NOT NULL ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) GENERATED BY DEFAULT AS IDENTITY (1, 1) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5), col3 VARCHAR(5), col4 VARCHAR(5), col5 VARCHAR(5), col6 VARCHAR(5) ) DISTKEY (col1) COMPOUND SORTKEY (col4, col5, col6) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) REFERENCES t2 (col1) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE IF NOT EXISTS t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TEMPORARY TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TEMP TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE LOCAL TEMPORARY TABLE t1 ( col1 INTEGER UNIQUE, col2 VARCHAR(5) ) BACKUP YES ; CREATE TEMPORARY TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) BACKUP NO DISTKEY(col1) SORTKEY(col1, col2) ; CREATE TABLE t1 ( col1 INTEGER ENCODE AZ64 PRIMARY KEY, col2 VARCHAR(5) ENCODE TEXT255 ) DISTKEY(col1) SORTKEY AUTO DISTSTYLE EVEN ; CREATE TABLE schema1.t1 ( col1 INTEGER ENCODE AZ64 PRIMARY KEY, col2 VARCHAR(5) ENCODE TEXT255, col3 VARCHAR(5) COLLATE CASE_SENSITIVE, col3 VARCHAR(5) COLLATE CASE_INSENSITIVE ) ; CREATE TABLE UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , UNIQUE (col1) ) DISTKEY(col1) COMPOUND SORTKEY(col1, col2); CREATE TABLE UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , PRIMARY KEY (col1) ) DISTKEY(col1) INTERLEAVED SORTKEY (col1, col2); CREATE TEMP TABLE IF NOT EXISTS UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , FOREIGN KEY (col3) REFERENCES t2 (col5) ) ; CREATE TEMP TABLE t1 (LIKE schema1.t2); CREATE TEMP TABLE t1 (LIKE schema1.t2 INCLUDING DEFAULTS); CREATE TABLE t1 (LIKE schema1.t2 EXCLUDING DEFAULTS); CREATE TABLE some_schema.example_table ( LIKE some_schema.another_table INCLUDING DEFAULTS , LIKE some_schema.next_table EXCLUDING DEFAULTS ); CREATE TABLE some_schema.example_table ( LIKE some_schema.another_table INCLUDING DEFAULTS , col_name VARCHAR(5) ); CREATE TABLE some_table ( some_column INTEGER NOT NULL DEFAULT 1 ); CREATE TABLE IdentityColumn_demo ( col1 BIGINT IDENTITY ); CREATE TABLE IdentityColumnGeneratedByDefault_demo ( col1 BIGINT GENERATED BY DEFAULT AS IDENTITY ); CREATE TABLE IdentityColumnNotNull_demo ( col1 BIGINT IDENTITY NOT NULL ); CREATE TABLE IdentityColumnGeneratedByDefaultNotNull_demo ( col1 BIGINT GENERATED BY DEFAULT AS IDENTITY NOT NULL ); create table public.t1 ( c1 int, c2 int, c3 int, unique (c1, c2) ); create table public.t2 ( c1 int, c2 int, c3 int, foreign key (c1, 
c2) references public.t1 (c1, c2) ); create table test(col1 varchar(max)); sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_table.yml000066400000000000000000000650041503426445100247400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: adab8d47461f5a8bd21c56e7de3380e8825d5c7efa3a236f8a7c9f2700f8697b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col4 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col5 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col6 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: 
start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col4 - comma: ',' - column_reference: naked_identifier: col5 - comma: ',' - column_reference: naked_identifier: col6 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: t2 bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - 
keyword: LOCAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - keyword: BACKUP - keyword: 'YES' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_attribute_segment: - keyword: ENCODE - keyword: AZ64 - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: ENCODE - keyword: TEXT255 - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - keyword: AUTO - keyword: DISTSTYLE - keyword: EVEN - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: schema1 - dot: . 
- naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_attribute_segment: - keyword: ENCODE - keyword: AZ64 - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: ENCODE - keyword: TEXT255 - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: COLLATE - keyword: CASE_SENSITIVE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: COLLATE - keyword: CASE_INSENSITIVE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: 
UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col3 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( column_reference: naked_identifier: col5 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 like_option_segment: - keyword: EXCLUDING - keyword: DEFAULTS end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: example_table - bracketed: - start_bracket: ( - keyword: LIKE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: another_table - like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS - comma: ',' - keyword: LIKE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: next_table - like_option_segment: - keyword: EXCLUDING - keyword: DEFAULTS - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: example_table - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: some_schema - dot: . 
- naked_identifier: another_table like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS comma: ',' column_reference: naked_identifier: col_name data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_reference: naked_identifier: some_column data_type: keyword: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' column_attribute_segment: keyword: DEFAULT expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumn_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: keyword: IDENTITY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnGeneratedByDefault_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnNotNull_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: keyword: IDENTITY column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnGeneratedByDefaultNotNull_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: public - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c2 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c3 - data_type: keyword: int - comma: ',' - table_constraint: keyword: unique bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c2 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c3 - data_type: keyword: int - comma: ',' - table_constraint: - keyword: foreign - keyword: key - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - keyword: references - table_reference: - naked_identifier: public - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( max_literal: max end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_table_as.sql000066400000000000000000000027721503426445100254240ustar00rootroot00000000000000CREATE TEMP TABLE t1 AS ( SELECT something FROM t2 );
CREATE TEMP TABLE t1 AS SELECT something FROM t2 ;
CREATE TEMPORARY TABLE t1 AS SELECT something FROM t2 ;
CREATE TABLE t1 AS ( SELECT something FROM t2 );
CREATE TABLE t1 AS SELECT something FROM t2 ;
CREATE LOCAL TEMP TABLE t1 AS SELECT something FROM t2 ;
CREATE TEMP TABLE t1 SORTKEY(col1) AS SELECT col1 FROM t2 ;
CREATE TABLE t1 SORTKEY(col1) AS SELECT col1 FROM t2 ;
CREATE TABLE t1 DISTKEY(col1) AS SELECT col1 FROM t2 ;
CREATE TABLE t1 DISTKEY(col1) SORTKEY(col1) AS SELECT col1 FROM t2 ;
CREATE TABLE t1 DISTSTYLE EVEN AS SELECT col1 FROM t2 ;
CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) SORTKEY(col1) AS SELECT col1 FROM t2 ;
CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) COMPOUND SORTKEY(col1, col2) AS SELECT col1 , col2 FROM t2 ;
CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) INTERLEAVED SORTKEY(col1, col2) AS SELECT col1 , col2 FROM t2 ;
CREATE TABLE t1 (col1, col2) AS SELECT col1 , col2 FROM t2 ;
CREATE TABLE t1 (col1, col2) BACKUP YES AS SELECT col1 , col2 FROM t2 ;
CREATE TABLE t1 (col1, col2) BACKUP NO AS SELECT col1 , col2 FROM t2 ;
CREATE TABLE t1 (col1, col2) AS ( SELECT col1 FROM tbl2 );
CREATE TABLE t1 (col1, col2) BACKUP NO DISTSTYLE ALL DISTKEY(col1) INTERLEAVED SORTKEY(col1, col2) AS ( SELECT col1 FROM tbl2 UNION ALL SELECT col2 FROM tbl3 ); sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_table_as.yml000066400000000000000000000410241503426445100254170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b8f22c87a7c42b555349270f25b8e7a4678df7ba7ffbd79b0e85b983148a8bb5 file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: LOCAL - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: SORTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: SORTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: DISTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: EVEN - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY 
- bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'YES' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY - 
bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl3 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_user.sql000066400000000000000000000023641503426445100246250ustar00rootroot00000000000000CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b';
CREATE USER admin WITH PASSWORD 'sha256|Mypassword1';
CREATE USER lazy PASSWORD DISABLE;
CREATE USER lazy WITH PASSWORD DISABLE;
CREATE USER "lazy" WITH PASSWORD DISABLE;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CREATEDB;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' NOCREATEDB;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CREATEUSER;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' NOCREATEUSER;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SYSLOG ACCESS RESTRICTED;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SYSLOG ACCESS UNRESTRICTED;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' IN GROUP group_1;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' IN GROUP group_1, group_2;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' VALID UNTIL '2017-06-10';
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CONNECTION LIMIT 30;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CONNECTION LIMIT UNLIMITED;
CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SESSION TIMEOUT 120; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_user.yml000066400000000000000000000123521503426445100246250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 23591d862ca6e8701f9fc3b5b60ec3c5ee9c1f571020175eb83505e3442e6885 file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: admin - keyword: WITH - keyword: PASSWORD - quoted_literal: "'sha256|Mypassword1'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: lazy - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: lazy - keyword: WITH - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: '"lazy"' - keyword: WITH - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CREATEDB - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: NOCREATEDB - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CREATEUSER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: NOCREATEUSER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SYSLOG - keyword: ACCESS - keyword: RESTRICTED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SYSLOG - keyword: ACCESS - keyword: UNRESTRICTED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: IN - keyword: GROUP - object_reference: naked_identifier: group_1 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: IN - keyword: GROUP - object_reference: naked_identifier: group_1 - comma: ',' - object_reference: naked_identifier: group_2 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: VALID - keyword: UNTIL - quoted_literal: "'2017-06-10'" - statement_terminator: ; - statement: create_user_statement: - keyword: 
CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '30' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CONNECTION - keyword: LIMIT - keyword: UNLIMITED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SESSION - keyword: TIMEOUT - numeric_literal: '120' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_view.sql000066400000000000000000000001621503426445100246130ustar00rootroot00000000000000create view sales_vw as
select * from public.sales
union all
select * from spectrum.sales
with no schema binding; sqlfluff-3.4.2/test/fixtures/dialects/redshift/create_view.yml000066400000000000000000000034521503426445100246220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f74e44d078baf289d9899d89b523de92e3b9bbbc1e17970c385f14647931d931 file: statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: sales_vw - keyword: as - set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: sales - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: spectrum - dot: .
- naked_identifier: sales - with_no_schema_binding_clause: - keyword: with - keyword: 'no' - keyword: schema - keyword: binding statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/dateparts.sql000066400000000000000000000065721503426445100243160ustar00rootroot00000000000000select date_part(millennium, foo) from tbl1;
select date_part(millennia, foo) from tbl1;
select date_part(mil, foo) from tbl1;
select date_part(mils, foo) from tbl1;
select date_part(century, foo) from tbl1;
select date_part(centuries, foo) from tbl1;
select date_part(c, foo) from tbl1;
select date_part(cent, foo) from tbl1;
select date_part(cents, foo) from tbl1;
select date_part(decade, foo) from tbl1;
select date_part(decades, foo) from tbl1;
select date_part(dec, foo) from tbl1;
select date_part(decs, foo) from tbl1;
select extract(epoch from foo) from tbl1;
select extract(year from foo) from tbl1;
select extract(years from foo) from tbl1;
select extract(y from foo) from tbl1;
select extract(yr from foo) from tbl1;
select extract(yrs from foo) from tbl1;
select extract(quarter from foo) from tbl1;
select extract(quarters from foo) from tbl1;
select extract(qtr from foo) from tbl1;
select extract(qtrs from foo) from tbl1;
select extract(month from foo) from tbl1;
select extract(months from foo) from tbl1;
select extract(mon from foo) from tbl1;
select extract(mons from foo) from tbl1;
select extract(week from foo) from tbl1;
select extract(weeks from foo) from tbl1;
select extract(w from foo) from tbl1;
select extract(dayofweek from foo) from tbl1;
select extract(dow from foo) from tbl1;
select extract(dw from foo) from tbl1;
select extract(weekday from foo) from tbl1;
select extract(dayofyear from foo) from tbl1;
select extract(doy from foo) from tbl1;
select extract(dy from foo) from tbl1;
select extract(yearday from foo) from tbl1;
select extract(day from foo) from tbl1;
select extract(days from foo) from tbl1;
select extract(d from foo) from tbl1;
select extract(hour from foo) from tbl1;
select extract(hours from foo) from tbl1;
select extract(h from foo) from tbl1;
select extract(hr from foo) from tbl1;
select extract(hrs from foo) from tbl1;
select extract(minute from foo) from tbl1;
select extract(minutes from foo) from tbl1;
select extract(m from foo) from tbl1;
select extract(min from foo) from tbl1;
select extract(mins from foo) from tbl1;
select extract(second from foo) from tbl1;
select extract(seconds from foo) from tbl1;
select dateadd(s, 123, foo) from tbl1;
select dateadd(sec, 123, foo) from tbl1;
select dateadd(secs, 123, foo) from tbl1;
select dateadd(millisecond, 123, foo) from tbl1;
select dateadd(milliseconds, 123, foo) from tbl1;
select dateadd(ms, 123, foo) from tbl1;
select dateadd(msec, 123, foo) from tbl1;
select dateadd(msecs, 123, foo) from tbl1;
select dateadd(msecond, 123, foo) from tbl1;
select dateadd(mseconds, 123, foo) from tbl1;
select dateadd(millisec, 123, foo) from tbl1;
select dateadd(millisecs, 123, foo) from tbl1;
select dateadd(millisecon, 123, foo) from tbl1;
select dateadd(microsecond, 123, foo) from tbl1;
select dateadd(microseconds, 123, foo) from tbl1;
select datediff(microsec, foo, bar) from tbl1;
select datediff(microsecs, foo, bar) from tbl1;
select datediff(microsecond, foo, bar) from tbl1;
select datediff(usecond, foo, bar) from tbl1;
select datediff(useconds, foo, bar) from tbl1;
select datediff(us, foo, bar) from tbl1;
select datediff(usec, foo, bar) from tbl1;
select datediff(usecs, foo, bar) from tbl1;
select datediff(timezone, foo, bar) from
tbl1; select datediff(timezone_hour, foo, bar) from tbl1; select datediff(timezone_minute, foo, bar) from tbl1; sqlfluff-3.4.2/test/fixtures/dialects/redshift/dateparts.yml000066400000000000000000001656121503426445100243230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc64b416237cfa5f1ee9444a4f36ff10bbf0cfe9f52009a69c91e22bcafc10e7 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: millennium comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: millennia comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: mil comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: mils comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: century comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: centuries comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: c comma: ',' expression: column_reference: 
naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: cent comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: cents comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: decade comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: decades comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: dec comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: decs comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: epoch keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: year keyword: from expression: column_reference: naked_identifier: foo 
end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: years keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: y keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: yr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: yrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: quarter keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: quarters keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: qtr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: qtrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) 
from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: month keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: months keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: mon keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: mons keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: week keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: weeks keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: w keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: dayofweek keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: 
from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: dow keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: dw keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: weekday keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: dayofyear keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: doy keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: dy keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: yearday keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: day keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: days keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: d keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: hour keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: hours keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: h keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: hr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: hrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: minute keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: minutes keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: m keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: min keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: mins keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: second keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: seconds keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: s - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: sec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: 
foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: secs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: millisecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: milliseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: ms - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: msec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: msecs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: msecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from 
from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: mseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: millisec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: millisecs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: millisecon - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: microseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: microsec - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from 
from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: microsecs - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: usecond - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: useconds - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: us - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: usec - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: usecs - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: 
column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: timezone - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: timezone_hour - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: timezone_minute - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/datetime_cast.sql000066400000000000000000000020461503426445100251270ustar00rootroot00000000000000-- redshift_datetime_cast.sql /* Example that casts a column to several DATETIME types */ -- from https://github.com/sqlfluff/sqlfluff/issues/2649 SELECT b::DATETIME FROM a; -- DATE SELECT b::DATE FROM a; -- TIME SELECT b::TIME, c::TIME WITH TIME ZONE, d::TIME WITHOUT TIME ZONE FROM a; -- TIMETZ SELECT b::TIMETZ FROM a; -- TIMESTAMP SELECT b::TIMESTAMP, c::TIMESTAMP WITHOUT TIME ZONE, d::TIMESTAMP WITH TIME ZONE FROM a; -- TIMESTAMPTZ SELECT b::TIMESTAMPTZ FROM a; --- AT TIME ZONE SELECT raw_data.status::VARCHAR AS status, raw_data.start::TIMESTAMPTZ AT TIME ZONE 'UTC' AS started_at, raw_data."end"::TIMESTAMPTZ AT TIME ZONE 'UTC' AS ended_at, raw_data.created::TIMESTAMPTZ AT TIME ZONE 'UTC' AS created_at, raw_data.identifier[0].value::VARCHAR AS communication_request_fhir_reference_origin, extension_extraction.database_reference, GETDATE() AT TIME ZONE 'UTC', (GETDATE() AT TIME ZONE 'UTC') AT TIME ZONE 'AEST', ((GETDATE() AT TIME ZONE 'UTC') AT TIME ZONE 'AEST') FROM raw_data sqlfluff-3.4.2/test/fixtures/dialects/redshift/datetime_cast.yml000066400000000000000000000266041503426445100251370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0fe134e46f0378b700a5ab7021faad4047728160aa0b3ac5dd68406e72990322 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: DATETIME from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: DATE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIME - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIME - keyword: WITH - keyword: TIME - keyword: ZONE - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: d casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIME - keyword: WITHOUT - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMETZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: d casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . 
- naked_identifier: status casting_operator: '::' data_type: keyword: VARCHAR alias_expression: alias_operator: keyword: AS naked_identifier: status - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: start casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: alias_operator: keyword: AS naked_identifier: started_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: raw_data dot: . quoted_identifier: '"end"' casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: alias_operator: keyword: AS naked_identifier: ended_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: created casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: alias_operator: keyword: AS naked_identifier: created_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: identifier array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: value casting_operator: '::' data_type: keyword: VARCHAR alias_expression: alias_operator: keyword: AS naked_identifier: communication_request_fhir_reference_origin - comma: ',' - select_clause_element: column_reference: - naked_identifier: extension_extraction - dot: . 
- naked_identifier: database_reference - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" end_bracket: ) time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'AEST'" - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" end_bracket: ) time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'AEST'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_data sqlfluff-3.4.2/test/fixtures/dialects/redshift/deallocate.sql000066400000000000000000000000771503426445100244200ustar00rootroot00000000000000DEALLOCATE statement_name; DEALLOCATE PREPARE statement_name; sqlfluff-3.4.2/test/fixtures/dialects/redshift/deallocate.yml000066400000000000000000000013311503426445100244140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b08b91ed4e5176aca16085aaaddbc21054d16b629faa8a0914c38644f8829c6 file: - statement: deallocate_statement: keyword: DEALLOCATE object_reference: naked_identifier: statement_name - statement_terminator: ; - statement: deallocate_statement: - keyword: DEALLOCATE - keyword: PREPARE - object_reference: naked_identifier: statement_name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/declare.sql000066400000000000000000000003661503426445100237230ustar00rootroot00000000000000declare curs1 cursor for select col1, col2 from tbl1; declare lollapalooza cursor for select eventname, starttime, pricepaid/qtysold as costperticket, qtysold from sales, event where sales.eventid = event.eventid and eventname = 'lollapalooza'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/declare.yml000066400000000000000000000060761503426445100237310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aec9b21db511704d87981c9569cebcfe8fc258f47804deec8ce5fc335d902607
file:
- statement:
    declare_statement:
    - keyword: declare
    - object_reference:
        naked_identifier: curs1
    - keyword: cursor
    - keyword: for
    - select_statement:
        select_clause:
        - keyword: select
        - select_clause_element:
            column_reference:
              naked_identifier: col1
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: col2
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl1
- statement_terminator: ;
- statement:
    declare_statement:
    - keyword: declare
    - object_reference:
        naked_identifier: lollapalooza
    - keyword: cursor
    - keyword: for
    - select_statement:
        select_clause:
        - keyword: select
        - select_clause_element:
            column_reference:
              naked_identifier: eventname
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: starttime
        - comma: ','
        - select_clause_element:
            expression:
            - column_reference:
                naked_identifier: pricepaid
            - binary_operator: /
            - column_reference:
                naked_identifier: qtysold
            alias_expression:
              alias_operator:
                keyword: as
              naked_identifier: costperticket
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: qtysold
        from_clause:
        - keyword: from
        - from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: sales
        - comma: ','
        - from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: event
        where_clause:
          keyword: where
          expression:
          - column_reference:
            - naked_identifier: sales
            - dot: .
            - naked_identifier: eventid
          - comparison_operator:
              raw_comparison_operator: '='
          - column_reference:
            - naked_identifier: event
            - dot: .
            - naked_identifier: eventid
          - binary_operator: and
          - column_reference:
              naked_identifier: eventname
          - comparison_operator:
              raw_comparison_operator: '='
          - quoted_literal: "'lollapalooza'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/desc_datashare.sql
DESC DATASHARE salesshare;
DESC DATASHARE salesshare of ACCOUNT '123456789012' NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';
DESC DATASHARE salesshare of NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';
sqlfluff-3.4.2/test/fixtures/dialects/redshift/desc_datashare.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 89c3b0043561c04b1c690ba8ce291af5883281c4757f714acf7d2ecabba21a67
file:
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
- statement_terminator: ;
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
    - keyword: of
    - keyword: ACCOUNT
    - quoted_literal: "'123456789012'"
    - keyword: NAMESPACE
    - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'"
- statement_terminator: ;
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
    - keyword: of
    - keyword: NAMESPACE
    - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/detach_rls_policy.sql
DETACH RLS POLICY policy_concerts ON tickit_category_redshift FROM ROLE analyst, ROLE dbadmin;
DETACH RLS POLICY policy_concerts ON TABLE tickit_category_redshift FROM ROLE role1, user1;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/detach_rls_policy.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2acff9b85e6066bdd75036b61be3b4bbf875e9a6871849e701e489e9eeffcdf6
file:
- statement:
    manage_rls_policy_statement:
    - keyword: DETACH
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tickit_category_redshift
    - keyword: FROM
    - keyword: ROLE
    - role_reference:
        naked_identifier: analyst
    - comma: ','
    - keyword: ROLE
    - role_reference:
        naked_identifier: dbadmin
- statement_terminator: ;
- statement:
    manage_rls_policy_statement:
    - keyword: DETACH
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: 'ON'
    - keyword: TABLE
    - table_reference:
        naked_identifier: tickit_category_redshift
    - keyword: FROM
    - keyword: ROLE
    - role_reference:
        naked_identifier: role1
    - comma: ','
    - role_reference:
        naked_identifier: user1
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_datashare.sql
DROP DATASHARE datashare_name;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_datashare.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
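# NOTE (illustrative, not part of the generated fixture): DESC, DETACH and
# DROP DATASHARE above all act on an already-existing datashare. A typical
# producer-side lifecycle around those statements, assuming the fixture's
# salesshare name and a hypothetical public schema, looks like:
#
#   CREATE DATASHARE salesshare;
#   ALTER DATASHARE salesshare ADD SCHEMA public;
#   GRANT USAGE ON DATASHARE salesshare TO NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';
#   DROP DATASHARE salesshare;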
_hash: 3b8ae2d1d669764b83baf7064840fefa0fdbb1d7be0cdfdbc7af34ab51d3eae9
file:
  statement:
    drop_datashare_statement:
    - keyword: DROP
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: datashare_name
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_procedure.sql
DROP PROCEDURE quarterly_revenue(volume INOUT bigint, at_price IN numeric,result OUT int);
DROP PROCEDURE quarterly_revenue(volume bigint, at_price numeric,result int);
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_procedure.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7ab6533c8e6ad875d468410a3ec892f0d3f49adbabad9a52194178d5898d77e7
file:
- statement:
    drop_procedure_statement:
    - keyword: DROP
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: quarterly_revenue
    - procedure_parameter_list:
        bracketed:
        - start_bracket: (
        - parameter: volume
        - keyword: INOUT
        - data_type:
            keyword: bigint
        - comma: ','
        - parameter: at_price
        - keyword: IN
        - data_type:
            keyword: numeric
        - comma: ','
        - parameter: result
        - keyword: OUT
        - data_type:
            keyword: int
        - end_bracket: )
- statement_terminator: ;
- statement:
    drop_procedure_statement:
    - keyword: DROP
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: quarterly_revenue
    - procedure_parameter_list:
        bracketed:
        - start_bracket: (
        - parameter: volume
        - data_type:
            keyword: bigint
        - comma: ','
        - parameter: at_price
        - data_type:
            keyword: numeric
        - comma: ','
        - parameter: result
        - data_type:
            keyword: int
        - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_rls_policy.sql
DROP RLS POLICY policy_concerts;
DROP RLS POLICY IF EXISTS policy_concerts CASCADE;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/drop_rls_policy.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c159af16f7c7f42ae004cae6da4392cf60c21acd1ed7857aad4c199c47866af2
file:
- statement:
    drop_rls_policy_statement:
    - keyword: DROP
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
- statement_terminator: ;
- statement:
    drop_rls_policy_statement:
    - keyword: DROP
    - keyword: RLS
    - keyword: POLICY
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: CASCADE
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/execute.sql
EXECUTE statement_name;
EXECUTE statement_name (1);
EXECUTE statement_name (1, 'foo');
sqlfluff-3.4.2/test/fixtures/dialects/redshift/execute.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d589e9c9eacb22e8d048d8d63422f89d12f83e18b1b909378c12ef2110b26834
file:
- statement:
    execute_statement:
      keyword: EXECUTE
      object_reference:
        naked_identifier: statement_name
- statement_terminator: ;
- statement:
    execute_statement:
      keyword: EXECUTE
      object_reference:
        naked_identifier: statement_name
      bracketed:
        start_bracket: (
        expression:
          numeric_literal: '1'
        end_bracket: )
- statement_terminator: ;
- statement:
    execute_statement:
      keyword: EXECUTE
      object_reference:
        naked_identifier: statement_name
      bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '1'
      - comma: ','
      - expression:
          quoted_literal: "'foo'"
      - end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/fetch.sql
fetch next from curs1;
fetch all from curs1;
fetch forward 100 from curs1;
fetch forward all from curs1;
sqlfluff-3.4.2/test/fixtures/dialects/redshift/fetch.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
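# NOTE (illustrative, not part of the generated fixture): fetch.sql above
# reads from the kind of cursor that declare.sql declared earlier in this
# directory. In Redshift both normally run inside a single transaction, and
# the cursor is released with CLOSE (or implicitly at COMMIT). A minimal
# sketch reusing the fixtures' curs1/tbl1 names:
#
#   begin;
#   declare curs1 cursor for select col1, col2 from tbl1;
#   fetch forward 100 from curs1;
#   close curs1;
#   commit;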
_hash: cc2d7173c01b14c8678d87572594149d6be691a41a16eb9cb3246d9cd5999561 file: - statement: fetch_statement: - keyword: fetch - keyword: next - keyword: from - object_reference: naked_identifier: curs1 - statement_terminator: ; - statement: fetch_statement: - keyword: fetch - keyword: all - keyword: from - object_reference: naked_identifier: curs1 - statement_terminator: ; - statement: fetch_statement: - keyword: fetch - keyword: forward - numeric_literal: '100' - keyword: from - object_reference: naked_identifier: curs1 - statement_terminator: ; - statement: fetch_statement: - keyword: fetch - keyword: forward - keyword: all - keyword: from - object_reference: naked_identifier: curs1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/grant_datashare_usage.sql000066400000000000000000000003521503426445100266320ustar00rootroot00000000000000GRANT USAGE ON DATASHARE salesshare TO ACCOUNT '123456789012'; GRANT USAGE ON DATASHARE salesshare TO ACCOUNT '123456789012' VIA DATA CATALOG; GRANT USAGE ON DATASHARE salesshare TO NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/grant_datashare_usage.yml000066400000000000000000000025401503426445100266350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 91aa81bf6c0558b19ae53f3fea0af24a78e4e8e9e8a4275f6fc70f98583b49f7 file: - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: ACCOUNT - quoted_literal: "'123456789012'" - statement_terminator: ; - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: ACCOUNT - quoted_literal: "'123456789012'" - keyword: VIA - keyword: DATA - keyword: CATALOG - statement_terminator: ; - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: NAMESPACE - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/insert_into.sql000066400000000000000000000006401503426445100246540ustar00rootroot00000000000000INSERT INTO s1.t1 ( SELECT col1, col2, col3 FROM testtable ); INSERT INTO s1.t1 (col1, col2) ( select col1, col2, col3 from testtable ); INSERT INTO schema1.t1 SELECT col1, col2, col3 FROM testtable ; INSERT INTO schema1.t1 DEFAULT VALUES ; INSERT INTO s1.t1 (col1, col2) VALUES ('V1', 1), ('V2', 2) ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/insert_into.yml000066400000000000000000000105271503426445100246630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
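# NOTE (illustrative, not part of the generated fixture): GRANT USAGE ON
# DATASHARE above is the producer half of data sharing; on the consumer
# cluster the granted share is then surfaced as a database. A sketch,
# reusing the fixture's share name and namespace GUID (the sales_db name is
# hypothetical):
#
#   CREATE DATABASE sales_db FROM DATASHARE salesshare
#       OF NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';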
_hash: e23fd7e56b08ac7f916d641810323b6ca4f3a87517db0bb0c80eb44b2b4fb2d7 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t1 - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t1 - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'V1'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'V2'" - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/lock_table.sql000066400000000000000000000001211503426445100244100ustar00rootroot00000000000000lock event, sales; LOCK TABLE schema_name.table_name1, schema_name.table_name2; sqlfluff-3.4.2/test/fixtures/dialects/redshift/lock_table.yml000066400000000000000000000016751503426445100244310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
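# NOTE (illustrative, not part of the generated fixture): LOCK takes an
# exclusive lock that is only released when the enclosing transaction ends,
# so the statements in lock_table.sql above would normally sit inside an
# explicit transaction block:
#
#   begin;
#   lock event, sales;
#   -- ... modify event/sales while holding the lock ...
#   end;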
_hash: d695887ab9247049a3b4f0e904609c3e52b445016e429664b97e11c1758cbae7 file: - statement: lock_table_statement: - keyword: lock - table_reference: naked_identifier: event - comma: ',' - table_reference: naked_identifier: sales - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: - naked_identifier: schema_name - dot: . - naked_identifier: table_name1 - comma: ',' - table_reference: - naked_identifier: schema_name - dot: . - naked_identifier: table_name2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/merge.sql000066400000000000000000000006221503426445100234160ustar00rootroot00000000000000MERGE INTO target USING source ON target.id = source.id WHEN MATCHED THEN UPDATE SET id = source.id, name = source.name WHEN NOT MATCHED THEN INSERT VALUES (source.id, source.name); MERGE INTO target USING source ON target.id = source.id WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT VALUES (source.id, source.name); MERGE INTO target USING source ON target.id = source.id REMOVE DUPLICATES; sqlfluff-3.4.2/test/fixtures/dialects/redshift/merge.yml000066400000000000000000000113261503426445100234230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f5b35d6ce26a76c12d950d88ae0c1ae42fe01ac8f34e76949783be5c40fd6463 file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: target - keyword: USING - table_reference: naked_identifier: source - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: target - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source - dot: . - naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source - dot: . - naked_identifier: id - comma: ',' - set_clause: - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source - dot: . - naked_identifier: name merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: source - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: source - dot: . - naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: target - keyword: USING - table_reference: naked_identifier: source - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: target - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source - dot: . 
- naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: source - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: source - dot: . - naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: target - keyword: USING - table_reference: naked_identifier: source - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: target - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source - dot: . - naked_identifier: id - keyword: REMOVE - keyword: DUPLICATES - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/object_unpivot.sql000066400000000000000000000011641503426445100253530ustar00rootroot00000000000000WITH example_data AS ( SELECT 10 AS shop_id , json_parse('{"apple_count": 2, "orange_count": 6}') AS inventory UNION ALL SELECT 20 AS shop_id , json_parse('{"pear_count": 10, "other_data": 42}') AS inventory UNION ALL SELECT 30 AS shop_id , json_parse('{"apple_count": 3, "lemon_count": 5}') AS inventory ) SELECT shop_id , key , value FROM example_data ed, UNPIVOT ed.inventory AS value AT key; SELECT attr as attribute_name, val as object_value FROM customer_orders_lineitem c, c.c_orders AS o, UNPIVOT o AS val AT attr WHERE c_custkey = 9451; sqlfluff-3.4.2/test/fixtures/dialects/redshift/object_unpivot.yml000066400000000000000000000151151503426445100253560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 596220583d11a390ef2206e51a70705f163a936c8a699e5dc5b394aed3d61f12 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: example_data keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{\"apple_count\": 2, \"orange_count\"\ : 6}'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '20' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{\"pear_count\": 10, \"other_data\": 42}'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '30' alias_expression: alias_operator: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{\"apple_count\": 3, \"lemon_count\":\ \ 5}'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: inventory end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: shop_id - comma: ',' - select_clause_element: column_reference: naked_identifier: key - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_data alias_expression: naked_identifier: ed - comma: ',' - from_expression: from_expression_element: table_expression: object_unpivoting: - keyword: UNPIVOT - object_reference: - naked_identifier: ed - dot: . - naked_identifier: inventory - keyword: AS - naked_identifier: value - keyword: AT - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: attr alias_expression: alias_operator: keyword: as naked_identifier: attribute_name - comma: ',' - select_clause_element: column_reference: naked_identifier: val alias_expression: alias_operator: keyword: as naked_identifier: object_value from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . 
- naked_identifier: c_orders alias_expression: alias_operator: keyword: AS naked_identifier: o - comma: ',' - from_expression: from_expression_element: table_expression: object_unpivoting: - keyword: UNPIVOT - object_reference: naked_identifier: o - keyword: AS - naked_identifier: val - keyword: AT - naked_identifier: attr where_clause: keyword: WHERE expression: column_reference: naked_identifier: c_custkey comparison_operator: raw_comparison_operator: '=' numeric_literal: '9451' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/pattern_match_expressions.sql000066400000000000000000000030501503426445100276100ustar00rootroot00000000000000-- redshift_pattern_match_expressions.sql /* examples of pattern match expressions ( https://docs.aws.amazon.com/redshift/latest/dg/pattern-matching-conditions.html ) that are supported in redshift. */ -- LIKE/ILIKE expressions supported SELECT * FROM animals WHERE family LIKE '%ursidae%'; SELECT * FROM animals WHERE family NOT LIKE '%ursidae%'; SELECT * FROM animals WHERE genus ILIKE '%ursus%'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%'; SELECT * FROM animals WHERE family LIKE '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%' ESCAPE '\\'; SELECT COALESCE(family LIKE '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- SIMILAR TO expressions supported SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE family NOT SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE genus SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%' ESCAPE '\\'; SELECT COALESCE(family SIMILAR TO '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- From https://github.com/sqlfluff/sqlfluff/issues/2722 WITH cleaned_bear_financial_branch AS ( SELECT branch_id, TO_NUMBER(CASE WHEN honey_numerical_code SIMILAR TO '[0-9]{0,7}.?[0-9]{0,2}' THEN honey_numerical_code ELSE NULL END, '24601') AS honey_numerical_code FROM bear_financial_branch ) SELECT branch_id FROM cleaned_bear_financial_branch LIMIT 10; sqlfluff-3.4.2/test/fixtures/dialects/redshift/pattern_match_expressions.yml000066400000000000000000000316531503426445100276240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2531f4c07b8ef215e826ec7909f923aceef841e9a0e692bb8979860564dc3f05 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: family keyword: LIKE quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: LIKE - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: genus keyword: ILIKE quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cleaned_bear_financial_branch keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: branch_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TO_NUMBER function_contents: bracketed: - start_bracket: ( - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: honey_numerical_code - keyword: SIMILAR - keyword: TO - quoted_literal: "'[0-9]{0,7}.?[0-9]{0,2}'" - keyword: THEN - expression: column_reference: naked_identifier: honey_numerical_code - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - comma: ',' - expression: quoted_literal: "'24601'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: honey_numerical_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_financial_branch end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: branch_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cleaned_bear_financial_branch limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/percentile_cont.sql000066400000000000000000000005131503426445100254730ustar00rootroot00000000000000select dataset_id, (percentile_cont(0.20) within group ( order by tract_percent_below_poverty asc ) over(partition by dataset_id)) as percentile_20, percentile_cont(0.40) within group (order by tract_percent_below_poverty asc) over(partition by dataset_id) as percentile_40 from dataset_with_census sqlfluff-3.4.2/test/fixtures/dialects/redshift/percentile_cont.yml000066400000000000000000000071651503426445100255070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
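# For quick reference, the companion percentile_cont.sql fixture exercises the
# Redshift PERCENTILE_CONT window form sketched below. This is an illustrative
# sketch with placeholder identifiers (some_col, some_group, some_table), not
# a statement from the fixture itself; the generated parse tree below is the
# authoritative record of what is tested.
#
#   select
#       percentile_cont(0.5)
#           within group (order by some_col asc)
#           over (partition by some_group) as p50
#   from some_table;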
_hash: 7f6850662988e19d1e87ca67316859c91011f1d981585f10925c682f454cf8d8 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: dataset_id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: percentile_cont function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.20' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: tract_percent_below_poverty - keyword: asc end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: dataset_id end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: percentile_20 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: percentile_cont function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.40' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: tract_percent_below_poverty - keyword: asc end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: dataset_id end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: percentile_40 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dataset_with_census sqlfluff-3.4.2/test/fixtures/dialects/redshift/pivot.sql000066400000000000000000000024631503426445100234650ustar00rootroot00000000000000-- redshift_pivot.sql /* Examples of SELECT statements that include PIVOT expressions. */ -- Below examples come from -- https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html SELECT * FROM (SELECT partname, price FROM part) PIVOT ( AVG(price) FOR partname IN ('P1', 'P2', 'P3') ); SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) FOR quality IN (1, 2, NULL) ); SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) AS count FOR quality IN (1 AS high, 2 AS low, NULL AS na) ); -- End of AWS-provided examples -- Can do PIVOTs for CTEs WITH bear_diet AS ( SELECT bear_id, bear_species, food_eaten FROM bear_facts ) SELECT * FROM bear_diet PIVOT ( COUNT(*) AS num_ate_food FOR bear_species IN ( 'polar bear', 'brown bear', 'american black bear', 'asian black bear', 'giant panda', 'spectacled bear', 'sloth bear', 'sun bear' ) ); -- Can do Pivots for tables SELECT * FROM orders PIVOT (COUNT(*) FOR color IN ('red', 'blue')); -- Can also alias the pivoted table SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) FOR quality IN (1, 2, NULL) ) AS quality_matrix; sqlfluff-3.4.2/test/fixtures/dialects/redshift/pivot.yml000066400000000000000000000323751503426445100234740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df825f20b64b4289f0d57fd0fe76c66cc8a08c24ba9166f4aaa50bf89e5c2272 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: partname - comma: ',' - select_clause_element: column_reference: naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - keyword: FOR - column_reference: naked_identifier: partname - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'P1'" - comma: ',' - expression: quoted_literal: "'P2'" - comma: ',' - expression: quoted_literal: "'P3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: count - keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: high - comma: ',' - expression: 
numeric_literal: '2' - alias_expression: alias_operator: keyword: AS naked_identifier: low - comma: ',' - expression: null_literal: 'NULL' - alias_expression: alias_operator: keyword: AS naked_identifier: na - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: bear_diet keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: bear_id - comma: ',' - select_clause_element: column_reference: naked_identifier: bear_species - comma: ',' - select_clause_element: column_reference: naked_identifier: food_eaten from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_facts end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_diet from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: num_ate_food - keyword: FOR - column_reference: naked_identifier: bear_species - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'polar bear'" - comma: ',' - expression: quoted_literal: "'brown bear'" - comma: ',' - expression: quoted_literal: "'american black bear'" - comma: ',' - expression: quoted_literal: "'asian black bear'" - comma: ',' - expression: quoted_literal: "'giant panda'" - comma: ',' - expression: quoted_literal: "'spectacled bear'" - comma: ',' - expression: quoted_literal: "'sloth bear'" - comma: ',' - expression: quoted_literal: "'sun bear'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'red'" - comma: ',' - expression: quoted_literal: "'blue'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - 
keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: quality_matrix - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/prepare.sql000066400000000000000000000006161503426445100237600ustar00rootroot00000000000000PREPARE select_statement AS SELECT * FROM table1; PREPARE insert_statement AS INSERT INTO table1 (col1, col2) VALUES (1, 'foo'); PREPARE update_statement AS UPDATE table1 SET col2 = 'bar' WHERE col1 = 1; PREPARE delete_statement AS DELETE FROM table1 WHERE col1 = 1; PREPARE parametrized_statement_1 (int) AS SELECT ($1); PREPARE parametrized_statement_2 (int, character(3)) AS SELECT $1, $2; sqlfluff-3.4.2/test/fixtures/dialects/redshift/prepare.yml000066400000000000000000000104621503426445100237620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 398fef8cc69b64f5b9b0a771f4a8f4369c8587523cdc0d7a78806c24927f21da file: - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: select_statement - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: insert_statement - keyword: AS - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'foo'" - end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: update_statement - keyword: AS - update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: delete_statement - keyword: AS - delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: parametrized_statement_1 - bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: AS - select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: dollar_numeric_literal: $1 end_bracket: ) - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - object_reference: naked_identifier: parametrized_statement_2 - bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: dollar_numeric_literal: $1 - comma: ',' - select_clause_element: dollar_numeric_literal: $2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/qualify.sql000066400000000000000000000011211503426445100237640ustar00rootroot00000000000000SELECT * FROM store_sales ss WHERE ss_sold_time > time '12:00:00' QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 ; SELECT * FROM store_sales ss QUALIFY last_value(ss_item) OVER (PARTITION BY ss_sold_date ORDER BY ss_sold_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) = ss_item ; SELECT * FROM ( SELECT *, last_value(ss_item) OVER (PARTITION BY ss_sold_date ORDER BY ss_sold_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ss_last_item FROM store_sales ss ) WHERE ss_last_item = ss_item ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/qualify.yml000066400000000000000000000165121503426445100240000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
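# For quick reference, the companion qualify.sql fixture covers the QUALIFY
# clause, which filters rows on window function results after they are
# computed. The sketch below is illustrative only and uses placeholder
# identifiers rather than names from the fixture:
#
#   select *
#   from some_sales
#   qualify
#       row_number() over (
#           partition by sold_date order by sales_price desc
#       ) <= 2;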
_hash: 2d25b00a2ecd112d1819ea5a9d17972e783c069731a71a416740e92dcfea1b4b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: ss_sold_time comparison_operator: raw_comparison_operator: '>' datetime_literal: datetime_type_identifier: keyword: time quoted_literal: "'12:00:00'" qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sales_price - keyword: DESC end_bracket: ) comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: last_value function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ss_item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sold_time - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: ss_item - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last_value function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ss_item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sold_time - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) alias_expression: naked_identifier: ss_last_item from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ss_last_item - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ss_item - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select.sql000066400000000000000000000000601503426445100235720ustar00rootroot00000000000000SELECT foo, baz, SUM(*) FROM bar GROUP BY ALL ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select.yml000066400000000000000000000023761503426445100236100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 191a176565af15c9e91a861ae465164810e0dcefb4b688d466cb41c1d109a0ef file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: baz - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_dateadd.sql000066400000000000000000000000471503426445100252450ustar00rootroot00000000000000select dateadd(month,18,'2008-02-28'); sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_dateadd.yml000066400000000000000000000017221503426445100252500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3836ab0bbef01b782a32d4beb3dc80adad3e690c4cd84f65eaf84fb97631ce8b file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: '18' - comma: ',' - expression: quoted_literal: "'2008-02-28'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_datetime_functions.sql000066400000000000000000000005271503426445100275460ustar00rootroot00000000000000SELECT current_date; SELECT sysdate; SELECT current_timestamp; SELECT TRUNC(sysdate); -- As taken from: https://docs.aws.amazon.com/redshift/latest/dg/r_SYSDATE.html SELECT salesid, pricepaid, TRUNC(saletime) AS saletime, TRUNC(sysdate) AS now FROM sales WHERE saletime BETWEEN TRUNC(sysdate)-120 AND TRUNC(sysdate) ORDER BY saletime ASC; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_datetime_functions.yml000066400000000000000000000072611503426445100275520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6537745a5bfb75cac7ecca29a8fd62d8390445e9ea6f653f029c8fc36c76e294 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: current_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: sysdate - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: current_timestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TRUNC function_contents: bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRUNC function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: saletime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: saletime - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRUNC function_contents: bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: now from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales where_clause: keyword: WHERE expression: - column_reference: naked_identifier: saletime - keyword: BETWEEN - function: function_name: function_name_identifier: TRUNC function_contents: bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) - binary_operator: '-' - numeric_literal: '120' - keyword: AND - function: function_name: function_name_identifier: TRUNC function_contents: bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: saletime - keyword: ASC - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_exclude.sql000066400000000000000000000002371503426445100253110ustar00rootroot00000000000000SELECT * EXCLUDE col1 FROM table1; SELECT * EXCLUDE col1, col2 FROM table1; SELECT * EXCLUDE (col1) FROM table1; SELECT * EXCLUDE (col1, col2) FROM table1; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_exclude.yml000066400000000000000000000054771503426445100253260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
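# For quick reference, the companion select_exclude.sql fixture covers the
# SELECT * EXCLUDE syntax in both its bare and parenthesised forms. The
# sketch below is illustrative only, with placeholder identifiers:
#
#   select * exclude col1 from some_table;
#   select * exclude (col1, col2) from some_table;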
_hash: ccfe5628326043e2977dbfdf0721ccb5be8f8449f571dcb4fbb46d8e92aa9eb8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: EXCLUDE naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: EXCLUDE naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: EXCLUDE bracketed: start_bracket: ( naked_identifier: col1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: EXCLUDE bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_first_value.sql000066400000000000000000000001121503426445100261730ustar00rootroot00000000000000select first_value(finalsaleprice ignore nulls) over () as c1 from table1 sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_first_value.yml000066400000000000000000000025241503426445100262060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
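# For quick reference, the companion select_first_value.sql fixture covers
# FIRST_VALUE with the IGNORE NULLS option written inside the function's
# brackets, as Redshift allows. The sketch below is illustrative only, with
# placeholder identifiers:
#
#   select first_value(some_price ignore nulls) over () as c1
#   from some_table;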
_hash: d8d52a40c446b50c951e937ea1873d8f83c839ed1e207b95cb30362c3ddaf63f file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: first_value function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: finalsaleprice - keyword: ignore - keyword: nulls - end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_from_with_parenthesis.sql000066400000000000000000000002501503426445100302560ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/3955 SELECT table_1.id FROM (table_1); SELECT table_1.id FROM (table_1 INNER JOIN table_2 ON table_2.id = table_1.id); sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_from_with_parenthesis.yml000066400000000000000000000042151503426445100302650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 09e3fcbf2cb0a63aeb30e1f39ff029bf24683b7644c11a9405b6ecc361315069 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id from_clause: keyword: FROM bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id from_clause: keyword: FROM bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_into.sql000066400000000000000000000003161503426445100246270ustar00rootroot00000000000000select * into newevent from event; select username, lastname, sum(pricepaid-commission) as profit into temp table profits from sales, users where sales.sellerid=users.userid group by 1, 2 order by 3 desc; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_into.yml000066400000000000000000000057211503426445100246360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
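# For quick reference, the companion select_into.sql fixture covers
# SELECT ... INTO, including the INTO TEMP TABLE form. The sketch below is
# illustrative only, with placeholder identifiers:
#
#   select col_a, col_b into temp table some_target from some_source;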
_hash: a87caa59cd159927ea403e056d42c61e1ae17106c7327570e8d3d7198988af7a file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: keyword: into table_reference: naked_identifier: newevent from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: event - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: username - comma: ',' - select_clause_element: column_reference: naked_identifier: lastname - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: pricepaid - binary_operator: '-' - column_reference: naked_identifier: commission end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: profit into_clause: - keyword: into - keyword: temp - keyword: table - table_reference: naked_identifier: profits from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: users where_clause: keyword: where expression: - column_reference: - naked_identifier: sales - dot: . - naked_identifier: sellerid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: users - dot: . - naked_identifier: userid groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' orderby_clause: - keyword: order - keyword: by - numeric_literal: '3' - keyword: desc - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_keywords.sql000066400000000000000000000011041503426445100255210ustar00rootroot00000000000000SELECT pg_namespace.nspname AS constraint_schema, pg_constraint.conname AS constraint_name FROM pg_namespace, pg_constraint WHERE pg_namespace.oid = pg_constraint.connamespace; -- As taken from: https://docs.aws.amazon.com/redshift/latest/dg/c_join_PG_examples.html create view tables_vw as select distinct(id) table_id ,trim(datname) db_name ,trim(nspname) schema_name ,trim(relname) table_name from stv_tbl_perm join pg_class on pg_class.oid = stv_tbl_perm.id join pg_namespace on pg_namespace.oid = relnamespace join pg_database on pg_database.oid = stv_tbl_perm.db_id; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_keywords.yml000066400000000000000000000140751503426445100255360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b531c7dd4da826dd47e7f3600931812be60c72f3298ce517e62afd0f646f68f8 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: pg_namespace - dot: . - naked_identifier: nspname alias_expression: alias_operator: keyword: AS naked_identifier: constraint_schema - comma: ',' - select_clause_element: column_reference: - naked_identifier: pg_constraint - dot: . 
- naked_identifier: conname alias_expression: alias_operator: keyword: AS naked_identifier: constraint_name from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_namespace - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_constraint where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pg_namespace dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: pg_constraint - dot: . - naked_identifier: connamespace - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: tables_vw - keyword: as - select_statement: select_clause: - keyword: select - select_clause_modifier: keyword: distinct - select_clause_element: expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: naked_identifier: table_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: datname end_bracket: ) alias_expression: naked_identifier: db_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: nspname end_bracket: ) alias_expression: naked_identifier: schema_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: relname end_bracket: ) alias_expression: naked_identifier: table_name from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: stv_tbl_perm - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_class join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_class dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: stv_tbl_perm - dot: . - naked_identifier: id - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_namespace join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_namespace dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: relnamespace - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_database join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_database dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: stv_tbl_perm - dot: . 
- naked_identifier: db_id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_top.sql000066400000000000000000000001331503426445100244550ustar00rootroot00000000000000SELECT TOP 10 example_value_col FROM example_schema.some_table ORDER BY example_value_col; sqlfluff-3.4.2/test/fixtures/dialects/redshift/select_top.yml000066400000000000000000000021261503426445100244630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 14acecbedbb6768c8a974060d9e7cefa43561e1c3d1b19a080e8ee8256aff133 file: statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '10' select_clause_element: column_reference: naked_identifier: example_value_col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: example_schema - dot: . - naked_identifier: some_table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: example_value_col statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/show_datashares.sql000066400000000000000000000000611503426445100254730ustar00rootroot00000000000000SHOW DATASHARES; SHOW DATASHARES LIKE 'sales%'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/show_datashares.yml000066400000000000000000000012471503426445100255040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c57a1a628b2bd334f5e88794153f4b65e84287b82978760cba47ff9ec2d7082e file: - statement: show_datashares_statement: - keyword: SHOW - keyword: DATASHARES - statement_terminator: ; - statement: show_datashares_statement: - keyword: SHOW - keyword: DATASHARES - keyword: LIKE - quoted_literal: "'sales%'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/show_model.sql000066400000000000000000000000411503426445100244520ustar00rootroot00000000000000SHOW MODEL ALL; SHOW MODEL mdl; sqlfluff-3.4.2/test/fixtures/dialects/redshift/show_model.yml000066400000000000000000000012471503426445100244650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
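# For quick reference, the companion show_model.sql fixture covers the two
# SHOW MODEL forms accepted here: SHOW MODEL ALL and SHOW MODEL <name>. The
# second statement below uses a placeholder model name:
#
#   show model all;
#   show model some_model;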
_hash: a18435e378fb780efb8c6b0210f84365b31ee39918b8668bbca95d6e74cfd36b file: - statement: show_model_statement: - keyword: SHOW - keyword: MODEL - keyword: ALL - statement_terminator: ; - statement: show_model_statement: - keyword: SHOW - keyword: MODEL - object_reference: naked_identifier: mdl - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/single_quote.sql000066400000000000000000000002671503426445100250220ustar00rootroot00000000000000SELECT ''; SELECT ''''; SELECT ' '; SELECT '''aaa'''; SELECT ' '' '; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' -- some comment 'bar'; sqlfluff-3.4.2/test/fixtures/dialects/redshift/single_quote.yml000066400000000000000000000040311503426445100250150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d84e5a79ed1ed1950181d97584d7d68176ac88922906a83c0575227e7d55e47 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/super_data_type.sql000066400000000000000000000030201503426445100255020ustar00rootroot00000000000000-- redshift_super_data_type.sql /* queries that implicitly and explicitly use the Redshift SUPER data type (https://docs.aws.amazon.com/redshift/latest/dg/super-overview.html). 
*/ -- Example from https://github.com/sqlfluff/sqlfluff/issues/1672 SELECT c[0].col, o FROM customer_orders c, c.c_orders o; -- Can use SUPER data types in WHERE clauses SELECT COUNT(*) FROM customer_orders_lineitem WHERE c_orders[0].o_orderkey IS NOT NULL; SELECT c_custkey FROM customer_orders_lineitem WHERE CASE WHEN JSON_TYPEOF(c_orders[0].o_orderstatus) = 'string' THEN c_orders[0].o_orderstatus::VARCHAR <= 'P' ELSE NULL END; -- Can do multiple array accessors with SUPER data types SELECT c[0][1][2][3][4].col, o FROM customer_orders c, c.c_orders o; -- Can use wildcards SELECT c.*, o FROM customer_orders_lineitem c, c.c_orders o; -- Can access a single SUPER data type multiple times in a SELECT statement -- source: https://awscloudfeed.com/whats-new/big-data/work-with-semistructured-data-using-amazon-redshift-super SELECT messages[0].format, messages[0].topic FROM subscription_auto WHERE messages[0].payload.payload."assetId" > 0; -- Can perform functions and operations on SUPER data types. -- Adapted from: https://awscloudfeed.com/whats-new/big-data/work-with-semistructured-data-using-amazon-redshift-super SELECT messages[0].format, COUNT(messages[0].topic) FROM subscription_auto WHERE messages[0].payload.payload."assetId" > 'abc' GROUP BY messages[0].format; sqlfluff-3.4.2/test/fixtures/dialects/redshift/super_data_type.yml000066400000000000000000000263071503426445100255210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9164468cdf8d817d1b6d3fed83e7f44c9abbed1f9388821ec5ee293b606f24d3 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: c array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: col - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem where_clause: keyword: WHERE expression: - column_reference: naked_identifier: c_orders - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - semi_structured_expression: dot: . 
naked_identifier: o_orderkey - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c_custkey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem where_clause: keyword: WHERE expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: JSON_TYPEOF function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c_orders array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: o_orderstatus end_bracket: ) comparison_operator: raw_comparison_operator: '=' quoted_literal: "'string'" - keyword: THEN - expression: cast_expression: column_reference: naked_identifier: c_orders array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: o_orderstatus casting_operator: '::' data_type: keyword: VARCHAR comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' quoted_literal: "'P'" - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: c array_accessor: - start_square_bracket: '[' - numeric_literal: '0' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '1' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '2' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '3' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '4' - end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: col - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: c dot: . star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . 
naked_identifier: format - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: topic from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subscription_auto where_clause: keyword: WHERE expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: - dot: . - naked_identifier: payload - dot: . - naked_identifier: payload - dot: . - quoted_identifier: '"assetId"' comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: format - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: topic end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subscription_auto where_clause: keyword: WHERE expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: - dot: . - naked_identifier: payload - dot: . - naked_identifier: payload - dot: . - quoted_identifier: '"assetId"' comparison_operator: raw_comparison_operator: '>' quoted_literal: "'abc'" groupby_clause: - keyword: GROUP - keyword: BY - expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: format - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/temporary_tables.sql000066400000000000000000000004051503426445100256720ustar00rootroot00000000000000CREATE TEMPORARY TABLE #temp_table AS SELECT name FROM other_table; CREATE TABLE #other_temp_table (id int); COPY #temp_table FROM 's3://mybucket/path' CREDENTIALS 'aws_access_key_id=SECRET;aws_secret_access_key=ALSO_SECRET' GZIP; SELECT * FROM #temp_table; sqlfluff-3.4.2/test/fixtures/dialects/redshift/temporary_tables.yml000066400000000000000000000041571503426445100257040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
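# For quick reference, the companion temporary_tables.sql fixture covers
# Redshift's '#'-prefixed temporary table names across CREATE, COPY and
# SELECT statements. The sketch below is illustrative only, with placeholder
# identifiers:
#
#   create temporary table #some_temp as select name from some_table;
#   select * from #some_temp;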
_hash: 3c60e384187593cd92b01bd368764c70dad60b74bd061dbc4f6c3d4b586509c3 file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - object_reference: naked_identifier: '#temp_table' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: '#other_temp_table' - bracketed: start_bracket: ( column_reference: naked_identifier: id data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: '#temp_table' - keyword: FROM - quoted_literal: "'s3://mybucket/path'" - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_access_key_id=SECRET;aws_secret_access_key=ALSO_SECRET'" - keyword: GZIP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: '#temp_table' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/transactions.sql000066400000000000000000000004131503426445100250250ustar00rootroot00000000000000begin; start transaction; begin work; begin transaction isolation level serializable; begin transaction isolation level serializable read only; start transaction read write; commit; end work; commit transaction; rollback; abort work; rollback transaction; sqlfluff-3.4.2/test/fixtures/dialects/redshift/transactions.yml000066400000000000000000000035471503426445100250420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
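# For quick reference, the companion transactions.sql fixture covers
# transaction control statements: BEGIN/START TRANSACTION (with optional
# isolation level and read-only/read-write modifiers) and the
# COMMIT/END/ROLLBACK/ABORT variants. An illustrative sketch:
#
#   begin transaction isolation level serializable read only;
#   commit;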
_hash: 9aac8a199f0aa4375d5da5c433f6ec048d0294798c1a9ab7de0894885939dd1e file: - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: isolation - keyword: level - keyword: serializable - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: isolation - keyword: level - keyword: serializable - keyword: read - keyword: only - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - keyword: read - keyword: write - statement_terminator: ; - statement: transaction_statement: keyword: commit - statement_terminator: ; - statement: transaction_statement: - keyword: end - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: commit - keyword: transaction - statement_terminator: ; - statement: transaction_statement: keyword: rollback - statement_terminator: ; - statement: transaction_statement: - keyword: abort - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: rollback - keyword: transaction - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unload.sql000066400000000000000000000054511503426445100236060ustar00rootroot00000000000000unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; unload ('select * from lineitem') to 's3://mybucket/lineitem/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' PARQUET PARTITION BY (l_shipdate); unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' JSON; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' CSV; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' CSV DELIMITER AS '|'; unload ('select * from venue') to 's3://mybucket/venue_pipe_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest; unload ('select * from venue') to 's3://mybucket/unload_venue_folder/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest verbose; unload ('select * from venue where venueseats > 75000') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' header parallel off; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' maxfilesize 1 gb; unload ('select * from venue') to 's3://mybucket/venue_encrypt_kms' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' kms_key_id '1234abcd-12ab-34cd-56ef-1234567890ab' manifest encrypted; unload ('select * from venue') to 's3://mybucket/venue_encrypt_cmk' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' master_symmetric_key 'EXAMPLEMASTERKEYtkbjk/OpCwtYSx/M4/t7DMCDIK722' encrypted; unload ('select * from venue') to 's3://mybucket/venue_fw_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' fixedwidth as 'venueid:3,venuename:39,venuecity:16,venuestate:2,venueseats:6'; unload ('select * from venue') to 's3://mybucket/venue_tab_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter as '\t' gzip; unload ('select id, location from location') to 
's3://mybucket/location_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter ',' addquotes; unload ('select venuecity, venuestate, caldate, pricepaid, sum(pricepaid) over(partition by venuecity, venuestate order by caldate rows between 3 preceding and 3 following) as winsum from sales join date on sales.dateid=date.dateid join event on event.eventid=sales.eventid join venue on event.venueid=venue.venueid order by 1,2') to 's3://mybucket/tickit/winsum' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; unload ('select * from venue') to 's3://mybucket/nulls/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' null as 'fred'; unload ('select * from tbl') to 's3://mybucket/unload/' csv header credentials 'aws_access_key_id=abc;aws_secret_access_key=def' extension 'csv' parallel false; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unload.yml000066400000000000000000000212101503426445100235770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: edd66b8fab160cd1a515f49438695c53d8a66e019801dc01adf7228a3298048a file: - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from lineitem'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/lineitem/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: PARQUET - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: l_shipdate end_bracket: ) - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: JSON - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: CSV - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: CSV - keyword: DELIMITER - keyword: AS - quoted_literal: "'|'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_pipe_'" - authorization_segment: keyword: iam_role quoted_literal: 
"'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload_venue_folder/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - keyword: verbose - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue where venueseats > 75000'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: header - keyword: parallel - keyword: 'off' - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: maxfilesize - numeric_literal: '1' - keyword: gb - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_encrypt_kms'" - authorization_segment: - keyword: iam_role - quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: kms_key_id - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - keyword: manifest - keyword: encrypted - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_encrypt_cmk'" - authorization_segment: - keyword: iam_role - quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: master_symmetric_key - quoted_literal: "'EXAMPLEMASTERKEYtkbjk/OpCwtYSx/M4/t7DMCDIK722'" - keyword: encrypted - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_fw_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: fixedwidth - keyword: as - quoted_literal: "'venueid:3,venuename:39,venuecity:16,venuestate:2,venueseats:6'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_tab_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - keyword: as - quoted_literal: "'\\t'" - keyword: gzip - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select id, location from location'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/location_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "','" - keyword: addquotes - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( 
quoted_literal: "'select venuecity, venuestate, caldate, pricepaid,\nsum(pricepaid)\ \ over(partition by venuecity, venuestate\norder by caldate rows between\ \ 3 preceding and 3 following) as winsum\nfrom sales join date on sales.dateid=date.dateid\n\ join event on event.eventid=sales.eventid\njoin venue on event.venueid=venue.venueid\n\ order by 1,2'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/tickit/winsum'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/nulls/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: 'null' - keyword: as - quoted_literal: "'fred'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from tbl'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - keyword: csv - keyword: header - authorization_segment: keyword: credentials quoted_literal: "'aws_access_key_id=abc;aws_secret_access_key=def'" - keyword: extension - quoted_literal: "'csv'" - keyword: parallel - keyword: 'false' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unpivot.sql000066400000000000000000000025531503426445100240300ustar00rootroot00000000000000-- redshift_unpivot.sql /* Examples of SELECT statements that include UNPIVOT expressions. */ -- Below examples come from -- https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html SELECT * FROM (SELECT red, green, blue FROM count_by_color) UNPIVOT ( cnt FOR color IN (red, green, blue) ); SELECT * FROM ( SELECT red, green, blue FROM count_by_color ) UNPIVOT INCLUDE NULLS ( cnt FOR color IN (red, green, blue) ); SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red, green, blue) ); SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red AS r, green AS g, blue AS b) ); -- Examples provided by AWS end here -- Can do EXCLUDE NULLS as well SELECT * FROM ( SELECT red, green, blue FROM count_by_color ) UNPIVOT EXCLUDE NULLS ( cnt FOR color IN (red, green, blue) ); -- Can do this on CTEs WITH subset_color_counts AS ( SELECT red, green, blue FROM count_by_color ) SELECT * FROM subset_color_counts UNPIVOT ( cnt FOR color IN (red, green, blue) ); -- Can do this on tables SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red, green, blue) ); -- Can alias output of unpivot statement SELECT * FROM count_of_bears UNPIVOT ( cnt FOR species IN (giant_panda, moon_bear) ) AS floofy_bears; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unpivot.yml000066400000000000000000000326301503426445100240310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 786eb84d432575634360595ad5333fa6d7a59538de2f18e2177cff0406fa79bb file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - 
column_reference: naked_identifier: red - alias_expression: alias_operator: keyword: AS naked_identifier: r - comma: ',' - column_reference: naked_identifier: green - alias_expression: alias_operator: keyword: AS naked_identifier: g - comma: ',' - column_reference: naked_identifier: blue - alias_expression: alias_operator: keyword: AS naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: subset_color_counts keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subset_color_counts from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_of_bears from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: species - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: giant_panda - comma: ',' - column_reference: naked_identifier: moon_bear - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: floofy_bears - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unreserved_keywords.sql000066400000000000000000000006621503426445100264340ustar00rootroot00000000000000-- Issue #2299 -- All these columns are unreserved keywords and should parse. SELECT auto, avro, backup, bzip2, case_insensitive, case_sensitive, compound, defaults, deflate, distkey, diststyle, encode, even, excluding, explicit, gzip, including, interleaved, language, lzop, offline, partitioned, sortkey, wallet, zstd FROM foo; sqlfluff-3.4.2/test/fixtures/dialects/redshift/unreserved_keywords.yml000066400000000000000000000070101503426445100264300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9c2855fe7dd06fc4e5c99824e16eb736d7534e70017c2cdf5c83d50ad587896 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: auto - comma: ',' - select_clause_element: column_reference: naked_identifier: avro - comma: ',' - select_clause_element: column_reference: naked_identifier: backup - comma: ',' - select_clause_element: column_reference: naked_identifier: bzip2 - comma: ',' - select_clause_element: column_reference: naked_identifier: case_insensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: case_sensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: compound - comma: ',' - select_clause_element: column_reference: naked_identifier: defaults - comma: ',' - select_clause_element: column_reference: naked_identifier: deflate - comma: ',' - select_clause_element: column_reference: naked_identifier: distkey - comma: ',' - select_clause_element: column_reference: naked_identifier: diststyle - comma: ',' - select_clause_element: column_reference: naked_identifier: encode - comma: ',' - select_clause_element: column_reference: naked_identifier: even - comma: ',' - select_clause_element: column_reference: naked_identifier: excluding - comma: ',' - select_clause_element: column_reference: naked_identifier: explicit - comma: ',' - select_clause_element: column_reference: naked_identifier: gzip - comma: ',' - select_clause_element: column_reference: naked_identifier: including - comma: ',' - select_clause_element: column_reference: naked_identifier: interleaved - comma: ',' - select_clause_element: column_reference: naked_identifier: language - comma: ',' - select_clause_element: column_reference: naked_identifier: lzop - comma: ',' - select_clause_element: column_reference: 
naked_identifier: offline - comma: ',' - select_clause_element: column_reference: naked_identifier: partitioned - comma: ',' - select_clause_element: column_reference: naked_identifier: sortkey - comma: ',' - select_clause_element: column_reference: naked_identifier: wallet - comma: ',' - select_clause_element: column_reference: naked_identifier: zstd from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/update.sql000066400000000000000000000001021503426445100235720ustar00rootroot00000000000000update tbl1 set col1 = col2; update tbl1 as set set col1 = col2; sqlfluff-3.4.2/test/fixtures/dialects/redshift/update.yml000066400000000000000000000024141503426445100236040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 64584f13ce9fbfe1413873a2d2904ec392d2facff2ec7f785a813f9b56eea3b3 file: - statement: update_statement: keyword: update table_reference: naked_identifier: tbl1 set_clause_list: keyword: set set_clause: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: tbl1 alias_expression: alias_operator: keyword: as naked_identifier: set set_clause_list: keyword: set set_clause: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/vacuum.sql000066400000000000000000000004031503426445100236140ustar00rootroot00000000000000vacuum; vacuum sales; vacuum sales to 100 percent; vacuum recluster sales; vacuum sort only sales to 75 percent; vacuum delete only sales to 75 percent; vacuum reindex listing; vacuum reindex listing to 75 percent; vacuum listing to 75 percent BOOST; sqlfluff-3.4.2/test/fixtures/dialects/redshift/vacuum.yml000066400000000000000000000040731503426445100236250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ffe0de2c6413a5f57b2ad950d92a11bbfd256c6a19ee29b6db333c5d27eb17f0 file: - statement: vacuum_statement: keyword: vacuum - statement_terminator: ; - statement: vacuum_statement: keyword: vacuum table_reference: naked_identifier: sales - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '100' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: recluster - table_reference: naked_identifier: sales - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: sort - keyword: only - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: delete - keyword: only - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: reindex - table_reference: naked_identifier: listing - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: reindex - table_reference: naked_identifier: listing - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - table_reference: naked_identifier: listing - keyword: to - numeric_literal: '75' - keyword: percent - keyword: BOOST - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/redshift/window_functions.sql000066400000000000000000000001121503426445100257100ustar00rootroot00000000000000select lead(col1, 1) respect nulls over (order by col2 asc) from dual sqlfluff-3.4.2/test/fixtures/dialects/redshift/window_functions.yml000066400000000000000000000030501503426445100257160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f07f7ab22188a098a03f6ff1b07ecb88a3a5075e494dbeda228dca6dc1a6927b file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: lead function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: - keyword: respect - keyword: nulls - keyword: over - bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col2 - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual sqlfluff-3.4.2/test/fixtures/dialects/snowflake/000077500000000000000000000000001503426445100217575ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/snowflake/.sqlfluff000066400000000000000000000000371503426445100236020ustar00rootroot00000000000000[sqlfluff] dialect = snowflake sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_account.sql000066400000000000000000000013131503426445100253210ustar00rootroot00000000000000ALTER ACCOUNT SET TIMEZONE = 'UTC'; ALTER ACCOUNT SET ALLOW_ID_TOKEN = TRUE, DEFAULT_DDL_COLLATION = 'en-ci', CLIENT_ENCRYPTION_KEY_SIZE = 128, NETWORK_POLICY = mypolicy ; ALTER ACCOUNT UNSET TIMEZONE; ALTER ACCOUNT UNSET DATA_RETENTION_TIME_IN_DAYS, JSON_INDENT; ALTER ACCOUNT SET RESOURCE_MONITOR = VERY_RESTRICTIVE_MONITOR; ALTER ACCOUNT SET PASSWORD POLICY mydb.security.at_least_twelve_characters; ALTER ACCOUNT SET SESSION POLICY mydb.policies.only_one_hour; ALTER ACCOUNT UNSET PASSWORD POLICY; ALTER ACCOUNT UNSET SESSION POLICY; ALTER ACCOUNT SET TAG env = 'prod', domain = 'sales' ; ALTER ACCOUNT UNSET TAG env, domain; ALTER ACCOUNT SET EVENT_TABLE = LOG_TRACE_DB.PUBLIC.EVENT_TABLE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_account.yml000066400000000000000000000102211503426445100253210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3308a5d4a713f6b6af28b2ed40d1be038e1872d6258480544dfa2de8dc161107 file: - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - parameter: TIMEZONE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTC'" - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - parameter: ALLOW_ID_TOKEN - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en-ci'" - comma: ',' - parameter: CLIENT_ENCRYPTION_KEY_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '128' - comma: ',' - parameter: NETWORK_POLICY - comparison_operator: raw_comparison_operator: '=' - naked_identifier: mypolicy - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - naked_identifier: TIMEZONE - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - naked_identifier: DATA_RETENTION_TIME_IN_DAYS - comma: ',' - naked_identifier: JSON_INDENT - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: RESOURCE_MONITOR - comparison_operator: raw_comparison_operator: '=' - naked_identifier: VERY_RESTRICTIVE_MONITOR - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: PASSWORD - keyword: POLICY - table_reference: - naked_identifier: mydb - dot: . - naked_identifier: security - dot: . - naked_identifier: at_least_twelve_characters - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: SESSION - keyword: POLICY - table_reference: - naked_identifier: mydb - dot: . - naked_identifier: policies - dot: . - naked_identifier: only_one_hour - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: PASSWORD - keyword: POLICY - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: SESSION - keyword: POLICY - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: env - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'prod'" - comma: ',' - tag_reference: naked_identifier: domain - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: env - comma: ',' - tag_reference: naked_identifier: domain - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - parameter: EVENT_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - naked_identifier: LOG_TRACE_DB - dot: . - naked_identifier: PUBLIC - dot: . 
- naked_identifier: EVENT_TABLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_cortex_search_service.sql000066400000000000000000000007571503426445100302510ustar00rootroot00000000000000ALTER CORTEX SEARCH SERVICE mysvc SET WAREHOUSE = my_new_wh; ALTER CORTEX SEARCH SERVICE mysvc SET COMMENT = 'new_comment'; ALTER CORTEX SEARCH SERVICE mysvc set target_lag = '1 hour'; ALTER CORTEX SEARCH SERVICE mysvc SUSPEND SERVING; alter cortex search service mysvc resume indexing; alter cortex search service if exists mysvc suspend indexing; ALTER CORTEX SEARCH SERVICE mysvc SET WAREHOUSE = my_new_wh; ALTER CORTEX SEARCH SERVICE mysvc SET COMMENT = 'new_comment' target_lag = '1 hour'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_cortex_search_service.yml000066400000000000000000000063301503426445100302440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5ffe2787251973962e11960b54672bd8f7acc1dee7baca9604c8c7b0b0588475 file: - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: SET - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_new_wh - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'new_comment'" - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: set - keyword: target_lag - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 hour'" - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: SUSPEND - keyword: SERVING - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: alter - keyword: cortex - keyword: search - keyword: service - object_reference: naked_identifier: mysvc - keyword: resume - keyword: indexing - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: alter - keyword: cortex - keyword: search - keyword: service - keyword: if - keyword: exists - object_reference: naked_identifier: mysvc - keyword: suspend - keyword: indexing - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: SET - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_new_wh - statement_terminator: ; - statement: alter_streamlit_statement: - keyword: ALTER - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'new_comment'" - keyword: target_lag - 
comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 hour'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_database.sql000066400000000000000000000010531503426445100254320ustar00rootroot00000000000000ALTER DATABASE mydb RENAME TO mydb2; ALTER DATABASE IF EXISTS mydb RENAME TO mydb2; ALTER DATABASE mydb SWAP WITH yourdb; ALTER DATABASE IF EXISTS mydb SWAP WITH yourdb; ALTER DATABASE mydb SET DATA_RETENTION_TIME_IN_DAYS = 7, MAX_DATA_EXTENSION_TIME_IN_DAYS = 14, DEFAULT_DDL_COLLATION = 'en_ci', COMMENT = 'My most excellent database' ; ALTER DATABASE mydb SET TAG environment = 'test', billed_to = 'sales'; ALTER DATABASE mydb UNSET TAG environment, billed_to; ALTER DATABASE mydb UNSET DATA_RETENTION_TIME_IN_DAYS, DEFAULT_DDL_COLLATION; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_database.yml000066400000000000000000000067131503426445100254440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9d149b9fe24d2c93edc1b40d9832403c4b0d484279ba69f0002e5a7cb677004 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: RENAME - keyword: TO - object_reference: naked_identifier: mydb2 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mydb - keyword: RENAME - keyword: TO - object_reference: naked_identifier: mydb2 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SWAP - keyword: WITH - object_reference: naked_identifier: yourdb - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mydb - keyword: SWAP - keyword: WITH - object_reference: naked_identifier: yourdb - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SET - parameter: DATA_RETENTION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - comma: ',' - parameter: MAX_DATA_EXTENSION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '14' - comma: ',' - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_ci'" - comma: ',' - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'My most excellent database'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: environment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test'" - comma: ',' - tag_reference: naked_identifier: billed_to - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - 
keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: environment - comma: ',' - tag_reference: naked_identifier: billed_to - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: UNSET - keyword: DATA_RETENTION_TIME_IN_DAYS - comma: ',' - keyword: DEFAULT_DDL_COLLATION - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_function.sql000066400000000000000000000013261503426445100274200ustar00rootroot00000000000000ALTER FUNCTION IF EXISTS FUNCTION1(NUMBER) RENAME TO FUNCTION2; ALTER FUNCTION FUNCTION2(NUMBER) SET SECURE; ALTER FUNCTION FUNCTION3(NUMBER) RENAME TO FUNCTION3B; ALTER FUNCTION FUNCTION4(NUMBER) SET API_INTEGRATION = API_INTEGRATION_2; ALTER FUNCTION FUNCTION5(NUMBER) SET MAX_BATCH_ROWS = 100; ALTER FUNCTION FUNCTION6(NUMBER) SET COMPRESSION = GZIP; ALTER FUNCTION FUNCTION7(NUMBER) SET REQUEST_TRANSLATOR = TRANSLATOR_FUNCTION; ALTER FUNCTION FUNCTION8(NUMBER) SET RESPONSE_TRANSLATOR = TRANSLATOR_FUNCTION; ALTER FUNCTION FUNCTION9(NUMBER) SET HEADERS = ('abc' = 'def'); ALTER FUNCTION FUNCTION10(NUMBER) SET CONTEXT_HEADERS = (CURRENT_ROLE,CURRENT_TIMESTAMP); ALTER FUNCTION FUNCTION11(NUMBER) SET COMMENT = 'Woohoo!'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_function.yml000066400000000000000000000136201503426445100274220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4720a5df637f1af3735663cf93ba4e25d885c1db9c4eba5535eb085f019aa04e file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: FUNCTION1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: FUNCTION2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION2 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: SECURE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION3 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: FUNCTION3B - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION4 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: API_INTEGRATION_2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: MAX_BATCH_ROWS - 
comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION6 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION7 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: TRANSLATOR_FUNCTION - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION8 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: TRANSLATOR_FUNCTION - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION9 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'abc'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'def'" - end_bracket: ) - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION10 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CURRENT_ROLE - comma: ',' - keyword: CURRENT_TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION11 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Woohoo!'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_table.sql000066400000000000000000000010641503426445100266610ustar00rootroot00000000000000alter external table foo refresh; alter external table foo refresh '2018/08/05/'; alter external table foo add files ('foo/bar.json.gz', 'bar/foo.json.gz'); alter external table foo remove files ('foo/bar.json.gz', 'bar/foo.json.gz'); alter external table foo add partition(foo='baz', bar='bar', baz='foo') location '2022/01'; alter external table foo drop partition location '2022/01'; alter external table if exists foo set auto_refresh = true; alter external table if exists foo set tag foo = 'foo', bar = 'bar'; alter external table foo unset tag foo = 'foo'; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_table.yml000066400000000000000000000102401503426445100266570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7679aad86bf7581f023ca7e74c2ce3ec2ba9a8477f1a4f8241a6897881cce731 file: - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: refresh - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: refresh - quoted_literal: "'2018/08/05/'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: add - keyword: files - bracketed: - start_bracket: ( - quoted_literal: "'foo/bar.json.gz'" - comma: ',' - quoted_literal: "'bar/foo.json.gz'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: remove - keyword: files - bracketed: - start_bracket: ( - quoted_literal: "'foo/bar.json.gz'" - comma: ',' - quoted_literal: "'bar/foo.json.gz'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: add - keyword: partition - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'baz'" - comma: ',' - column_reference: naked_identifier: bar - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - comma: ',' - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - end_bracket: ) - keyword: location - quoted_literal: "'2022/01'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: drop - keyword: partition - keyword: location - quoted_literal: "'2022/01'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: foo - keyword: set - keyword: auto_refresh - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: foo - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comma: ',' - tag_reference: naked_identifier: bar - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: unset - tag_equals: keyword: tag 
tag_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' quoted_literal: "'foo'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_volume.sql000066400000000000000000000016161503426445100271040ustar00rootroot00000000000000ALTER EXTERNAL VOLUME exvol1 ADD STORAGE_LOCATION = ( NAME = 'my-s3-us-central-2' STORAGE_PROVIDER = 'S3' STORAGE_BASE_URL = 's3://my_bucket_us_central-1/' STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/myrole' ); ALTER EXTERNAL VOLUME exvol2 ADD STORAGE_LOCATION = ( NAME = 'my-gcs-europe-west4' STORAGE_PROVIDER = 'GCS' STORAGE_BASE_URL = 'gcs://my_bucket_europe-west4/' ); ALTER EXTERNAL VOLUME exvol3 ADD STORAGE_LOCATION = ( NAME = 'my-azure-japaneast' STORAGE_PROVIDER = 'AZURE' STORAGE_BASE_URL = 'azure://sfcdev1.blob.core.windows.net/my_container_japaneast/' AZURE_TENANT_ID = 'a9876545-4321-987b-b23c-2kz436789d0' ); ALTER EXTERNAL VOLUME exvol4 REMOVE STORAGE_LOCATION 'foo'; ALTER EXTERNAL VOLUME exvol5 SET ALLOW_WRITES = TRUE; ALTER EXTERNAL VOLUME IF EXISTS exvol6 SET COMMENT = 'bar'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_external_volume.yml000066400000000000000000000103011503426445100270750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 642ab9568c7c70d861b20a0b985d8277ff69d54e53250fcce71bbb0a16db41a4 file: - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol1 - keyword: ADD - keyword: STORAGE_LOCATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-s3-us-central-2'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'S3'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://my_bucket_us_central-1/'" - keyword: STORAGE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/myrole'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol2 - keyword: ADD - keyword: STORAGE_LOCATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-gcs-europe-west4'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'GCS'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gcs://my_bucket_europe-west4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol3 - keyword: ADD - keyword: STORAGE_LOCATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-azure-japaneast'" - keyword: STORAGE_PROVIDER - 
comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AZURE'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'azure://sfcdev1.blob.core.windows.net/my_container_japaneast/'" - keyword: AZURE_TENANT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a9876545-4321-987b-b23c-2kz436789d0'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol4 - keyword: REMOVE - keyword: STORAGE_LOCATION - quoted_literal: "'foo'" - statement_terminator: ; - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol5 - keyword: SET - keyword: ALLOW_WRITES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_external_volume_statement: - keyword: ALTER - keyword: EXTERNAL - keyword: VOLUME - keyword: IF - keyword: EXISTS - external_volume_reference: naked_identifier: exvol6 - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_file_format.sql000066400000000000000000000041101503426445100261520ustar00rootroot00000000000000ALTER FILE FORMAT IF EXISTS my_file_format RENAME TO your_file_format ; ALTER FILE FORMAT IF EXISTS my_csv_format SET TYPE = CSV, COMPRESSION = AUTO, RECORD_DELIMITER = NONE, FIELD_DELIMITER = NONE, FILE_EXTENSION = 'foobar', SKIP_HEADER = 1, SKIP_BLANK_LINES = TRUE, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FIELD_OPTIONALLY_ENCLOSED_BY = NONE, ERROR_ON_COLUMN_COUNT_MISMATCH = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, VALIDATE_UTF8 = TRUE, EMPTY_FIELD_AS_NULL = TRUE, SKIP_BYTE_ORDER_MARK = TRUE, ENCODING = UTF8 ; ALTER FILE FORMAT IF EXISTS my_json_format SET TYPE = JSON, COMPRESSION = AUTO, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FILE_EXTENSION = 'foobar', ENABLE_OCTAL = TRUE, ALLOW_DUPLICATE = TRUE, STRIP_OUTER_ARRAY = TRUE, STRIP_NULL_VALUES = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, IGNORE_UTF8_ERRORS = TRUE, SKIP_BYTE_ORDER_MARK = TRUE ; ALTER FILE FORMAT IF EXISTS my_avro_format SET TYPE = AVRO COMPRESSION = 'GZIP' TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_orc_format SET TYPE = ORC TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_parquet_format SET TYPE = PARQUET COMPRESSION = AUTO SNAPPY_COMPRESSION = FALSE TRIM_SPACE = FALSE BINARY_AS_TEXT = TRUE USE_LOGICAL_TYPE = FALSE USE_VECTORIZED_SCANNER = FALSE REPLACE_INVALID_CHARACTERS = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_xml_format SET TYPE = XML COMPRESSION = GZIP IGNORE_UTF8_ERRORS = FALSE PRESERVE_SPACE = FALSE STRIP_OUTER_ELEMENT = FALSE DISABLE_SNOWFLAKE_DATA = FALSE DISABLE_AUTO_CONVERT = FALSE SKIP_BYTE_ORDER_MARK = FALSE COMMENT = 'FOOBAR' ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_file_format.yml000066400000000000000000000276361503426445100261760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # 
hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a488e080ec2fb07fd4007721f6cd6cbf214ec358eccd06b64b5708c00caa9e38 file: - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_file_format - keyword: RENAME - keyword: TO - object_reference: naked_identifier: your_file_format - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - keyword: SET - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - comma: ',' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_json_format - keyword: SET - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - comma: ',' - keyword: COMPRESSION - 
comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_avro_format - keyword: SET - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: "'GZIP'" - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_orc_format - keyword: SET - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: 
my_parquet_format - keyword: SET - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: BINARY_AS_TEXT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: USE_LOGICAL_TYPE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: USE_VECTORIZED_SCANNER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_xml_format - keyword: SET - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: PRESERVE_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_AUTO_CONVERT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_function.sql000066400000000000000000000016541503426445100255220ustar00rootroot00000000000000ALTER FUNCTION IF EXISTS function1(number) RENAME TO function2; ALTER FUNCTION IF EXISTS function2(number) SET SECURE; ALTER FUNCTION function3() UNSET COMMENT; ALTER function function1(FLOAT_PARAM1 FLOAT) SET TAG TAG1 = 'value1', TAG2 = 'value2', TAG3 = 'value3'; ALTER function function1() UNSET TAG TAG1, TAG2, TAG3; ALTER function function1() SET COMMENT = 'just a comment'; ALTER function function1() UNSET COMMENT; ALTER FUNCTION example_function() SET EXTERNAL_ACCESS_INTEGRATIONS = (my_external_access_integration), LOG_LEVEL = DEBUG, TRACE_LEVEL = ON_EVENT, SECRETS = ('cred' = oauth_token), COMMENT = 'just a comment' ; ALTER FUNCTION example_function() SET TAG TAG1 = 'value1', TAG2 = 'value2', TAG3 = 'value3', EXTERNAL_ACCESS_INTEGRATIONS = (my_external_access_integration), LOG_LEVEL = DEBUG, TRACE_LEVEL = ON_EVENT, SECRETS = ('cred' = oauth_token), COMMENT = 'just a comment' ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_function.yml000066400000000000000000000163401503426445100255220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: af1dd450cf59a08c5871cf146946a66e77c37436ca5e888f17db34928310ba4a file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: function2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function2 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: SET - keyword: SECURE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: function3 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: function - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: function - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - comma: ',' - tag_reference: naked_identifier: TAG2 - comma: ',' - tag_reference: naked_identifier: TAG3 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: function - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'just a comment'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: function - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: example_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: SET - external_access_integration_equals: keyword: 
EXTERNAL_ACCESS_INTEGRATIONS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( naked_identifier: my_external_access_integration end_bracket: ) - comma: ',' - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: DEBUG - comma: ',' - trace_level_equals: - keyword: TRACE_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: ON_EVENT - comma: ',' - external_access_integration_equals: keyword: SECRETS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( quoted_literal: "'cred'" comparison_operator: raw_comparison_operator: '=' naked_identifier: oauth_token end_bracket: ) - comma: ',' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'just a comment'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: example_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - comma: ',' - external_access_integration_equals: keyword: EXTERNAL_ACCESS_INTEGRATIONS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( naked_identifier: my_external_access_integration end_bracket: ) - comma: ',' - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: DEBUG - comma: ',' - trace_level_equals: - keyword: TRACE_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: ON_EVENT - comma: ',' - external_access_integration_equals: keyword: SECRETS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( quoted_literal: "'cred'" comparison_operator: raw_comparison_operator: '=' naked_identifier: oauth_token end_bracket: ) - comma: ',' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'just a comment'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_masking_policy.sql000066400000000000000000000010231503426445100266730ustar00rootroot00000000000000ALTER MASKING POLICY IF EXISTS "a quoted policy name" RENAME TO a_sane_name; ALTER MASKING POLICY email_mask SET BODY -> CASE WHEN current_role() IN ('ANALYST') THEN VAL ELSE sha2(VAL, 512) END ; ALTER MASKING POLICY aggressively_mask_pii SET TAG environment = 'silver' , silo = 'sales'; ALTER MASKING POLICY IF EXISTS mask_pii_policy UNSET TAG environment, billing; ALTER MASKING POLICY db.sch.fully_redacted_policy SET COMMENT = 'A super strict policy'; ALTER MASKING POLICY IF EXISTS mask_pii_policy UNSET COMMENT; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_masking_policy.yml000066400000000000000000000076021503426445100267060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 56efd6624e93afedbae6b33354ea16ac3474278948be1b37a85ba6ee37af85f8 file: - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: quoted_identifier: '"a quoted policy name"' - keyword: RENAME - keyword: TO - object_reference: naked_identifier: a_sane_name - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: email_mask - keyword: SET - keyword: BODY - function_assigner: -> - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: IN bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: THEN - expression: column_reference: naked_identifier: VAL - else_clause: keyword: ELSE expression: function: function_name: function_name_identifier: sha2 function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: VAL - comma: ',' - expression: numeric_literal: '512' - end_bracket: ) - keyword: END - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: aggressively_mask_pii - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: environment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'silver'" - comma: ',' - tag_reference: naked_identifier: silo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mask_pii_policy - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: environment - comma: ',' - tag_reference: naked_identifier: billing - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: - naked_identifier: db - dot: . - naked_identifier: sch - dot: . 
- naked_identifier: fully_redacted_policy - keyword: SET - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'A super strict policy'" - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mask_pii_policy - keyword: UNSET - keyword: COMMENT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_materialized_view.sql000066400000000000000000000010361503426445100273730ustar00rootroot00000000000000alter materialized view table1_mv rename to my_mv; alter materialized view my_mv cluster by(i); alter materialized view my_mv suspend recluster; alter materialized view my_mv resume recluster; alter materialized view my_mv suspend; alter materialized view my_mv resume; alter materialized view my_mv drop clustering key; alter materialized view mv1 set secure; alter materialized view mv1 set comment = 'Sample view'; alter materialized view mv1 set tag my_tag = 'my tag'; alter materialized view mv1 unset tag my_tag = 'not my tag anymore'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_materialized_view.yml000066400000000000000000000073321503426445100274020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb02d79aba938fead56e37e60e21c34daebb4c3f1ab0cb5a47eb78114ae536eb file: - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: table1_mv - keyword: rename - keyword: to - table_reference: naked_identifier: my_mv - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: cluster - keyword: by - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: suspend - keyword: recluster - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: resume - keyword: recluster - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: suspend - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: resume - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: drop - keyword: clustering - keyword: key - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - keyword: secure - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: 
alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Sample view'" - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - tag_equals: keyword: tag tag_reference: naked_identifier: my_tag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my tag'" - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: unset - tag_equals: keyword: tag tag_reference: naked_identifier: my_tag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'not my tag anymore'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_network_policy.sql000066400000000000000000000007471503426445100267470ustar00rootroot00000000000000ALTER NETWORK POLICY mypolicy1 SET ALLOWED_IP_LIST=('192.168.1.0/24','192.168.255.100') BLOCKED_IP_LIST=('192.168.1.99'); ALTER NETWORK POLICY foo RENAME TO bar; ALTER NETWORK POLICY foo SET TAG admin.my_tag = 'foo'; ALTER NETWORK POLICY foo UNSET TAG admin.my_tag; ALTER NETWORK POLICY IF EXISTS foo UNSET COMMENT; ALTER NETWORK POLICY foo REMOVE ALLOWED_NETWORK_RULE_LIST='foo'; ALTER NETWORK POLICY foo ADD BLOCKED_NETWORK_RULE_LIST='bar'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_network_policy.yml000066400000000000000000000055741503426445100267520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7896a836b627ffe251f7f06554f3b57976f63be10a1e0d50593a332cf412255d file: - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: mypolicy1 - keyword: SET - keyword: ALLOWED_IP_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'192.168.1.0/24'" - comma: ',' - quoted_literal: "'192.168.255.100'" - end_bracket: ) - keyword: BLOCKED_IP_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'192.168.1.99'" end_bracket: ) - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: foo - keyword: RENAME - keyword: TO - naked_identifier: bar - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: foo - keyword: SET - tag_equals: keyword: TAG tag_reference: - naked_identifier: admin - dot: . - naked_identifier: my_tag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'foo'" - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: foo - keyword: UNSET - keyword: TAG - tag_reference: - naked_identifier: admin - dot: .
- naked_identifier: my_tag - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: foo - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: foo - keyword: REMOVE - keyword: ALLOWED_NETWORK_RULE_LIST - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: alter_network_policy_statement: - keyword: ALTER - keyword: NETWORK - keyword: POLICY - naked_identifier: foo - keyword: ADD - keyword: BLOCKED_NETWORK_RULE_LIST - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_password_policy.sql000066400000000000000000000006541503426445100271150ustar00rootroot00000000000000ALTER PASSWORD POLICY password_policy_prod_1 SET PASSWORD_MAX_RETRIES = 3; ALTER PASSWORD POLICY IF EXISTS password_policy_prod_1 RENAME TO password_policy_prod_2; ALTER PASSWORD POLICY IF EXISTS password_policy_prod_1 UNSET TAG foo; ALTER PASSWORD POLICY IF EXISTS password_policy_prod_1 UNSET PASSWORD_MIN_UPPER_CASE_CHARS PASSWORD_MAX_AGE_DAYS; ALTER PASSWORD POLICY IF EXISTS password_policy_prod_1 SET TAG foo = 'bar'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_password_policy.yml000066400000000000000000000045311503426445100271150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 02fe159dc70f164ffe89d02cb52b3cd9705ae1e3fc7db77e3e71c0014795bc29 file: - statement: alter_password_policy_statement: - keyword: ALTER - keyword: PASSWORD - keyword: POLICY - password_policy_reference: naked_identifier: password_policy_prod_1 - keyword: SET - password_policy_options: keyword: PASSWORD_MAX_RETRIES comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: alter_password_policy_statement: - keyword: ALTER - keyword: PASSWORD - keyword: POLICY - keyword: IF - keyword: EXISTS - password_policy_reference: naked_identifier: password_policy_prod_1 - keyword: RENAME - keyword: TO - password_policy_reference: naked_identifier: password_policy_prod_2 - statement_terminator: ; - statement: alter_password_policy_statement: - keyword: ALTER - keyword: PASSWORD - keyword: POLICY - keyword: IF - keyword: EXISTS - password_policy_reference: naked_identifier: password_policy_prod_1 - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: foo - statement_terminator: ; - statement: alter_password_policy_statement: - keyword: ALTER - keyword: PASSWORD - keyword: POLICY - keyword: IF - keyword: EXISTS - password_policy_reference: naked_identifier: password_policy_prod_1 - keyword: UNSET - keyword: PASSWORD_MIN_UPPER_CASE_CHARS - keyword: PASSWORD_MAX_AGE_DAYS - statement_terminator: ; - statement: alter_password_policy_statement: - keyword: ALTER - keyword: PASSWORD - keyword: POLICY - keyword: IF - keyword: EXISTS - password_policy_reference: naked_identifier: password_policy_prod_1 - keyword: SET - tag_equals: keyword: TAG tag_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_pipe.sql000066400000000000000000000011001503426445100246140ustar00rootroot00000000000000alter pipe mypipe refresh prefix = 'd1/'; alter pipe mypipe refresh prefix = 'd1/' modified_after = '2018-07-30T13:56:46-07:00'; alter pipe if exists mypipe refresh; alter pipe mypipe set comment = 'Pipe for North American sales data'; alter pipe mypipe set pipe_execution_paused = true comment = 'Pipe for North American sales data'; alter pipe mypipe set tag tag1 = 'value1', tag2 = 'value2'; alter pipe mypipe unset pipe_execution_paused; alter pipe mypipe unset comment; alter pipe mypipe unset tag foo, bar; ALTER PIPE mypipe SET ERROR_INTEGRATION = my_notification_int; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_pipe.yml000066400000000000000000000074401503426445100246330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb6a849597e4a97cb75e0fd397aeece8f8499e4ae0bf12ba022965fd5d61118c file: - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: refresh - keyword: prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'d1/'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: refresh - keyword: prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'d1/'" - keyword: modified_after - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2018-07-30T13:56:46-07:00'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - keyword: if - keyword: exists - object_reference: naked_identifier: mypipe - keyword: refresh - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Pipe for North American sales data'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - keyword: pipe_execution_paused - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Pipe for North American sales data'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: pipe_execution_paused - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: tag - tag_reference: naked_identifier: foo - comma: ',' - tag_reference: naked_identifier: bar - statement_terminator: ; - statement: alter_pipe_segment: - keyword: ALTER - keyword: PIPE - object_reference: naked_identifier: mypipe - keyword: SET - keyword: ERROR_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_notification_int - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_procedure.sql000066400000000000000000000017501503426445100256620ustar00rootroot00000000000000ALTER PROCEDURE IF EXISTS procedure1(FLOAT) RENAME TO procedure2; ALTER PROCEDURE IF EXISTS procedure1(FLOAT) EXECUTE AS CALLER; ALTER PROCEDURE IF EXISTS procedure1(FLOAT) EXECUTE AS OWNER; ALTER PROCEDURE procedure1(FLOAT_PARAM1 FLOAT) SET COMMENT = 'a_comment'; ALTER PROCEDURE procedure1(FLOAT_PARAM1 FLOAT) SET TAG TAG1 = 'value1', TAG2 = 'value2', TAG3 = 'value3'; ALTER PROCEDURE procedure1() UNSET COMMENT; ALTER PROCEDURE procedure1() UNSET TAG 
TAG1, TAG2, TAG3; ALTER PROCEDURE procedure1(varchar) SET LOG_LEVEL = WARN ; ALTER PROCEDURE procedure1(varchar) SET TRACE_LEVEL = ALWAYS ; ALTER PROCEDURE mirror.procedure1(varchar) SET LOG_LEVEL = WARN, TRACE_LEVEL = ON_EVENT ; ALTER PROCEDURE example_procedure() SET EXTERNAL_ACCESS_INTEGRATIONS = (my_external_access_integration), LOG_LEVEL = DEBUG, TRACE_LEVEL = ON_EVENT, SECRETS = ('cred' = oauth_token) ; ALTER PROCEDURE example_procedure() SET EXTERNAL_ACCESS_INTEGRATIONS = (my_external_access_integration) ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_procedure.yml000066400000000000000000000171201503426445100256620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 46a186355de79a0f247fcd654c90e2060a3d26fde5d13860a12a10a3c2980dd2 file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: procedure2 - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: CALLER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: OWNER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'a_comment'" - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: COMMENT - 
statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - comma: ',' - tag_reference: naked_identifier: TAG2 - comma: ',' - tag_reference: naked_identifier: TAG3 - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: varchar end_bracket: ) - keyword: SET - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: WARN - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: varchar end_bracket: ) - keyword: SET - trace_level_equals: - keyword: TRACE_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: ALWAYS - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: naked_identifier: mirror dot: . function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: varchar end_bracket: ) - keyword: SET - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: WARN - comma: ',' - trace_level_equals: - keyword: TRACE_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: ON_EVENT - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: example_procedure - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: SET - external_access_integration_equals: keyword: EXTERNAL_ACCESS_INTEGRATIONS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( naked_identifier: my_external_access_integration end_bracket: ) - comma: ',' - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: DEBUG - comma: ',' - trace_level_equals: - keyword: TRACE_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: ON_EVENT - comma: ',' - external_access_integration_equals: keyword: SECRETS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( quoted_literal: "'cred'" comparison_operator: raw_comparison_operator: '=' naked_identifier: oauth_token end_bracket: ) - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: example_procedure - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: SET - external_access_integration_equals: keyword: EXTERNAL_ACCESS_INTEGRATIONS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( naked_identifier: my_external_access_integration end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_resource_monitor.sql000066400000000000000000000006651503426445100272740ustar00rootroot00000000000000alter resource monitor limiter set credit_quota=2000 notify_users = (jdoe, "jane smith", "john doe") FREQUENCY=DAILY start_timestamp = immediately end_timestamp = 
'2038-01-19 03:14:07' triggers on 80 percent do notify on 100 percent do suspend_immediate ; ALTER RESOURCE MONITOR limiter SET CREDIT_QUOTA=2000 TRIGGERS ON 80 PERCENT DO NOTIFY ON 100 PERCENT DO SUSPEND_IMMEDIATE ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_resource_monitor.yml000066400000000000000000000047511503426445100272760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f22a97807aed43287718615f1553e6c3190fe12a2182f1c12756d7e9b47ca5eb file: - statement: alter_resource_monitor_statement: - keyword: alter - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: set - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2000' - keyword: notify_users - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: jdoe - comma: ',' - object_reference: quoted_identifier: '"jane smith"' - comma: ',' - object_reference: quoted_identifier: '"john doe"' - end_bracket: ) - keyword: FREQUENCY - comparison_operator: raw_comparison_operator: '=' - keyword: DAILY - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - keyword: end_timestamp - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2038-01-19 03:14:07'" - keyword: triggers - keyword: 'on' - integer_literal: '80' - keyword: percent - keyword: do - keyword: notify - keyword: 'on' - integer_literal: '100' - keyword: percent - keyword: do - keyword: suspend_immediate - statement_terminator: ; - statement: alter_resource_monitor_statement: - keyword: ALTER - keyword: RESOURCE - keyword: MONITOR - object_reference: naked_identifier: limiter - keyword: SET - resource_monitor_options: - keyword: CREDIT_QUOTA - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2000' - keyword: TRIGGERS - keyword: 'ON' - integer_literal: '80' - keyword: PERCENT - keyword: DO - keyword: NOTIFY - keyword: 'ON' - integer_literal: '100' - keyword: PERCENT - keyword: DO - keyword: SUSPEND_IMMEDIATE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_role.sql000066400000000000000000000010641503426445100246310ustar00rootroot00000000000000ALTER ROLE IF EXISTS "test_role" RENAME TO "prod_role"; ALTER ROLE "test_role" RENAME TO "prod_role"; ALTER ROLE IF EXISTS "test_role" SET COMMENT = 'test_comment'; ALTER ROLE IF EXISTS "test_role" UNSET COMMENT; ALTER ROLE "test_role" SET COMMENT = 'test_comment'; ALTER ROLE "test_role" UNSET COMMENT; ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1'; ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1', TAG1 = 'value2', TAG1 = 'value3'; ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1; ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1, TAG2, TAG3; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_role.yml000066400000000000000000000101501503426445100246270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 087a0e455d9e14d4bae84ed573b38e4fcc411914f68a9ed336429258131050a7 file: - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: RENAME - keyword: TO - role_reference: quoted_identifier: '"prod_role"' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: RENAME - keyword: TO - role_reference: quoted_identifier: '"prod_role"' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test_comment'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - role_reference: naked_identifier: COMMENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test_comment'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - role_reference: naked_identifier: COMMENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - tag_equals: keyword: TAG tag_reference: naked_identifier: TAG1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - comma: ',' - tag_reference: naked_identifier: TAG2 - comma: ',' - tag_reference: naked_identifier: TAG3 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_row_access_policy.sql000066400000000000000000000011231503426445100273730ustar00rootroot00000000000000ALTER ROW ACCESS POLICY my_access_policy RENAME TO other_access_policy; ALTER ROW ACCESS POLICY IF EXISTS my_access_policy 
RENAME TO other_access_policy; ALTER ROW ACCESS POLICY my_access_policy SET BODY -> EXISTS(some_val); ALTER ROW ACCESS POLICY my_access_policy SET TAG tag_name = 'tag_value'; ALTER ROW ACCESS POLICY my_access_policy SET TAG tag_name = 'tag_value', tag_name = 'tag_value'; ALTER ROW ACCESS POLICY my_access_policy UNSET TAG tag_name, tag_name; ALTER ROW ACCESS POLICY my_access_policy SET COMMENT = 'comment'; ALTER ROW ACCESS POLICY my_access_policy UNSET COMMENT; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_row_access_policy.yml000066400000000000000000000074261503426445100274110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f16a944d10a274988470446b5648900a38d181e792637c1c14f7f113a07ad575 file: - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: RENAME - keyword: TO - object_reference: naked_identifier: other_access_policy - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_access_policy - keyword: RENAME - keyword: TO - object_reference: naked_identifier: other_access_policy - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: SET - keyword: BODY - function_assigner: -> - expression: function: function_name: function_name_identifier: EXISTS function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_val end_bracket: ) - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: SET - tag_equals: keyword: TAG tag_reference: naked_identifier: tag_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'tag_value'" - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: tag_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag_value'" - comma: ',' - tag_reference: naked_identifier: tag_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag_value'" - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: tag_name - comma: ',' - tag_reference: naked_identifier: tag_name - statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: SET - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'comment'" - 
statement_terminator: ; - statement: alter_row_access_policy_statement: - keyword: ALTER - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_access_policy - keyword: UNSET - keyword: COMMENT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_schema.sql000066400000000000000000000006711503426445100251330ustar00rootroot00000000000000alter schema if exists schema1 rename to schema2; alter schema schema1 swap with schema2; alter schema schema2 enable managed access; alter schema schema1 set data_retention_time_in_days = 3; alter schema schema1 set tag tag1 = 'value1', tag2 = 'value2'; alter schema schema1 unset data_retention_time_in_days; alter schema schema1 unset data_retention_time_in_days, max_data_extension_time_in_days; alter schema schema1 unset tag foo, bar; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_schema.yml000066400000000000000000000056441503426445100251420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c7c8f3b3a77221c8a7a0cb73d92ad964212627ef575955d5952270448f9e589 file: - statement: alter_schema_statement: - keyword: alter - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: schema1 - keyword: rename - keyword: to - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: swap - keyword: with - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema2 - keyword: enable - keyword: managed - keyword: access - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: set - schema_object_properties: keyword: data_retention_time_in_days comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: data_retention_time_in_days - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: data_retention_time_in_days - comma: ',' - keyword: max_data_extension_time_in_days - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: tag - tag_reference: naked_identifier: foo - comma: ',' - tag_reference: naked_identifier: bar - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_sequence.sql000066400000000000000000000007131503426445100255000ustar00rootroot00000000000000--ALTER SEQUENCE IF EXISTS seq RENAME TO seq2; ALTER SEQUENCE seq RENAME TO seq2; ALTER SEQUENCE seq SET INCREMENT BY = 2; ALTER SEQUENCE seq INCREMENT BY = 2; ALTER SEQUENCE seq INCREMENT = 2; ALTER SEQUENCE seq INCREMENT 2; ALTER SEQUENCE seq SET ORDER COMMENT = 'comment'; ALTER SEQUENCE seq SET NOORDER COMMENT = 'comment'; ALTER SEQUENCE seq UNSET COMMENT; ALTER SEQUENCE seq SET INCREMENT BY = 2 ORDER COMMENT = 'comment'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_sequence.yml000066400000000000000000000062621503426445100255070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b36f427279f544c665da0a8d52bf8ad5ec94768f3936e445a1ffb9f460fa6792 file: - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: RENAME - keyword: TO - sequence_reference: naked_identifier: seq2 - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: NOORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_session_set_timezone.sql

ALTER SESSION SET TIMEZONE = 'UTC'

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_session_set_timezone.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e44effbdc1477b40271550d6dd33f2246be46bf0e5c50e56c34dab430b4eccd3
file:
  statement:
    alter_session_statement:
    - keyword: ALTER
    - keyword: SESSION
    - alter_session_set_statement:
        keyword: SET
        parameter: TIMEZONE
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'UTC'"

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_session_unset_parameters.sql

ALTER SESSION UNSET TIME_OUTPUT_FORMAT, TWO_DIGIT_CENTURY_START;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_session_unset_parameters.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 11408034dc3c2bf4379085bffc0c58769e8760046792bbb44be74840155d483a
file:
  statement:
    alter_session_statement:
    - keyword: ALTER
    - keyword: SESSION
    - alter_session_unset_clause:
      - keyword: UNSET
      - parameter: TIME_OUTPUT_FORMAT
      - comma: ','
      - parameter: TWO_DIGIT_CENTURY_START
  statement_terminator: ;
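Note the structural difference between the two session fixtures: a file with a single parsed statement serialises `file:` as a mapping, while multi-statement files use a list. A sketch that handles both shapes when loading a fixture, assuming PyYAML is installed and the path above is run from the repository root:

import yaml

with open("test/fixtures/dialects/snowflake/alter_session_unset_parameters.yml") as f:
    tree = yaml.safe_load(f)

# Normalise the two serialisations to a list of nodes before inspecting.
nodes = tree["file"] if isinstance(tree["file"], list) else [tree["file"]]
print([key for node in nodes for key in node])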
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_share.sql

ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1;
ALTER SHARE IF EXISTS MY_SHARE ADD ACCOUNTS = my_account_1;
ALTER SHARE MY_SHARE REMOVE ACCOUNTS = my_account_1;
ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2;
ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2, my_account_3;
ALTER SHARE MY_SHARE SET TAG tag1 = 'value1';
ALTER SHARE IF EXISTS MY_SHARE SET TAG tag1 = 'value1', tag2 = 'value2';
ALTER SHARE MY_SHARE UNSET TAG tag1;
ALTER SHARE MY_SHARE UNSET TAG tag1, tag2;
ALTER SHARE MY_SHARE UNSET COMMENT;
ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1 SHARE_RESTRICTIONS = TRUE;
ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2 SHARE_RESTRICTIONS = FALSE;
ALTER SHARE MY_SHARE SET ACCOUNTS = my_account_1 COMMENT = 'my_comment';
ALTER SHARE IF EXISTS MY_SHARE SET ACCOUNTS = my_account_1, my_account_2 COMMENT = 'my_comment';
ALTER SHARE my_share ADD ACCOUNTS = org.account;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_share.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8d2c7ce2989b3d13f324fc71c0d0042f957442a4696b7eef728f14eacbd28526
file:
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - keyword: IF
    - keyword: EXISTS
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: REMOVE
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - comma: ','
    - object_reference:
        naked_identifier: my_account_2
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - comma: ','
    - object_reference:
        naked_identifier: my_account_2
    - comma: ','
    - object_reference:
        naked_identifier: my_account_3
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: SET
    - tag_equals:
        keyword: TAG
        tag_reference:
          naked_identifier: tag1
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'value1'"
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - keyword: IF
    - keyword: EXISTS
    - naked_identifier: MY_SHARE
    - keyword: SET
    - tag_equals:
      - keyword: TAG
      - tag_reference:
          naked_identifier: tag1
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'value1'"
      - comma: ','
      - tag_reference:
          naked_identifier: tag2
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'value2'"
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: UNSET
    - keyword: TAG
    - tag_reference:
        naked_identifier: tag1
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: UNSET
    - keyword: TAG
    - tag_reference:
        naked_identifier: tag1
    - comma: ','
    - tag_reference:
        naked_identifier: tag2
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: UNSET
    - keyword: COMMENT
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - keyword: SHARE_RESTRICTIONS
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'TRUE'
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - comma: ','
    - object_reference:
        naked_identifier: my_account_2
    - keyword: SHARE_RESTRICTIONS
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'FALSE'
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: MY_SHARE
    - keyword: SET
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'my_comment'"
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - keyword: IF
    - keyword: EXISTS
    - naked_identifier: MY_SHARE
    - keyword: SET
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_account_1
    - comma: ','
    - object_reference:
        naked_identifier: my_account_2
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'my_comment'"
- statement_terminator: ;
- statement:
    alter_share_statement:
    - keyword: ALTER
    - keyword: SHARE
    - naked_identifier: my_share
    - keyword: ADD
    - keyword: ACCOUNTS
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
      - naked_identifier: org
      - dot: .
      - naked_identifier: account
- statement_terminator: ;
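A sketch of linting one of the share statements above with the Snowflake dialect, assuming the simple API's `lint` function (it returns a list of violation records; an empty list means the statement is clean under the active rules):

import sqlfluff

violations = sqlfluff.lint(
    "ALTER SHARE my_share ADD ACCOUNTS = org.account;",
    dialect="snowflake",
)
print(violations)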
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_stage.sql

ALTER STAGE my_int_stage RENAME TO new_int_stage;
ALTER STAGE my_ext_stage SET URL='s3://loading/files/new/' COPY_OPTIONS = (ON_ERROR='skip_file');
ALTER STAGE my_ext_stage SET STORAGE_INTEGRATION = myint;
ALTER STAGE my_ext_stage SET CREDENTIALS=(AWS_KEY_ID='d4c3b2a1' AWS_SECRET_KEY='z9y8x7w6');
ALTER STAGE my_ext_stage3 SET ENCRYPTION=(TYPE='AWS_SSE_S3');
ALTER STAGE mystage REFRESH;
ALTER STAGE mystage REFRESH SUBPATH = 'data';

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_stage.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 77bfa50c6c13db3e9b515cec5d088d3c4590e1fbddbf72eb2aa9ffc6c7c1c73a
file:
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: my_int_stage
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: new_int_stage
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: my_ext_stage
    - keyword: SET
    - keyword: URL
    - comparison_operator:
        raw_comparison_operator: '='
    - bucket_path: "'s3://loading/files/new/'"
    - keyword: COPY_OPTIONS
    - comparison_operator:
        raw_comparison_operator: '='
    - bracketed:
        start_bracket: (
        copy_options:
          keyword: ON_ERROR
          comparison_operator:
            raw_comparison_operator: '='
          copy_on_error_option: "'skip_file'"
        end_bracket: )
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: my_ext_stage
    - keyword: SET
    - stage_parameters:
        keyword: STORAGE_INTEGRATION
        comparison_operator:
          raw_comparison_operator: '='
        object_reference:
          naked_identifier: myint
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: my_ext_stage
    - keyword: SET
    - stage_parameters:
        keyword: CREDENTIALS
        comparison_operator:
          raw_comparison_operator: '='
        bracketed:
        - start_bracket: (
        - keyword: AWS_KEY_ID
        - comparison_operator:
            raw_comparison_operator: '='
        - quoted_literal: "'d4c3b2a1'"
        - keyword: AWS_SECRET_KEY
        - comparison_operator:
            raw_comparison_operator: '='
        - quoted_literal: "'z9y8x7w6'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: my_ext_stage3
    - keyword: SET
    - stage_parameters:
        keyword: ENCRYPTION
        comparison_operator:
          raw_comparison_operator: '='
        bracketed:
          start_bracket: (
          keyword: TYPE
          comparison_operator:
            raw_comparison_operator: '='
          stage_encryption_option: "'AWS_SSE_S3'"
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: mystage
    - keyword: REFRESH
- statement_terminator: ;
- statement:
    alter_stage_statement:
    - keyword: ALTER
    - keyword: STAGE
    - object_reference:
        naked_identifier: mystage
    - keyword: REFRESH
    - keyword: SUBPATH
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'data'"
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_storage_integration.sql

alter storage integration test_integration set tag tag1 = 'value1';
alter storage integration test_integration set tag tag1 = 'value1', tag2 = 'value2';
alter storage integration test_integration set comment = 'test comment';
alter storage integration test_integration unset comment;
alter storage integration test_integration unset tag tag1, tag2;
alter storage integration if exists test_integration unset tag tag1, tag2;
alter storage integration test_integration unset enabled;
alter storage integration test_integration unset comment;
alter storage integration test_integration unset storage_blocked_locations;
alter storage integration test_integration set enabled = true;
alter storage integration test_integration set enabled = false comment = 'test comment';
alter storage integration test_integration set comment = 'test comment' enabled = false;
alter storage integration test_integration set storage_aws_role_arn =
'test_role_arn';
alter storage integration test_integration set storage_aws_object_acl = 'test_object_acl';
alter storage integration test_integration set azure_tenant_id = 'test_azure_tenant_id';
alter storage integration s3_int set
storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
enabled = true
storage_allowed_locations = (
    's3://mybucket1',
    's3://mybucket2/'
);
alter storage integration gcs_int set
enabled = true
storage_allowed_locations = (
    'gcs://mybucket1/path1/',
    'gcs://mybucket2/path2/'
);
alter storage integration azure_int set
enabled = true
azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
storage_allowed_locations = (
    'azure://myaccount.blob.core.windows.net/mycontainer/path1/',
    'azure://myaccount.blob.core.windows.net/mycontainer/path2/'
);
alter storage integration s3_int set
storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
enabled = true
storage_allowed_locations = ('*')
storage_blocked_locations = (
    's3://mybucket3/path3/',
    's3://mybucket4/path4/'
);
alter storage integration gcs_int set
enabled = true
storage_allowed_locations = ('*')
storage_blocked_locations = (
    'gcs://mybucket3/path3/',
    'gcs://mybucket4/path4/'
);
alter storage integration azure_int set
enabled = true
azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
storage_allowed_locations = ('*')
storage_blocked_locations = (
    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
);
alter storage integration azure_int set
enabled = true
comment = 'test_comment'
azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
storage_allowed_locations = ('*')
storage_blocked_locations = (
    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
);
alter storage integration if exists azure_int set
enabled = true
comment = 'test_comment'
azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
storage_allowed_locations = ('*')
storage_blocked_locations = (
    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
)
use_privatelink_endpoint = true;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_storage_integration.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b97c860b46d977ac6f8bd9c7b9c467436770b9cf6f4c22a0e02cb83ae05426d2 file: - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - tag_equals: keyword: tag tag_reference: naked_identifier: tag1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test comment'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: tag - tag_reference: naked_identifier: tag1 - comma: ',' - tag_reference: naked_identifier: tag2 - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - keyword: if - keyword: exists - object_reference: naked_identifier: test_integration - keyword: unset - keyword: tag - tag_reference: naked_identifier: tag1 - comma: ',' - tag_reference: naked_identifier: tag2 - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: enabled - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: storage_blocked_locations - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test comment'" - 
statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test comment'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_role_arn'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: storage_aws_object_acl - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_object_acl'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_azure_tenant_id'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: set - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: set - keyword: 
storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket3/path3/'" - comma: ',' - bucket_path: "'s3://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket3/path3/'" - comma: ',' - bucket_path: "'gcs://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test_comment'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - keyword: if - keyword: exists - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - 
comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test_comment'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - keyword: use_privatelink_endpoint - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ;
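Roughly what the dialect test suite automates for the fixtures in this directory: try to parse every .sql file and report failures. A simplified sketch (error handling is deliberately coarse, and the real suite also compares results against the .yml trees):

from pathlib import Path
import sqlfluff

for path in sorted(Path("test/fixtures/dialects/snowflake").glob("*.sql")):
    try:
        sqlfluff.parse(path.read_text(), dialect="snowflake")
        print(f"OK   {path.name}")
    except Exception as exc:  # the API raises on unparsable SQL
        print(f"FAIL {path.name}: {exc}")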
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_stream.sql

alter stream mystream set comment = 'New comment for stream';
alter stream if exists mystream set tag mytag='myvalue';
ALTER STREAM IF EXISTS mystream SET
    APPEND_ONLY = FALSE
    TAG mytag1='myvalue1', mytag2 = 'myvalue2'
    COMMENT = 'amazing comment';
ALTER STREAM IF EXISTS mystream SET
    INSERT_ONLY = TRUE
    COMMENT = 'amazing comment';
alter stream mystream unset comment;
alter stream mystream unset tag mytag1, mytag2;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_stream.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7690a0551c1b067a37f2166c3c2afc249346c04d57824710e98d112ff6d9480d
file:
- statement:
    alter_stream_statement:
    - keyword: alter
    - keyword: stream
    - object_reference:
        naked_identifier: mystream
    - keyword: set
    - comment_equals_clause:
        keyword: comment
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'New comment for stream'"
- statement_terminator: ;
- statement:
    alter_stream_statement:
    - keyword: alter
    - keyword: stream
    - keyword: if
    - keyword: exists
    - object_reference:
        naked_identifier: mystream
    - keyword: set
    - tag_equals:
        keyword: tag
        tag_reference:
          naked_identifier: mytag
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'myvalue'"
- statement_terminator: ;
- statement:
    alter_stream_statement:
    - keyword: ALTER
    - keyword: STREAM
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: mystream
    - keyword: SET
    - keyword: APPEND_ONLY
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'FALSE'
    - tag_equals:
      - keyword: TAG
      - tag_reference:
          naked_identifier: mytag1
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'myvalue1'"
      - comma: ','
      - tag_reference:
          naked_identifier: mytag2
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'myvalue2'"
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'amazing comment'"
- statement_terminator: ;
- statement:
    alter_stream_statement:
    - keyword: ALTER
    - keyword: STREAM
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: mystream
    - keyword: SET
    - keyword: INSERT_ONLY
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'TRUE'
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'amazing comment'"
- statement_terminator: ;
- statement:
    alter_stream_statement:
    - keyword: alter
    - keyword: stream
    - object_reference:
        naked_identifier: mystream
    - keyword: unset
    - keyword: comment
- statement_terminator: ;
- statement:
    alter_stream_statement:
    - keyword: alter
    - keyword: stream
    - object_reference:
        naked_identifier: mystream
    - keyword: unset
    - keyword: tag
    - tag_reference:
        naked_identifier: mytag1
    - comma: ','
    - tag_reference:
        naked_identifier: mytag2
- statement_terminator: ;
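A sketch of auto-formatting one of the stream statements, assuming the simple API's `fix` function returns the rewritten SQL as a string; which rewrites apply depends on the active rule configuration, so the output here is illustrative only:

import sqlfluff

fixed = sqlfluff.fix(
    "alter stream mystream unset tag mytag1, mytag2;",
    dialect="snowflake",
)
print(fixed)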
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_streamlit.sql

ALTER STREAMLIT my_streamlit SET
    ROOT_LOCATION = '@stage_name/folder'
    MAIN_FILE = 'main.py';
ALTER STREAMLIT my_streamlit SET
    ROOT_LOCATION = '@stage_name/folder'
    MAIN_FILE = 'main.py'
    QUERY_WAREHOUSE = my_wh;
ALTER STREAMLIT my_streamlit SET
    ROOT_LOCATION = '@stage_name/folder'
    MAIN_FILE = 'main.py'
    QUERY_WAREHOUSE = my_wh
    comment = 'New comment for stream';
ALTER STREAMLIT my_streamlit RENAME TO new_my_streamlit;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_streamlit.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b063729282b81f6b182c5c8bb978919f1e618d5d157e283e8529f98b0a0a375b
file:
- statement:
    alter_streamlit_statement:
    - keyword: ALTER
    - keyword: STREAMLIT
    - object_reference:
        naked_identifier: my_streamlit
    - keyword: SET
    - keyword: ROOT_LOCATION
    - comparison_operator:
        raw_comparison_operator: '='
    - stage_path: "'@stage_name/folder'"
    - keyword: MAIN_FILE
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'main.py'"
- statement_terminator: ;
- statement:
    alter_streamlit_statement:
    - keyword: ALTER
    - keyword: STREAMLIT
    - object_reference:
        naked_identifier: my_streamlit
    - keyword: SET
    - keyword: ROOT_LOCATION
    - comparison_operator:
        raw_comparison_operator: '='
    - stage_path: "'@stage_name/folder'"
    - keyword: MAIN_FILE
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'main.py'"
    - keyword: QUERY_WAREHOUSE
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_wh
- statement_terminator: ;
- statement:
    alter_streamlit_statement:
    - keyword: ALTER
    - keyword: STREAMLIT
    - object_reference:
        naked_identifier: my_streamlit
    - keyword: SET
    - keyword: ROOT_LOCATION
    - comparison_operator:
        raw_comparison_operator: '='
    - stage_path: "'@stage_name/folder'"
    - keyword: MAIN_FILE
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'main.py'"
    - keyword: QUERY_WAREHOUSE
    - comparison_operator:
        raw_comparison_operator: '='
    - object_reference:
        naked_identifier: my_wh
    - comment_equals_clause:
        keyword: comment
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'New comment for stream'"
- statement_terminator: ;
- statement:
    alter_streamlit_statement:
    - keyword: ALTER
    - keyword: STREAMLIT
    - object_reference:
        naked_identifier: my_streamlit
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: new_my_streamlit
- statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table.sql

ALTER TABLE my_old_table RENAME TO my_new_table;
ALTER TABLE my_existing_table SWAP WITH my_another_table;
ALTER TABLE my_existing_table ADD SEARCH OPTIMIZATION;
ALTER TABLE my_existing_table DROP SEARCH OPTIMIZATION;
ALTER TABLE my_table SET DATA_RETENTION_TIME_IN_DAYS = 30;
ALTER TABLE my_table SET DEFAULT_DDL_COLLATION = 'en-ci';
ALTER TABLE my_table SET COMMENT = 'my table comment';
ALTER TABLE table1 ADD CONSTRAINT constraint1 PRIMARY KEY ( col1 );
ALTER TABLE table1 ADD CONSTRAINT constraint1 PRIMARY KEY ( col1 ) ENFORCED VALIDATE RELY;
ALTER TABLE table1 ADD CONSTRAINT constraint1 PRIMARY KEY ( col1 ) NOT ENFORCED NOVALIDATE NORELY;
ALTER TABLE table1 ADD CONSTRAINT constraint1 UNIQUE ( col1 );
ALTER TABLE table1 ADD CONSTRAINT constraint1 UNIQUE ( col1 ) ENFORCED VALIDATE RELY;
ALTER TABLE table1 ADD CONSTRAINT constraint1 UNIQUE ( col1 ) NOT ENFORCED NOVALIDATE NORELY;
ALTER TABLE table1 ADD CONSTRAINT "constraint1" PRIMARY KEY ( col1 );
ALTER TABLE table1 ADD CONSTRAINT "constraint1" PRIMARY KEY ( col1, col2 );
ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 );
ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 ) ENFORCED VALIDATE RELY;
ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 ) NOT ENFORCED NOVALIDATE NORELY;
ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 );
ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 )
REFERENCES "schema1"."table1" ("col2"); ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 ) REFERENCES "schema1"."table1" ( col1, col2 ); ALTER TABLE table1 DROP CONSTRAINT constraint1 UNIQUE pk_col, pk_col2; ALTER TABLE table1 RENAME CONSTRAINT constraint1 TO constraint2; ALTER TABLE "ADW_TEMP"."FRUIT_PRICE_SAT" ADD CONSTRAINT "FK_2" FOREIGN KEY ("SPECIAL_OFFER_ID") REFERENCES "ADW_TEMP"."OFFER_SAT" ("SPECIAL_OFFER_ID"); ALTER TABLE "my_table" ALTER COLUMN "my_column" SET MASKING POLICY my_masking_policy FORCE; ALTER TABLE SAMPLE_DB.SAMPLE_SCHEMA.TBL UNSET COMMENT; ALTER TABLE table1 UNSET COMMENT, DATA_RETENTION_TIME_IN_DAYS; ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY (address) REFERENCES addresses (address) ON DELETE RESTRICT ON UPDATE CASCADE; ALTER TABLE table2 ADD CONSTRAINT constraint2 FOREIGN KEY (address) REFERENCES addresses (address) ON DELETE NO ACTION ON UPDATE SET NULL; ALTER TABLE table3 ADD CONSTRAINT constraint3 FOREIGN KEY (address) REFERENCES addresses (address) ON DELETE SET DEFAULT; ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY (address) REFERENCES addresses (address) MATCH FULL; ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY (address) REFERENCES addresses (address) MATCH SIMPLE; ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY (address) REFERENCES addresses (address) MATCH PARTIAL; ALTER TABLE table1 DROP CONSTRAINT my_constraint; ALTER TABLE my_table SET TAG tag1 = "some_value"; ALTER TABLE my_table SET TAG tag1 = "some_value", tag2 = "some_value", tag3 = "some_value", tag4 = "some_value"; ALTER TABLE my_table UNSET TAG tag1 = "some_value"; ALTER TABLE my_table UNSET TAG tag1 = "some_value", tag2 = "some_value", tag3 = "some_value", tag4 = "some_value"; ALTER TABLE my_table ADD ROW ACCESS POLICY my_policy ON (col1); ALTER TABLE my_table ADD ROW ACCESS POLICY my_policy ON (col1, col2, col3); ALTER TABLE my_table DROP ROW ACCESS POLICY my_policy; ALTER TABLE my_table DROP ROW ACCESS POLICY my_policy, ADD ROW ACCESS POLICY my_policy ON (col1); ALTER TABLE my_table DROP ROW ACCESS POLICY my_policy, ADD ROW ACCESS POLICY my_policy ON (col1, col2, col3); ALTER TABLE my_table DROP ALL ROW ACCESS POLICIES; ALTER TABLE my_table SET AGGREGATION POLICY my_policy; ALTER TABLE my_table SET AGGREGATION POLICY my_policy ENTITY KEY (col1); ALTER TABLE my_table SET AGGREGATION POLICY my_policy ENTITY KEY (col1, col2, col3); ALTER TABLE my_table SET AGGREGATION POLICY my_policy FORCE; ALTER TABLE my_table SET AGGREGATION POLICY my_policy ENTITY KEY (col1) FORCE; ALTER TABLE my_table UNSET AGGREGATION POLICY; ALTER TABLE my_table SET JOIN POLICY my_policy; ALTER TABLE my_table SET JOIN POLICY my_policy FORCE; ALTER TABLE my_table UNSET JOIN POLICY; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table.yml000066400000000000000000000736721503426445100247770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e6062d5b4f5b9805e899563cd98d55bec1e0357eeef37ed3d4fa116a10b5af35 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_old_table - keyword: RENAME - keyword: TO - table_reference: naked_identifier: my_new_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: SWAP - keyword: WITH - table_reference: naked_identifier: my_another_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: ADD - keyword: SEARCH - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: DROP - keyword: SEARCH - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: DATA_RETENTION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en-ci'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my table comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: ENFORCED - keyword: VALIDATE - keyword: RELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: NOT - keyword: ENFORCED - keyword: NOVALIDATE - keyword: NORELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: UNIQUE - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: 
naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: UNIQUE - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: ENFORCED - keyword: VALIDATE - keyword: RELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: UNIQUE - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: NOT - keyword: ENFORCED - keyword: NOVALIDATE - keyword: NORELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: ENFORCED - keyword: VALIDATE - keyword: RELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: NOT - keyword: ENFORCED - keyword: NOVALIDATE - keyword: NORELY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: 
keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"schema1"' - dot: . - quoted_identifier: '"table1"' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"col2"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"schema1"' - dot: . - quoted_identifier: '"table1"' - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: DROP - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: UNIQUE - column_reference: naked_identifier: pk_col - comma: ',' - column_reference: naked_identifier: pk_col2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: RENAME - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: TO - naked_identifier: constraint2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '"ADW_TEMP"' - dot: . - quoted_identifier: '"FRUIT_PRICE_SAT"' - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - quoted_identifier: '"FK_2"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '"SPECIAL_OFFER_ID"' end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"ADW_TEMP"' - dot: . - quoted_identifier: '"OFFER_SAT"' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"SPECIAL_OFFER_ID"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '"my_table"' - alter_table_table_column_action: - keyword: ALTER - keyword: COLUMN - column_reference: quoted_identifier: '"my_column"' - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_masking_policy - keyword: FORCE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: SAMPLE_DB - dot: . 
- naked_identifier: SAMPLE_SCHEMA - dot: . - naked_identifier: TBL - keyword: UNSET - parameter: COMMENT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - keyword: UNSET - parameter: COMMENT - comma: ',' - parameter: DATA_RETENTION_TIME_IN_DAYS - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table2 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint2 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: 'NO' - keyword: ACTION - keyword: 'ON' - keyword: UPDATE - keyword: SET - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table3 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint3 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: MATCH - keyword: FULL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: MATCH - keyword: SIMPLE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER 
- keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: addresses - bracketed: start_bracket: ( column_reference: naked_identifier: address end_bracket: ) - keyword: MATCH - keyword: PARTIAL - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_table_column_action: - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: my_constraint - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: keyword: SET tag_equals: keyword: TAG tag_reference: naked_identifier: tag1 comparison_operator: raw_comparison_operator: '=' quoted_literal: '"some_value"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: keyword: SET tag_equals: - keyword: TAG - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag4 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: keyword: UNSET tag_equals: keyword: TAG tag_reference: naked_identifier: tag1 comparison_operator: raw_comparison_operator: '=' quoted_literal: '"some_value"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: keyword: UNSET tag_equals: - keyword: TAG - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - comma: ',' - tag_reference: naked_identifier: tag4 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"some_value"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: ADD - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - bracketed: start_bracket: ( object_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - 
data_governance_policy_tag_action_segment: - keyword: ADD - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - bracketed: - start_bracket: ( - object_reference: naked_identifier: col1 - comma: ',' - object_reference: naked_identifier: col2 - comma: ',' - object_reference: naked_identifier: col3 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - comma: ',' - keyword: ADD - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - bracketed: start_bracket: ( object_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - comma: ',' - keyword: ADD - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - bracketed: - start_bracket: ( - object_reference: naked_identifier: col1 - comma: ',' - object_reference: naked_identifier: col2 - comma: ',' - object_reference: naked_identifier: col3 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: DROP - keyword: ALL - keyword: ROW - keyword: ACCESS - keyword: POLICIES - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: SET - keyword: AGGREGATION - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: SET - keyword: AGGREGATION - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: ENTITY - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - keyword: SET - keyword: AGGREGATION - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: ENTITY - keyword: KEY - bracketed: - start_bracket: ( - object_reference: naked_identifier: col1 - comma: ',' - object_reference: naked_identifier: col2 - comma: ',' - object_reference: naked_identifier: col3 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - data_governance_policy_tag_action_segment: - 
      - keyword: SET
      - keyword: AGGREGATION
      - keyword: POLICY
      - object_reference:
          naked_identifier: my_policy
      - keyword: FORCE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - data_governance_policy_tag_action_segment:
      - keyword: SET
      - keyword: AGGREGATION
      - keyword: POLICY
      - object_reference:
          naked_identifier: my_policy
      - keyword: ENTITY
      - keyword: KEY
      - bracketed:
          start_bracket: (
          object_reference:
            naked_identifier: col1
          end_bracket: )
      - keyword: FORCE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - data_governance_policy_tag_action_segment:
      - keyword: UNSET
      - keyword: AGGREGATION
      - keyword: POLICY
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - data_governance_policy_tag_action_segment:
      - keyword: SET
      - keyword: JOIN
      - keyword: POLICY
      - object_reference:
          naked_identifier: my_policy
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - data_governance_policy_tag_action_segment:
      - keyword: SET
      - keyword: JOIN
      - keyword: POLICY
      - object_reference:
          naked_identifier: my_policy
      - keyword: FORCE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - data_governance_policy_tag_action_segment:
      - keyword: UNSET
      - keyword: JOIN
      - keyword: POLICY
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table_clustering_action.sql000066400000000000000000000010761503426445100305560ustar00rootroot00000000000000
ALTER TABLE my_table CLUSTER BY (c1, c2);
ALTER TABLE my_table CLUSTER BY (to_date(c1), substring(c2, 0, 10));
ALTER TABLE my_table CLUSTER BY (v:"Data":id::number);
ALTER TABLE my_table RECLUSTER;
ALTER TABLE my_table RECLUSTER MAX_SIZE = 100;
ALTER TABLE my_table RECLUSTER WHERE create_date BETWEEN ('2016-01-01') AND ('2016-01-07');
ALTER TABLE my_table RECLUSTER MAX_SIZE = 100 WHERE create_date BETWEEN ('2016-01-01') AND ('2016-01-07');
ALTER TABLE my_table SUSPEND RECLUSTER;
ALTER TABLE my_table RESUME RECLUSTER;
ALTER TABLE my_table DROP CLUSTERING KEY;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table_clustering_action.yml000066400000000000000000000133711503426445100305610ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dbefed354133b87f63a0b46676c057e1f6b04f62b6258c8446426115d41e303f
file:
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: CLUSTER
      - keyword: BY
      - bracketed:
        - start_bracket: (
        - expression:
            column_reference:
              naked_identifier: c1
        - comma: ','
        - expression:
            column_reference:
              naked_identifier: c2
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: CLUSTER
      - keyword: BY
      - bracketed:
        - start_bracket: (
        - expression:
            function:
              function_name:
                function_name_identifier: to_date
              function_contents:
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: c1
                  end_bracket: )
        - comma: ','
        - expression:
            function:
              function_name:
                function_name_identifier: substring
              function_contents:
                bracketed:
                - start_bracket: (
                - expression:
                    column_reference:
                      naked_identifier: c2
                - comma: ','
                - expression:
                    numeric_literal: '0'
                - comma: ','
                - expression:
                    numeric_literal: '10'
                - end_bracket: )
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: CLUSTER
      - keyword: BY
      - bracketed:
          start_bracket: (
          expression:
            cast_expression:
              column_reference:
                naked_identifier: v
              semi_structured_expression:
              - colon: ':'
              - semi_structured_element: '"Data"'
              - colon: ':'
              - semi_structured_element: id
              casting_operator: '::'
              data_type:
                data_type_identifier: number
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
        keyword: RECLUSTER
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: RECLUSTER
      - keyword: MAX_SIZE
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '100'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
        keyword: RECLUSTER
        where_clause:
          keyword: WHERE
          expression:
          - column_reference:
              naked_identifier: create_date
          - keyword: BETWEEN
          - bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'2016-01-01'"
              end_bracket: )
          - keyword: AND
          - bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'2016-01-07'"
              end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: RECLUSTER
      - keyword: MAX_SIZE
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '100'
      - where_clause:
          keyword: WHERE
          expression:
          - column_reference:
              naked_identifier: create_date
          - keyword: BETWEEN
          - bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'2016-01-01'"
              end_bracket: )
          - keyword: AND
          - bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'2016-01-07'"
              end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: SUSPEND
      - keyword: RECLUSTER
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: RESUME
      - keyword: RECLUSTER
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_clustering_action:
      - keyword: DROP
      - keyword: CLUSTERING
      - keyword: KEY
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table_column.sql000066400000000000000000000110731503426445100263350ustar00rootroot00000000000000
-- ADD column
---- Base cases
ALTER TABLE my_table ADD COLUMN my_column INTEGER;
ALTER TABLE my_table ADD COLUMN my_column VARCHAR(5000) NOT NULL;
ALTER TABLE my_table ADD COLUMN IF NOT EXISTS my_column INTEGER;
------ Multiple columns
ALTER TABLE my_table ADD COLUMN column_1 varchar, column_2 integer;
ALTER TABLE my_table ADD COLUMN IF NOT EXISTS column_1 varchar, IF NOT EXISTS column_2 integer;
ALTER TABLE my_table ADD COLUMN IF NOT EXISTS column_1 varchar, column_2 integer;
---- Default, auto-increment & identity
ALTER TABLE my_table ADD COLUMN my_column INTEGER DEFAULT 1;
ALTER TABLE my_table ADD COLUMN my_column INTEGER AUTOINCREMENT;
ALTER TABLE my_table ADD COLUMN my_column INTEGER IDENTITY;
ALTER TABLE my_table ADD COLUMN my_column INTEGER AUTOINCREMENT (10000, 1);
ALTER TABLE my_table ADD COLUMN my_column INTEGER IDENTITY START 10000 INCREMENT 1;
---- Masking Policy
ALTER TABLE my_table ADD COLUMN my_column INTEGER MASKING POLICY my_policy;
ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY my_policy;
ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY adatabase.aschema.apolicy;
ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY my_policy USING(my_column, my_column > 10);
-- comment
ALTER TABLE reporting_tbl ADD COLUMN reporting_group VARCHAR COMMENT 'internal reporting group defined by DE team';
-- without the word COLUMN
ALTER TABLE rpt_enc_table ADD encounter_count INTEGER COMMENT 'count of encounters past year' ;
-- Rename column
ALTER TABLE empl_info RENAME COLUMN old_col_name TO new_col_name;
-- Alter-modify column(s)
---- Base cases
------ Single column
ALTER TABLE t1 alter column c1 drop not null;
ALTER TABLE t1 alter c5 comment '50 character column';
------ Multiple columns/properties
ALTER TABLE t1 modify c2 drop default, c3 set default seq5.nextval ;
ALTER TABLE t1 alter c4 set data type varchar(50), column c4 drop default;
---- Set Masking Policy
------ Single column
ALTER TABLE xxxx.example_table MODIFY COLUMN employeeCode SET MASKING POLICY example_MASKING_POLICY;
ALTER TABLE aschema.atable MODIFY COLUMN acolumn SET MASKING POLICY adatabase.aschema.apolicy;
ALTER TABLE empl_info modify column empl_id set masking policy mask_empl_id;
ALTER TABLE empl_info modify column empl_id set masking policy mask_empl_id using(empl_id, empl_id > 10);
------ Multiple columns
ALTER TABLE empl_info modify column empl_id set masking policy mask_empl_id
    , column empl_dob set masking policy mask_empl_dob
;
---- Unset masking policy
------ Single column
ALTER TABLE empl_info modify column empl_id unset masking policy;
------ Multiple columns
ALTER TABLE empl_info modify column empl_id unset masking policy
    , column empl_dob unset masking policy
;
--- Set Tag
ALTER TABLE my_table MODIFY COLUMN my_column SET TAG my_tag = 'tagged';
--- Unset Tag
ALTER TABLE my_table MODIFY COLUMN my_column UNSET TAG my_tag;
-- Drop column
ALTER TABLE empl_info DROP COLUMN my_column;
ALTER TABLE some_schema.empl_info DROP
COLUMN my_column;
ALTER TABLE my_table DROP COLUMN column_1, column_2, column_3;
-- Drop column if exists
ALTER TABLE demo_db.public DROP column IF EXISTS public_name, IF EXISTS description_text, IF EXISTS type_alias;
ALTER TABLE demo_db.public DROP column public_name, description_text, type_alias;
ALTER TABLE demo_db.public DROP public_name, description_text, type_alias;
ALTER TABLE demo_db.public DROP IF EXISTS public_name, IF EXISTS description_text, IF EXISTS type_alias;
-- IF EXISTS
ALTER TABLE IF EXISTS my_table ADD COLUMN my_column INTEGER;
ALTER TABLE IF EXISTS empl_info DROP COLUMN my_column;
ALTER TABLE IF EXISTS empl_info DROP my_column;
ALTER TABLE IF EXISTS empl_info RENAME COLUMN old_col_name TO new_col_name;
-- DROP PRIMARY KEY
ALTER TABLE my_schema.my_table drop PRIMARY KEY;
-- ADD PRIMARY KEY
ALTER TABLE my_schema.my_table ADD PRIMARY KEY(TABLE_ID);
-- ADD Virtual/Calculated columns
ALTER TABLE some_schema.some_table ADD some_column_upr VARCHAR AS UPPER(some_column) COMMENT 'This is a virtual column';
ALTER TABLE some_schema.some_table ADD column IF NOT EXISTS some_other_column_upr VARCHAR AS UPPER(some_column) || 'some characters' || LOWER(some_column);
ALTER TABLE some_schema.some_table ADD column IF NOT EXISTS some_column_upr VARCHAR AS (UPPER(some_column));
ALTER TABLE some_schema.some_table ADD column IF NOT EXISTS some_event_date_time_utc TIMESTAMP AS (IFF(is_condition_true AND TRY_TO_NUMBER(some_text_value) IS NOT NULL, TO_TIMESTAMP(SUBSTR(some_text_value, 5, 13)), '1900-01-01'));
ALTER TABLE IF EXISTS table1 ADD COLUMN IF NOT EXISTS some_column INTEGER NOT NULL;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_table_column.yml000066400000000000000000000713321503426445100263430ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3e6ca435299c839369d3279c1b959380dd8ef4e2417c98d4c48f029fb102958a file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5000' end_bracket: ) - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: column_1 - data_type: data_type_identifier: varchar - comma: ',' - column_reference: naked_identifier: column_2 - data_type: data_type_identifier: integer - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: column_1 - data_type: data_type_identifier: varchar - comma: ',' - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: column_2 - data_type: data_type_identifier: integer - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: column_1 - data_type: data_type_identifier: varchar - comma: ',' - column_reference: naked_identifier: column_2 - data_type: data_type_identifier: integer - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: AUTOINCREMENT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: IDENTITY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER 
- keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: AUTOINCREMENT - bracketed: - start_bracket: ( - numeric_literal: '10000' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: IDENTITY - keyword: START - numeric_literal: '10000' - keyword: INCREMENT - numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: - naked_identifier: adatabase - dot: . - naked_identifier: aschema - dot: . 
- function_name_identifier: apolicy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - keyword: USING - bracketed: start_bracket: ( column_reference: naked_identifier: my_column comma: ',' expression: column_reference: naked_identifier: my_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: reporting_tbl - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: reporting_group - data_type: data_type_identifier: VARCHAR - comment_clause: keyword: COMMENT quoted_literal: "'internal reporting group defined by DE team'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rpt_enc_table - alter_table_table_column_action: keyword: ADD column_reference: naked_identifier: encounter_count data_type: data_type_identifier: INTEGER comment_clause: keyword: COMMENT quoted_literal: "'count of encounters past year'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_col_name - keyword: TO - column_reference: naked_identifier: new_col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: alter - keyword: column - column_reference: naked_identifier: c1 - keyword: drop - keyword: not - keyword: 'null' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - alter_table_table_column_action: keyword: alter column_reference: naked_identifier: c5 comment_clause: keyword: comment quoted_literal: "'50 character column'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: modify - column_reference: naked_identifier: c2 - keyword: drop - keyword: default - comma: ',' - column_reference: naked_identifier: c3 - keyword: set - keyword: default - naked_identifier: seq5 - dot: . - keyword: nextval - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: alter - column_reference: naked_identifier: c4 - keyword: set - keyword: data - keyword: type - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - keyword: column - column_reference: naked_identifier: c4 - keyword: drop - keyword: default - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: xxxx - dot: . 
- naked_identifier: example_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: employeeCode - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: example_MASKING_POLICY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: aschema - dot: . - naked_identifier: atable - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: acolumn - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: - naked_identifier: adatabase - dot: . - naked_identifier: aschema - dot: . - function_name_identifier: apolicy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - keyword: using - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id comma: ',' expression: column_reference: naked_identifier: empl_id comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - comma: ',' - keyword: column - column_reference: naked_identifier: empl_dob - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_dob - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: unset - keyword: masking - keyword: policy - comma: ',' - keyword: column - column_reference: naked_identifier: empl_dob - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: my_column - keyword: SET - keyword: TAG - tag_reference: naked_identifier: my_tag - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tagged'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: 
TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: my_column - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: my_tag - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_1 - comma: ',' - column_reference: naked_identifier: column_2 - comma: ',' - column_reference: naked_identifier: column_3 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: demo_db - dot: . - naked_identifier: public - alter_table_table_column_action: - keyword: DROP - keyword: column - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: public_name - comma: ',' - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: description_text - comma: ',' - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: type_alias - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: demo_db - dot: . - naked_identifier: public - alter_table_table_column_action: - keyword: DROP - keyword: column - column_reference: naked_identifier: public_name - comma: ',' - column_reference: naked_identifier: description_text - comma: ',' - column_reference: naked_identifier: type_alias - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: demo_db - dot: . - naked_identifier: public - alter_table_table_column_action: - keyword: DROP - column_reference: naked_identifier: public_name - comma: ',' - column_reference: naked_identifier: description_text - comma: ',' - column_reference: naked_identifier: type_alias - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: demo_db - dot: . 
- naked_identifier: public - alter_table_table_column_action: - keyword: DROP - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: public_name - comma: ',' - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: description_text - comma: ',' - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: type_alias - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: keyword: DROP column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_col_name - keyword: TO - column_reference: naked_identifier: new_col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: drop - keyword: PRIMARY - keyword: KEY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - alter_table_constraint_action: keyword: ADD constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: TABLE_ID end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: some_table - alter_table_table_column_action: - keyword: ADD - column_reference: naked_identifier: some_column_upr - data_type: data_type_identifier: VARCHAR - keyword: AS - expression: function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_column end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: "'This is a virtual column'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . 
- naked_identifier: some_table - alter_table_table_column_action: - keyword: ADD - keyword: column - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: some_other_column_upr - data_type: data_type_identifier: VARCHAR - keyword: AS - expression: - function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_column end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'some characters'" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: LOWER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_column end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: some_table - alter_table_table_column_action: - keyword: ADD - keyword: column - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: some_column_upr - data_type: data_type_identifier: VARCHAR - keyword: AS - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_column end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: some_table - alter_table_table_column_action: - keyword: ADD - keyword: column - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: some_event_date_time_utc - data_type: keyword: TIMESTAMP - keyword: AS - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: IFF function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: is_condition_true - binary_operator: AND - function: function_name: function_name_identifier: TRY_TO_NUMBER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_text_value end_bracket: ) - keyword: IS - keyword: NOT - null_literal: 'NULL' - comma: ',' - expression: function: function_name: function_name_identifier: TO_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SUBSTR function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: some_text_value - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '13' - end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'1900-01-01'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: table1 - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_reference: naked_identifier: some_column - data_type: data_type_identifier: INTEGER - keyword: NOT - keyword: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_tag.sql000066400000000000000000000011251503426445100244410ustar00rootroot00000000000000ALTER TAG my_tag RENAME TO your_tag; ALTER TAG IF EXISTS my_tag RENAME TO your_tag; ALTER TAG my_tag 
SET MASKING POLICY policy_name;
ALTER TAG my_tag SET MASKING POLICY policy_name, MASKING POLICY policy_name_2, MASKING POLICY policy_name_3;
ALTER TAG my_tag SET COMMENT = 'this is a comment';
ALTER TAG my_tag UNSET COMMENT;
ALTER TAG my_tag ADD ALLOWED_VALUES 'my_value';
ALTER TAG my_tag ADD ALLOWED_VALUES 'my_value', 'my_value_2';
ALTER TAG my_tag DROP ALLOWED_VALUES 'my_value';
ALTER TAG my_tag DROP ALLOWED_VALUES 'my_value', 'my_value_2';
ALTER TAG my_tag UNSET ALLOWED_VALUES;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_tag.yml000066400000000000000000000066571503426445100244550ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9f7e2944dc231680c9300fa3f417a62edf56cd4f4fd0bcced7bc7559db755696
file:
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: your_tag
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: my_tag
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        naked_identifier: your_tag
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: SET
    - keyword: MASKING
    - keyword: POLICY
    - parameter: policy_name
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: SET
    - keyword: MASKING
    - keyword: POLICY
    - parameter: policy_name
    - comma: ','
    - keyword: MASKING
    - keyword: POLICY
    - parameter: policy_name_2
    - comma: ','
    - keyword: MASKING
    - keyword: POLICY
    - parameter: policy_name_3
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: SET
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'this is a comment'"
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: UNSET
    - keyword: COMMENT
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: ADD
    - keyword: ALLOWED_VALUES
    - quoted_literal: "'my_value'"
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: ADD
    - keyword: ALLOWED_VALUES
    - quoted_literal: "'my_value'"
    - comma: ','
    - quoted_literal: "'my_value_2'"
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: DROP
    - keyword: ALLOWED_VALUES
    - quoted_literal: "'my_value'"
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: DROP
    - keyword: ALLOWED_VALUES
    - quoted_literal: "'my_value'"
    - comma: ','
    - quoted_literal: "'my_value_2'"
- statement_terminator: ;
- statement:
    alter_tag_statement:
    - keyword: ALTER
    - keyword: TAG
    - object_reference:
        naked_identifier: my_tag
    - keyword: UNSET
    - keyword: ALLOWED_VALUES
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_add_after.sql000066400000000000000000000001511503426445100266210ustar00rootroot00000000000000
ALTER TASK my_task ADD AFTER another_task;
ALTER TASK my_task ADD AFTER another_task, yet_another_task;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_add_after.yml000066400000000000000000000017641503426445100266320ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 09650d80c9880ab8a03e0f407ee5e046ef92721b8e8e953a8f7e572f43280572
file:
- statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: ADD
    - keyword: AFTER
    - object_reference:
        naked_identifier: another_task
- statement_terminator: ;
- statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: ADD
    - keyword: AFTER
    - object_reference:
        naked_identifier: another_task
    - comma: ','
    - object_reference:
        naked_identifier: yet_another_task
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_if_exists_resume.sql000066400000000000000000000000451503426445100302650ustar00rootroot00000000000000
ALTER TASK IF EXISTS my_task RESUME;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_if_exists_resume.yml000066400000000000000000000011541503426445100302710ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4148b620a6f5f504a0e18d9da6811e3c3245df22761f8d1eaa27facc74eff009
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: my_task
    - keyword: RESUME
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_modify_as.sql000066400000000000000000000000501503426445100266600ustar00rootroot00000000000000
ALTER TASK my_task MODIFY AS SELECT 42;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_modify_as.yml000066400000000000000000000013421503426445100266650ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 48346285af95dc42042da4c534119a8c14f9d0c0142c7faef32a83723e84b751
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: MODIFY
    - keyword: AS
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '42'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_modify_when.sql000066400000000000000000000000451503426445100272200ustar00rootroot00000000000000
ALTER TASK my_task MODIFY WHEN TRUE;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_modify_when.yml000066400000000000000000000011661503426445100272270ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5e508027a2af0aa45d938ebd27a1f7422fadabb4d2c0782e3d116985cea689c5
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: MODIFY
    - keyword: WHEN
    - boolean_literal: 'TRUE'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_remove_after.sql000066400000000000000000000001571503426445100273720ustar00rootroot00000000000000
ALTER TASK my_task REMOVE AFTER another_task;
ALTER TASK my_task REMOVE AFTER another_task, yet_another_task;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_remove_after.yml000066400000000000000000000017721503426445100273760ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6a263afff3da0701c4916e57dfcbbc98ddf03bdb377e62b14d1a4d6566c7c7db
file:
- statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: REMOVE
    - keyword: AFTER
    - object_reference:
        naked_identifier: another_task
- statement_terminator: ;
- statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: REMOVE
    - keyword: AFTER
    - object_reference:
        naked_identifier: another_task
    - comma: ','
    - object_reference:
        naked_identifier: yet_another_task
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_resume.sql000066400000000000000000000000331503426445100262050ustar00rootroot00000000000000
ALTER TASK my_task RESUME;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_resume.yml000066400000000000000000000011041503426445100262070ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 43448d972ffbbfd89bcd826bfca0caebfe76ba3fc7aecbd664b67bc46cc46dbf
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: RESUME
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_full.sql000066400000000000000000000000611503426445100265230ustar00rootroot00000000000000
ALTER TASK my_task SET a = 'b', c = 1, d = TRUE;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_full.yml000066400000000000000000000017541503426445100265370ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d55a961588891c217b6c306300d3e1b1aabccc75ef044a3f6a6dbfac656a77b2
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_set_clause:
      - keyword: SET
      - parameter: a
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'b'"
      - comma: ','
      - parameter: c
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '1'
      - comma: ','
      - parameter: d
      - comparison_operator:
          raw_comparison_operator: '='
      - boolean_literal: 'TRUE'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_simple.sql000066400000000000000000000000401503426445100270500ustar00rootroot00000000000000
ALTER TASK my_task SET x = 'y';
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_simple.yml000066400000000000000000000013271503426445100270620ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d62b8a2ff4659f24d4693f4b81e7db5f20c59326bac09fcbbd2dfd859e677940
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_set_clause:
        keyword: SET
        parameter: x
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'y'"
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_special_full.sql000066400000000000000000000001521503426445100302240ustar00rootroot00000000000000
ALTER TASK my_task SET WAREHOUSE = my_warehouse SCHEDULE = '2 MINUTE' ALLOW_OVERLAPPING_EXECUTION = TRUE;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_special_full.yml000066400000000000000000000020361503426445100302310ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 48b23479c41ac28c1de58e92176cadcaba242b0f2acfef5323bccb59b8c13e32
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_special_set_clause:
      - keyword: SET
      - keyword: WAREHOUSE
      - comparison_operator:
          raw_comparison_operator: '='
      - object_reference:
          naked_identifier: my_warehouse
      - keyword: SCHEDULE
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'2 MINUTE'"
      - keyword: ALLOW_OVERLAPPING_EXECUTION
      - comparison_operator:
          raw_comparison_operator: '='
      - boolean_literal: 'TRUE'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_special_simple.sql000066400000000000000000000000611503426445100305520ustar00rootroot00000000000000
ALTER TASK my_task SET WAREHOUSE = my_warehouse;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_set_special_simple.yml000066400000000000000000000014121503426445100305550ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 45fecfe8f9c8243e07accfc63cd05cc8e58ab462c0025be81848ce5b5d8c9e30
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_special_set_clause:
      - keyword: SET
      - keyword: WAREHOUSE
      - comparison_operator:
          raw_comparison_operator: '='
      - object_reference:
          naked_identifier: my_warehouse
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_suspend.sql000066400000000000000000000000341503426445100263670ustar00rootroot00000000000000
ALTER TASK my_task SUSPEND;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_suspend.yml000066400000000000000000000011051503426445100263710ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f6a07613214b28476af972d503b2f1e699bf9ad2329c4dd9ee843b966d16b085
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - keyword: SUSPEND
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_unset_full.sql000066400000000000000000000000421503426445100270650ustar00rootroot00000000000000
ALTER TASK my_task UNSET a, b, c;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_unset_full.yml000066400000000000000000000013111503426445100270730ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: cdb12631349376612d04f1411ce356f215e8b42523d555865b7af18a680e7988
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_unset_clause:
      - keyword: UNSET
      - parameter: a
      - comma: ','
      - parameter: b
      - comma: ','
      - parameter: c
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_unset_simple.sql000066400000000000000000000000341503426445100274150ustar00rootroot00000000000000
ALTER TASK my_task UNSET a;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_task_unset_simple.yml000066400000000000000000000011711503426445100274220ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0ea1a555d52a62111d506c275aea597b599a1cb5766ab7eda4a7df3d7e8e41c0
file:
  statement:
    alter_task_statement:
    - keyword: ALTER
    - keyword: TASK
    - object_reference:
        naked_identifier: my_task
    - alter_task_unset_clause:
        keyword: UNSET
        parameter: a
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_abort_query.sql000066400000000000000000000000601503426445100272550ustar00rootroot00000000000000
ALTER USER IF EXISTS my_user ABORT ALL QUERIES;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_abort_query.yml000066400000000000000000000012231503426445100272610ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 61ed0eeed7391519e17038ab1675e29add5eba4ebb47f01865451d7e41c910c5
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: IF
    - keyword: EXISTS
    - role_reference:
        naked_identifier: my_user
    - keyword: ABORT
    - keyword: ALL
    - keyword: QUERIES
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_delegate_auth.sql000066400000000000000000000001251503426445100275160ustar00rootroot00000000000000
ALTER USER my_user REMOVE DELEGATED AUTHORIZATIONS FROM SECURITY INTEGRATION my_idp;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_delegate_auth.yml000066400000000000000000000013711503426445100275240ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b44dc3e20f2ba9931251e2890b3682c088f5c4a5e638f6bede218a098066e9fd
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: my_user
    - keyword: REMOVE
    - keyword: DELEGATED
    - keyword: AUTHORIZATIONS
    - keyword: FROM
    - keyword: SECURITY
    - keyword: INTEGRATION
    - object_reference:
        naked_identifier: my_idp
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_delegate_auth_role.sql000066400000000000000000000001371503426445100305420ustar00rootroot00000000000000
ALTER USER my_user ADD DELEGATED AUTHORIZATION OF ROLE my_role TO SECURITY INTEGRATION my_idp;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_delegate_auth_role.yml000066400000000000000000000015231503426445100305440ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b03b27b8a541fdff08fbe7541b4affbc54268d993cf46918ca530489978e60eb
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: my_user
    - keyword: ADD
    - keyword: DELEGATED
    - keyword: AUTHORIZATION
    - keyword: OF
    - keyword: ROLE
    - object_reference:
        naked_identifier: my_role
    - keyword: TO
    - keyword: SECURITY
    - keyword: INTEGRATION
    - object_reference:
        naked_identifier: my_idp
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_remove_delegate_auth.sql000066400000000000000000000001441503426445100310740ustar00rootroot00000000000000
ALTER USER my_user REMOVE DELEGATED AUTHORIZATION OF ROLE my_role FROM SECURITY INTEGRATION my_idp;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_remove_delegate_auth.yml000066400000000000000000000015301503426445100310760ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: df2710890f25e3dcc3c48047b4cbe87720cfa2e28d5593b350149d87f7c44801
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: my_user
    - keyword: REMOVE
    - keyword: DELEGATED
    - keyword: AUTHORIZATION
    - keyword: OF
    - keyword: ROLE
    - object_reference:
        naked_identifier: my_role
    - keyword: FROM
    - keyword: SECURITY
    - keyword: INTEGRATION
    - object_reference:
        naked_identifier: my_idp
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_rename.sql000066400000000000000000000000631503426445100261730ustar00rootroot00000000000000
ALTER USER IF EXISTS my_user RENAME TO "new_name";
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_rename.yml000066400000000000000000000012741503426445100262020ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f4de6567d814a83219145a5aecb5df2d061cd8cc3df92ae8dcf082b3bfcdde42
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: IF
    - keyword: EXISTS
    - role_reference:
        naked_identifier: my_user
    - keyword: RENAME
    - keyword: TO
    - object_reference:
        quoted_identifier: '"new_name"'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_reset_password.sql000066400000000000000000000000431503426445100277660ustar00rootroot00000000000000
ALTER USER my_user RESET PASSWORD;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_reset_password.yml000066400000000000000000000011311503426445100277700ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 59a47d9eb3ce59750ffb3e13a913c9cfe476b0c2c9b3f817557c15542ec40f5f
file:
  statement:
    alter_user_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: my_user
    - keyword: RESET
    - keyword: PASSWORD
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_set_values.sql000066400000000000000000000003601503426445100270760ustar00rootroot00000000000000
ALTER USER my_user SET password = 'abc123', DEFAULT_ROLE = user_role, type = person;
ALTER USER "John" SET DEFAULT_ROLE = JOHNNY DEFAULT_WAREHOUSE = FEU;
ALTER USER "John" SET DEFAULT_ROLE = JOHNNY DEFAULT_WAREHOUSE = FEU, COMMENT = "Foo";
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_set_values.yml000066400000000000000000000041701503426445100271030ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 035722cb5505bcccdf54000e9dd1a4daa7a3e939427af3abb123eaec262fdc5a file: - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: SET - parameter: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - comma: ',' - parameter: DEFAULT_ROLE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_role - comma: ',' - parameter: type - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: person - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"John"' - keyword: SET - parameter: DEFAULT_ROLE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: JOHNNY - parameter: DEFAULT_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: FEU - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"John"' - keyword: SET - parameter: DEFAULT_ROLE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: JOHNNY - parameter: DEFAULT_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: FEU - comma: ',' - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - object_reference: quoted_identifier: '"Foo"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_unset_values.sql000066400000000000000000000001101503426445100274320ustar00rootroot00000000000000ALTER USER my_user unset USE_CACHED_RESULT, must_change_password, type; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_user_unset_values.yml000066400000000000000000000013021503426445100274400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 90bdf9e61084b14732900e3db2b7689a29e712085b61a8320499c62033945a0d file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: unset - parameter: USE_CACHED_RESULT - comma: ',' - parameter: must_change_password - comma: ',' - parameter: type statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_view.sql000066400000000000000000000017611503426445100246460ustar00rootroot00000000000000alter view view1 rename to view2; alter view view1 set secure; alter view view1 unset secure; -- single column alter view user_info_v modify column ssn_number set masking policy ssn_mask_v; -- multiple columns alter view user_info_v modify column ssn_number set masking policy ssn_mask_v , column dob set masking policy dob_mask_v ; -- single column alter view user_info_v modify column ssn_number unset masking policy; -- multiple columns alter view user_info_v modify column ssn_number unset masking policy , column dob unset masking policy ; alter view v1 add row access policy rap_v1 on (empl_id); alter view v1 drop row access policy rap_v1; alter view v1 drop row access policy rap_v1_version_1, add row access policy rap_v1_version_2 on (empl_id), add row access policy rap_v1_version_3 on ("empl_id"); alter view v1 modify column foo set masking policy my.scoped.policy; ALTER VIEW "my_table" ALTER COLUMN "my_column" SET MASKING POLICY my_masking_policy FORCE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_view.yml000066400000000000000000000126351503426445100246520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3f2b506daf1713ff9acc73d48c9d4fde98eed33469dc6229bb26856750e07cf7 file: - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: rename - keyword: to - table_reference: naked_identifier: view2 - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: set - keyword: secure - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: unset - keyword: secure - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: ssn_mask_v - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: ssn_mask_v - comma: ',' - keyword: column - column_reference: naked_identifier: dob - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: dob_mask_v - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: unset - keyword: masking - keyword: policy - comma: ',' - keyword: column - column_reference: naked_identifier: dob - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: add - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1 - keyword: 'on' - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: drop - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1 - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: drop - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1_version_1 - comma: ',' - keyword: add - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1_version_2 - keyword: 'on' - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id end_bracket: ) - comma: ',' - keyword: add - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1_version_3 - keyword: 'on' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"empl_id"' end_bracket: ) - statement_terminator: ; - statement: 
alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: modify - keyword: column - column_reference: naked_identifier: foo - keyword: set - keyword: masking - keyword: policy - function_name: - naked_identifier: my - dot: . - naked_identifier: scoped - dot: . - function_name_identifier: policy - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: quoted_identifier: '"my_table"' - keyword: ALTER - keyword: COLUMN - column_reference: quoted_identifier: '"my_column"' - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_masking_policy - keyword: FORCE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_warehouse.sql000066400000000000000000000036311503426445100256740ustar00rootroot00000000000000alter warehouse if exists wh1 rename to wh2; alter warehouse my_wh set warehouse_size=medium; alter warehouse LOAD_WH set warehouse_size = XXLARGE; alter warehouse LOAD_WH set WAIT_FOR_COMPLETION = TRUE; alter warehouse LOAD_WH set MAX_CLUSTER_COUNT = 5; alter warehouse LOAD_WH set MIN_CLUSTER_COUNT = 1; alter warehouse LOAD_WH set SCALING_POLICY = STANDARD; alter warehouse LOAD_WH set SCALING_POLICY = 'STANDARD'; alter warehouse LOAD_WH set SCALING_POLICY = ECONOMY; alter warehouse LOAD_WH set SCALING_POLICY = 'ECONOMY'; alter warehouse LOAD_WH set AUTO_SUSPEND = 1; alter warehouse LOAD_WH set AUTO_RESUME = FALSE; alter warehouse LOAD_WH set RESOURCE_MONITOR = monitor_name; alter warehouse LOAD_WH set COMMENT = 'This is a comment'; alter warehouse LOAD_WH set MAX_CONCURRENCY_LEVEL = 1; alter warehouse LOAD_WH set STATEMENT_QUEUED_TIMEOUT_IN_SECONDS = 300; alter warehouse LOAD_WH set STATEMENT_TIMEOUT_IN_SECONDS = 300; alter warehouse LOAD_WH set TAG thetag = 'tag1'; alter warehouse LOAD_WH set TAG thetag1 = 'tag1', thetag2 = 'tag2'; alter warehouse LOAD_WH RESUME IF SUSPENDED; alter warehouse LOAD_WH ABORT ALL QUERIES; alter warehouse LOAD_WH RENAME TO LOAD_WH2; alter warehouse LOAD_WH SET MAX_CONCURRENCY_LEVEL = 1; alter warehouse LOAD_WH UNSET STATEMENT_QUEUED_TIMEOUT_IN_SECONDS; alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE; alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE, WAIT_FOR_COMPLETION; ALTER WAREHOUSE SET WAREHOUSE_SIZE='X-LARGE'; alter warehouse set warehouse_size=medium; alter warehouse LOAD_WH set WAREHOUSE_TYPE = STANDARD; alter warehouse LOAD_WH set WAREHOUSE_TYPE = 'SNOWPARK-OPTIMIZED'; ALTER WAREHOUSE IDENTIFIER($var_wh) SET WAREHOUSE_TYPE = STANDARD; ALTER WAREHOUSE CI_TRANSFORMING SET COMMENT = 'Warehouse for dbt development transformations in CI' , AUTO_RESUME = TRUE , AUTO_SUSPEND=30; ALTER WAREHOUSE LOAD_WH SET ENABLE_QUERY_ACCELERATION = true QUERY_ACCELERATION_MAX_SCALE_FACTOR = 4; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/alter_warehouse.yml000066400000000000000000000302431503426445100256750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b227019a09cb36cf82bd18f0eed983d21a02cb14dead33e699dc264103614cc6 file: - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - keyword: if - keyword: exists - object_reference: naked_identifier: wh1 - keyword: rename - keyword: to - object_reference: naked_identifier: wh2 - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: my_wh - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: medium - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: XXLARGE - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAIT_FOR_COMPLETION comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MAX_CLUSTER_COUNT comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MIN_CLUSTER_COUNT comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: "'STANDARD'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: ECONOMY - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: "'ECONOMY'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: AUTO_SUSPEND comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: AUTO_RESUME comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - 
statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: RESOURCE_MONITOR comparison_operator: raw_comparison_operator: '=' naked_identifier: monitor_name - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'This is a comment'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MAX_CONCURRENCY_LEVEL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: STATEMENT_QUEUED_TIMEOUT_IN_SECONDS comparison_operator: raw_comparison_operator: '=' numeric_literal: '300' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: STATEMENT_TIMEOUT_IN_SECONDS comparison_operator: raw_comparison_operator: '=' numeric_literal: '300' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - tag_equals: keyword: TAG tag_reference: naked_identifier: thetag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'tag1'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - tag_equals: - keyword: TAG - tag_reference: naked_identifier: thetag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag1'" - comma: ',' - tag_reference: naked_identifier: thetag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag2'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: RESUME - keyword: IF - keyword: SUSPENDED - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: ABORT - keyword: ALL - keyword: QUERIES - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: RENAME - keyword: TO - object_reference: naked_identifier: LOAD_WH2 - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: SET - warehouse_object_properties: keyword: MAX_CONCURRENCY_LEVEL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: STATEMENT_QUEUED_TIMEOUT_IN_SECONDS - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: 
naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: WAREHOUSE_SIZE - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: WAREHOUSE_SIZE - comma: ',' - naked_identifier: WAIT_FOR_COMPLETION - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - keyword: SET - warehouse_object_properties: keyword: WAREHOUSE_SIZE comparison_operator: raw_comparison_operator: '=' warehouse_size: "'X-LARGE'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: medium - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: "'SNOWPARK-OPTIMIZED'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - object_reference: keyword: IDENTIFIER bracketed: start_bracket: ( variable: $var_wh end_bracket: ) - keyword: SET - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - object_reference: naked_identifier: CI_TRANSFORMING - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Warehouse for dbt development transformations in CI'" - comma: ',' - warehouse_object_properties: keyword: AUTO_RESUME comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - comma: ',' - warehouse_object_properties: keyword: AUTO_SUSPEND comparison_operator: raw_comparison_operator: '=' numeric_literal: '30' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - object_reference: naked_identifier: LOAD_WH - keyword: SET - warehouse_object_properties: - keyword: ENABLE_QUERY_ACCELERATION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: QUERY_ACCELERATION_MAX_SCALE_FACTOR - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/at_before_time_travel.sql000066400000000000000000000022401503426445100270170ustar00rootroot00000000000000SELECT * FROM my_table AT ( TIMESTAMP => '2024-06-05 12:30:00'::TIMESTAMP_LTZ ); SELECT * FROM my_table AT ( TIMESTAMP => '2024-06-05 12:30:00'::TIMESTAMP ); SELECT * FROM my_table AT ( TIMESTAMP => '2024-06-05 12:30:00' ); SELECT * FROM my_table AT ( TIMESTAMP => '2024-06-05 12:30:00' ) AS T; SELECT * FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); SELECT oldt.* ,newt.* FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt FULL OUTER JOIN my_table AT(STATEMENT => 
'8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt ON oldt.id = newt.id WHERE oldt.id IS NULL OR newt.id IS NULL; SELECT * FROM db1.public.htt1 AT(TIMESTAMP => '2024-06-05 17:50:00'::TIMESTAMP_LTZ) h JOIN db1.public.tt1 AT(TIMESTAMP => '2024-06-05 17:50:00'::TIMESTAMP_LTZ) t ON h.c1=t.c1; -- https://github.com/sqlfluff/sqlfluff/issues/6070 SELECT * FROM my_table AT (TIMESTAMP => TO_TIMESTAMP(DATEADD('DAY', -1, DATEADD('MONTH', -1, DATEADD('DAY', -1, CURRENT_DATE))))); -- https://github.com/sqlfluff/sqlfluff/issues/5570 SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/at_before_time_travel.yml000066400000000000000000000340631503426445100270310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2f4489a191a4d02801453e2ddbe2e9ca05b1dcc6fc4efc6c3abfb4fef3ac770 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: cast_expression: quoted_literal: "'2024-06-05 12:30:00'" casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_LTZ end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: cast_expression: quoted_literal: "'2024-06-05 12:30:00'" casting_operator: '::' data_type: keyword: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: quoted_literal: "'2024-06-05 12:30:00'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: quoted_literal: "'2024-06-05 12:30:00'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: T - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_before_expression: keyword: BEFORE bracketed: 
start_bracket: ( keyword: STATEMENT parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: oldt dot: . star: '*' - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: newt dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_before_expression: keyword: BEFORE bracketed: start_bracket: ( keyword: STATEMENT parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: oldt join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: STATEMENT parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: newt - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: oldt - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: newt - dot: . - naked_identifier: id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: oldt - dot: . - naked_identifier: id - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: - naked_identifier: newt - dot: . - naked_identifier: id - keyword: IS - null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db1 - dot: . - naked_identifier: public - dot: . - naked_identifier: htt1 from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: cast_expression: quoted_literal: "'2024-06-05 17:50:00'" casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_LTZ end_bracket: ) alias_expression: naked_identifier: h join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: db1 - dot: . - naked_identifier: public - dot: . - naked_identifier: tt1 from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: cast_expression: quoted_literal: "'2024-06-05 17:50:00'" casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_LTZ end_bracket: ) alias_expression: naked_identifier: t join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: c1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'DAY'" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'MONTH'" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'DAY'" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: bare_function: CURRENT_DATE - end_bracket: ) - end_bracket: ) - end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 01 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: data_type_identifier: timestamp_tz end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/bare_functions.sql000066400000000000000000000001611503426445100254770ustar00rootroot00000000000000SELECT CURRENT_TIMESTAMP , CURRENT_TIME , CURRENT_DATE , CURRENT_USER , LOCALTIME , LOCALTIMESTAMP ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/bare_functions.yml000066400000000000000000000017651503426445100255140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ab003ddfbb797cb27d30cc9c29af599f03eeae608c276a8104bb77ed8b79fc78 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: CURRENT_TIMESTAMP - comma: ',' - select_clause_element: bare_function: CURRENT_TIME - comma: ',' - select_clause_element: bare_function: CURRENT_DATE - comma: ',' - select_clause_element: bare_function: CURRENT_USER - comma: ',' - select_clause_element: bare_function: LOCALTIME - comma: ',' - select_clause_element: bare_function: LOCALTIMESTAMP statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/begin_end.sql000066400000000000000000000004751503426445100244200ustar00rootroot00000000000000-- NOTE: This is a loop BEGIN, and not a transaction BEGIN, -- because BEGIN is NOT followed immediately by a ";" -- See: https://docs.snowflake.com/en/sql-reference/sql/begin -- See: https://docs.snowflake.com/en/sql-reference/snowflake-scripting/begin begin select 1; select 2; begin select 3; select 4; end; end; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/begin_end.yml000066400000000000000000000027021503426445100244150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cdeba1691b01c319b7ecb004bfef43090c4eb5b684adccf1cff2663b2734b9fe file: statement: scripting_block_statement: - keyword: begin - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: scripting_block_statement: - keyword: begin - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '4' - statement_terminator: ; - keyword: end - statement_terminator: ; - keyword: end statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/call.sql000066400000000000000000000001441503426445100234120ustar00rootroot00000000000000CALL sv_proc1('Manitoba', 127.4); SET Variable1 = 49; CALL sv_proc2($Variable1); CALL sv_proc3(); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/call.yml000066400000000000000000000031201503426445100234110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4ece5aba0411ef72b29b98d9436a6578359be09cd01b753fb9edadd233ca2da9 file: - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc1 function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Manitoba'" - comma: ',' - expression: numeric_literal: '127.4' - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: Variable1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '49' - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc2 function_contents: bracketed: start_bracket: ( expression: variable: $Variable1 end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc3 function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/call_statement.sql000066400000000000000000000001721503426445100254770ustar00rootroot00000000000000CALL MyStoredProcedure(CURRENT_ROLE()); CALL sv_proc1('Manitoba', 127.4); SET Variable1 = 49; CALL sv_proc2($Variable1); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/call_statement.yml000066400000000000000000000035511503426445100255050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f7351943cd4f95d2828d2d969298e1d114ab1ce6c75c10db25ebc18cc7bdb71 file: - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: MyStoredProcedure function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: CURRENT_ROLE function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc1 function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Manitoba'" - comma: ',' - expression: numeric_literal: '127.4' - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: Variable1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '49' - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc2 function_contents: bracketed: start_bracket: ( expression: variable: $Variable1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/cast_datatype_accessor.sql000066400000000000000000000010351503426445100272060ustar00rootroot00000000000000SELECT bar::array[0] AS channel , foo:bar::array[2] AS channel2 , bar::array[0][1] AS channel3 , raw:foo::array[0]::string AS channel4 FROM my_table; SELECT foo::variant:field::array[0]::string AS name FROM my_table; SELECT DISTINCT payload::variant::object:name::text AS name, payload::variant::object AS details, payload::variant::object:createdAt::timestamp_ntz AS created, payload::variant::object:updatedAt::timestamp_ntz AS updated, payload::variant::object:id::number AS id FROM raw_source_table 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/cast_datatype_accessor.yml000066400000000000000000000201631503426445100272130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b18f7a4178447a6fc1c0cc5965aa1cd332b0835ca06a7675ff84c6e2f26d5621 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: bar casting_operator: '::' data_type: data_type_identifier: array array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: channel - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: foo semi_structured_expression: colon: ':' semi_structured_element: bar casting_operator: '::' data_type: data_type_identifier: array array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: channel2 - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: bar - casting_operator: '::' - data_type: data_type_identifier: array - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: channel3 - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: raw - semi_structured_expression: colon: ':' semi_structured_element: foo - casting_operator: '::' - data_type: data_type_identifier: array - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - casting_operator: '::' - data_type: data_type_identifier: string alias_expression: alias_operator: keyword: AS naked_identifier: channel4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: - column_reference: naked_identifier: foo - casting_operator: '::' - data_type: data_type_identifier: variant - semi_structured_expression: colon: ':' semi_structured_element: field - casting_operator: '::' - data_type: data_type_identifier: array - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - casting_operator: '::' - data_type: data_type_identifier: string alias_expression: alias_operator: keyword: AS naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: DISTINCT - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: payload - casting_operator: '::' - data_type: data_type_identifier: variant - casting_operator: '::' - data_type: data_type_identifier: object - semi_structured_expression: colon: ':' semi_structured_element: name - 
casting_operator: '::' - data_type: data_type_identifier: text alias_expression: alias_operator: keyword: AS naked_identifier: name - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: payload - casting_operator: '::' - data_type: data_type_identifier: variant - casting_operator: '::' - data_type: data_type_identifier: object alias_expression: alias_operator: keyword: AS naked_identifier: details - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: payload - casting_operator: '::' - data_type: data_type_identifier: variant - casting_operator: '::' - data_type: data_type_identifier: object - semi_structured_expression: colon: ':' semi_structured_element: createdAt - casting_operator: '::' - data_type: data_type_identifier: timestamp_ntz alias_expression: alias_operator: keyword: AS naked_identifier: created - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: payload - casting_operator: '::' - data_type: data_type_identifier: variant - casting_operator: '::' - data_type: data_type_identifier: object - semi_structured_expression: colon: ':' semi_structured_element: updatedAt - casting_operator: '::' - data_type: data_type_identifier: timestamp_ntz alias_expression: alias_operator: keyword: AS naked_identifier: updated - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: payload - casting_operator: '::' - data_type: data_type_identifier: variant - casting_operator: '::' - data_type: data_type_identifier: object - semi_structured_expression: colon: ':' semi_structured_element: id - casting_operator: '::' - data_type: data_type_identifier: number alias_expression: alias_operator: keyword: AS naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_source_table sqlfluff-3.4.2/test/fixtures/dialects/snowflake/changes_clause.sql000066400000000000000000000011201503426445100254360ustar00rootroot00000000000000select * from t1 changes(information => default) at(timestamp => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp); select * from t1 changes(information => append_only) at(offset => -60*5); select c1 from t1 changes(information => append_only) at(timestamp => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp) end(timestamp => 'Fri, 05 May 2015 16:20:00 -0700'::timestamp); select * from t1 changes(information => default) before(statement => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); select * from st.test changes (information => append_only) at (stream => 'ppr.str_test'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/changes_clause.yml000066400000000000000000000134461503426445100254560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e973f7208e2679907c15c9166fe4cdafba908d888bbc2e726172bda8bcc1a28d file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: default - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 01 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: append_only - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: offset parameter_assigner: => expression: - numeric_literal: sign_indicator: '-' numeric_literal: '60' - binary_operator: '*' - numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: append_only - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 01 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - keyword: end - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 05 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: default - end_bracket: ) - keyword: before - bracketed: start_bracket: ( keyword: statement parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: st - dot: . 
- naked_identifier: test changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: append_only - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: stream parameter_assigner: => expression: quoted_literal: "'ppr.str_test'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/col_position.sql000066400000000000000000000003711503426445100252020ustar00rootroot00000000000000-- In snowflake the column position is denoted with $n syntax (e.g. $1, $2) -- https://docs.snowflake.com/en/sql-reference/sql/select.html#parameters select $1 as type, $2 as price from (values ('toffee', 5), ('starburst', 8), ('flying_saucer', 1)) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/col_position.yml000066400000000000000000000041471503426445100252110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d3a06e6edc2e9c69ccb277055639c7cb78654a1f519fed42f290f4b29edfa34 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 alias_expression: alias_operator: keyword: as naked_identifier: type - comma: ',' - select_clause_element: column_reference: column_index_identifier_segment: $2 alias_expression: alias_operator: keyword: as naked_identifier: price from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: quoted_literal: "'toffee'" - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'starburst'" - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'flying_saucer'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/comment_statement.sql000066400000000000000000000020511503426445100262240ustar00rootroot00000000000000comment on column my_table.my_column is 'comment'; comment on table foo is 'comment'; comment on view foo is 'comment'; comment on schema foo is 'comment'; comment on database foo is 'comment'; comment on warehouse foo is 'comment'; comment on user foo is 'comment'; comment on stage foo is 'comment'; comment on function foo is 'comment'; comment on procedure foo is 'comment'; comment on sequence foo is 'comment'; comment on share foo is 'comment'; comment on pipe foo is 'comment'; comment on stream foo is 'comment'; comment on task foo is 'comment'; comment on network policy foo is 'comment'; comment on api integration foo is 'comment'; comment on notification integration foo is 'comment'; comment on security integration foo is 'comment'; comment on storage integration foo is 'comment'; comment on session policy foo is 'comment'; comment on external table foo is 'comment'; comment on materialized view foo is 'comment'; comment on masking policy foo is 'comment'; comment on row access policy foo is 'comment'; comment on file format foo is 'comment'; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/comment_statement.yml000066400000000000000000000152221503426445100262320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9b7345b3981df9970f08b35fe509c88c16a13ea472e57ee56ad33d45d52cf11b file: - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: column - object_reference: - naked_identifier: my_table - dot: . - naked_identifier: my_column - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: table - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: view - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: schema - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: database - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: user - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: stage - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: function - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: sequence - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: share - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: pipe - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: stream - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: task - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - 
statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: network - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: api - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: notification - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: security - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: storage - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: session - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: external - keyword: table - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: materialized - keyword: view - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: masking - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: row - keyword: access - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: file - keyword: format - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/connect_by.sql000066400000000000000000000023701503426445100246250ustar00rootroot00000000000000select employee_id, manager_id, title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; select sys_connect_by_path(title, ' -> '), employee_id, manager_id, title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; select description, quantity, component_id, parent_component_id, sys_connect_by_path(component_id, ' -> ') as path from components start with component_id = 1 connect by parent_component_id = prior component_id order by path; select employee_id, manager_id, title, connect_by_root title as root_title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; select description, quantity, component_id, parent_component_id, component_type from components c connect by prior c.parent_component_id = c.component_id AND PRIOR c.component_type = c.component_type order by quantity; with tbl as ( select 'A' as foo, 'B' as 
bar union all select 'B' as foo, 'C' as bar ) select *, connect_by_root bar as connect_by_root, sys_connect_by_path(bar, '') as path from tbl connect by prior foo = bar and not contains(prior path, bar); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/connect_by.yml000066400000000000000000000321731503426445100246330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2fbb45c59285bbac72e07da978ebe44633acab3eca46ee36a6c83f9a3cb2cfde file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - expression: - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: sys_connect_by_path function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: title - comma: ',' - expression: quoted_literal: "' -> '" - end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - expression: - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: description - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: component_id - comma: ',' - select_clause_element: column_reference: naked_identifier: parent_component_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sys_connect_by_path function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: 
component_id - comma: ',' - expression: quoted_literal: "' -> '" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: path from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: components connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: component_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: connect - keyword: by - expression: - column_reference: naked_identifier: parent_component_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: component_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: path - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title - comma: ',' - select_clause_element: keyword: connect_by_root column_reference: naked_identifier: title alias_expression: alias_operator: keyword: as naked_identifier: root_title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - expression: - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: description - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: component_id - comma: ',' - select_clause_element: column_reference: naked_identifier: parent_component_id - comma: ',' - select_clause_element: column_reference: naked_identifier: component_type from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: components alias_expression: naked_identifier: c connectby_clause: - keyword: connect - keyword: by - expression: - keyword: prior - column_reference: - naked_identifier: c - dot: . - naked_identifier: parent_component_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c - dot: . - naked_identifier: component_id - binary_operator: AND - keyword: PRIOR - column_reference: - naked_identifier: c - dot: . - naked_identifier: component_type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c - dot: . 
- naked_identifier: component_type orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: quantity - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: tbl keyword: as bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: as naked_identifier: foo - comma: ',' - select_clause_element: quoted_literal: "'B'" alias_expression: alias_operator: keyword: as naked_identifier: bar - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'B'" alias_expression: alias_operator: keyword: as naked_identifier: foo - comma: ',' - select_clause_element: quoted_literal: "'C'" alias_expression: alias_operator: keyword: as naked_identifier: bar end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: keyword: connect_by_root column_reference: naked_identifier: bar alias_expression: alias_operator: keyword: as naked_identifier: connect_by_root - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sys_connect_by_path function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: path from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl connectby_clause: - keyword: connect - keyword: by - expression: - keyword: prior - column_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: bar - binary_operator: and - keyword: not - function: function_name: function_name_identifier: contains function_contents: bracketed: - start_bracket: ( - expression: keyword: prior column_reference: naked_identifier: path - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_files_into_location_statement.sql000066400000000000000000000007721503426445100316470ustar00rootroot00000000000000COPY FILES INTO '@stage/folder' FROM '@other_stage/folder'; COPY FILES INTO '@stage/folder' FROM '@other_stage/folder' FILES = ('data.csv', 'data2.csv'); COPY FILES INTO '@stage/folder' FROM '@other_stage/folder' PATTERN = '.*[.]parquet.*'; COPY FILES INTO '@stage/folder' FROM '@other_stage/folder' DETAILED_OUTPUT = TRUE; COPY FILES INTO '@stage/folder' FROM '@other_stage/folder' FILES = ('data.csv', 'data2.csv') PATTERN = '.*[.]parquet.*' DETAILED_OUTPUT = TRUE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_files_into_location_statement.yml000066400000000000000000000054261503426445100316520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
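#
# A minimal regeneration sketch, assuming a shell at the repository root
# with SQLFluff's test dependencies installed (no flags are shown because
# none are assumed here; check the script itself for any options):
#
#   python test/generate_parse_fixture_yml.py
#
# The script re-parses each .sql fixture and rewrites its paired .yml,
# including the _hash field below, so these files never need hand edits.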
_hash: f7bfc93c1767993d1dd8a6df56b366fb922839f315c1bcb86f7fd08154795178 file: - statement: copy_files_into_location_statement: - keyword: COPY - keyword: FILES - keyword: INTO - storage_location: stage_path: "'@stage/folder'" - keyword: FROM - storage_location: stage_path: "'@other_stage/folder'" - statement_terminator: ; - statement: copy_files_into_location_statement: - keyword: COPY - keyword: FILES - keyword: INTO - storage_location: stage_path: "'@stage/folder'" - keyword: FROM - storage_location: stage_path: "'@other_stage/folder'" - keyword: FILES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'data.csv'" - comma: ',' - quoted_literal: "'data2.csv'" - end_bracket: ) - statement_terminator: ; - statement: copy_files_into_location_statement: - keyword: COPY - keyword: FILES - keyword: INTO - storage_location: stage_path: "'@stage/folder'" - keyword: FROM - storage_location: stage_path: "'@other_stage/folder'" - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*[.]parquet.*'" - statement_terminator: ; - statement: copy_files_into_location_statement: - keyword: COPY - keyword: FILES - keyword: INTO - storage_location: stage_path: "'@stage/folder'" - keyword: FROM - storage_location: stage_path: "'@other_stage/folder'" - keyword: DETAILED_OUTPUT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: copy_files_into_location_statement: - keyword: COPY - keyword: FILES - keyword: INTO - storage_location: stage_path: "'@stage/folder'" - keyword: FROM - storage_location: stage_path: "'@other_stage/folder'" - keyword: FILES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'data.csv'" - comma: ',' - quoted_literal: "'data2.csv'" - end_bracket: ) - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*[.]parquet.*'" - keyword: DETAILED_OUTPUT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location.sql000066400000000000000000000017441503426445100264010ustar00rootroot00000000000000COPY INTO '@public.dir/airflow-pipelines/' FROM "MODEL"."FCT_ROLLING_ACTIVE_USERS_L28" FILE_FORMAT = (TYPE = PARQUET) SINGLE = FALSE MAX_FILE_SIZE = 1000000000 INCLUDE_QUERY_ID = TRUE HEADER = TRUE; -- mixed order between `copyOptions` and other copy configurations COPY INTO 's3://geotags.csv.gz' FROM ( SELECT DISTINCT ID, CAST(Z.VALUE AS INTEGER) AS LISTING_ADDRESS_POSTALCODE FROM ANALYTICS_PROD.SERVICE.GEO_DATA_LAYER_FLATTEN, LATERAL FLATTEN(ZIPS) AS Z WHERE TYPE IN ('canton', 'region', 'zip') AND PARENTPATHS LIKE '%geo-country-switzerland%' ) STORAGE_INTEGRATION = SI_S3_DS_ASSETS FILE_FORMAT = ( TYPE = CSV NULL_IF = () EMPTY_FIELD_AS_NULL = FALSE COMPRESSION = GZIP ) SINGLE = TRUE OVERWRITE = TRUE HEADER = TRUE MAX_FILE_SIZE = 5368709120; -- with a CTE in the query segment COPY INTO @my_stage/path/to/file.json.gz FROM ( WITH my_cte AS ( SELECT 1 ) SELECT * FROM my_cte ); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location.yml000066400000000000000000000162141503426445100264010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c42ecb4f022f041d2ba57aa66a0896dda567d95c9d43229604dfe90db1ca859 file: - statement: copy_into_location_statement: - keyword: COPY - keyword: INTO - storage_location: stage_path: "'@public.dir/airflow-pipelines/'" - keyword: FROM - table_reference: - quoted_identifier: '"MODEL"' - dot: . - quoted_identifier: '"FCT_ROLLING_ACTIVE_USERS_L28"' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: PARQUET end_bracket: ) - keyword: SINGLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: MAX_FILE_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000000' - keyword: INCLUDE_QUERY_ID - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: copy_into_location_statement: - keyword: COPY - keyword: INTO - storage_location: bucket_path: "'s3://geotags.csv.gz'" - keyword: FROM - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: DISTINCT - select_clause_element: column_reference: naked_identifier: ID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: Z - dot: . - naked_identifier: VALUE keyword: AS data_type: data_type_identifier: INTEGER end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: LISTING_ADDRESS_POSTALCODE from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: ANALYTICS_PROD - dot: . - naked_identifier: SERVICE - dot: . 
- naked_identifier: GEO_DATA_LAYER_FLATTEN - comma: ',' - from_expression: from_expression_element: keyword: LATERAL table_expression: function: function_name: function_name_identifier: FLATTEN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ZIPS end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Z where_clause: keyword: WHERE expression: - column_reference: naked_identifier: TYPE - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'canton'" - comma: ',' - quoted_literal: "'region'" - comma: ',' - quoted_literal: "'zip'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: PARENTPATHS - keyword: LIKE - quoted_literal: "'%geo-country-switzerland%'" end_bracket: ) - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: SI_S3_DS_ASSETS - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( end_bracket: ) - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP end_bracket: ) - keyword: SINGLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: OVERWRITE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: MAX_FILE_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5368709120' - statement_terminator: ; - statement: copy_into_location_statement: - keyword: COPY - keyword: INTO - storage_location: stage_path: '@my_stage/path/to/file.json.gz' - keyword: FROM - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: my_cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_cte end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_Amazon_SC3.sql000066400000000000000000000002561503426445100303530ustar00rootroot00000000000000copy into 's3://mybucket/unload/' from mytable credentials = (aws_key_id='xxxx' aws_secret_key='xxxxx' aws_token='xxxxxx') file_format = (format_name = my_csv_format); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_Amazon_SC3.yml000066400000000000000000000031341503426445100303530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: db3b90d44ca4034cecf43dd0fef52cc912c21a64d7c7f4664fd075bd97a366ea file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'s3://mybucket/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: - start_bracket: ( - keyword: aws_key_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxx'" - keyword: aws_secret_key - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxxx'" - keyword: aws_token - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxxxx'" - end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_Azure.sql000066400000000000000000000002551503426445100275430ustar00rootroot00000000000000copy into 'azure://myaccount.blob.core.windows.net/mycontainer/unload/' from mytable credentials=(azure_sas_token='xxxx') file_format = (format_name = my_csv_format); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_Azure.yml000066400000000000000000000025571503426445100275540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bb9426b1f780352a01f874549967e4021225107d7786cf44b055a28084091c9a file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: azure_sas_token comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_GoogleCloud.sql000066400000000000000000000002531503426445100306560ustar00rootroot00000000000000copy into 'azure://myaccount.blob.core.windows.net/mycontainer/unload/' from mytable credentials=(azure_sas_token='xxxx') file_format = (format_name = my_csv_format); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_GoogleCloud.yml000066400000000000000000000025571503426445100306710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb9426b1f780352a01f874549967e4021225107d7786cf44b055a28084091c9a file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: azure_sas_token comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_partitionby.sql000066400000000000000000000004211503426445100310140ustar00rootroot00000000000000copy into @%t1 from t1 partition by ('date=' || to_varchar(dt, 'YYYY-MM-DD') || '/hour=' || to_varchar(date_part(hour, ts))) -- Concatenate labels and column values to output meaningful filenames file_format = (type=parquet) max_file_size = 32000000 header=true; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_location_partitionby.yml000066400000000000000000000055661503426445100310350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99c827fb70dfb366195c1af8416a4428e89948d3aad9e9d533276c9cfafcefb6 file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: stage_path: '@%t1' - keyword: from - table_reference: naked_identifier: t1 - partition_by_segment: - keyword: partition - keyword: by - bracketed: start_bracket: ( expression: - quoted_literal: "'date='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: to_varchar function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: dt - comma: ',' - expression: quoted_literal: "'YYYY-MM-DD'" - end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'/hour='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: to_varchar function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: date_part function_contents: bracketed: start_bracket: ( date_part: hour comma: ',' expression: column_reference: naked_identifier: ts end_bracket: ) end_bracket: ) end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: parquet end_bracket: ) - keyword: max_file_size - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '32000000' - keyword: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_table.sql000066400000000000000000000037071503426445100256610ustar00rootroot00000000000000copy into mytable from @my_int_stage; copy into mytable from 
@my_int_stage file_format = (type = csv); copy into mytable from @my_int_stage file_format = (format_name = 'mycsv'); copy into mytable from @my_int_stage file_format = (type = 'CSV') pattern='.*/.*/.*[.]csv[.]gz'; copy into mytable from @my_int_stage file_format = (format_name = myformat) pattern='.*sales.*[.]csv'; copy into mytable from @my_int_stage file_format = (format_name = myformat) pattern=$my_var; copy into mytable; copy into mytable from @%mytable; copy into mytable from @~/data_files; copy into mytable from @mydb.myschema.mystage; copy into mytable from @mydatabase.myschema.%mytable; copy into mytable purge = true; copy into mytable validation_mode = 'RETURN_ERRORS'; copy into mytable validation_mode = 'RETURN_2_ROWS'; copy into mytable validation_mode = 'RETURN_3_ROWS'; COPY INTO target_table FROM ( SELECT $1 FROM @source_stage ); copy into mytable1 (column1) from 's3://bucket/source' file_format = (TYPE = JSON); copy into mytable1 from (select column1 from @ext.stage/path1) file_format = (TYPE = JSON); copy into mytable1 from 's3://bucket/source' file_format = (type=csv SKIP_HEADER=1); copy into mytable1 (column1) from @public.stage/sub-folder/myfile-1.csv file_format = (TYPE = JSON); copy into mytable1 (column1) from @public.stage/subfolder/ file_format = (TYPE = JSON); COPY INTO table1 FROM @stage1 MATCH_BY_COLUMN_NAME = CASE_INSENSITIVE INCLUDE_METADATA = ( ingestdate = METADATA$START_SCAN_TIME, filename = METADATA$FILENAME); COPY INTO table1 FROM @stage1 MATCH_BY_COLUMN_NAME = CASE_INSENSITIVE FILE_FORMAT = (TYPE = JSON) LOAD_UNCERTAIN_FILES = TRUE INCLUDE_METADATA = ( ingestdate = METADATA$START_SCAN_TIME, filename = METADATA$FILENAME); COPY INTO test.transactions_all FROM @rawdata.STITCH_STAGE_NETSUITE/transactions/ FILE_FORMAT = rawdata.json_format MATCH_BY_COLUMN_NAME = 'case_insensitive'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/copy_into_table.yml000066400000000000000000000333231503426445100256600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb1866f50d55e35a9207b2e6b5e1cd61a384d7ec40f6a021d63bf43146ca148b file: - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: csv end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'mycsv'" end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: "'CSV'" end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*/.*/.*[.]csv[.]gz'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myformat end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*sales.*[.]csv'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myformat end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - variable: $my_var - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@%mytable' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@~/data_files' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: 
naked_identifier: mytable - keyword: from - storage_location: stage_path: '@mydb.myschema.mystage' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@mydatabase.myschema.%mytable' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: purge - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_ERRORS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_2_ROWS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_3_ROWS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: target_table - keyword: FROM - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: column_index_identifier_segment: $1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: stage_path: '@source_stage' end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: bucket_path: "'s3://bucket/source'" - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@ext.stage/path1' end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - keyword: from - storage_location: bucket_path: "'s3://bucket/source'" - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: type - comparison_operator: raw_comparison_operator: '=' - file_type: 
csv - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: stage_path: '@public.stage/sub-folder/myfile-1.csv' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: stage_path: '@public.stage/subfolder/' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: table1 - keyword: FROM - storage_location: stage_path: '@stage1' - keyword: MATCH_BY_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - keyword: CASE_INSENSITIVE - keyword: INCLUDE_METADATA - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - naked_identifier: ingestdate - comparison_operator: raw_comparison_operator: '=' - keyword: METADATA$START_SCAN_TIME - comma: ',' - naked_identifier: filename - comparison_operator: raw_comparison_operator: '=' - keyword: METADATA$FILENAME - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: table1 - keyword: FROM - storage_location: stage_path: '@stage1' - keyword: MATCH_BY_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - keyword: CASE_INSENSITIVE - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - keyword: LOAD_UNCERTAIN_FILES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: INCLUDE_METADATA - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - naked_identifier: ingestdate - comparison_operator: raw_comparison_operator: '=' - keyword: METADATA$START_SCAN_TIME - comma: ',' - naked_identifier: filename - comparison_operator: raw_comparison_operator: '=' - keyword: METADATA$FILENAME - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: - naked_identifier: test - dot: . - naked_identifier: transactions_all - keyword: FROM - storage_location: stage_path: '@rawdata.STITCH_STAGE_NETSUITE/transactions/' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: - naked_identifier: rawdata - dot: . 
- naked_identifier: json_format - keyword: MATCH_BY_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'case_insensitive'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_api_integration.sql000066400000000000000000000042721503426445100272040ustar00rootroot00000000000000CREATE OR REPLACE API INTEGRATION aws API_PROVIDER = aws_api_gateway API_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/my_cloud_account_role' API_ALLOWED_PREFIXES = ('https://xyz.execute-api.us-west-2.amazonaws.com/production') ENABLED = TRUE; CREATE OR REPLACE API INTEGRATION aws2 API_PROVIDER = aws_private_api_gateway API_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/my_cloud_account_role' API_ALLOWED_PREFIXES = ('https://xyz.execute-api.us-west-2.amazonaws.com/production') API_KEY='123' ENABLED = FALSE COMMENT='blabla'; CREATE OR REPLACE API INTEGRATION aws3 API_PROVIDER = aws_gov_api_gateway API_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/my_cloud_account_role' API_ALLOWED_PREFIXES = ('https://xyz.execute-api.us-west-2.amazonaws.com/production') ENABLED = TRUE; CREATE OR REPLACE API INTEGRATION aws4 API_PROVIDER = aws_gov_private_api_gateway API_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/my_cloud_account_role' API_ALLOWED_PREFIXES = ('https://xyz.execute-api.us-west-2.amazonaws.com/production') ENABLED = TRUE; CREATE OR REPLACE API INTEGRATION azure API_PROVIDER = azure_api_management AZURE_TENANT_ID = '' AZURE_AD_APPLICATION_ID = '' API_KEY = '' API_ALLOWED_PREFIXES = ( 'go' ) API_BLOCKED_PREFIXES = ( 'do_not_go' ) ENABLED = TRUE; CREATE OR REPLACE API INTEGRATION google API_PROVIDER = google_api_gateway GOOGLE_AUDIENCE = '' API_ALLOWED_PREFIXES = ( 'go' ) ENABLED = TRUE; CREATE OR REPLACE API INTEGRATION git API_PROVIDER = git_https_api GOOGLE_AUDIENCE = '' API_ALLOWED_PREFIXES = ( 'go' ) ALLOWED_AUTHENTICATION_SECRETS = ( 'pedo mellon a minno' ) ; CREATE OR REPLACE API INTEGRATION git2 API_PROVIDER = git_https_api GOOGLE_AUDIENCE = '' ALLOWED_AUTHENTICATION_SECRETS = ( all ) ; CREATE OR REPLACE API INTEGRATION git3 API_PROVIDER = git_https_api GOOGLE_AUDIENCE = '' API_ALLOWED_PREFIXES = ( 'go' ) ALLOWED_AUTHENTICATION_SECRETS = ( none ) ; CREATE OR REPLACE API INTEGRATION git2 API_PROVIDER = git_https_api GOOGLE_AUDIENCE = '' ALLOWED_AUTHENTICATION_SECRETS = ('pedo','mellon a','minno') ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_api_integration.yml000066400000000000000000000226451503426445100272120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1d6cbd689505b61f2f784cf5ea98697a0d955dcdccf8c41e5c904a961a7290ee file: - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: aws - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: aws_api_gateway - keyword: API_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/my_cloud_account_role'" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'https://xyz.execute-api.us-west-2.amazonaws.com/production'" end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: aws2 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: aws_private_api_gateway - keyword: API_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/my_cloud_account_role'" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'https://xyz.execute-api.us-west-2.amazonaws.com/production'" end_bracket: ) - keyword: API_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'123'" - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'blabla'" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: aws3 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: aws_gov_api_gateway - keyword: API_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/my_cloud_account_role'" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'https://xyz.execute-api.us-west-2.amazonaws.com/production'" end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: aws4 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: aws_gov_private_api_gateway - keyword: API_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/my_cloud_account_role'" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'https://xyz.execute-api.us-west-2.amazonaws.com/production'" end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: azure - keyword: API_PROVIDER - comparison_operator: 
raw_comparison_operator: '=' - keyword: azure_api_management - keyword: AZURE_TENANT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: AZURE_AD_APPLICATION_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: API_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'go'" end_bracket: ) - keyword: API_BLOCKED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'do_not_go'" end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: google - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: google_api_gateway - keyword: GOOGLE_AUDIENCE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'go'" end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: git - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: git_https_api - keyword: GOOGLE_AUDIENCE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'go'" end_bracket: ) - keyword: ALLOWED_AUTHENTICATION_SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'pedo mellon a minno'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: git2 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: git_https_api - keyword: GOOGLE_AUDIENCE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: ALLOWED_AUTHENTICATION_SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: all end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: git3 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: git_https_api - keyword: GOOGLE_AUDIENCE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: API_ALLOWED_PREFIXES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'go'" end_bracket: ) - keyword: ALLOWED_AUTHENTICATION_SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: none end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: API - keyword: INTEGRATION - object_reference: 
naked_identifier: git2 - keyword: API_PROVIDER - comparison_operator: raw_comparison_operator: '=' - keyword: git_https_api - keyword: GOOGLE_AUDIENCE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: ALLOWED_AUTHENTICATION_SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'pedo'" - comma: ',' - quoted_literal: "'mellon a'" - comma: ',' - quoted_literal: "'minno'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_authentication_policy.sql000066400000000000000000000014701503426445100304230ustar00rootroot00000000000000CREATE AUTHENTICATION POLICY my_policy; CREATE AUTHENTICATION POLICY IF NOT EXISTS my_authentication_policy AUTHENTICATION_METHODS = ( 'MY METHOD' ) CLIENT_TYPES = ( 'MY CLIENT' ) COMMENT = 'My Comment' ; CREATE AUTHENTICATION POLICY IF NOT EXISTS my_authentication_policy AUTHENTICATION_METHODS = ( 'METHOD1', 'METHOD2', 'METHOD3' ) CLIENT_TYPES = ( 'CLIENT1', 'CLIENT2', 'CLIENT3' ) COMMENT = 'My Comment' ; CREATE AUTHENTICATION POLICY my_policy MFA_AUTHENTICATION_METHODS = ('METHOD1') MFA_ENROLLMENT = REQUIRED SECURITY_INTEGRATIONS = ('INTEGRATION1'); CREATE OR REPLACE AUTHENTICATION POLICY my_policy MFA_AUTHENTICATION_METHODS = ( 'METHOD1', 'METHOD2', 'METHOD3' ) MFA_ENROLLMENT = REQUIRED SECURITY_INTEGRATIONS = ('INTEGRATION1', 'INTEGRATION2', 'INTEGRATION3'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_authentication_policy.yml000066400000000000000000000102751503426445100304300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a5602be28c3567447a96bc25c3b1e835c8dd131ce1241b05df272b90b58ec927 file: - statement: create_authentication_policy_segment: - keyword: CREATE - keyword: AUTHENTICATION - keyword: POLICY - table_reference: naked_identifier: my_policy - statement_terminator: ; - statement: create_authentication_policy_segment: - keyword: CREATE - keyword: AUTHENTICATION - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_authentication_policy - keyword: AUTHENTICATION_METHODS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'MY METHOD'" end_bracket: ) - keyword: CLIENT_TYPES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'MY CLIENT'" end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My Comment'" - statement_terminator: ; - statement: create_authentication_policy_segment: - keyword: CREATE - keyword: AUTHENTICATION - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_authentication_policy - keyword: AUTHENTICATION_METHODS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'METHOD1'" - comma: ',' - quoted_literal: "'METHOD2'" - comma: ',' - quoted_literal: "'METHOD3'" - end_bracket: ) - keyword: CLIENT_TYPES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'CLIENT1'" - comma: ',' - quoted_literal: "'CLIENT2'" - comma: ',' - quoted_literal: "'CLIENT3'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My Comment'" - statement_terminator: ; - statement: create_authentication_policy_segment: - keyword: CREATE - keyword: AUTHENTICATION - keyword: POLICY - table_reference: naked_identifier: my_policy - keyword: MFA_AUTHENTICATION_METHODS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'METHOD1'" end_bracket: ) - keyword: MFA_ENROLLMENT - comparison_operator: raw_comparison_operator: '=' - keyword: REQUIRED - keyword: SECURITY_INTEGRATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'INTEGRATION1'" end_bracket: ) - statement_terminator: ; - statement: create_authentication_policy_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: AUTHENTICATION - keyword: POLICY - table_reference: naked_identifier: my_policy - keyword: MFA_AUTHENTICATION_METHODS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'METHOD1'" - comma: ',' - quoted_literal: "'METHOD2'" - comma: ',' - quoted_literal: "'METHOD3'" - end_bracket: ) - keyword: MFA_ENROLLMENT - comparison_operator: raw_comparison_operator: '=' - keyword: REQUIRED - keyword: SECURITY_INTEGRATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'INTEGRATION1'" - comma: ',' - quoted_literal: "'INTEGRATION2'" - comma: ',' - quoted_literal: "'INTEGRATION3'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_catalog_integration.sql000066400000000000000000000066451503426445100300530ustar00rootroot00000000000000CREATE CATALOG INTEGRATION glue_int CATALOG_SOURCE = GLUE TABLE_FORMAT = ICEBERG GLUE_AWS_ROLE_ARN = 'arn' GLUE_CATALOG_ID = 'catalog_id' 
GLUE_REGION = 'region.us.east' CATALOG_NAMESPACE = 'namespace' ENABLED = true REFRESH_INTERVAL_SECONDS = 10 COMMENT = 'comment'; CREATE OR REPLACE CATALOG INTEGRATION IF NOT EXISTS glue_int_2 CATALOG_SOURCE = GLUE TABLE_FORMAT = ICEBERG GLUE_AWS_ROLE_ARN = 'arn' GLUE_CATALOG_ID = 'catalog_id' ENABLED = false; CREATE CATALOG INTEGRATION object_storage_int_iceberg CATALOG_SOURCE = OBJECT_STORE TABLE_FORMAT = ICEBERG ENABLED = TRUE REFRESH_INTERVAL_SECONDS = 10 COMMENT = ''; CREATE CATALOG INTEGRATION object_storage_int_iceberg CATALOG_SOURCE = OBJECT_STORE TABLE_FORMAT = DELTA ENABLED = FALSE; CREATE OR REPLACE CATALOG INTEGRATION snow_open_catalog_int CATALOG_SOURCE = POLARIS TABLE_FORMAT = ICEBERG CATALOG_NAMESPACE = 'my_catalog_namespace' REST_CONFIG = ( CATALOG_URI = 'https://my_org_name-my_snowflake_open_catalog_account_name.snowflakecomputing.com/polaris/api/catalog' CATALOG_NAME = 'my_catalog_name' ) REST_AUTHENTICATION = ( TYPE = OAUTH OAUTH_CLIENT_ID = 'my_client_id' OAUTH_CLIENT_SECRET = 'my_client_secret' OAUTH_ALLOWED_SCOPES = ('PRINCIPAL_ROLE:ALL') ) ENABLED = TRUE; CREATE OR REPLACE CATALOG INTEGRATION snow_open_catalog_2_int CATALOG_SOURCE = POLARIS TABLE_FORMAT = ICEBERG CATALOG_NAMESPACE = 'my_catalog_namespace' REST_CONFIG = ( CATALOG_URI = 'https://my_org_name-my_snowflake_open_catalog_account_name.snowflakecomputing.com/polaris/api/catalog' CATALOG_NAME = 'my_catalog_name' ) REST_AUTHENTICATION = ( TYPE = OAUTH OAUTH_CLIENT_ID = 'my_client_id' OAUTH_CLIENT_SECRET = 'my_client_secret' OAUTH_ALLOWED_SCOPES = ('PRINCIPAL_ROLE:ALL', 'OTHER_ROLE:ALL') ) ENABLED = TRUE; CREATE CATALOG INTEGRATION apache_iceberg_rest_tabular_int CATALOG_SOURCE = ICEBERG_REST TABLE_FORMAT = ICEBERG CATALOG_NAMESPACE = '' REST_CONFIG = ( CATALOG_URI = 'https://api.tabular.io/ws' CATALOG_NAME = '' ) REST_AUTHENTICATION = ( TYPE = OAUTH OAUTH_TOKEN_URI = 'https://api.tabular.io/ws/v1/oauth/tokens' OAUTH_CLIENT_ID = '' OAUTH_CLIENT_SECRET = '' OAUTH_ALLOWED_SCOPES = ('catalog') ) ENABLED = TRUE REFRESH_INTERVAL_SECONDS = 10 COMMENT = ''; CREATE CATALOG INTEGRATION apache_iceberg_rest_sigv4_int CATALOG_SOURCE = ICEBERG_REST TABLE_FORMAT = ICEBERG CATALOG_NAMESPACE = '' REST_CONFIG = ( CATALOG_URI = 'https://glue.us-west-2.amazonaws.com/iceberg' CATALOG_API_TYPE = AWS_GLUE CATALOG_NAME = '123456789012' ) REST_AUTHENTICATION = ( TYPE = SIGV4 SIGV4_IAM_ROLE = 'arn:aws:iam::123456789012:role/my-role' SIGV4_SIGNING_REGION = 'us-west-2' ) ENABLED = TRUE REFRESH_INTERVAL_SECONDS = 10 COMMENT = ''; CREATE CATALOG INTEGRATION apache_iceberg_rest_bearer_int CATALOG_SOURCE = ICEBERG_REST TABLE_FORMAT = ICEBERG CATALOG_NAMESPACE = '' REST_CONFIG = ( CATALOG_URI = 'https://glue.us-west-2.amazonaws.com/iceberg' CATALOG_API_TYPE = AWS_GLUE CATALOG_NAME = '123456789012' ) REST_AUTHENTICATION = ( TYPE = BEARER BEARER_TOKEN = 'bearer-token' ) ENABLED = TRUE REFRESH_INTERVAL_SECONDS = 10 COMMENT = ''; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_catalog_integration.yml000066400000000000000000000350361503426445100300510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 716ca79eed2f8e77be54e8f9c995768fc4f10988f0f9f59dbe8d47a280619cb7 file: - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: glue_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: GLUE - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: GLUE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn'" - keyword: GLUE_CATALOG_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'catalog_id'" - keyword: GLUE_REGION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'region.us.east'" - keyword: CATALOG_NAMESPACE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'namespace'" - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: REFRESH_INTERVAL_SECONDS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CATALOG - keyword: INTEGRATION - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: glue_int_2 - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: GLUE - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: GLUE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn'" - keyword: GLUE_CATALOG_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'catalog_id'" - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: object_storage_int_iceberg - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: OBJECT_STORE - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: REFRESH_INTERVAL_SECONDS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: object_storage_int_iceberg - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: OBJECT_STORE - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: DELTA - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: snow_open_catalog_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: POLARIS - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: CATALOG_NAMESPACE - 
comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_catalog_namespace'" - keyword: REST_CONFIG - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CATALOG_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://my_org_name-my_snowflake_open_catalog_account_name.snowflakecomputing.com/polaris/api/catalog'" - keyword: CATALOG_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_catalog_name'" - end_bracket: ) - keyword: REST_AUTHENTICATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: OAUTH - keyword: OAUTH_CLIENT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_client_id'" - keyword: OAUTH_CLIENT_SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_client_secret'" - keyword: OAUTH_ALLOWED_SCOPES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'PRINCIPAL_ROLE:ALL'" end_bracket: ) - end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: snow_open_catalog_2_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: POLARIS - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: CATALOG_NAMESPACE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_catalog_namespace'" - keyword: REST_CONFIG - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CATALOG_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://my_org_name-my_snowflake_open_catalog_account_name.snowflakecomputing.com/polaris/api/catalog'" - keyword: CATALOG_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_catalog_name'" - end_bracket: ) - keyword: REST_AUTHENTICATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: OAUTH - keyword: OAUTH_CLIENT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_client_id'" - keyword: OAUTH_CLIENT_SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_client_secret'" - keyword: OAUTH_ALLOWED_SCOPES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'PRINCIPAL_ROLE:ALL'" - comma: ',' - quoted_literal: "'OTHER_ROLE:ALL'" - end_bracket: ) - end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: apache_iceberg_rest_tabular_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG_REST - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: CATALOG_NAMESPACE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: REST_CONFIG - comparison_operator: raw_comparison_operator: '=' - 
bracketed: - start_bracket: ( - keyword: CATALOG_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://api.tabular.io/ws'" - keyword: CATALOG_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - end_bracket: ) - keyword: REST_AUTHENTICATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: OAUTH - keyword: OAUTH_TOKEN_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://api.tabular.io/ws/v1/oauth/tokens'" - keyword: OAUTH_CLIENT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: OAUTH_CLIENT_SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: OAUTH_ALLOWED_SCOPES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'catalog'" end_bracket: ) - end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: REFRESH_INTERVAL_SECONDS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: apache_iceberg_rest_sigv4_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG_REST - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: CATALOG_NAMESPACE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: REST_CONFIG - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CATALOG_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://glue.us-west-2.amazonaws.com/iceberg'" - keyword: CATALOG_API_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: AWS_GLUE - keyword: CATALOG_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'123456789012'" - end_bracket: ) - keyword: REST_AUTHENTICATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: SIGV4 - keyword: SIGV4_IAM_ROLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/my-role'" - keyword: SIGV4_SIGNING_REGION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'us-west-2'" - end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: REFRESH_INTERVAL_SECONDS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: CATALOG - keyword: INTEGRATION - object_reference: naked_identifier: apache_iceberg_rest_bearer_int - keyword: CATALOG_SOURCE - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG_REST - keyword: TABLE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: ICEBERG - keyword: CATALOG_NAMESPACE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: 
"''" - keyword: REST_CONFIG - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CATALOG_URI - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://glue.us-west-2.amazonaws.com/iceberg'" - keyword: CATALOG_API_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: AWS_GLUE - keyword: CATALOG_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'123456789012'" - end_bracket: ) - keyword: REST_AUTHENTICATION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: BEARER - keyword: BEARER_TOKEN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bearer-token'" - end_bracket: ) - keyword: ENABLED - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: REFRESH_INTERVAL_SECONDS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_clone.sql000066400000000000000000000007521503426445100251270ustar00rootroot00000000000000CREATE DATABASE mytestdb_clone CLONE mytestdb; CREATE SCHEMA mytestschema_clone CLONE testschema; CREATE TABLE orders_clone CLONE orders; CREATE SCHEMA mytestschema_clone_restore CLONE testschema BEFORE (TIMESTAMP => TO_TIMESTAMP(40*365*86400)); CREATE TABLE orders_clone_restore CLONE orders AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss')); CREATE TABLE orders_clone_restore CLONE orders BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_clone.yml000066400000000000000000000067531503426445100251400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 27b5381a102cf1c4db57b068256acbff4094243c5e397c00942e2e19852b9a6c file: - statement: create_clone_statement: - keyword: CREATE - keyword: DATABASE - object_reference: naked_identifier: mytestdb_clone - keyword: CLONE - object_reference: naked_identifier: mytestdb - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: SCHEMA - object_reference: naked_identifier: mytestschema_clone - keyword: CLONE - object_reference: naked_identifier: testschema - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_clone - keyword: CLONE - table_reference: naked_identifier: orders - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: SCHEMA - object_reference: naked_identifier: mytestschema_clone_restore - keyword: CLONE - object_reference: naked_identifier: testschema - from_before_expression: keyword: BEFORE bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: orders_clone_restore - keyword: CLONE - object_reference: naked_identifier: orders - from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP_TZ function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'04/05/2013 01:02:03'" - comma: ',' - expression: quoted_literal: "'mm/dd/yyyy hh24:mi:ss'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: orders_clone_restore - keyword: CLONE - object_reference: naked_identifier: orders - from_before_expression: keyword: BEFORE bracketed: start_bracket: ( keyword: STATEMENT parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_cortex_search_service.sql000066400000000000000000000013161503426445100303750ustar00rootroot00000000000000-- create cortex search service my_service; CREATE OR REPLACE CORTEX SEARCH SERVICE mysvc ON transcript_text ATTRIBUTES region,agent_id WAREHOUSE = mywh TARGET_LAG = '1 hour' EMBEDDING_MODEL = 'snowflake-arctic-embed-l-v2.0' AS ( SELECT transcript_text, date, region, agent_id FROM support_db.public.transcripts_etl ); create cortex search service my_service on text attributes id, type, title warehouse = my_warehouse target_lag = '1 days' as select text, id, type, title, from my_db.my_schema.my_table ; create or replace cortex search service my_service on text warehouse = my_warehouse target_lag = '1 days' as select text, id, type, title, from my_db.my_schema.my_table ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_cortex_search_service.yml000066400000000000000000000131531503426445100304010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9f966e886ace225e5a654835ec52a1d1f44e67308a7072373e40274f6a152e8 file: - statement: create_cortex_search_service_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - keyword: 'ON' - column_reference: naked_identifier: transcript_text - keyword: ATTRIBUTES - column_reference: naked_identifier: region - comma: ',' - column_reference: naked_identifier: agent_id - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 hour'" - keyword: EMBEDDING_MODEL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'snowflake-arctic-embed-l-v2.0'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: transcript_text - comma: ',' - select_clause_element: column_reference: naked_identifier: date - comma: ',' - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: agent_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: support_db - dot: . - naked_identifier: public - dot: . - naked_identifier: transcripts_etl end_bracket: ) - statement_terminator: ; - statement: create_cortex_search_service_statement: - keyword: create - keyword: cortex - keyword: search - keyword: service - object_reference: naked_identifier: my_service - keyword: 'on' - column_reference: naked_identifier: text - keyword: attributes - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: type - comma: ',' - column_reference: naked_identifier: title - keyword: warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_warehouse - keyword: target_lag - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 days'" - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: text - comma: ',' - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: type - comma: ',' - select_clause_element: column_reference: naked_identifier: title - comma: ',' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - statement_terminator: ; - statement: create_cortex_search_service_statement: - keyword: create - keyword: or - keyword: replace - keyword: cortex - keyword: search - keyword: service - object_reference: naked_identifier: my_service - keyword: 'on' - column_reference: naked_identifier: text - keyword: warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_warehouse - keyword: target_lag - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 days'" - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: text - comma: ',' - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: type - comma: ',' - select_clause_element: column_reference: naked_identifier: title - comma: ',' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_database.sql000066400000000000000000000002521503426445100255660ustar00rootroot00000000000000CREATE DATABASE MY_DATABASE; CREATE DATABASE IF NOT EXISTS MY_DATABASE; CREATE DATABASE MY_DATABASE FROM SHARE MY_ACCOUNT.MY_SHARE; CREATE OR ALTER DATABASE MY_DATABASE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_database.yml000066400000000000000000000025221503426445100255720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 83d8056bc29b5b849928ad98ab6bc4deb8f366d9b369986a45bc4b1a0fbebbe3 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: MY_DATABASE - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: MY_DATABASE - statement_terminator: ; - statement: create_database_from_share_statement: - keyword: CREATE - keyword: DATABASE - object_reference: naked_identifier: MY_DATABASE - keyword: FROM - keyword: SHARE - object_reference: - naked_identifier: MY_ACCOUNT - dot: . - naked_identifier: MY_SHARE - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: MY_DATABASE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_database_role.sql000066400000000000000000000000711503426445100266060ustar00rootroot00000000000000create database role dbname.rolename comment = 'TEST'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_database_role.yml000066400000000000000000000014441503426445100266150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a243c9568421c416b6c9a50c189a3615a6e48c6afe095f8fdaef1bf55ce1b6aa file: statement: create_database_role_statement: - keyword: create - keyword: database - keyword: role - database_role_reference: - naked_identifier: dbname - dot: . - naked_identifier: rolename - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TEST'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_dynamic_table.sql000066400000000000000000000023101503426445100266120ustar00rootroot00000000000000CREATE OR REPLACE DYNAMIC TABLE names( id, first_name, last_name ) REFRESH_MODE = AUTO TARGET_LAG = '1 minute' INITIALIZE = ON_CREATE WAREHOUSE = 'mywh' AS SELECT var:id::int id, var:fname::string first_name, var:lname::string last_name FROM raw; CREATE OR REPLACE DYNAMIC TABLE product TARGET_LAG = '20 minutes' WAREHOUSE = mywh AS SELECT product_id, product_name FROM staging_table; CREATE DYNAMIC ICEBERG TABLE product (date TIMESTAMP_NTZ, id NUMBER, content STRING) TARGET_LAG = '20 minutes' WAREHOUSE = mywh EXTERNAL_VOLUME = 'my_external_volume' CATALOG = 'SNOWFLAKE' BASE_LOCATION = 'my_iceberg_table' AS SELECT product_id, product_name FROM staging_table; CREATE DYNAMIC TABLE product (date TIMESTAMP_NTZ, id NUMBER, content VARIANT) TARGET_LAG = '20 minutes' WAREHOUSE = mywh CLUSTER BY (date, id) AS SELECT product_id, product_name FROM staging_table; CREATE DYNAMIC TABLE product_clone CLONE product AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss')); CREATE DYNAMIC TABLE product TARGET_LAG = 'DOWNSTREAM' WAREHOUSE = mywh INITIALIZE = on_schedule REQUIRE USER AS SELECT product_id, product_name FROM staging_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_dynamic_table.yml000066400000000000000000000240571503426445100266300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c50fafc1e621ade3b1f6fba074280b656867d00e09110117437ead25f292e0a8 file: - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: names - bracketed: - start_bracket: ( - naked_identifier: id - comma: ',' - naked_identifier: first_name - comma: ',' - naked_identifier: last_name - end_bracket: ) - dynamic_table_options: - keyword: REFRESH_MODE - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 minute'" - keyword: INITIALIZE - comparison_operator: raw_comparison_operator: '=' - initialize_type: ON_CREATE - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'mywh'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: int alias_expression: naked_identifier: id - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: fname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: first_name - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: lname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: product - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'20 minutes'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product_id - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staging_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: DYNAMIC - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: date data_type: data_type_identifier: TIMESTAMP_NTZ - comma: ',' - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: content data_type: data_type_identifier: STRING - end_bracket: ) - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'20 minutes'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: EXTERNAL_VOLUME - comparison_operator: 
raw_comparison_operator: '=' - quoted_literal: "'my_external_volume'" - keyword: CATALOG - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SNOWFLAKE'" - keyword: BASE_LOCATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_iceberg_table'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product_id - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staging_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: date data_type: data_type_identifier: TIMESTAMP_NTZ - comma: ',' - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: content data_type: data_type_identifier: VARIANT - end_bracket: ) - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'20 minutes'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: CLUSTER - keyword: BY - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: date - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product_id - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staging_table - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: DYNAMIC - keyword: TABLE - object_reference: naked_identifier: product_clone - keyword: CLONE - object_reference: naked_identifier: product - from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP_TZ function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'04/05/2013 01:02:03'" - comma: ',' - expression: quoted_literal: "'mm/dd/yyyy hh24:mi:ss'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: product - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'DOWNSTREAM'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: INITIALIZE - comparison_operator: raw_comparison_operator: '=' - initialize_type: on_schedule - keyword: REQUIRE - keyword: USER - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product_id - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: staging_table - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_event_table.sql000066400000000000000000000010431503426445100263110ustar00rootroot00000000000000CREATE EVENT TABLE my_events; CREATE OR REPLACE EVENT TABLE IF NOT EXISTS my_database.my_schema.my_events; CREATE EVENT TABLE IF NOT EXISTS log_trace_db.public.event_table; CREATE OR REPLACE EVENT TABLE IF NOT EXISTS my_events CLUSTER BY (date, type) DATA_RETENTION_TIME_IN_DAYS = 5 MAX_DATA_EXTENSION_TIME_IN_DAYS = 30 CHANGE_TRACKING = FALSE DEFAULT_DDL_COLLATION = 'en-ci' COPY GRANTS WITH COMMENT = 'My events table' WITH ROW ACCESS POLICY sales_policy ON (type, region) WITH TAG (cost_center = 'sales') ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_event_table.yml000066400000000000000000000064671503426445100263320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1ca95226f6b8602a8c8c20587b6ca025b93e3accc23b3671c261f807526275bd file: - statement: create_event_table_statement: - keyword: CREATE - keyword: EVENT - keyword: TABLE - table_reference: naked_identifier: my_events - statement_terminator: ; - statement: create_event_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EVENT - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_events - statement_terminator: ; - statement: create_event_table_statement: - keyword: CREATE - keyword: EVENT - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: log_trace_db - dot: . - naked_identifier: public - dot: . 
- naked_identifier: event_table - statement_terminator: ; - statement: create_event_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EVENT - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_events - keyword: CLUSTER - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: date - comma: ',' - expression: column_reference: naked_identifier: type - end_bracket: ) - keyword: DATA_RETENTION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - keyword: MAX_DATA_EXTENSION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en-ci'" - keyword: COPY - keyword: GRANTS - keyword: WITH - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My events table'" - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: sales_policy - keyword: 'ON' - bracketed: - start_bracket: ( - column_reference: naked_identifier: type - comma: ',' - column_reference: naked_identifier: region - end_bracket: ) - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: start_bracket: ( tag_reference: naked_identifier: cost_center comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sales'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_function.sql000066400000000000000000000036471503426445100275640ustar00rootroot00000000000000CREATE OR REPLACE EXTERNAL FUNCTION LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT NOT NULL STRICT VOLATILE API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT NOT NULL RETURNS NULL ON NULL INPUT VOLATILE API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(OBJ OBJECT) RETURNS VARCHAR NULL CALLED ON NULL INPUT IMMUTABLE COMMENT = 'SQLFluff rocks!' 
API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_function.yml000066400000000000000000000202561503426445100275610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5ddd8d4a809b4797dc39ce84830624d2a8217c8f68b7b7400b4b42b454901060 file: - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: NOT - keyword: 'NULL' - keyword: STRICT - keyword: VOLATILE - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . 
function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: NOT - keyword: 'NULL' - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: VOLATILE - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . 
function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: OBJ data_type: data_type_identifier: OBJECT end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: 'NULL' - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: IMMUTABLE - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SQLFluff rocks!'" - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_table.sql000066400000000000000000000036111503426445100270150ustar00rootroot00000000000000create or replace external table ext_table with location = @mystage/path1/ file_format = (type = json) aws_sns_topic = 'arn:aws:sns:us-west-2:001234567890:s3_mybucket'; create or replace external table "_p08" with location=@carpe_datastore_commercial/p08 auto_refresh=true file_format = (type=parquet) pattern='.*[.]parquet.*'; CREATE EXTERNAL TABLE EXTERNAL_TABLES.TRIPS( tripduration integer as try_cast(VALUE:c1::varchar as integer) not null, starttime timestamp as try_cast(VALUE:c2::varchar as timestamp), stoptime timestamp as try_cast(VALUE:c3::varchar as timestamp), start_station_id integer as try_cast(VALUE:c4::varchar as integer) null, start_station_name varchar as (VALUE:c5::varchar), start_station_latitude float as try_cast(VALUE:c6::varchar as float), start_station_longitude float as try_cast(VALUE:c7::varchar as float), end_station_id integer as try_cast(VALUE:c8::varchar as integer), end_station_name varchar as (VALUE:c9::varchar), end_station_latitude float as try_cast(VALUE:c10::varchar as float), end_station_longitude float as try_cast(VALUE:c11::varchar as float), bikeid integer as try_cast(VALUE:c12::varchar as integer), membership_type varchar as (VALUE:c13::varchar), usertype varchar as (VALUE:c14::varchar), birth_year integer as try_cast(VALUE:c15::varchar as integer), gender integer as try_cast(VALUE:c16::varchar as integer), year integer as (substr(metadata$filename, 22, 4)::integer) ) PARTITION BY (year) LOCATION = @external_tables.citibike_trips FILE_FORMAT = ( TYPE = 'CSV' FIELD_OPTIONALLY_ENCLOSED_BY = '"' ); CREATE EXTERNAL TABLE IF 
NOT EXISTS source_test.test ( yyyymmdd TEXT AS (value:YYYYMMDD::TEXT), product TEXT AS (value:product::TEXT) ) PARTITION BY (yyyymmdd) PARTITION_TYPE = user_specified LOCATION = @public.test_stage FILE_FORMAT = public.parquet_format_convert_binary AUTO_REFRESH = false; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_table.yml000066400000000000000000000457341503426445100270330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4cfacc5f961f683b2ce39ebb46725ef077de9a28e53e748eb50e7589daae3ab8 file: - statement: create_external_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: external - keyword: table - table_reference: naked_identifier: ext_table - keyword: with - keyword: location - comparison_operator: raw_comparison_operator: '=' - stage_path: '@mystage/path1/' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: json end_bracket: ) - keyword: aws_sns_topic - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:sns:us-west-2:001234567890:s3_mybucket'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: external - keyword: table - table_reference: quoted_identifier: '"_p08"' - keyword: with - keyword: location - comparison_operator: raw_comparison_operator: '=' - stage_path: '@carpe_datastore_commercial/p08' - keyword: auto_refresh - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: parquet end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*[.]parquet.*'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: EXTERNAL_TABLES - dot: . 
- naked_identifier: TRIPS - bracketed: - start_bracket: ( - naked_identifier: tripduration - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c1 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - keyword: not - keyword: 'null' - comma: ',' - naked_identifier: starttime - data_type: keyword: timestamp - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c2 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: keyword: timestamp end_bracket: ) - comma: ',' - naked_identifier: stoptime - data_type: keyword: timestamp - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c3 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: keyword: timestamp end_bracket: ) - comma: ',' - naked_identifier: start_station_id - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c4 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - keyword: 'null' - comma: ',' - naked_identifier: start_station_name - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c5 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: start_station_latitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c6 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: start_station_longitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c7 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: end_station_id - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: 
try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c8 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: end_station_name - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c9 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: end_station_latitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c10 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: end_station_longitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c11 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: bikeid - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c12 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: membership_type - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c13 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: usertype - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c14 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: birth_year - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c15 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: gender - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast function_contents: 
bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c16 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: year - data_type: data_type_identifier: integer - keyword: as - bracketed: start_bracket: ( expression: cast_expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: metadata$filename - comma: ',' - expression: numeric_literal: '22' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: integer end_bracket: ) - end_bracket: ) - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( naked_identifier: year end_bracket: ) - keyword: LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: '@external_tables.citibike_trips' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: "'CSV'" - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\"'" end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: source_test - dot: . - naked_identifier: test - bracketed: - start_bracket: ( - naked_identifier: yyyymmdd - data_type: data_type_identifier: TEXT - keyword: AS - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: YYYYMMDD casting_operator: '::' data_type: data_type_identifier: TEXT end_bracket: ) - comma: ',' - naked_identifier: product - data_type: data_type_identifier: TEXT - keyword: AS - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: product casting_operator: '::' data_type: data_type_identifier: TEXT end_bracket: ) - end_bracket: ) - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( naked_identifier: yyyymmdd end_bracket: ) - keyword: PARTITION_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: user_specified - keyword: LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: '@public.test_stage' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: - naked_identifier: public - dot: . 
- naked_identifier: parquet_format_convert_binary - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_volume.sql
CREATE OR REPLACE EXTERNAL VOLUME exvol
   STORAGE_LOCATIONS =
      (
         (
            NAME = 'my-s3-us-west-2'
            STORAGE_PROVIDER = 'S3'
            STORAGE_BASE_URL = 's3://MY_EXAMPLE_BUCKET/'
            STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/myrole'
            ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab')
         )
      )
;

CREATE EXTERNAL VOLUME exvol
   STORAGE_LOCATIONS =
      (
         (
            NAME = 'my-us-east-1'
            STORAGE_PROVIDER = 'GCS'
            STORAGE_BASE_URL = 'gcs://mybucket1/path1/'
            ENCRYPTION=(TYPE='GCS_SSE_KMS' KMS_KEY_ID = '1234abcd-12ab-34cd-56ef-1234567890ab')
         )
      );

CREATE EXTERNAL VOLUME exvol
   STORAGE_LOCATIONS =
      (
         (
            NAME = 'my-azure-northeurope'
            STORAGE_PROVIDER = 'AZURE'
            STORAGE_BASE_URL = 'azure://exampleacct.blob.core.windows.net/my_container_northeurope/'
            AZURE_TENANT_ID = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
         )
      );

CREATE EXTERNAL VOLUME IF NOT EXISTS exvol
   STORAGE_LOCATIONS =
      (
         (
            NAME = 'my-s3-us-west-2'
            STORAGE_PROVIDER = 'S3'
            STORAGE_BASE_URL = 's3://MY_EXAMPLE_BUCKET/'
            STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/myrole'
            ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab')
         ),
         (
            NAME = 'my-s3-us-west-3'
            STORAGE_PROVIDER = 'S3'
            STORAGE_BASE_URL = 's3://MY_EXAMPLE_BUCKET_2/'
            STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/myrole'
            ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab')
         )
      )
   ALLOW_WRITES=FALSE
   COMMENT='foo'
;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_external_volume.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
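# As an illustrative sketch of that regeneration step (kept as comments so the
# fixture stays valid YAML): running the script with no arguments regenerates
# the fixtures, and the trailing dialect filter shown below is an assumption
# about the script's arguments, not something stated in this file:
#
#     python test/generate_parse_fixture_yml.py snowflake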
_hash: 2eebd390d4f4b94a0651fa40019d454c1cdf13bd52bf13c9df656319d52a3fb2 file: - statement: create_external_volume_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol - keyword: STORAGE_LOCATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-s3-us-west-2'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'S3'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://MY_EXAMPLE_BUCKET/'" - keyword: STORAGE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/myrole'" - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_external_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol - keyword: STORAGE_LOCATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-us-east-1'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'GCS'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gcs://mybucket1/path1/'" - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'GCS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_external_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: exvol - keyword: STORAGE_LOCATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-azure-northeurope'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AZURE'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'azure://exampleacct.blob.core.windows.net/my_container_northeurope/'" - keyword: AZURE_TENANT_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_external_volume_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: VOLUME - keyword: IF - keyword: NOT - keyword: EXISTS - external_volume_reference: naked_identifier: exvol - keyword: STORAGE_LOCATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: - 
start_bracket: ( - bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-s3-us-west-2'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'S3'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://MY_EXAMPLE_BUCKET/'" - keyword: STORAGE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/myrole'" - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - end_bracket: ) - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - keyword: NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my-s3-us-west-3'" - keyword: STORAGE_PROVIDER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'S3'" - keyword: STORAGE_BASE_URL - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://MY_EXAMPLE_BUCKET_2/'" - keyword: STORAGE_AWS_ROLE_ARN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::123456789012:role/myrole'" - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - end_bracket: ) - end_bracket: ) - end_bracket: ) - keyword: ALLOW_WRITES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'foo'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_file_format.sql000066400000000000000000000103411503426445100263110ustar00rootroot00000000000000CREATE FILE FORMAT my_file_format TYPE = CSV ; CREATE OR REPLACE FILE FORMAT my_csv_format TYPE = CSV, COMPRESSION = AUTO, RECORD_DELIMITER = NONE, FIELD_DELIMITER = NONE, FILE_EXTENSION = 'foobar', SKIP_HEADER = 1, SKIP_BLANK_LINES = TRUE, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FIELD_OPTIONALLY_ENCLOSED_BY = NONE, ERROR_ON_COLUMN_COUNT_MISMATCH = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, VALIDATE_UTF8 = TRUE, EMPTY_FIELD_AS_NULL = TRUE, SKIP_BYTE_ORDER_MARK = TRUE, ENCODING = UTF8 ; CREATE FILE FORMAT IF NOT EXISTS my_csv_format TYPE = CSV ESCAPE = '\\' FIELD_OPTIONALLY_ENCLOSED_BY = '\"' COMPRESSION = NONE FIELD_DELIMITER ='|' NULL_IF=() ; CREATE FILE FORMAT IF NOT EXISTS my_csv_format TYPE = CSV COMPRESSION = GZIP RECORD_DELIMITER = 'foo' FIELD_DELIMITER = 'bar' FILE_EXTENSION = 'foobar' SKIP_HEADER = 99 SKIP_BLANK_LINES = FALSE DATE_FORMAT = 'foo' TIME_FORMAT = 'bar' TIMESTAMP_FORMAT = 'foobar' BINARY_FORMAT = UTF8 TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') FIELD_OPTIONALLY_ENCLOSED_BY = 'foo' ERROR_ON_COLUMN_COUNT_MISMATCH = FALSE REPLACE_INVALID_CHARACTERS = FALSE VALIDATE_UTF8 = FALSE EMPTY_FIELD_AS_NULL = FALSE SKIP_BYTE_ORDER_MARK = FALSE ENCODING = 'foo' COMMENT = 'FOOBAR' ; CREATE FILE FORMAT 
IF NOT EXISTS my_csv_format
    TYPE = CSV
    PARSE_HEADER = TRUE
;

CREATE OR REPLACE FILE FORMAT my_json_format
    TYPE = JSON,
    COMPRESSION = AUTO,
    DATE_FORMAT = AUTO,
    TIME_FORMAT = AUTO,
    TIMESTAMP_FORMAT = AUTO,
    BINARY_FORMAT = HEX,
    TRIM_SPACE = TRUE,
    NULL_IF = ('foo', 'bar'),
    FILE_EXTENSION = 'foobar',
    ENABLE_OCTAL = TRUE,
    ALLOW_DUPLICATE = TRUE,
    STRIP_OUTER_ARRAY = TRUE,
    STRIP_NULL_VALUES = TRUE,
    REPLACE_INVALID_CHARACTERS = TRUE,
    IGNORE_UTF8_ERRORS = TRUE,
    SKIP_BYTE_ORDER_MARK = TRUE
;

CREATE FILE FORMAT IF NOT EXISTS my_json_format
    TYPE = JSON
    COMPRESSION = GZIP
    DATE_FORMAT = 'foobar'
    TIME_FORMAT = 'foobar'
    TIMESTAMP_FORMAT = 'foobar'
    BINARY_FORMAT = BASE64
    TRIM_SPACE = FALSE
    NULL_IF = ('foo', 'bar')
    FILE_EXTENSION = 'foobar'
    ENABLE_OCTAL = FALSE
    ALLOW_DUPLICATE = FALSE
    STRIP_OUTER_ARRAY = FALSE
    STRIP_NULL_VALUES = FALSE
    REPLACE_INVALID_CHARACTERS = TRUE
    IGNORE_UTF8_ERRORS = FALSE
    SKIP_BYTE_ORDER_MARK = FALSE
    COMMENT = 'FOOBAR'
;

CREATE FILE FORMAT IF NOT EXISTS my_json_format
    TYPE = JSON
    NULL_IF = ()
;

CREATE OR REPLACE FILE FORMAT my_avro_format
    TYPE = AVRO,
    COMPRESSION = AUTO,
    TRIM_SPACE = TRUE,
    NULL_IF = ('foo', 'bar')
;

CREATE FILE FORMAT IF NOT EXISTS my_avro_format
    TYPE = AVRO
    COMPRESSION = 'GZIP'
    TRIM_SPACE = FALSE
    NULL_IF = ('foo', 'bar')
    COMMENT = 'FOOBAR'
;

CREATE OR REPLACE FILE FORMAT my_orc_format
    TYPE = ORC,
    TRIM_SPACE = TRUE,
    NULL_IF = ('foo', 'bar')
;

CREATE FILE FORMAT IF NOT EXISTS my_orc_format
    TYPE = ORC
    TRIM_SPACE = FALSE
    NULL_IF = ('foo', 'bar')
    COMMENT = 'FOOBAR'
;

CREATE OR REPLACE FILE FORMAT my_parquet_format
    TYPE = PARQUET,
    COMPRESSION = SNAPPY,
    SNAPPY_COMPRESSION = TRUE,
    TRIM_SPACE = TRUE,
    NULL_IF = ('foo', 'bar')
;

CREATE FILE FORMAT IF NOT EXISTS my_parquet_format
    TYPE = PARQUET
    COMPRESSION = AUTO
    SNAPPY_COMPRESSION = FALSE
    TRIM_SPACE = FALSE
    BINARY_AS_TEXT = TRUE
    USE_LOGICAL_TYPE = FALSE
    USE_VECTORIZED_SCANNER = FALSE
    REPLACE_INVALID_CHARACTERS = FALSE
    NULL_IF = ('foo', 'bar')
    COMMENT = 'FOOBAR'
;

CREATE OR REPLACE FILE FORMAT my_xml_format
    TYPE = XML,
    COMPRESSION = AUTO,
    IGNORE_UTF8_ERRORS = TRUE,
    PRESERVE_SPACE = TRUE,
    STRIP_OUTER_ELEMENT = TRUE,
    DISABLE_SNOWFLAKE_DATA = TRUE,
    DISABLE_AUTO_CONVERT = TRUE,
    SKIP_BYTE_ORDER_MARK = TRUE
;

CREATE FILE FORMAT IF NOT EXISTS my_xml_format
    TYPE = XML
    COMPRESSION = GZIP
    IGNORE_UTF8_ERRORS = FALSE
    PRESERVE_SPACE = FALSE
    STRIP_OUTER_ELEMENT = FALSE
    DISABLE_SNOWFLAKE_DATA = FALSE
    DISABLE_AUTO_CONVERT = FALSE
    SKIP_BYTE_ORDER_MARK = FALSE
    COMMENT = 'FOOBAR'
;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_file_format.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fc075dc42d53b1787d69152ebcfc7e00d5461c2377be16f0da2aa8686a0dd4b3 file: - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - csv_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: CSV - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - comma: ',' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: ESCAPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\\\'" - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\\"'" - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - 
compression_type: NONE - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'|'" - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '99' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: PARSE_HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: 
JSON - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: BASE64 - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: 
raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_avro_format - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_avro_format - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: "'GZIP'" - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_orc_format - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_orc_format - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - 
comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_parquet_format - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: SNAPPY - comma: ',' - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_parquet_format - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: BINARY_AS_TEXT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: USE_LOGICAL_TYPE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: USE_VECTORIZED_SCANNER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_xml_format - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: PRESERVE_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DISABLE_AUTO_CONVERT - 
comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_xml_format - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: PRESERVE_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_AUTO_CONVERT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_function.sql000066400000000000000000000073751503426445100256640ustar00rootroot00000000000000CREATE FUNCTION pi_udf() RETURNS FLOAT AS '3.141592654::FLOAT'; CREATE FUNCTION simple_table_function () RETURNS TABLE (x INTEGER, y INTEGER) AS $$ SELECT 1, 2 UNION ALL SELECT 3, 4 $$; CREATE OR REPLACE FUNCTION get_countries_for_user ( id number ) RETURNS TABLE (country_code char, country_name varchar) RETURNS NULL ON NULL INPUT AS 'select distinct c.country_code, c.country_name from user_addresses a, countries c where a.user_id = id and c.country_code = a.country_code'; CREATE SECURE FUNCTION js_factorial(d double) RETURNS double IMMUTABLE LANGUAGE JAVASCRIPT STRICT AS ' if (D <= 0) { return 1; } else { var result = 1; for (var i = 2; i <= D; i++) { result = result * i; } return result; } '; CREATE FUNCTION IF NOT EXISTS simple_table_function () RETURNS TABLE (x INTEGER, y INTEGER) LANGUAGE SQL AS $$ SELECT 1, 2 UNION ALL SELECT 3, 4 $$; create function my_decrement_udf(i numeric(9, 0)) returns numeric language java imports = ('@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar') handler = 'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method' ; create or replace function echo_varchar(x varchar) returns varchar language java called on null input handler='TestFunc.echoVarchar' target_path='@~/testfunc.jar' as 'class TestFunc { public static String echoVarchar(String x) { return x; } }'; create or replace function py_udf() returns variant language python runtime_version = '3.8' packages = ('numpy','pandas','xgboost==1.5.0') handler = 'udf' as $$ import numpy as np import pandas as pd import xgboost as xgb def udf(): return [np.__version__, pd.__version__, xgb.__version__] $$; create or replace function dream(i int) returns variant language python runtime_version = '3.8' handler = 'sleepy.snore' imports = ('@my_stage/sleepy.py') ; create or replace function addone(i int) returns int language python runtime_version = '3.8' handler = 'addone_py' as $$ def addone_py(i): return i+1 $$; CREATE OR REPLACE FUNCTION 
echo_varchar(x VARCHAR)
RETURNS VARCHAR
LANGUAGE SCALA
RUNTIME_VERSION = '2.12'
HANDLER='Echo.echoVarchar'
AS
$$
class Echo {
    def echoVarchar(x : String): String = {
        return x
    }
}
$$;

CREATE OR REPLACE FUNCTION google_translate_python(sentence STRING, language STRING)
RETURNS STRING
LANGUAGE PYTHON
RUNTIME_VERSION = '3.8'
HANDLER = 'get_translation'
EXTERNAL_ACCESS_INTEGRATIONS = (google_apis_access_integration, my_integration )
PACKAGES = ('snowflake-snowpark-python','requests')
SECRETS = ('cred' = oauth_token, 'cred2' = DATA_STAGE.AWS_SECRET_KEY )
AS
$$
import _snowflake
import requests
import json
session = requests.Session()
def get_translation(sentence, language):
  token = _snowflake.get_oauth_access_token('cred')
  url = "https://translation.googleapis.com/language/translate/v2"
  data = {'q': sentence,'target': language}
  response = session.post(url, json = data, headers = {"Authorization": "Bearer " + token})
  return response.json()['data']['translations'][0]['translatedText']
$$;

create or replace aggregate function addone(i int)
returns int
language python
runtime_version = '3.8'
handler = 'addone_py'
as
$$
def addone_py(i):
  return i+1
$$;

CREATE OR REPLACE FUNCTION TEST_DB.TEST_SCHEMA.TEST_TABLE(
    COL_1 VARCHAR DEFAULT NULL ,
    COL_2 VARCHAR DEFAULT NULL
)
RETURNS VARCHAR
LANGUAGE SQL
AS
$$
SELECT
    CASE
        WHEN (LOWER(COL_1) IS NOT NULL AND
              LOWER(COL_2) = 'test_marketing')
            THEN 'marketing_channel'
        ELSE '(Other)'
END
$$;;

CREATE TEMPORARY FUNCTION pi_udf()
RETURNS FLOAT
AS '3.141592654::FLOAT';

CREATE TEMP FUNCTION pi_udf()
RETURNS FLOAT
AS '3.141592654::FLOAT';
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_function.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: acbe752dbc688f77d81e29201ed6d731756ec967a06479bd4026b87849830d5a file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: pi_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT - keyword: AS - udf_body: "'3.141592654::FLOAT'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: simple_table_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INTEGER - end_bracket: ) - keyword: AS - udf_body: "$$\n SELECT 1, 2\n UNION ALL\n SELECT 3, 4\n $$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: get_countries_for_user - function_parameter_list: bracketed: start_bracket: ( parameter: id data_type: data_type_identifier: number end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: country_code data_type: data_type_identifier: char - comma: ',' - column_definition: naked_identifier: country_name data_type: data_type_identifier: varchar - end_bracket: ) - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: AS - udf_body: "'select distinct c.country_code, c.country_name\n from user_addresses\ \ a, countries c\n where a.user_id = id\n and c.country_code = a.country_code'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: SECURE - keyword: FUNCTION - function_name: function_name_identifier: js_factorial - function_parameter_list: bracketed: start_bracket: ( parameter: d data_type: data_type_identifier: double end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: double - keyword: IMMUTABLE - keyword: LANGUAGE - keyword: JAVASCRIPT - keyword: STRICT - keyword: AS - udf_body: "'\n if (D <= 0) {\n return 1;\n } else {\n var result = 1;\n\ \ for (var i = 2; i <= D; i++) {\n result = result * i;\n }\n \ \ return result;\n }\n '" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name: function_name_identifier: simple_table_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INTEGER - end_bracket: ) - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\n SELECT 1, 2\n UNION ALL\n SELECT 3, 4\n $$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: function - function_name: function_name_identifier: my_decrement_udf - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: returns - 
data_type: data_type_identifier: numeric - keyword: language - keyword: java - keyword: imports - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar'" end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method'" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: echo_varchar - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: varchar end_bracket: ) - keyword: returns - data_type: data_type_identifier: varchar - keyword: language - keyword: java - keyword: called - keyword: 'on' - keyword: 'null' - keyword: input - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestFunc.echoVarchar'" - keyword: target_path - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'@~/testfunc.jar'" - keyword: as - udf_body: "'class TestFunc {\n public static String echoVarchar(String x) {\n\ \ return x;\n }\n}'" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: py_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: packages - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'numpy'" - comma: ',' - quoted_literal: "'pandas'" - comma: ',' - quoted_literal: "'xgboost==1.5.0'" - end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'udf'" - keyword: as - udf_body: "$$\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\ def udf():\n return [np.__version__, pd.__version__, xgb.__version__]\n\ $$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: dream - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sleepy.snore'" - keyword: imports - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@my_stage/sleepy.py'" end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: addone - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: int - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: handler - 
comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'addone_py'" - keyword: as - udf_body: "$$\ndef addone_py(i):\n return i+1\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: echo_varchar - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SCALA - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2.12'" - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Echo.echoVarchar'" - keyword: AS - udf_body: "$$\nclass Echo {\n def echoVarchar(x : String): String = {\n \ \ return x\n }\n}\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: google_translate_python - function_parameter_list: bracketed: - start_bracket: ( - parameter: sentence - data_type: data_type_identifier: STRING - comma: ',' - parameter: language - data_type: data_type_identifier: STRING - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: STRING - keyword: LANGUAGE - keyword: PYTHON - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'get_translation'" - keyword: EXTERNAL_ACCESS_INTEGRATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - naked_identifier: google_apis_access_integration - comma: ',' - naked_identifier: my_integration - end_bracket: ) - keyword: PACKAGES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'snowflake-snowpark-python'" - comma: ',' - quoted_literal: "'requests'" - end_bracket: ) - keyword: SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'cred'" - comparison_operator: raw_comparison_operator: '=' - naked_identifier: oauth_token - comma: ',' - quoted_literal: "'cred2'" - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DATA_STAGE - dot: . 
- naked_identifier: AWS_SECRET_KEY - end_bracket: ) - keyword: AS - udf_body: "$$\nimport _snowflake\nimport requests\nimport json\nsession = requests.Session()\n\ def get_translation(sentence, language):\n token = _snowflake.get_oauth_access_token('cred')\n\ \ url = \"https://translation.googleapis.com/language/translate/v2\"\n data\ \ = {'q': sentence,'target': language}\n response = session.post(url, json\ \ = data, headers = {\"Authorization\": \"Bearer \" + token})\n return response.json()['data']['translations'][0]['translatedText']\n\ $$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: aggregate - keyword: function - function_name: function_name_identifier: addone - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: int - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'addone_py'" - keyword: as - udf_body: "$$\ndef addone_py(i):\n return i+1\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: - naked_identifier: TEST_DB - dot: . - naked_identifier: TEST_SCHEMA - dot: . - function_name_identifier: TEST_TABLE - function_parameter_list: bracketed: - start_bracket: ( - parameter: COL_1 - data_type: data_type_identifier: VARCHAR - keyword: DEFAULT - expression: null_literal: 'NULL' - comma: ',' - parameter: COL_2 - data_type: data_type_identifier: VARCHAR - keyword: DEFAULT - expression: null_literal: 'NULL' - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\nSELECT\n CASE\n WHEN (LOWER(COL_1) IS NOT NULL\ \ AND\n LOWER(COL_2) = 'test_marketing')\n \ \ THEN 'marketing_channel'\n ELSE '(Other)'\nEND\n$$" - statement_terminator: ; - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name: function_name_identifier: pi_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT - keyword: AS - udf_body: "'3.141592654::FLOAT'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: pi_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT - keyword: AS - udf_body: "'3.141592654::FLOAT'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_masking_policy.sql000066400000000000000000000037431503426445100270420ustar00rootroot00000000000000CREATE OR REPLACE MASKING POLICY XXXX.XX.example_MASKING_POLICY AS (val VARCHAR) RETURNS VARCHAR -> CASE WHEN is_role_in_session('SNOWFLAKE_PII') THEN val ELSE '*** masked ***' END COMMENT = 'Applied 2021-07-13T03:12:16+0000'; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else '*********' end; create or replace masking policy email_mask as (val string) returns string -> case when current_account() in ('') then val else '*********' end; create or 
replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() IN ('ANALYST') then val
    else NULL
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('ANALYST') then val
    else '********'
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('ANALYST') then val
    else sha2(val) -- return hash of the column value
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('ANALYST') then val
    when current_role() in ('SUPPORT') then regexp_replace(val,'.+\@','*****@') -- leave email domain unmasked
    else '********'
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('SUPPORT') then val
    else date_from_parts(0001, 01, 01)::timestamp_ntz -- returns 0001-01-01 00:00:00.000
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('ANALYST') then val
    else mask_udf(val) -- custom masking function
  end;

create or replace masking policy email_mask as (val string) returns string ->
  case
    when current_role() in ('ANALYST') then val
    else object_insert(val, 'USER_IPADDRESS', '****', true)
  end;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_masking_policy.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dcd4d6cdf4a9460b035a9d350b079ee7f039806e160aa15bfd12554ccce351a4
file: - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MASKING - keyword: POLICY - object_reference: - naked_identifier: XXXX - dot: . - naked_identifier: XX - dot: .
- naked_identifier: example_MASKING_POLICY - keyword: AS - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: VARCHAR - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - function_assigner: -> - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: is_role_in_session function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'SNOWFLAKE_PII'" end_bracket: ) - keyword: THEN - expression: column_reference: naked_identifier: val - else_clause: keyword: ELSE expression: quoted_literal: "'*** masked ***'" - keyword: END - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Applied 2021-07-13T03:12:16+0000'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'*********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_account function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "''" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'*********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: IN bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - 
expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: null_literal: 'NULL' - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: sha2 function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: val end_bracket: ) - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'SUPPORT'" end_bracket: ) - keyword: then - expression: function: function_name: function_name_identifier: regexp_replace function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - comma: ',' - expression: quoted_literal: "'.+\\@'" - comma: ',' - expression: quoted_literal: "'*****@'" - end_bracket: ) - 
else_clause: keyword: else expression: quoted_literal: "'********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'SUPPORT'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: cast_expression: function: function_name: function_name_identifier: date_from_parts function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '0001' - comma: ',' - expression: numeric_literal: '01' - comma: ',' - expression: numeric_literal: '01' - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: timestamp_ntz - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: mask_udf function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: val end_bracket: ) - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: object_insert function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - comma: ',' - expression: quoted_literal: "'USER_IPADDRESS'" - comma: ',' - expression: 
quoted_literal: "'****'" - comma: ',' - expression: boolean_literal: 'true' - end_bracket: ) - keyword: end - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_network_policy.sql000066400000000000000000000007431503426445100270770ustar00rootroot00000000000000create network policy mypolicy1 allowed_ip_list=('192.168.1.0/24') blocked_ip_list=('192.168.1.99'); CREATE OR REPLACE NETWORK POLICY TEST_NW_POLICY ALLOWED_IP_LIST=('xx.xxx.xxx.xx/xx','xx.xxx.xxx.xx/xx') COMMENT='NW Policy' ; CREATE NETWORK POLICY np ALLOWED_NETWORK_RULE_LIST = ('blabla','blabla2','blabla3') COMMENT='comment' ; CREATE NETWORK POLICY np BLOCKED_NETWORK_RULE_LIST = ('blabla','blabla2','blabla3') COMMENT='comment' ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_network_policy.yml000066400000000000000000000056151503426445100271040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: caa607d0870e46ada1b9f71475a659b93ed9f7203dc0e9a27e54510b2c9ff128 file: - statement: create_statement: - keyword: create - keyword: network - keyword: policy - object_reference: naked_identifier: mypolicy1 - keyword: allowed_ip_list - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'192.168.1.0/24'" end_bracket: ) - keyword: blocked_ip_list - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'192.168.1.99'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: TEST_NW_POLICY - keyword: ALLOWED_IP_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'xx.xxx.xxx.xx/xx'" - comma: ',' - quoted_literal: "'xx.xxx.xxx.xx/xx'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'NW Policy'" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: np - keyword: ALLOWED_NETWORK_RULE_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'blabla'" - comma: ',' - quoted_literal: "'blabla2'" - comma: ',' - quoted_literal: "'blabla3'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: np - keyword: BLOCKED_NETWORK_RULE_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'blabla'" - comma: ',' - quoted_literal: "'blabla2'" - comma: ',' - quoted_literal: "'blabla3'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_notification_integration.sql000066400000000000000000000023071503426445100311160ustar00rootroot00000000000000create or replace notification integration if not exists 
my_notification_int type = queue notification_provider = gcp_pubsub enabled = true gcp_pubsub_subscription_name = 'projects/project-1234/subscriptions/sub2'; create notification integration my_notification_int enabled = true type = queue notification_provider = azure_storage_queue azure_storage_queue_primary_uri = 'https://myqueue.queue.core.windows.net/mystoragequeue' azure_tenant_id = 'a123bcde-1234-5678-abc1-9abc12345678'; create notification integration my_notification_int enabled = true type = queue notification_provider = aws_sns direction = outbound aws_sns_topic_arn = 'arn:aws:sns:us-east-2:111122223333:sns_topic' aws_sns_role_arn = 'arn:aws:iam::111122223333:role/error_sns_role'; create notification integration my_notification_int type = queue direction = outbound notification_provider = gcp_pubsub enabled = true gcp_pubsub_topic_name = 'projects/sdm-prod/topics/mytopic'; create notification integration my_notification_int enabled = true type = queue notification_provider = azure_event_grid direction = outbound azure_event_grid_topic_endpoint = 'https://myaccount.region-1.eventgrid.azure.net/api/events' azure_tenant_id = 'mytenantid'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_notification_integration.yml000066400000000000000000000115611503426445100311220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 506603bca8afa4447f78d441567b2fbc9dd28e9ef3054e669635862218f4dee6 file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: notification - keyword: integration - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: my_notification_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcp_pubsub - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: gcp_pubsub_subscription_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'projects/project-1234/subscriptions/sub2'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure_storage_queue - keyword: azure_storage_queue_primary_uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://myqueue.queue.core.windows.net/mystoragequeue'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123bcde-1234-5678-abc1-9abc12345678'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - 
keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: aws_sns - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: aws_sns_topic_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:sns:us-east-2:111122223333:sns_topic'" - keyword: aws_sns_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::111122223333:role/error_sns_role'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcp_pubsub - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: gcp_pubsub_topic_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'projects/sdm-prod/topics/mytopic'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure_event_grid - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: azure_event_grid_topic_endpoint - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://myaccount.region-1.eventgrid.azure.net/api/events'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'mytenantid'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_password_policy.sql000066400000000000000000000007051503426445100272460ustar00rootroot00000000000000CREATE PASSWORD POLICY PASSWORD_POLICY_PROD_1 PASSWORD_MIN_LENGTH = 12 PASSWORD_MAX_LENGTH = 24 PASSWORD_MIN_UPPER_CASE_CHARS = 2 PASSWORD_MIN_LOWER_CASE_CHARS = 2 PASSWORD_MIN_NUMERIC_CHARS = 2 PASSWORD_MIN_SPECIAL_CHARS = 2 PASSWORD_MIN_AGE_DAYS = 1 PASSWORD_MAX_AGE_DAYS = 30 PASSWORD_MAX_RETRIES = 3 PASSWORD_LOCKOUT_TIME_MINS = 30 PASSWORD_HISTORY = 5 COMMENT = 'production account password policy'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_password_policy.yml000066400000000000000000000045001503426445100272450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 47ca3ee016df5b7b01c017acfc414f171e2fb332ad21cf546a544b583be69132 file: statement: create_password_policy_statement: - keyword: CREATE - keyword: PASSWORD - keyword: POLICY - password_policy_reference: naked_identifier: PASSWORD_POLICY_PROD_1 - password_policy_options: - keyword: PASSWORD_MIN_LENGTH - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '12' - keyword: PASSWORD_MAX_LENGTH - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '24' - keyword: PASSWORD_MIN_UPPER_CASE_CHARS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: PASSWORD_MIN_LOWER_CASE_CHARS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: PASSWORD_MIN_NUMERIC_CHARS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: PASSWORD_MIN_SPECIAL_CHARS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: PASSWORD_MIN_AGE_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: PASSWORD_MAX_AGE_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - keyword: PASSWORD_MAX_RETRIES - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - keyword: PASSWORD_LOCKOUT_TIME_MINS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - keyword: PASSWORD_HISTORY - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'production account password policy'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_pipe.sql000066400000000000000000000013361503426445100247630ustar00rootroot00000000000000create or replace pipe mypipe_s3 auto_ingest = true error_integration = my_error aws_sns_topic = 'arn:aws:blablabla..0:s3_mybucket' as copy into snowpipe_db.public.mytable from @snowpipe_db.public.mystage file_format = (type = 'JSON'); create or replace pipe test_pipe auto_ingest = true integration = notification_integration as copy into table_name ( column1, column2 ) from (select $1, current_timestamp() as column2 from @stage_name/folder); create or replace pipe test_pipe auto_ingest = true integration = 'notification_integration' as copy into table_name ( column1, column2 ) from (select $1, current_timestamp() as column2 from @stage_name/folder); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_pipe.yml000066400000000000000000000126751503426445100247750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c975d954bb2a045924b2c4f0fdf128579b324ffe86e3087b527a452c230be6cd file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: mypipe_s3 - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: error_integration - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_error - keyword: aws_sns_topic - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:blablabla..0:s3_mybucket'" - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: - naked_identifier: snowpipe_db - dot: . - naked_identifier: public - dot: . - naked_identifier: mytable - keyword: from - storage_location: stage_path: '@snowpipe_db.public.mystage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: "'JSON'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: test_pipe - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: integration - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: notification_integration - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: column2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@stage_name/folder' end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: test_pipe - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: integration - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'notification_integration'" - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: current_timestamp function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: column2 from_clause: keyword: from from_expression: from_expression_element: table_expression: 
table_reference: stage_path: '@stage_name/folder' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_procedure.sql000066400000000000000000000077001503426445100260170ustar00rootroot00000000000000create or replace procedure sp_pi() returns float not null language javascript as $$ return 3.1415926; $$ ; create or replace procedure stproc1(FLOAT_PARAM1 FLOAT) returns string language javascript strict execute as owner as $$ var sql_command = "INSERT INTO stproc_test_table1 (num_col1) VALUES (" + FLOAT_PARAM1 + ")"; try { snowflake.execute ( {sqlText: sql_command} ); return "Succeeded."; // Return a success/error indicator. } catch (err) { return "Failed: " + err; // Return a success/error indicator. } $$ ; CREATE OR REPLACE PROCEDURE public.test_procedure (test_table VARCHAR(), test_col VARCHAR()) RETURNS VARCHAR() LANGUAGE JAVASCRIPT AS $$ try { var sql_command = "ALTER TABLE " + test_table + " DROP " + tet_col; snowflake.execute ({sqlText: sql_command}); return "Succeeded."; } catch (err) { return "Failed: execute "+ sql_command +". Error : "+ err; // Return a success/error indicator. } $$ ; CREATE OR REPLACE PROCEDURE IF NOT EXISTS UTIL_DB.PUBLIC.PROCEDURE_IF_NOT_EXISTS() RETURNS INT LANGUAGE JAVASCRIPT AS $$ return 1; $$; CREATE OR REPLACE PROCEDURE UTIL_DB.PUBLIC.PROCEDURE_WITHOUT_EXPLICIT_LANGUAGE() RETURNS INT AS $$ BEGIN RETURN 1; END $$; CREATE OR REPLACE PROCEDURE UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_SQL() RETURNS INT LANGUAGE SQL AS $$ BEGIN RETURN 1; END $$; create or replace procedure UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_PYTHON() returns variant language python runtime_version = '3.8' packages = ('numpy','pandas','xgboost==1.5.0') handler = 'udf' comment = 'hello_world' as $$ import numpy as np import pandas as pd import xgboost as xgb def udf(): return [np.__version__, pd.__version__, xgb.__version__] $$; create or replace procedure UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_JAVA(x varchar) returns varchar language java called on null input handler='TestFunc.echoVarchar' target_path='@~/testfunc.jar' as 'class TestFunc { public static String echoVarchar(String x) { return x; } }'; CREATE OR REPLACE PROCEDURE filter_by_role(table_name VARCHAR, role VARCHAR) RETURNS INT --TABLE() LANGUAGE SCALA RUNTIME_VERSION = '2.12' PACKAGES = ('com.snowflake:snowpark:latest') HANDLER = 'Filter.filterByRole' AS $$ import com.snowflake.snowpark.functions._ import com.snowflake.snowpark._ object Filter { def filterByRole(session: Session, tableName: String, role: String): DataFrame = { val table = session.table(tableName) val filteredRows = table.filter(col("role") === role) return filteredRows } } $$; CREATE OR REPLACE PROCEDURE myprocedure( "Id" NUMBER(38,0) ) RETURNS VARCHAR LANGUAGE SQL AS $$ -- Snowflake Scripting code DECLARE radius_of_circle FLOAT; area_of_circle FLOAT; BEGIN radius_of_circle := 3; area_of_circle := pi() * radius_of_circle * radius_of_circle; RETURN area_of_circle; END; $$ ; CREATE OR REPLACE PROCEDURE MY_PROCEDURE( "Id" NUMBER(38,0) ) RETURNS VARCHAR LANGUAGE SQL AS BEGIN select 1; select 2; select 3; select 4; return 5; END; CREATE OR REPLACE PROCEDURE MY_PROCEDURE(hello_world VARCHAR(10000)) COPY GRANTS RETURNS TABLE () LANGUAGE PYTHON RUNTIME_VERSION = '3.11' PACKAGES = ('snowflake-snowpark-python') HANDLER = 'my.path.func_formula_parser_test_script_runner_proc' IMPORTS = ('@MIRROR.PYTHON_SCRIPTS/script.py') EXECUTE AS OWNER ; CREATE OR REPLACE PROCEDURE DATA_STAGE.INGEST_DATA_FROM_GS(P_GOOGLE_SPREADSHEET_ID VARCHAR(16777216), P_SHEET_NAME 
VARCHAR(16777216), P_STAGING_TABLE VARCHAR(16777216)) COPY GRANTS RETURNS VARCHAR(16777216) LANGUAGE PYTHON RUNTIME_VERSION = '3.11' PACKAGES = ('snowflake-snowpark-python','requests') HANDLER = 'SF_GOOGLE_SHEET_LOADER.ingest_gs_to_staging_table' IMPORTS = ('@DATA_STAGE.PYTHON_SCRIPTS/SF_GOOGLE_SHEET_LOADER.py') EXTERNAL_ACCESS_INTEGRATIONS = (GOOGLESHEET_APIS_ACCESS_INTEGRATION) SECRETS = ('cred' = DATA_STAGE.GS_OAUTH_TOKEN, 'cred2' = my_cred) EXECUTE AS CALLER ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_procedure.yml000066400000000000000000000415011503426445100260160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 72edb2ae4aca805149ab4371f61fa26037ba5d94f168b2f77bc14ef0292de940 file: - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: function_name_identifier: sp_pi - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: float - keyword: not - keyword: 'null' - keyword: language - keyword: javascript - keyword: as - udf_body: "$$\n return 3.1415926;\n $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: function_name_identifier: stproc1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - keyword: language - keyword: javascript - keyword: strict - keyword: execute - keyword: as - keyword: owner - keyword: as - udf_body: "$$\n var sql_command =\n \"INSERT INTO stproc_test_table1\ \ (num_col1) VALUES (\" + FLOAT_PARAM1 + \")\";\n try {\n snowflake.execute\ \ (\n {sqlText: sql_command}\n );\n return \"\ Succeeded.\"; // Return a success/error indicator.\n }\n catch\ \ (err) {\n return \"Failed: \" + err; // Return a success/error\ \ indicator.\n }\n $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: naked_identifier: public dot: . function_name_identifier: test_procedure - function_parameter_list: bracketed: - start_bracket: ( - parameter: test_table - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: test_col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - keyword: LANGUAGE - keyword: JAVASCRIPT - keyword: AS - udf_body: "$$\ntry {\n var sql_command = \"ALTER TABLE \" + test_table +\ \ \" DROP \" + tet_col;\n snowflake.execute ({sqlText: sql_command});\n\ \ return \"Succeeded.\";\n}\ncatch (err) {\n return \"Failed: execute\ \ \"+ sql_command +\". 
Error : \"+ err; // Return a success/error indicator.\n\ }\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - keyword: IF - keyword: NOT - keyword: EXISTS - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_IF_NOT_EXISTS - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: LANGUAGE - keyword: JAVASCRIPT - keyword: AS - udf_body: "$$\n return 1;\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_WITHOUT_EXPLICIT_LANGUAGE - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - udf_body: "$$\nBEGIN\n RETURN 1;\nEND\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_LANGUAGE_SQL - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\nBEGIN\n RETURN 1;\nEND\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_LANGUAGE_PYTHON - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: packages - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'numpy'" - comma: ',' - quoted_literal: "'pandas'" - comma: ',' - quoted_literal: "'xgboost==1.5.0'" - end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'udf'" - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'hello_world'" - keyword: as - udf_body: "$$\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\ def udf():\n return [np.__version__, pd.__version__, xgb.__version__]\n\ $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . 
- function_name_identifier: PROCEDURE_LANGUAGE_JAVA - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: varchar end_bracket: ) - keyword: returns - data_type: data_type_identifier: varchar - keyword: language - keyword: java - keyword: called - keyword: 'on' - keyword: 'null' - keyword: input - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestFunc.echoVarchar'" - keyword: target_path - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'@~/testfunc.jar'" - keyword: as - udf_body: "'class TestFunc {\n public static String echoVarchar(String x) {\n\ \ return x;\n }\n}'" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: filter_by_role - function_parameter_list: bracketed: - start_bracket: ( - parameter: table_name - data_type: data_type_identifier: VARCHAR - comma: ',' - parameter: role - data_type: data_type_identifier: VARCHAR - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: LANGUAGE - keyword: SCALA - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2.12'" - keyword: PACKAGES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'com.snowflake:snowpark:latest'" end_bracket: ) - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Filter.filterByRole'" - keyword: AS - udf_body: "$$\nimport com.snowflake.snowpark.functions._\nimport com.snowflake.snowpark._\n\ \nobject Filter {\n def filterByRole(session: Session, tableName: String,\ \ role: String): DataFrame = {\n val table = session.table(tableName)\n\ \ val filteredRows = table.filter(col(\"role\") === role)\n \ \ return filteredRows\n }\n}\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: myprocedure - function_parameter_list: bracketed: start_bracket: ( parameter: '"Id"' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\n-- Snowflake Scripting code\nDECLARE\nradius_of_circle FLOAT;\n\ area_of_circle FLOAT;\nBEGIN\nradius_of_circle := 3;\narea_of_circle := pi()\ \ * radius_of_circle * radius_of_circle;\nRETURN area_of_circle;\nEND;\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: MY_PROCEDURE - function_parameter_list: bracketed: start_bracket: ( parameter: '"Id"' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SQL - keyword: AS - scripting_block_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: 
numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '4' - statement_terminator: ; - statement: return_statement: keyword: return expression: numeric_literal: '5' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: MY_PROCEDURE - function_parameter_list: bracketed: start_bracket: ( parameter: hello_world data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10000' end_bracket: ) end_bracket: ) - keyword: COPY - keyword: GRANTS - keyword: RETURNS - data_type: data_type_identifier: TABLE bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - keyword: LANGUAGE - keyword: PYTHON - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.11'" - keyword: PACKAGES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'snowflake-snowpark-python'" end_bracket: ) - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my.path.func_formula_parser_test_script_runner_proc'" - keyword: IMPORTS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@MIRROR.PYTHON_SCRIPTS/script.py'" end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: OWNER - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: naked_identifier: DATA_STAGE dot: . 
function_name_identifier: INGEST_DATA_FROM_GS - function_parameter_list: bracketed: - start_bracket: ( - parameter: P_GOOGLE_SPREADSHEET_ID - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16777216' end_bracket: ) - comma: ',' - parameter: P_SHEET_NAME - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16777216' end_bracket: ) - comma: ',' - parameter: P_STAGING_TABLE - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16777216' end_bracket: ) - end_bracket: ) - keyword: COPY - keyword: GRANTS - keyword: RETURNS - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16777216' end_bracket: ) - keyword: LANGUAGE - keyword: PYTHON - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.11'" - keyword: PACKAGES - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'snowflake-snowpark-python'" - comma: ',' - quoted_literal: "'requests'" - end_bracket: ) - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SF_GOOGLE_SHEET_LOADER.ingest_gs_to_staging_table'" - keyword: IMPORTS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@DATA_STAGE.PYTHON_SCRIPTS/SF_GOOGLE_SHEET_LOADER.py'" end_bracket: ) - keyword: EXTERNAL_ACCESS_INTEGRATIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( naked_identifier: GOOGLESHEET_APIS_ACCESS_INTEGRATION end_bracket: ) - keyword: SECRETS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'cred'" - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DATA_STAGE - dot: . - naked_identifier: GS_OAUTH_TOKEN - comma: ',' - quoted_literal: "'cred2'" - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_cred - end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: CALLER - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_resource_monitor.sql000066400000000000000000000013711503426445100274230ustar00rootroot00000000000000create resource monitor if not exists test; create or replace resource monitor limiter with credit_quota = 1; create or replace resource monitor test with frequency = monthly; create or replace resource monitor limiter with start_timestamp = immediately; create or replace resource monitor limiter with start_timestamp= '2038-01-19 03:14:07'; create or replace resource monitor limiter with credit_quota = 100 NOTIFY_USERS = (joe, "sara", "ashlee") start_timestamp = immediately end_timestamp = '2038-01-19 03:14:07' ; create or replace resource monitor limiter with credit_quota=5000 notify_users = (jdoe, "jane smith", "john doe") triggers on 75 percent do notify on 100 percent do suspend on 110 percent do suspend_immediate ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_resource_monitor.yml000066400000000000000000000111571503426445100274300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6af381138c77c33c9a0d66b5cba1c4f3677e9f8bc00efce29a1c90d48d7e4284 file: - statement: create_statement: - keyword: create - keyword: resource - keyword: monitor - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: test - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: keyword: credit_quota comparison_operator: raw_comparison_operator: '=' integer_literal: '1' - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: test - keyword: with - resource_monitor_options: - keyword: frequency - comparison_operator: raw_comparison_operator: '=' - keyword: monthly - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: keyword: start_timestamp comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2038-01-19 03:14:07'" - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '100' - keyword: NOTIFY_USERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: joe - comma: ',' - object_reference: quoted_identifier: '"sara"' - comma: ',' - object_reference: quoted_identifier: '"ashlee"' - end_bracket: ) - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - keyword: end_timestamp - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2038-01-19 03:14:07'" - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '5000' - keyword: notify_users - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: jdoe - comma: ',' - object_reference: quoted_identifier: '"jane smith"' - comma: ',' - object_reference: quoted_identifier: '"john doe"' - end_bracket: ) - keyword: triggers - keyword: 'on' - integer_literal: '75' - keyword: percent - keyword: do - keyword: notify - keyword: 'on' - integer_literal: '100' - keyword: percent - keyword: do - keyword: suspend - keyword: 'on' - integer_literal: '110' - keyword: percent - keyword: do - keyword: suspend_immediate - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_role.sql000066400000000000000000000002311503426445100247600ustar00rootroot00000000000000CREATE ROLE MY_ROLE; CREATE ROLE "my_role"; CREATE OR REPLACE ROLE IF NOT EXISTS foo_role COMMENT = 'this is a fake role'; CREATE OR ALTER ROLE MY_ROLE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_role.yml000066400000000000000000000025251503426445100247720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74a6a7ef5e5520317c530dd15b1a928e232c866810a41d5fa479361c1d9ca71e file: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: quoted_identifier: '"my_role"' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROLE - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: foo_role - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'this is a fake role'" - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_row_access_policy.sql000066400000000000000000000013621503426445100275340ustar00rootroot00000000000000CREATE ROW ACCESS POLICY IF NOT EXISTS my_access_policy AS( TEXT VARCHAR ) RETURNS BOOLEAN -> FALSE; CREATE OR REPLACE ROW ACCESS POLICY my_access_policy AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE; CREATE OR REPLACE ROW ACCESS POLICY IF NOT EXISTS my_access_policy AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE; CREATE OR REPLACE ROW ACCESS POLICY IF NOT EXISTS my_access_policy AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE COMMENT = 'My Comment'; CREATE ROW ACCESS POLICY my_access_policy AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE; CREATE ROW ACCESS POLICY my_access_policy AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE COMMENT = 'My Comment'; CREATE ROW ACCESS POLICY "My-Access-Policy" AS( val VARCHAR ) RETURNS BOOLEAN -> TRUE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_row_access_policy.yml000066400000000000000000000112561503426445100275410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bd6d590d5303f19aff70b8a8dd3b6ca8eab8e93194a52ebec8160fa7dddd98fb file: - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: TEXT data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'FALSE' - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: NOT - keyword: EXISTS - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My Comment'" - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - naked_identifier: my_access_policy - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My Comment'" - statement_terminator: ; - statement: create_row_access_policy_statement: - keyword: CREATE - keyword: ROW - keyword: ACCESS - keyword: POLICY - quoted_identifier: '"My-Access-Policy"' - keyword: AS - function_parameter_list: bracketed: start_bracket: ( parameter: val data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - keyword: BOOLEAN - function_assigner: -> - expression: boolean_literal: 'TRUE' - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_schema.sql000066400000000000000000000012651503426445100252670ustar00rootroot00000000000000create schema mytestschema_clone_restore clone testschema; create schema mytestdatabase1.mytestschema_clone_restore clone mytestdatabase2.testschema; create schema mytestschema_clone_restore clone testschema before (timestamp => to_timestamp(40*365*86400)); create schema mytestschema comment = 'My test schema.'; create schema mytestschema tag (tag1 = 'foo', tag2 = 'bar'); create schema mytestschema with managed access; create transient schema if not exists mytestschema default_ddl_collation = 'de_DE'; CREATE SCHEMA MYDB.MYSCHEMA COMMENT = "Space for landing my data"; CREATE SCHEMA IF NOT EXISTS MYDB.MYSCHEMA COMMENT = "Space for landing my data"; CREATE OR ALTER SCHEMA MYDB.MYSCHEMA; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_schema.yml000066400000000000000000000114331503426445100252670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f2bcc6cf48ab9d592fc3b42df8febd684db6cee34d0ec6b9161c42955597087 file: - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: naked_identifier: testschema - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: - naked_identifier: mytestdatabase1 - dot: . - naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: - naked_identifier: mytestdatabase2 - dot: . 
- naked_identifier: testschema - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: naked_identifier: testschema - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: function: function_name: function_name_identifier: to_timestamp function_contents: bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - schema_object_properties: comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My test schema.'" - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - tag_bracketed_equals: keyword: tag bracketed: - start_bracket: ( - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - keyword: with - keyword: managed - keyword: access - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: transient - keyword: schema - keyword: if - keyword: not - keyword: exists - schema_reference: naked_identifier: mytestschema - schema_object_properties: keyword: default_ddl_collation comparison_operator: raw_comparison_operator: '=' quoted_literal: "'de_DE'" - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - schema_object_properties: comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Space for landing my data"' - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - schema_object_properties: comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Space for landing my data"' - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: SCHEMA - schema_reference: - naked_identifier: MYDB - dot: . 
- naked_identifier: MYSCHEMA - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_sequence.sql000066400000000000000000000004671503426445100256420ustar00rootroot00000000000000CREATE SEQUENCE seq; CREATE OR REPLACE SEQUENCE IF NOT EXISTS seq WITH START WITH = 2 INCREMENT BY = 15 ORDER COMMENT = 'this_a_beautiful_sequence'; CREATE OR REPLACE SEQUENCE IF NOT EXISTS seq START = 2 INCREMENT = 15 NOORDER; CREATE SEQUENCE seq START 2; CREATE SEQUENCE seq INCREMENT 2; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_sequence.yml000066400000000000000000000043621503426445100256420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0da1f9dc4ff3d33ae884bd0911fc68407a8cfdef9a7da965e994f74da56b5122 file: - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SEQUENCE - keyword: IF - keyword: NOT - keyword: EXISTS - sequence_reference: naked_identifier: seq - keyword: WITH - keyword: START - keyword: WITH - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '15' - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'this_a_beautiful_sequence'" - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SEQUENCE - keyword: IF - keyword: NOT - keyword: EXISTS - sequence_reference: naked_identifier: seq - keyword: START - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: INCREMENT - comparison_operator: raw_comparison_operator: '=' - integer_literal: '15' - keyword: NOORDER - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: START - integer_literal: '2' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - integer_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_stage.sql000066400000000000000000000103261503426445100251300ustar00rootroot00000000000000CREATE STAGE my_int_stage COPY_OPTIONS = (ON_ERROR='skip_file'); CREATE STAGE my_int_stage ENCRYPTION = (TYPE = 'SNOWFLAKE_SSE') COPY_OPTIONS = (ON_ERROR='skip_file'); CREATE TEMPORARY STAGE my_temp_int_stage; CREATE TEMPORARY STAGE my_int_stage FILE_FORMAT = my_csv_format; CREATE STAGE mystage DIRECTORY = (ENABLE = TRUE) FILE_FORMAT = myformat; CREATE STAGE my_ext_stage URL='s3://load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load/' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load/files' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage1 URL='s3://load/files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z'); CREATE STAGE my_ext_stage2 
URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z') ENCRYPTION=(MASTER_KEY = 'eSxX0jzYfIamtnBKOEOwq80Au6NbSgPH5r4BDDwOaO8='); CREATE STAGE my_ext_stage3 URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z') ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID = 'aws/key'); CREATE STAGE my_ext_stage3 URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_ROLE='arn:aws:iam::001234567890:role/mysnowflakerole') ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID = 'aws/key'); CREATE STAGE mystage URL='s3://load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true ); CREATE STAGE my_ext_stage URL='gcs://load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE mystage URL='gcs://load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true NOTIFICATION_INTEGRATION = 'MY_NOTIFICATION_INT' ); CREATE STAGE my_ext_stage URL='azure://myaccount.blob.core.windows.net/load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE mystage URL='azure://myaccount.blob.core.windows.net/mycontainer/files/' CREDENTIALS=(AZURE_SAS_TOKEN='?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D') ENCRYPTION=(TYPE='AZURE_CSE' MASTER_KEY = 'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8=') FILE_FORMAT = my_csv_format; CREATE STAGE mystage URL='azure://myaccount.blob.core.windows.net/load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true NOTIFICATION_INTEGRATION = 'MY_NOTIFICATION_INT' ); CREATE OR REPLACE STAGE foo.bar URL = 's3://foobar' STORAGE_INTEGRATION = foo FILE_FORMAT = foo.bar.baz ; CREATE OR REPLACE STAGE foo.bar URL = 's3://foobar' STORAGE_INTEGRATION = foo FILE_FORMAT = (FORMAT_NAME = foo.bar.baz) ; CREATE OR REPLACE STAGE your_stage_name URL = 's3://your_s3_bucket/your_path_in_s3'; CREATE OR REPLACE STAGE your_stage_name URL = 's3://your-s3-bucket/your-path-in-s3'; CREATE STAGE mystage URL=$your_variable CREDENTIALS=(AZURE_SAS_TOKEN=$your_variable); CREATE STAGE mystage URL=$your_variable STORAGE_INTEGRATION=$your_variable; CREATE OR REPLACE STAGE foo.bar URL = 's3://foobar' STORAGE_INTEGRATION = foo FILE_FORMAT = ( TYPE = CSV PARSE_HEADER = TRUE ); CREATE OR ALTER STAGE foo.bar URL = 's3://foobar'; CREATE STAGE foo.bar STORAGE_INTEGRATION=$your_variable URL=$your_variable; CREATE OR REPLACE STAGE foo.bar STORAGE_INTEGRATION = foo URL = 's3://foobar' FILE_FORMAT = ( TYPE = CSV PARSE_HEADER = TRUE ); CREATE STAGE my_ext_stage STORAGE_INTEGRATION = myint URL='azure://myaccount.blob.core.windows.net/load/files/'; CREATE STAGE mystage CREDENTIALS=(AZURE_SAS_TOKEN='?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D') ENCRYPTION=(TYPE='AZURE_CSE' MASTER_KEY = 'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8=') URL='azure://myaccount.blob.core.windows.net/mycontainer/files/' FILE_FORMAT = my_csv_format; CREATE STAGE mystage STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true NOTIFICATION_INTEGRATION = 'MY_NOTIFICATION_INT' ) URL='azure://myaccount.blob.core.windows.net/load/files/'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_stage.yml000066400000000000000000000575411503426445100251440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 773297b55de87286897d7b13bd1bdfd56341d9e4164171fdd9f1548b4faf2e4d file: - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_int_stage - keyword: COPY_OPTIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( copy_options: keyword: ON_ERROR comparison_operator: raw_comparison_operator: '=' copy_on_error_option: "'skip_file'" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_int_stage - stage_parameters: keyword: ENCRYPTION comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: TYPE comparison_operator: raw_comparison_operator: '=' stage_encryption_option: "'SNOWFLAKE_SSE'" end_bracket: ) - keyword: COPY_OPTIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( copy_options: keyword: ON_ERROR comparison_operator: raw_comparison_operator: '=' copy_on_error_option: "'skip_file'" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STAGE - object_reference: naked_identifier: my_temp_int_stage - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STAGE - object_reference: naked_identifier: my_int_stage - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: my_csv_format - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: ENABLE comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' end_bracket: ) - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: myformat - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - 
object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage1 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: CREDENTIALS comparison_operator: raw_comparison_operator: '=' bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage2 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: MASTER_KEY comparison_operator: raw_comparison_operator: '=' quoted_literal: "'eSxX0jzYfIamtnBKOEOwq80Au6NbSgPH5r4BDDwOaO8='" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage3 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'aws/key'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage3 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: AWS_ROLE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'arn:aws:iam::001234567890:role/mysnowflakerole'" end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - 
quoted_literal: "'aws/key'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'gcs://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'gcs://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: NOTIFICATION_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MY_NOTIFICATION_INT'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: AZURE_SAS_TOKEN comparison_operator: raw_comparison_operator: '=' quoted_literal: "'?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D'" end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AZURE_CSE'" - keyword: MASTER_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8='" - end_bracket: ) - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' 
- file_format_segment: object_reference: naked_identifier: my_csv_format - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: NOTIFICATION_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MY_NOTIFICATION_INT'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - dot: . - naked_identifier: baz - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: FORMAT_NAME comparison_operator: raw_comparison_operator: '=' object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - dot: . 
- naked_identifier: baz end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: naked_identifier: your_stage_name - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://your_s3_bucket/your_path_in_s3'" - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: naked_identifier: your_stage_name - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://your-s3-bucket/your-path-in-s3'" - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - variable: $your_variable - stage_parameters: keyword: CREDENTIALS comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: AZURE_SAS_TOKEN comparison_operator: raw_comparison_operator: '=' variable: $your_variable end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - variable: $your_variable - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: variable: $your_variable - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: PARSE_HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: variable: $your_variable - keyword: URL - comparison_operator: raw_comparison_operator: '=' - variable: $your_variable - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . 
- naked_identifier: bar - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: PARSE_HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: AZURE_SAS_TOKEN comparison_operator: raw_comparison_operator: '=' quoted_literal: "'?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D'" end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AZURE_CSE'" - keyword: MASTER_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8='" - end_bracket: ) - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/files/'" - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: my_csv_format - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: NOTIFICATION_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MY_NOTIFICATION_INT'" - end_bracket: ) - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_storage_integration.sql000066400000000000000000000050731503426445100300770ustar00rootroot00000000000000create storage integration s3_int type = external_stage storage_provider = s3 storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ('s3://mybucket1/path1/', 's3://mybucket2/path2/'); create 
storage integration s3_int type = external_stage storage_provider = s3 storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ('s3://mybucket1', 's3://mybucket2/'); create storage integration gcs_int type = external_stage storage_provider = gcs enabled = true storage_allowed_locations = ('gcs://mybucket1/path1/', 'gcs://mybucket2/path2/'); create storage integration azure_int type = external_stage storage_provider = azure enabled = true azure_tenant_id = '' storage_allowed_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/'); create or replace storage integration s3_int type = external_stage storage_provider = s3 storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ('*') storage_blocked_locations = ('s3://mybucket3/path3/', 's3://mybucket4/path4/'); create or replace storage integration gcs_int type = external_stage storage_provider = gcs enabled = true storage_allowed_locations = ('*') storage_blocked_locations = ('gcs://mybucket3/path3/', 'gcs://mybucket4/path4/'); create or replace storage integration azure_int type = external_stage storage_provider = azure enabled = false azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9' storage_allowed_locations = ('*') storage_blocked_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/'); create storage integration s3_int type = external_stage storage_provider = 's3' storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ('s3://mybucket1', 's3://mybucket2/'); create storage integration gcs_int type = external_stage storage_provider = 'gcs' enabled = true storage_allowed_locations = ('gcs://mybucket1/path1/', 'gcs://mybucket2/path2/'); create storage integration azure_int type = external_stage storage_provider = 'azure' enabled = true azure_tenant_id = '' storage_allowed_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/') use_privatelink_endpoint = false; ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_storage_integration.yml000066400000000000000000000250451503426445100301020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
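# For example, after adding or editing a statement in the paired
# create_storage_integration.sql fixture, the typical regeneration step is
# (a sketch of the workflow described above; any extra CLI flags the
# script accepts are not shown here):
#   python test/generate_parse_fixture_yml.py
# which rewrites this file, including the "_hash" value below.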
_hash: cf7248179db4474af5a56e319788f109c4e72812456d0e228d199cecf36fe0df file: - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1/path1/'" - comma: ',' - bucket_path: "'s3://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcs - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - 
comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket3/path3/'" - comma: ',' - bucket_path: "'s3://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcs - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket3/path3/'" - comma: ',' - bucket_path: "'gcs://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3'" - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: 
storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gcs'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'azure'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - keyword: use_privatelink_endpoint - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_stream.sql000066400000000000000000000021041503426445100253130ustar00rootroot00000000000000create stream new_stream on table table_name; create stream mystream on table mytable before (timestamp => to_timestamp(40*365*86400)); create stream mystream on table mytable at(offset => -60*5); create stream mystream on table mytable before(statement => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); create stream new_stream on external table table_name; create stream new_stream on stage stage_name; create stream new_stream on view view_name; create stream new_stream clone source_stream; create or replace stream new_stream on table table_name; create stream if not exists new_stream on table table_name; CREATE OR REPLACE STREAM new_stream COPY GRANTS ON TABLE table_name APPEND_ONLY = TRUE SHOW_INITIAL_ROWS = TRUE COMMENT = 'amazing comment'; CREATE OR REPLACE STREAM new_stream ON EXTERNAL TABLE table_name INSERT_ONLY = TRUE COMMENT = 'amazing comment'; CREATE STREAM IF NOT EXISTS new_stream ON STAGE stage_name COMMENT = 'amazing comment'; CREATE STREAM IF NOT EXISTS new_stream ON VIEW view_name APPEND_ONLY = FALSE SHOW_INITIAL_ROWS = FALSE COMMENT = 'amazing comment'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_stream.yml000066400000000000000000000160461503426445100253270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d6b9b17076a4d8fdb468ba44e454e9d10855d7e3d71365942f1f5af97d13ebf0 file: - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: function: function_name: function_name_identifier: to_timestamp function_contents: bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_at_expression: keyword: at bracketed: start_bracket: ( keyword: offset parameter_assigner: => expression: - numeric_literal: sign_indicator: '-' numeric_literal: '60' - binary_operator: '*' - numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: statement parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: external - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: stage - object_reference: naked_identifier: stage_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: view - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: clone - object_reference: naked_identifier: source_stream - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: or - keyword: replace - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAM - object_reference: naked_identifier: new_stream - keyword: COPY - keyword: GRANTS - keyword: 'ON' - keyword: TABLE - object_reference: naked_identifier: 
table_name - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: SHOW_INITIAL_ROWS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAM - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: EXTERNAL - keyword: TABLE - object_reference: naked_identifier: table_name - keyword: INSERT_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: STREAM - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: STAGE - object_reference: naked_identifier: stage_name - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: STREAM - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: VIEW - object_reference: naked_identifier: view_name - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SHOW_INITIAL_ROWS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_streamlit.sql000066400000000000000000000015241503426445100260310ustar00rootroot00000000000000CREATE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py'; CREATE OR REPLACE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py'; CREATE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py' QUERY_WAREHOUSE = my_wh; CREATE OR REPLACE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py' QUERY_WAREHOUSE = my_wh; CREATE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py' QUERY_WAREHOUSE = my_wh COMMENT = 'amazing comment'; CREATE OR REPLACE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py' QUERY_WAREHOUSE = my_wh COMMENT = 'amazing comment'; CREATE STREAMLIT new_streamlit ROOT_LOCATION = '@stage_name/folder' MAIN_FILE = 'main.py' TITLE = 'amazing title'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_streamlit.yml000066400000000000000000000110531503426445100260310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
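# To eyeball the parse tree that a fixture like this encodes, one option
# (assuming a local sqlfluff installation) is to run the CLI parser on the
# paired SQL file directly, e.g.:
#   sqlfluff parse --dialect snowflake create_streamlit.sql
# The rendered tree should correspond segment-for-segment to the YAML below.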
_hash: 54fe7098ea0135daa073e9f93e27df0f9bf1999db35824d7dad06c9f40c9bc88 file: - statement: create_streamlit_statement: - keyword: CREATE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - keyword: QUERY_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_wh - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - keyword: QUERY_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_wh - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - keyword: QUERY_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_wh - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - keyword: QUERY_WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_wh - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_streamlit_statement: - keyword: CREATE - keyword: STREAMLIT - object_reference: naked_identifier: new_streamlit - keyword: ROOT_LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: "'@stage_name/folder'" - keyword: MAIN_FILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'main.py'" - keyword: TITLE - 
comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'amazing title'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_table.sql000066400000000000000000000223431503426445100251160ustar00rootroot00000000000000create table if not exists "p08_base" as select VALUE:id::TEXT id from "_p08"; CREATE TABLE IF NOT EXISTS table_name ( col1 VARCHAR ); create table mytable (amount number); create table mytable (amount number) CLUSTER BY (amount); create table mytable (amount number) CLUSTER BY LINEAR(amount); create table mytable CLUSTER BY (amount) (amount number); create table mytable CLUSTER BY LINEAR(amount) (amount number); create table mytable_copy2 as select b+1 as c from mytable_copy; create table mytable_2 like mytable; create temporary table demo_temporary (i integer); create temp table demo_temp (i integer); create local temporary table demo_local_temporary (i integer); create local temp table demo_local_temp (i integer); create global temporary table demo_global_temporary (i integer); create global temp table demo_global_temp (i integer); create volatile table demo_volatile (i integer); create table example (col1 number comment 'a column comment') comment='a table comment'; create table testtable_summary (name, summary_amount) as select name, amount1 + amount2 from testtable; create table testtable_summary (barry char) as select name, amount1 + amount2 from testtable; create table testtable_summary as select name, amount1 + amount2 from testtable; create or replace table parquet_col ( custkey number default null, orderdate date default null, orderstatus varchar(100) default null, price varchar(255) ) as select $1:o_custkey::number, $1:o_orderdate::date, $1:o_orderstatus::text, $1:o_totalprice::text from @my_stage; create table collation_demo ( uncollated_phrase varchar, utf8_phrase varchar collate 'utf8', english_phrase varchar collate 'en', spanish_phrase varchar collate 'sp' ); create table t2 as select col1 collate 'fr' as col1 from t1; create table mytable using template ( select array_agg(object_construct(*)) from table( infer_schema( location=>'@mystage', file_format=>'my_parquet_format' ) )); create table dollar_sign_table (foo$bar boolean); create table dollar_sign_schema.dollar_sign_table (foo$bar boolean); CREATE TABLE timestamp_column_default_value_demo ( timestamp_col1 TIMESTAMP_TZ NOT NULL DEFAULT CURRENT_TIMESTAMP, timestamp_col2 TIMESTAMP_TZ DEFAULT CURRENT_TIMESTAMP(), timestamp_col3 TIMESTAMP_TZ DEFAULT CURRENT_TIMESTAMP(2), sysdate_col4 TIMESTAMP_TZ DEFAULT SYSDATE() ); create table test_table (test_column NUMBER autoincrement (0, 1)); create table test_schema.test_table (test_column NUMBER autoincrement (0, 1)); create or replace table test_schema.test_table (test_column NUMBER autoincrement (0, 1)); create table test_schema.test_table (test_column INTEGER AUTOINCREMENT); CREATE TABLE test_table (test_column NUMBER WITH MASKING POLICY my_policy USING(test_column, test_column > 10)); CREATE OR REPLACE TABLE SCHEMA1.TABLE1 ( "COL1" varchar(128) NOT NULL, "COL2" varchar(128) NOT NULL ) CHANGE_TRACKING = TRUE WITH TAG ( account_objects.tags.IRM = '{"IRM":[{"Primary":"ABC123"}]}' ); CREATE TABLE my_table ( foo TIMESTAMP_NTZ DEFAULT CURRENT_TIMESTAMP::TIMESTAMP_NTZ ) ; CREATE TABLE IF NOT EXISTS EXAMPLE_TABLE ( EXAMPLE VARCHAR AUTOINCREMENT START 1 INCREMENT 1 ORDER ); CREATE TABLE IF NOT EXISTS EXAMPLE_TABLE ( EXAMPLE VARCHAR AUTOINCREMENT START 1 INCREMENT 1 NOORDER ); CREATE OR REPLACE DYNAMIC TABLE names TARGET_LAG = '1 
minute' WAREHOUSE = mywh AS SELECT var:id::int id, var:fname::string first_name, var:lname::string last_name FROM raw; CREATE OR REPLACE DYNAMIC TABLE names TARGET_LAG = '1 minute' REFRESH_MODE = AUTO INITIALIZE = ON_CREATE WAREHOUSE = mywh AS SELECT var:id::int id, var:fname::string first_name, var:lname::string last_name FROM raw; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_DOWNSTREAM_LAG TARGET_LAG = DOWNSTREAM WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_DOWNSTREAM_QUOTED_LITERAL_LAG TARGET_LAG = 'DOWNSTREAM' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_SINGLE_SECOND_LAG TARGET_LAG = '1 second' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_MULTIPLE_SECONDS_LAG TARGET_LAG = '5 seconds' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_SINGLE_MINUTE_LAG TARGET_LAG = '1 minute' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_MULTIPLE_MINUTES_LAG TARGET_LAG = '5 minutes' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_SINGLE_HOUR_LAG TARGET_LAG = '1 hour' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_MULTIPLE_HOURS_LAG TARGET_LAG = '5 hours' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE DT_WITH_SINGLE_DAY_LAG TARGET_LAG = '1 day' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE my_table TARGET_LAG = '5 days' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE DYNAMIC TABLE my_table TARGET_LAG = '${my_time_variable}' WAREHOUSE = mywh AS SELECT * FROM my_table; CREATE OR REPLACE TABLE IF NOT EXISTS EXAMPLE_TABLE_WITH_RLS ( EXAMPLE VARCHAR ) WITH ROW ACCESS POLICY my_db.my_schema.rls_policy ON (EXAMPLE); CREATE OR REPLACE TABLE IF NOT EXISTS EXAMPLE_TABLE_WITH_RLS ( EXAMPLE VARCHAR ) WITH ROW ACCESS POLICY rls_policy ON (EXAMPLE); CREATE TABLE IDENTIFIER(:SOME_TABLE) (AMOUNT NUMBER); CREATE OR REPLACE TEMP TABLE mytable ( id INTEGER NOT NULL PRIMARY KEY ENABLE ENFORCED VALIDATE RELY ); CREATE OR REPLACE TABLE myschema.mytable ( id INTEGER NOT NULL , CONSTRAINT mytable_pk PRIMARY KEY (id) NOT ENFORCED NOVALIDATE NORELY ); CREATE TABLE some_schema.some_table ( is_condition_true BOOLEAN , some_text_value VARCHAR(100) , some_event_date_time_utc VARCHAR AS (TO_TIMESTAMP(SUBSTR(some_text_value, 5, 13))) , some_other_event_date_time_utc TIMESTAMP AS (IFF(is_condition_true AND TRY_TO_NUMBER(some_text_value) IS NOT NULL, TO_TIMESTAMP(SUBSTR(some_text_value, 5, 13)), '1900-01-01')) COMMENT 'The date and time of the other event' ); CREATE OR REPLACE TABLE some_table ( id INTEGER NOT NULL, CONSTRAINT MY_FK FOREIGN KEY (id) REFERENCES another_table(id) MATCH SIMPLE ON DELETE RESTRICT ); CREATE OR REPLACE TABLE some_table ( id INTEGER NOT NULL, CONSTRAINT MY_FK FOREIGN KEY (id) REFERENCES another_table MATCH FULL ON DELETE RESTRICT ); CREATE OR REPLACE TABLE some_table ( ID INTEGER NOT NULL CONSTRAINT MY_FK FOREIGN KEY REFERENCES another_table (id) MATCH PARTIAL ON DELETE RESTRICT ON UPDATE SET DEFAULT ); CREATE OR REPLACE TABLE some_table ( ID INTEGER NOT NULL, CONSTRAINT MY_FK FOREIGN KEY (ID) REFERENCES another_table (id) MATCH SIMPLE ON DELETE CASCADE ); CREATE OR ALTER TABLE some_table ( id INTEGER NOT NULL ); CREATE OR ALTER TABLE some_table ( id INTEGER NOT NULL ) DEFAULT_DDL_COLLATION = 'fr'; -- Iceberg tables CREATE ICEBERG TABLE db.archival.iceberg_report_invoicesummary ( _v 
string, partition_date date, clientid string, invoiceclientid string ) EXTERNAL_VOLUME='iceberg_ext_vol' CATALOG='SNOWFLAKE' BASE_LOCATION ='report_invoicesummary'; CREATE ICEBERG TABLE iceberg1 ( value string, partition_date date, clientid string, amount INTEGER ) CLUSTER BY amount EXTERNAL_VOLUME = '' CATALOG = 'SNOWFLAKE' BASE_LOCATION = '' COPY GRANTS AS SELECT * from example; CREATE ICEBERG TABLE iceberg2 LIKE example CLUSTER BY (amount) (amount number) COPY GRANTS; CREATE ICEBERG TABLE iceberg_glue EXTERNAL_VOLUME = '' CATALOG = '' CATALOG_TABLE_NAME = ' quoted_literal: "'@mystage'" - comma: ',' - snowflake_keyword_expression: parameter: file_format parameter_assigner: => quoted_literal: "'my_parquet_format'" - end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: dollar_sign_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo$bar data_type: data_type_identifier: boolean end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: dollar_sign_schema - dot: . - naked_identifier: dollar_sign_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo$bar data_type: data_type_identifier: boolean end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: timestamp_column_default_value_demo - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp_col1 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: DEFAULT - expression: bare_function: CURRENT_TIMESTAMP - comma: ',' - column_definition: naked_identifier: timestamp_col2 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: naked_identifier: timestamp_col3 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - column_definition: naked_identifier: sysdate_col4 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: SYSDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: test_schema - dot: . 
- naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: - naked_identifier: test_schema - dot: . - naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: test_schema - dot: . - naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: INTEGER column_constraint_segment: keyword: AUTOINCREMENT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - keyword: USING - bracketed: start_bracket: ( column_reference: naked_identifier: test_column comma: ',' expression: column_reference: naked_identifier: test_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: SCHEMA1 - dot: . - naked_identifier: TABLE1 - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '"COL1"' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '128' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '"COL2"' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '128' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - dynamic_table_options: tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: start_bracket: ( tag_reference: - naked_identifier: account_objects - dot: . - naked_identifier: tags - dot: . 
- naked_identifier: IRM comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{\"IRM\":[{\"Primary\":\"ABC123\"}]}'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo data_type: data_type_identifier: TIMESTAMP_NTZ column_constraint_segment: keyword: DEFAULT expression: cast_expression: bare_function: CURRENT_TIMESTAMP casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_NTZ end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: EXAMPLE_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EXAMPLE data_type: data_type_identifier: VARCHAR column_constraint_segment: - keyword: AUTOINCREMENT - keyword: START - numeric_literal: '1' - keyword: INCREMENT - numeric_literal: '1' - keyword: ORDER end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: EXAMPLE_TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: EXAMPLE data_type: data_type_identifier: VARCHAR column_constraint_segment: - keyword: AUTOINCREMENT - keyword: START - numeric_literal: '1' - keyword: INCREMENT - numeric_literal: '1' - keyword: NOORDER end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: names - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 minute'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: int alias_expression: naked_identifier: id - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: fname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: first_name - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: lname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: names - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 minute'" - keyword: REFRESH_MODE - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - keyword: INITIALIZE - comparison_operator: raw_comparison_operator: '=' 
- initialize_type: ON_CREATE - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: int alias_expression: naked_identifier: id - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: fname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: first_name - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: var semi_structured_expression: colon: ':' semi_structured_element: lname casting_operator: '::' data_type: data_type_identifier: string alias_expression: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_DOWNSTREAM_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - keyword: DOWNSTREAM - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_DOWNSTREAM_QUOTED_LITERAL_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'DOWNSTREAM'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_SINGLE_SECOND_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 second'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - 
table_reference: naked_identifier: DT_WITH_MULTIPLE_SECONDS_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'5 seconds'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_SINGLE_MINUTE_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 minute'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_MULTIPLE_MINUTES_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'5 minutes'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_SINGLE_HOUR_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 hour'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_MULTIPLE_HOURS_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'5 hours'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: DT_WITH_SINGLE_DAY_LAG - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'1 day'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: my_table - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'5 days'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: my_table - dynamic_table_options: - keyword: TARGET_LAG - comparison_operator: raw_comparison_operator: '=' - dynamic_table_lag_interval_segment: "'${my_time_variable}'" - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: EXAMPLE_TABLE_WITH_RLS - bracketed: start_bracket: ( column_definition: naked_identifier: EXAMPLE data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: rls_policy - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: EXAMPLE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: EXAMPLE_TABLE_WITH_RLS - bracketed: start_bracket: ( column_definition: naked_identifier: EXAMPLE data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: rls_policy - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: EXAMPLE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: keyword: IDENTIFIER bracketed: start_bracket: ( bind_variable: colon: ':' variable: SOME_TABLE end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: AMOUNT data_type: data_type_identifier: NUMBER end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' - constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - keyword: ENABLE - keyword: ENFORCED - keyword: VALIDATE - keyword: RELY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: mytable_pk - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: ENFORCED - keyword: NOVALIDATE - keyword: NORELY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . 
- naked_identifier: some_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: is_condition_true data_type: data_type_identifier: BOOLEAN - comma: ',' - column_definition: naked_identifier: some_text_value data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - comma: ',' - naked_identifier: some_event_date_time_utc - data_type: data_type_identifier: VARCHAR - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TO_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SUBSTR function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: some_text_value - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '13' - end_bracket: ) end_bracket: ) end_bracket: ) - comma: ',' - naked_identifier: some_other_event_date_time_utc - data_type: keyword: TIMESTAMP - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: IFF function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: is_condition_true - binary_operator: AND - function: function_name: function_name_identifier: TRY_TO_NUMBER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_text_value end_bracket: ) - keyword: IS - keyword: NOT - null_literal: 'NULL' - comma: ',' - expression: function: function_name: function_name_identifier: TO_TIMESTAMP function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SUBSTR function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: some_text_value - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '13' - end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'1900-01-01'" - end_bracket: ) end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: "'The date and time of the other event'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: MY_FK - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: another_table - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: MATCH - keyword: SIMPLE - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: MY_FK - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: 
naked_identifier: id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: another_table - keyword: MATCH - keyword: FULL - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: ID data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' - constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: MY_FK - keyword: FOREIGN - keyword: KEY - keyword: REFERENCES - table_reference: naked_identifier: another_table - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: MATCH - keyword: PARTIAL - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: SET - keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: ID data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' constraint_properties_segment: - keyword: CONSTRAINT - naked_identifier: MY_FK - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: ID end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: another_table - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: MATCH - keyword: SIMPLE - keyword: 'ON' - keyword: DELETE - keyword: CASCADE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - keyword: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'fr'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: ICEBERG - keyword: TABLE - table_reference: - naked_identifier: db - dot: . - naked_identifier: archival - dot: . 
- naked_identifier: iceberg_report_invoicesummary - bracketed: - start_bracket: ( - column_definition: naked_identifier: _v data_type: data_type_identifier: string - comma: ',' - column_definition: naked_identifier: partition_date data_type: data_type_identifier: date - comma: ',' - column_definition: naked_identifier: clientid data_type: data_type_identifier: string - comma: ',' - column_definition: naked_identifier: invoiceclientid data_type: data_type_identifier: string - end_bracket: ) - iceberg_table_options: - keyword: EXTERNAL_VOLUME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'iceberg_ext_vol'" - keyword: CATALOG - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SNOWFLAKE'" - keyword: BASE_LOCATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'report_invoicesummary'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: value data_type: data_type_identifier: string - comma: ',' - column_definition: naked_identifier: partition_date data_type: data_type_identifier: date - comma: ',' - column_definition: naked_identifier: clientid data_type: data_type_identifier: string - comma: ',' - column_definition: naked_identifier: amount data_type: data_type_identifier: INTEGER - end_bracket: ) - dynamic_table_options: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: amount - keyword: EXTERNAL_VOLUME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: CATALOG - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SNOWFLAKE'" - keyword: BASE_LOCATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg2 - keyword: LIKE - table_reference: naked_identifier: example - keyword: CLUSTER - keyword: BY - bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - keyword: COPY - keyword: GRANTS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg_glue - iceberg_table_options: - keyword: EXTERNAL_VOLUME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: CATALOG - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: CATALOG_TABLE_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - 
dynamic_table_options: tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: - start_bracket: ( - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'r'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'rr'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg_object_storage - iceberg_table_options: - keyword: EXTERNAL_VOLUME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: CATALOG - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: METADATA_FILE_PATH - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg_object_delta - iceberg_table_options: keyword: BASE_LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: iceberg_object_snowflake_open_catalog - iceberg_table_options: keyword: CATALOG_TABLE_NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: ref_hybrid_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: col2 data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) column_constraint_segment: constraint_properties_segment: keyword: UNIQUE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: fk_hybrid_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: col2 
data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: col3 data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - constraint_properties_segment: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: ref_hybrid_table - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - comma: ',' - column_definition: naked_identifier: INDEX data_type: data_type_identifier: index_col3 bracketed: start_bracket: ( word: col3 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: target_hybrid_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: col2 data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) column_constraint_segment: constraint_properties_segment: keyword: UNIQUE - comma: ',' - column_definition: naked_identifier: col3 data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: INDEX data_type: data_type_identifier: index_col3 bracketed: start_bracket: ( word: col3 end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: dept_employees - bracketed: - start_bracket: ( - column_definition: naked_identifier: employee_id data_type: data_type_identifier: INT column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: department_id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '200' end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: department_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: company_employees - statement_terminator: ; - statement: 
create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: HYBRID - keyword: TABLE - table_reference: naked_identifier: application_log - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER column_constraint_segment: constraint_properties_segment: - keyword: PRIMARY - keyword: KEY keyword: AUTOINCREMENT - comma: ',' - column_definition: naked_identifier: col1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - comma: ',' - column_definition: naked_identifier: col2 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_table_comments.sql000066400000000000000000000000651503426445100270200ustar00rootroot00000000000000 CREATE TABLE foo_table (bar INTEGER) COMMENT = '1'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_table_comments.yml000066400000000000000000000016061503426445100270240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43cd79085ff9e414a966056c2f320507db48ee0fd47927f2d4172297761fbb00 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo_table - bracketed: start_bracket: ( column_definition: naked_identifier: bar data_type: data_type_identifier: INTEGER end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_table_with_partition_as_column_name.sql000066400000000000000000000001311503426445100332710ustar00rootroot00000000000000CREATE TABLE foo ( timestamp_col TIMESTAMP, date_col DATE, partition INTEGER ); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_table_with_partition_as_column_name.yml000066400000000000000000000020111503426445100332720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 817c71c35394d857f535d0605e556e64bc82e28c33f5edfe3262a640c54dfeca file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp_col data_type: keyword: TIMESTAMP - comma: ',' - column_definition: naked_identifier: date_col data_type: data_type_identifier: DATE - comma: ',' - column_definition: naked_identifier: partition data_type: data_type_identifier: INTEGER - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_tag.sql000066400000000000000000000003131503426445100245730ustar00rootroot00000000000000CREATE OR REPLACE TAG IF NOT EXISTS boo; CREATE TAG cost_center COMMENT = 'cost_center tag'; CREATE OR REPLACE TAG IF NOT EXISTS DATA_CLASSIFICATION ALLOWED_VALUES 'RESTRICTED', 'CONFIDENTIAL', 'PII'; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_tag.yml000066400000000000000000000026161503426445100246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a3fa5ebed600d59a7becb2be812d93f6f8d82f18e3552ec9793be261669bcd7f file: - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TAG - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: boo - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: TAG - object_reference: naked_identifier: cost_center - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'cost_center tag'" - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TAG - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: DATA_CLASSIFICATION - keyword: ALLOWED_VALUES - quoted_literal: "'RESTRICTED'" - comma: ',' - quoted_literal: "'CONFIDENTIAL'" - comma: ',' - quoted_literal: "'PII'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_task.sql000066400000000000000000000053411503426445100247700ustar00rootroot00000000000000-- Examples from the documentation CREATE TASK t1 SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles' TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24' USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL' AS INSERT INTO mytable(ts) VALUES(1); CREATE TASK mytask_hour WAREHOUSE = mywh SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles' TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24' AS INSERT INTO mytable(ts) VALUES(1, 2, 3); -- All possible optional clauses CREATE OR REPLACE TASK IF NOT EXISTS t1 SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles' ALLOW_OVERLAPPING_EXECUTION = TRUE TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24' USER_TASK_TIMEOUT_MS = 25 USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL' COPY GRANTS COMMENT = 'Hello world' AFTER dependency_task AS INSERT INTO mytable(ts) VALUES(1); -- Only mandatory clauses CREATE TASK t1 AS INSERT INTO mytable(ts) VALUES(1); -- Real life examples CREATE OR REPLACE TASK insert_session WAREHOUSE = eng_wh SCHEDULE = 'USING CRON 45 6 * * * UTC' AS INSERT INTO sch.s_session SELECT *, sum(break) OVER (PARTITION BY serial ORDER BY datetime) AS session_id FROM ( SELECT 
* FROM base_table ) ; CREATE OR REPLACE TASK update_session WAREHOUSE = eng_wh AFTER insert_session AS UPDATE sch.s_session SET lag_datetime = v.lag_datetime, row_number = v.row_number FROM ( SELECT *, ( sum(break) OVER (PARTITION BY serial ORDER BY datetime) ) AS session_id FROM ( SELECT * FROM derived_table ) ORDER BY serial, datetime ) AS v WHERE sch.s_session.event_id = v.event_id ; CREATE OR REPLACE TASK sch.truncate_session WAREHOUSE = eng_wh AFTER sch.update_session AS CALL sch.session_agg_insert(); CREATE OR REPLACE TASK insert__agg WAREHOUSE = eng_wh SCHEDULE = 'USING CRON 15 7 2 * * UTC' AS CALL auto_device_insert(); CREATE OR REPLACE TASK SCH.MY_TASK WAREHOUSE = MY_WH SCHEDULE = 'USING CRON 15 7 2 * * UTC' USER_TASK_TIMEOUT_MS = 10800000 WHEN SYSTEM$STREAM_HAS_DATA('SCH.MY_STREAM') AND 1=1 AS CALL SCH.MY_SPROC(); CREATE OR ALTER TASK mytask WAREHOUSE = mywh AS CALL SCH.MY_SPROC(); CREATE TASK task5 AFTER task2, task3, task4 AS INSERT INTO t1(ts) VALUES(CURRENT_TIMESTAMP); SET custom_warehouse = 'mywh'; SET custom_schedule = 'USING CRON 15 7 2 * * UTC'; CREATE OR ALTER TASK mytask WAREHOUSE = $custom_warehouse SCHEDULE = $custom_schedule LOG_LEVEL = TRACE AS CALL SCH.MY_SPROC(); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_task.yml000066400000000000000000000502451503426445100247750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 48eeb996eeddab106dc4034aecfe49e68406319399b3e06f6a3f9bad378ab8e2 file: - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: t1 - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'XSMALL'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: mytask_hour - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - 
statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: t1 - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - keyword: ALLOW_OVERLAPPING_EXECUTION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: USER_TASK_TIMEOUT_MS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '25' - keyword: USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'XSMALL'" - keyword: COPY - keyword: GRANTS - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Hello world'" - keyword: AFTER - object_reference: naked_identifier: dependency_task - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: t1 - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: insert_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 45 6 * * * UTC'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: sch - dot: . 
- naked_identifier: s_session - select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: break end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: serial orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: datetime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: session_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: base_table end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: update_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: AFTER - object_reference: naked_identifier: insert_session - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: sch - dot: . - naked_identifier: s_session set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: lag_datetime - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . - naked_identifier: lag_datetime - comma: ',' - set_clause: - column_reference: naked_identifier: row_number - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . 
- naked_identifier: row_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: break end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: serial orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: datetime end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: session_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: derived_table end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: serial - comma: ',' - column_reference: naked_identifier: datetime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: v where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: sch - dot: . - naked_identifier: s_session - dot: . - naked_identifier: event_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . - naked_identifier: event_id - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: - naked_identifier: sch - dot: . - naked_identifier: truncate_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: AFTER - object_reference: - naked_identifier: sch - dot: . - naked_identifier: update_session - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: sch dot: . function_name_identifier: session_agg_insert function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: insert__agg - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 15 7 2 * * UTC'" - keyword: AS - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: auto_device_insert function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: - naked_identifier: SCH - dot: . 
- naked_identifier: MY_TASK - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: MY_WH - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 15 7 2 * * UTC'" - keyword: USER_TASK_TIMEOUT_MS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10800000' - keyword: WHEN - snowflake_task_expression_segment: system_function_name: SYSTEM$STREAM_HAS_DATA bracketed: start_bracket: ( quoted_literal: "'SCH.MY_STREAM'" end_bracket: ) binary_operator: AND expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: SCH dot: . function_name_identifier: MY_SPROC function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: mytask - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: SCH dot: . function_name_identifier: MY_SPROC function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: task5 - keyword: AFTER - object_reference: naked_identifier: task2 - comma: ',' - object_reference: naked_identifier: task3 - comma: ',' - object_reference: naked_identifier: task4 - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: custom_warehouse comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'mywh'" - statement_terminator: ; - statement: set_statement: keyword: SET variable: custom_schedule comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'USING CRON 15 7 2 * * UTC'" - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: mytask - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: variable: $custom_warehouse - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - variable: $custom_schedule - log_level_equals: - keyword: LOG_LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: TRACE - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: SCH dot: . 
function_name_identifier: MY_SPROC function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_user.sql000066400000000000000000000027541503426445100250110ustar00rootroot00000000000000create user user1 type = person password='abc123' default_role = myrole display_name = user1 login_name = my_login_name first_name = User1 middle_name = abc last_name = Test1 default_warehouse = my_default_warehouse default_namespace = my_default_namespace default_secondary_roles = ('ALL') must_change_password = true; create user user2 type = 'service' password='abc123' default_role = 'myrole' display_name = 'user 2' login_name = 'test login name' first_name = 'User' middle_name = 'abc' last_name = 'test2' default_warehouse = 'my_default_warehouse' default_namespace = 'my_default_namespace' must_change_password = false; create user user3 type = person rsa_public_key = '' default_role = myrole display_name = user1 login_name = my_login_name first_name = User1 middle_name = abc last_name = Test1 default_warehouse = my_default_warehouse default_namespace = my_default_namespace default_secondary_roles = ('ALL'); create user user4 type = person rsa_public_key = '' rsa_public_key_2 = '' default_role = myrole display_name = user1 login_name = my_login_name first_name = User1 middle_name = abc last_name = Test1 default_warehouse = my_default_warehouse default_namespace = my_default_namespace default_secondary_roles = ('ALL'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_user.yml000066400000000000000000000172541503426445100250140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fee89722cc066c1136e03b31ce63b342e43400340a0f67d72b67fdfdf8cd6ca0 file: - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user1 - keyword: type - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: person - keyword: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: myrole - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user1 - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_login_name - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: User1 - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: abc - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Test1 - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_warehouse - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_namespace - keyword: default_secondary_roles - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'ALL'" end_bracket: ) - keyword: must_change_password - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user2 - keyword: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'service'" - keyword: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myrole'" - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'user 2'" - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test login name'" - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'User'" - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc'" - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test2'" - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_default_warehouse'" - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_default_namespace'" - keyword: must_change_password - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user3 - keyword: type - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: person - keyword: rsa_public_key - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: myrole - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - 
object_reference: naked_identifier: user1 - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_login_name - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: User1 - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: abc - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Test1 - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_warehouse - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_namespace - keyword: default_secondary_roles - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'ALL'" end_bracket: ) - statement_terminator: ; - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user4 - keyword: type - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: person - keyword: rsa_public_key - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: rsa_public_key_2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: myrole - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user1 - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_login_name - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: User1 - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: abc - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Test1 - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_warehouse - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_namespace - keyword: default_secondary_roles - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'ALL'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_view.sql000066400000000000000000000046521503426445100250040ustar00rootroot00000000000000 create view another_view comment = 'a great description' as select col_1, col_2 from other_table; CREATE VIEW basic_view AS SELECT col1, col2 FROM src_table; CREATE TEMPORARY VIEW view_with_comments COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_replace_and_comment COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE SECURE RECURSIVE VIEW IF NOT EXISTS secure_recursive_view_with_comment COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_comment_and_copy_grants COMMENT = 'my comment' COPY GRANTS AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_tags_and_copy_grants WITH TAG (foo = 'bar', hello = 'world') COPY GRANTS AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW 
view_with_tags_and_copy_grants WITH TAG (foo = "bar", hello = "world") COPY GRANTS AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_column_comment ( col1, col2 COMMENT 'some comment' ) AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE SECURE RECURSIVE VIEW IF NOT EXISTS view_with_all_implemented_features COMMENT = 'table-level comment' ( col1, col2 COMMENT 'some comment' ) AS WITH cte AS (SELECT col1 FROM table_1) SELECT col1, col2 FROM table_2 INNER JOIN my_cte ON table_1.pk = table_2.pk; CREATE OR REPLACE VIEW vw_appt_latest AS ( WITH most_current as ( SELECT da.* FROM dim_appt da WHERE da.current_appt_id IS NULL ) SELECT * from most_current ); CREATE OR REPLACE VIEW IF NOT EXISTS view_with_rls ( COL1, COL2 ) WITH ROW ACCESS POLICY my_db.my_schema.my_policy ON (COL1) AS ( SELECT COL1, COL2 FROM my_table ); CREATE OR REPLACE VIEW IF NOT EXISTS view_with_rls ( COL1 WITH MASKING POLICY my_db.my_schema.my_policy, COL2 ) WITH ROW ACCESS POLICY my_db.my_schema.my_policy ON (COL1) AS ( SELECT COL1, COL2 FROM my_table ); CREATE OR REPLACE MATERIALIZED VIEW IF NOT EXISTS view_with_rls ( COL1, COL2 ) WITH ROW ACCESS POLICY my_db.my_schema.my_policy ON (COL1) AS ( SELECT COL1, COL2 FROM my_table ); CREATE OR REPLACE VIEW IF NOT EXISTS view_with_column_tags ( COL1 WITH TAG (my_db.my_schema.my_policy='MY_TAG'), COL2 ) AS ( SELECT COL1, COL2 FROM my_table ); CREATE OR ALTER VIEW view_with_column_comment ( col1 ) AS SELECT col1 FROM src_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_view.yml000066400000000000000000000515461503426445100250120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 420fdbbf980c6fed8ce72e3b3284bb4c189e165b9f3c7c2a4807711ed9d6c8ff file: - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: another_view - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'a great description'" - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col_2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: basic_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: view_with_comments - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_replace_and_comment - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SECURE - keyword: RECURSIVE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: secure_recursive_view_with_comment - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_comment_and_copy_grants - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - 
keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_tags_and_copy_grants - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: - start_bracket: ( - tag_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - comma: ',' - tag_reference: naked_identifier: hello - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'world'" - end_bracket: ) - keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_tags_and_copy_grants - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: - start_bracket: ( - tag_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"bar"' - comma: ',' - tag_reference: naked_identifier: hello - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"world"' - end_bracket: ) - keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_column_comment - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comment_clause: keyword: COMMENT quoted_literal: "'some comment'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SECURE - keyword: RECURSIVE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_all_implemented_features - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'table-level comment'" - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comment_clause: 
keyword: COMMENT quoted_literal: "'some comment'" - end_bracket: ) - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: my_cte - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: pk - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: pk - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vw_appt_latest - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: most_current keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: da dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dim_appt alias_expression: naked_identifier: da where_clause: keyword: WHERE expression: column_reference: - naked_identifier: da - dot: . - naked_identifier: current_appt_id keyword: IS null_literal: 'NULL' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: most_current end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_rls - bracketed: - start_bracket: ( - column_reference: naked_identifier: COL1 - comma: ',' - column_reference: naked_identifier: COL2 - end_bracket: ) - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: my_policy - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: COL1 end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: COL1 - comma: ',' - select_clause_element: column_reference: naked_identifier: COL2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_rls - bracketed: - start_bracket: ( - column_reference: naked_identifier: COL1 - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . - function_name_identifier: my_policy - comma: ',' - column_reference: naked_identifier: COL2 - end_bracket: ) - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_policy - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: COL1 end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: COL1 - comma: ',' - select_clause_element: column_reference: naked_identifier: COL2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_rls - bracketed: - start_bracket: ( - column_reference: naked_identifier: COL1 - comma: ',' - column_reference: naked_identifier: COL2 - end_bracket: ) - keyword: WITH - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_policy - keyword: 'ON' - bracketed: start_bracket: ( column_reference: naked_identifier: COL1 end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: COL1 - comma: ',' - select_clause_element: column_reference: naked_identifier: COL2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_column_tags - bracketed: - start_bracket: ( - column_reference: naked_identifier: COL1 - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: start_bracket: ( tag_reference: - naked_identifier: my_db - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: my_policy comparison_operator: raw_comparison_operator: '=' quoted_literal: "'MY_TAG'" end_bracket: ) - comma: ',' - column_reference: naked_identifier: COL2 - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: COL1 - comma: ',' - select_clause_element: column_reference: naked_identifier: COL2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_with_column_comment - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_warehouse.sql000066400000000000000000000007731503426445100260340ustar00rootroot00000000000000create or replace warehouse my_wh with warehouse_size='X-LARGE'; create or replace warehouse my_wh warehouse_size=large initially_suspended=true; create warehouse if not exists LOAD_WH warehouse_size='medium'; create warehouse if not exists LOAD_WH warehouse_size='medium' warehouse_type = standard; create warehouse my_wh WAREHOUSE_TYPE = 'SNOWPARK-OPTIMIZED' warehouse_size = 'medium' SCALING_POLICY = ECONOMY comment = 'comment' auto_suspend = 60 ; CREATE OR ALTER WAREHOUSE my_wh; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/create_warehouse.yml000066400000000000000000000064351503426445100260370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: de761543eef0be5d871c4b1a1ac070e8c2048b350c9577099cd604ecc0607484 file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: warehouse - object_reference: naked_identifier: my_wh - keyword: with - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: "'X-LARGE'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: warehouse - object_reference: naked_identifier: my_wh - warehouse_object_properties: - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: large - keyword: initially_suspended - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: LOAD_WH - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: "'medium'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: LOAD_WH - warehouse_object_properties: - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'medium'" - keyword: warehouse_type - comparison_operator: raw_comparison_operator: '=' - warehouse_size: standard - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - object_reference: naked_identifier: my_wh - warehouse_object_properties: - keyword: WAREHOUSE_TYPE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'SNOWPARK-OPTIMIZED'" - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'medium'" - keyword: SCALING_POLICY - comparison_operator: raw_comparison_operator: '=' - scaling_policy: ECONOMY - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - warehouse_object_properties: keyword: auto_suspend comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: WAREHOUSE - object_reference: naked_identifier: my_wh - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/current_user.sql000066400000000000000000000004741503426445100252250ustar00rootroot00000000000000SELECT CURRENT_ACCOUNT() as CURRENT_ACCOUNT, CURRENT_ROLE() as CURRENT_ROLE, CURRENT_REGION() AS CURRENT_REGION, CURRENT_SESSION() as CURRENT_SESSION, CURRENT_USER() as CURRENT_USER, CURRENT_ACCOUNT_NAME() as CURRENT_ACCOUNT_NAME, CURRENT_ORGANIZATION_NAME() as CURRENT_ORGANIZATION_NAME sqlfluff-3.4.2/test/fixtures/dialects/snowflake/current_user.yml000066400000000000000000000063161503426445100252300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6aa559f187a52da725339ec1077552d6c17cefab48a833c422f24c3da2bd40ad file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: CURRENT_ACCOUNT function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_ACCOUNT - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_ROLE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_ROLE - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_REGION function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: CURRENT_REGION - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_SESSION function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_SESSION - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_USER function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_USER - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_ACCOUNT_NAME function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_ACCOUNT_NAME - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CURRENT_ORGANIZATION_NAME function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: CURRENT_ORGANIZATION_NAME sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datatypes.sql000066400000000000000000000007031503426445100244760ustar00rootroot00000000000000select blob:content::array(integer) as field1, cast(blob:content as array(integer)) as field2, from foo ; CREATE TABLE IF NOT EXISTS table_name ( col1 ARRAY(NUMBER), col2 ARRAY(NUMBER NOT NULL) ); CREATE OR REPLACE FUNCTION my_udtf(check BOOLEAN) RETURNS TABLE(col1 ARRAY(VARCHAR)) AS $$ ... $$; CREATE OR REPLACE PROCEDURE my_procedure(values ARRAY(INTEGER)) RETURNS ARRAY(INTEGER) LANGUAGE SQL AS $$ ... $$; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datatypes.yml000066400000000000000000000124531503426445100245050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f7b87c2951f9693f9956e2fd7536b9d525c37fc21fda68f3b6a0d33cffd43651 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: column_reference: naked_identifier: blob semi_structured_expression: colon: ':' semi_structured_element: content casting_operator: '::' data_type: array_type: keyword: array array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: integer end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: field1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: blob semi_structured_expression: colon: ':' semi_structured_element: content keyword: as data_type: array_type: keyword: array array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: integer end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: field2 - comma: ',' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: array_type: keyword: ARRAY array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - comma: ',' - column_definition: naked_identifier: col2 data_type: array_type: keyword: ARRAY array_type_schema: bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - keyword: NOT - keyword: 'NULL' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: my_udtf - function_parameter_list: bracketed: start_bracket: ( parameter: check data_type: data_type_identifier: BOOLEAN end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: col1 data_type: array_type: keyword: ARRAY array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: VARCHAR end_bracket: ) end_bracket: ) - keyword: AS - udf_body: "$$\n ...\n $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: my_procedure - function_parameter_list: bracketed: start_bracket: ( parameter: values data_type: array_type: keyword: ARRAY array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: INTEGER end_bracket: ) end_bracket: ) - keyword: RETURNS - data_type: array_type: keyword: ARRAY array_type_schema: bracketed: start_bracket: ( data_type: data_type_identifier: INTEGER end_bracket: ) - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\n ...\n $$" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datetime_intervals.sql000066400000000000000000000016631503426445100263710ustar00rootroot00000000000000SELECT DATEADD(NANOSECONDS, -1, '2020-01-01'); -- https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#interval-examples select to_date('2018-04-15') + INTERVAL '1 year'; select to_time('04:15:29') + INTERVAL '3 hours, 18 
minutes'; select current_timestamp + INTERVAL '1 year, 3 quarters, 4 months, 5 weeks, 6 days, 7 minutes, 8 seconds, 1000 milliseconds, 4000000 microseconds, 5000000001 nanoseconds' as complex_interval1; select to_date('2025-01-17') + INTERVAL '1 y, 3 q, 4 mm, 5 w, 6 d, 7 h, 9 m, 8 s, 1000 ms, 445343232 us, 898498273498 ns' as complex_interval2; select name, hire_date from employees where hire_date > current_date - INTERVAL '2 y, 3 month'; select ts + INTERVAL '4 seconds' from t1 where ts > to_timestamp('2014-04-05 01:02:03'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datetime_intervals.yml000066400000000000000000000127721503426445100263760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fa58ef9c50cdbab9e9e56f7f6f7fcb836b406f740b23508f33f36b81f918060 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: NANOSECONDS - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'2020-01-01'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_date function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2018-04-15'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 year'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_time function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'04:15:29'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'3 hours, 18 minutes'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: bare_function: current_timestamp binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 year, 3 quarters, 4 months, 5 weeks, 6 days,\ \ 7 minutes, 8 seconds,\n 1000 milliseconds,\ \ 4000000 microseconds, 5000000001 nanoseconds'" alias_expression: alias_operator: keyword: as naked_identifier: complex_interval1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_date function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2025-01-17'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 y, 3 q, 4 mm, 5 w, 6 d, 7 h, 9 m, 8 s,\n\ \ 1000 ms, 445343232 us, 898498273498\ \ ns'" alias_expression: alias_operator: keyword: as naked_identifier: complex_interval2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: 
where expression: column_reference: naked_identifier: hire_date comparison_operator: raw_comparison_operator: '>' bare_function: current_date binary_operator: '-' keyword: INTERVAL date_constructor_literal: "'2 y, 3 month'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: column_reference: naked_identifier: ts binary_operator: + keyword: INTERVAL date_constructor_literal: "'4 seconds'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: where expression: column_reference: naked_identifier: ts comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: to_timestamp function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'2014-04-05 01:02:03'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datetime_units.sql000066400000000000000000000152511503426445100255220ustar00rootroot00000000000000SELECT t1.field, EXTRACT(year FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(y FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yyy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yyyy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(years FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(month FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mm FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mon FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mons FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(months FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(day FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(d FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dd FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(days FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofmonth FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofweek FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekday FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dow FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dw FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofweekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekday_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dow_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dw_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofyear FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearday FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(doy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(week FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(w FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(wk FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekofyear FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(woy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(wy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(week_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekofyeariso FROM t1.sometime) AS a FROM t1; SELECT t1.field, 
EXTRACT(weekofyear_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(quarter FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(q FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(qtr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(qtrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(quarters FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearofweek FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearofweekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hour FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(h FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hh FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hours FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(minute FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(m FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mi FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(min FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(minutes FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mins FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(second FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(s FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(sec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(seconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(secs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(millisecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ms FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(msec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(milliseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(microsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(us FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(usec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(microseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ns FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nsec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanoseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosecs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_second FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_seconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_millisecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_milliseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_microsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_microseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_nanosecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_nanoseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(timezone_hour FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(tzh FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(timezone_minute FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(tzm FROM t1.sometime) AS a FROM t1; 
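The dialect fixtures above come in pairs: a raw .sql file with the statements to parse, and an auto-generated .yml file holding the expected parse tree plus a `_hash` field that SQLFluff's test suite verifies, which is why each YAML header asks you to re-run `python test/generate_parse_fixture_yml.py` rather than edit by hand. As a minimal, illustrative sketch of that pairing convention (this helper is hypothetical and not part of SQLFluff; only the fixture layout and the generator command are taken from the files above), the following stdlib-only Python walks test/fixtures/dialects/ and reports .sql fixtures that have no .yml counterpart yet:

# check_fixture_pairs.py -- hypothetical helper, not shipped with SQLFluff.
# Lists dialect fixture .sql files that lack the auto-generated .yml
# parse-tree file beside them; those are the ones to regenerate with
#     python test/generate_parse_fixture_yml.py
from pathlib import Path

FIXTURE_ROOT = Path("test/fixtures/dialects")  # layout used in this archive


def missing_yaml(root: Path = FIXTURE_ROOT) -> list[Path]:
    """Return .sql fixtures with no sibling .yml parse fixture."""
    return [
        sql_path
        for sql_path in sorted(root.rglob("*.sql"))
        if not sql_path.with_suffix(".yml").exists()
    ]


if __name__ == "__main__":
    for path in missing_yaml():
        print(f"no YAML parse fixture for: {path}")

Note that hand-editing any of the .yml files shown here (even whitespace changes) will fail the `_hash` check when the fixture tests run, so the generator script is the only supported way to update them.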
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/datetime_units.yml000066400000000000000000003037311503426445100255270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 545f4b69acaf20ee4d8490388de032d0f1b0d2b808895f84d639a32a53dc462b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: year keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: y keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yyy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yyyy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: years keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: month keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: mm keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: mon keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: mons keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: months keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: day keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: d keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dd keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: days keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dayofmonth keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dayofweek keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekday keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dow keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dw keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dayofweekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekday_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dow_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dw_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dayofyear keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yearday keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: doy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: dy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: week keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: w keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: wk keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekofyear keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: woy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: wy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: week_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekofyeariso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: weekofyear_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: quarter keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: q keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: qtr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: qtrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: quarters keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yearofweek keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: yearofweekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: hour keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: h keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: hh keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: hr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: hours keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: hrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: minute keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: m keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: mi keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: min keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: minutes keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: mins keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: second keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: s keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: sec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: seconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: secs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: millisecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ms keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: msec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: milliseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: microsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: us keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: usec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: microseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nanosecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: ns keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nsec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nanosec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nanoseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nanosecs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: nseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_second keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_seconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_millisecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_milliseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_microsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_microseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_nanosecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: epoch_nanoseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: timezone_hour keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: tzh keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: timezone_minute keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT function_contents: bracketed: start_bracket: ( date_part: tzm keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/declare.sql

DECLARE
    profit number(38, 2) DEFAULT 0.0;
    revenue number(38, 2) DEFAULT 110.0;
    c1 CURSOR FOR SELECT price FROM invoices;
    myexception EXCEPTION (-20000, 'my first exception');
BEGIN
    profit := 1.0;
END;

DECLARE
    res RESULTSET DEFAULT (SELECT price FROM invoices);
    c1 CURSOR FOR res;
BEGIN
    RETURN c1;
END;

DECLARE
    res RESULTSET;

DECLARE
    res RESULTSET DEFAULT;

DECLARE
    res RESULTSET DEFAULT ASYNC (SELECT a FROM t001 ORDER BY a);

DECLARE
    res RESULTSET := (SELECT a FROM t001 ORDER BY a);

DECLARE
    res RESULTSET := ASYNC (SELECT a FROM t001 ORDER BY a);
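declare.sql and the declare.yml that follows form one of SQLFluff's dialect parse-fixture pairs: the YAML records the parse tree expected for the SQL. As a quick way to reproduce such a tree outside the test suite, SQLFluff's simple Python API can parse the same statements. This is a minimal sketch, assuming a current SQLFluff install; the exact shape of the value returned by sqlfluff.parse varies between versions:

import sqlfluff

# One statement taken from declare.sql above; per the fixture, the Snowflake
# dialect parses these standalone. Parsing raises an APIParsingError if the
# chosen dialect cannot parse the string.
sql = "DECLARE res RESULTSET DEFAULT (SELECT price FROM invoices);"
tree = sqlfluff.parse(sql, dialect="snowflake")
print(tree)  # JSON-style parse tree, analogous to the YAML fixture below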
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/declare.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8f9de1f34769f07e2598fcce4c02bf5944c214d8eded2fe052998c91dd814029 file: - statement: scripting_declare_statement: - keyword: DECLARE - variable: profit - data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) - keyword: DEFAULT - expression: numeric_literal: '0.0' - statement_terminator: ; - variable: revenue - data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) - keyword: DEFAULT - expression: numeric_literal: '110.0' - statement_terminator: ; - variable: c1 - keyword: CURSOR - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: invoices - statement_terminator: ; - variable: myexception - keyword: EXCEPTION - bracketed: start_bracket: ( sign_indicator: '-' exception_code: '20000' comma: ',' quoted_literal: "'my first exception'" end_bracket: ) - statement_terminator: ; - statement: scripting_block_statement: - keyword: BEGIN - statement: scripting_let_statement: variable: profit assignment_operator: := expression: numeric_literal: '1.0' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: scripting_declare_statement: - keyword: DECLARE - variable: res - data_type: data_type_identifier: RESULTSET - keyword: DEFAULT - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: invoices end_bracket: ) - statement_terminator: ; - variable: c1 - keyword: CURSOR - keyword: FOR - variable: res - statement_terminator: ; - statement: scripting_block_statement: - keyword: BEGIN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: c1 - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: scripting_declare_statement: - keyword: DECLARE - variable: res - keyword: RESULTSET - statement_terminator: ; - statement: scripting_declare_statement: - keyword: DECLARE - variable: res - keyword: RESULTSET - keyword: DEFAULT - statement_terminator: ; - statement: scripting_declare_statement: - keyword: DECLARE - variable: res - data_type: data_type_identifier: RESULTSET - keyword: DEFAULT - expression: function: function_name: function_name_identifier: ASYNC function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t001 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: scripting_declare_statement: keyword: DECLARE variable: res data_type: data_type_identifier: RESULTSET assignment_operator: := expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference:
naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t001 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: scripting_declare_statement: keyword: DECLARE variable: res data_type: data_type_identifier: RESULTSET assignment_operator: := expression: function: function_name: function_name_identifier: ASYNC function_contents: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t001 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/delete.sql

delete from leased_bicycles;
delete from leased_bicycles as lb;
delete from x using y, z;
delete from x where 1 = 2;
delete from leased_bicycles using returned_bicycles where leased_bicycles.bicycle_id = returned_bicycles.bicycle_id;
delete from leased_bicycles as lb using returned_bicycles as rb where lb.bicycle_id = rb.bicycle_id;
delete from leased_bicycles lb using returned_bicycles rb where lb.bicycle_id = rb.bicycle_id;
delete from leased_bicycles using returned_bicycles, broken_bicycles where leased_bicycles.bicycle_id = returned_bicycles.bicycle_id and leased_bicycles.bicycle_id = broken_bicycles.bicycle_id;
delete from leased_bicycles as lb using returned_bicycles as rb, broken_bicycles as bb where lb.bicycle_id = rb.bicycle_id and lb.bicycle_id = bb.bicycle_id;
delete from leased_bicycles lb using returned_bicycles rb, broken_bicycles bb where lb.bicycle_id = rb.bicycle_id and lb.bicycle_id = bb.bicycle_id;
delete from leased_bicycles using (select bicycle_id as bicycle_id from returned_bicycles) as returned where leased_bicycles.bicycle_id = returned.bicycle_id;
delete from leased_bicycles using (select bicycle_id as bicycle_id from returned_bicycles where 1=2) as returned where leased_bicycles.bicycle_id = returned.bicycle_id;

sqlfluff-3.4.2/test/fixtures/dialects/snowflake/delete.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
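The `_hash` guard described in the comment above can be pictured with a short sketch. The scheme shown is an assumption for illustration only — a SHA-256 digest of the fixture with its own `_hash:` line excluded, which would match the 64-hex-character values seen in these files — and SQLFluff's actual computation lives in its test helpers and may differ:

import hashlib
from pathlib import Path

def fixture_hash(path: str) -> str:
    # Hypothetical recomputation of a fixture's _hash field: digest every
    # line of the file except the `_hash:` line itself. Assumed scheme,
    # for illustration; not necessarily SQLFluff's real algorithm.
    lines = Path(path).read_text(encoding="utf-8").splitlines(keepends=True)
    body = "".join(line for line in lines if not line.startswith("_hash:"))
    return hashlib.sha256(body.encode("utf-8")).hexdigest()

# e.g. fixture_hash("test/fixtures/dialects/snowflake/delete.yml")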
_hash: 3f81a445f049a9cad06b1a48a71c9048582c097f966752ec51f575c7b13c3e6a file: - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: lb - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: x - keyword: using - table_expression: table_reference: naked_identifier: y - comma: ',' - table_expression: table_reference: naked_identifier: z - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: x - where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_bicycles - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: rb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: naked_identifier: rb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_bicycles - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: broken_bicycles - dot: . 
- naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: rb - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - alias_expression: alias_operator: keyword: as naked_identifier: bb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: naked_identifier: rb - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - alias_expression: naked_identifier: bb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: bicycle_id alias_expression: alias_operator: keyword: as naked_identifier: bicycle_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_bicycles end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: returned - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned - dot: . 
- naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: bicycle_id alias_expression: alias_operator: keyword: as naked_identifier: bicycle_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_bicycles where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: returned - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned - dot: . - naked_identifier: bicycle_id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/describe_statements.sql000066400000000000000000000055631503426445100265400ustar00rootroot00000000000000DESCRIBE RESULT 'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'; DESC RESULT 'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'; DESCRIBE RESULT LAST_QUERY_ID(); DESC RESULT LAST_QUERY_ID(); DESCRIBE NETWORK POLICY my_policy; DESC NETWORK POLICY my_policy; DESCRIBE SHARE sales_s; DESC SHARE sales_s; DESCRIBE SHARE ab67890.sales_s; DESC SHARE ab67890.sales_s; DESCRIBE USER test_user; DESC USER test_user; DESCRIBE WAREHOUSE my_warehouse; DESC WAREHOUSE my_warehouse; DESCRIBE WAREHOUSE "my warehouse"; DESC WAREHOUSE "my warehouse"; DESCRIBE DATABASE my_database; DESC DATABASE my_database; DESCRIBE API INTEGRATION my_integration; DESC API INTEGRATION my_integration; DESCRIBE NOTIFICATION INTEGRATION my_integration; DESC NOTIFICATION INTEGRATION my_integration; DESCRIBE SECURITY INTEGRATION my_integration; DESC SECURITY INTEGRATION my_integration; DESCRIBE STORAGE INTEGRATION my_integration; DESC STORAGE INTEGRATION my_integration; DESCRIBE INTEGRATION my_integration; DESC INTEGRATION my_integration; DESCRIBE SESSION POLICY my_session_policy; DESC SESSION POLICY my_session_policy; DESCRIBE SCHEMA my_schema; DESC SCHEMA my_schema; DESCRIBE SCHEMA my_database.my_schema; DESC SCHEMA my_database.my_schema; DESCRIBE TABLE my_table; DESC TABLE my_table; DESCRIBE TABLE my_database.my_schema.my_table; DESC TABLE my_database.my_schema.my_table; DESCRIBE TABLE my_table TYPE = COLUMNS; DESC TABLE my_table TYPE = COLUMNS; DESCRIBE TABLE my_table TYPE = STAGE; DESC TABLE my_table TYPE = STAGE; DESCRIBE EXTERNAL TABLE my_table; DESC EXTERNAL TABLE my_table; DESCRIBE EXTERNAL TABLE my_table TYPE = COLUMNS; DESC EXTERNAL TABLE my_table TYPE = COLUMNS; DESCRIBE EXTERNAL TABLE my_table TYPE = STAGE; DESC EXTERNAL TABLE my_table TYPE = STAGE; DESCRIBE VIEW my_view; DESC VIEW my_view; DESCRIBE VIEW my_database.my_schema.my_view; DESC VIEW my_database.my_schema.my_view; DESCRIBE MATERIALIZED VIEW my_view; DESC MATERIALIZED VIEW my_view; DESCRIBE MATERIALIZED VIEW my_database.my_schema.my_view; DESC MATERIALIZED VIEW my_database.my_schema.my_view; DESCRIBE SEQUENCE my_sequence; DESC SEQUENCE my_sequence; DESCRIBE MASKING POLICY my_masking_policy; DESC MASKING POLICY my_masking_policy; DESCRIBE ROW ACCESS POLICY my_row_access_policy; DESC ROW ACCESS POLICY my_row_access_policy; DESCRIBE FILE FORMAT 
my_file_format; DESC FILE FORMAT my_file_format; DESCRIBE STAGE my_stage; DESC STAGE my_stage; DESCRIBE PIPE my_pipe; DESC PIPE my_pipe; DESCRIBE STREAM my_stream; DESC STREAM my_stream; DESCRIBE TASK my_task; DESC TASK my_task; DESCRIBE FUNCTION multiply(NUMBER, NUMBER); DESC FUNCTION multiply(NUMBER, NUMBER); DESCRIBE PROCEDURE my_pi(); DESC PROCEDURE my_pi(); DESCRIBE PROCEDURE area_of_circle(FLOAT); DESC PROCEDURE area_of_circle(FLOAT); DESCRIBE EXTERNAL VOLUME my_volume; DESC PASSWORD POLICY password_policy_prod_1; DESCRIBE CORTEX SEARCH SERVICE mysvc; DESC CORTEX SEARCH SERVICE mysvc; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/describe_statements.yml000066400000000000000000000423331503426445100265360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2d90ff90e34181f6d5c7eda872c6ad20f6e397214b53e9f53306eb38f82f5e7b file: - statement: describe_statement: - keyword: DESCRIBE - keyword: RESULT - quoted_literal: "'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'" - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: RESULT - quoted_literal: "'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'" - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: RESULT - keyword: LAST_QUERY_ID - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: RESULT - keyword: LAST_QUERY_ID - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SHARE - object_reference: naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SHARE - object_reference: naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SHARE - object_reference: - naked_identifier: ab67890 - dot: . - naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SHARE - object_reference: - naked_identifier: ab67890 - dot: . 
- naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: USER - object_reference: naked_identifier: test_user - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: USER - object_reference: naked_identifier: test_user - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: WAREHOUSE - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: WAREHOUSE - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: WAREHOUSE - object_reference: quoted_identifier: '"my warehouse"' - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: WAREHOUSE - object_reference: quoted_identifier: '"my warehouse"' - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: NOTIFICATION - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: NOTIFICATION - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STORAGE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STORAGE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SESSION - keyword: POLICY - object_reference: naked_identifier: my_session_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SESSION - keyword: POLICY - object_reference: naked_identifier: my_session_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - 
statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - schema_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SCHEMA - schema_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: VIEW - table_reference: naked_identifier: my_view - 
statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SEQUENCE - sequence_reference: naked_identifier: my_sequence - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SEQUENCE - sequence_reference: naked_identifier: my_sequence - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: my_masking_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: my_masking_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_row_access_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_row_access_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STAGE - object_reference: naked_identifier: my_stage - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STAGE - object_reference: naked_identifier: my_stage - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PIPE - object_reference: naked_identifier: my_pipe - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PIPE - object_reference: naked_identifier: my_pipe - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STREAM - object_reference: naked_identifier: my_stream - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STREAM - object_reference: naked_identifier: my_stream - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TASK - object_reference: 
naked_identifier: my_task - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TASK - object_reference: naked_identifier: my_task - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FUNCTION - function_name: function_name_identifier: multiply - bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: multiply - bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PROCEDURE - function_name: function_name_identifier: my_pi - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PROCEDURE - function_name: function_name_identifier: my_pi - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PROCEDURE - function_name: function_name_identifier: area_of_circle - bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PROCEDURE - function_name: function_name_identifier: area_of_circle - bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: my_volume - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PASSWORD - keyword: POLICY - password_policy_reference: naked_identifier: password_policy_prod_1 - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/double_quote_escapes.sql000066400000000000000000000002571503426445100266760ustar00rootroot00000000000000select 1 as foo, 2 as "foo", 3 as """foo""", 4 as """""foo""""", bar, "bar", """bar""", """""bar""""" from """""a"""""."""""b"""""."""""c""""" sqlfluff-3.4.2/test/fixtures/dialects/snowflake/double_quote_escapes.yml000066400000000000000000000040641503426445100267000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
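#
# Background for this fixture: inside a Snowflake quoted identifier, a
# doubled double quote escapes one literal double quote, so """foo""" names
# the identifier "foo" (literal quotes included) and """""foo""""" names
# ""foo"". The parser keeps the raw escaped form, as in the tree below.
# For example:
#
#   select 1 as """foo"""  -- the alias is the identifier "foo", quotes and all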
_hash: 73ef8ab95d643074317bc5120d0805192913a9905afc31b2db4e49a72da8a9d0 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: foo - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as quoted_identifier: '"foo"' - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: as quoted_identifier: '"""foo"""' - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: alias_operator: keyword: as quoted_identifier: '"""""foo"""""' - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"bar"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"""bar"""' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"""""bar"""""' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"""""a"""""' - dot: . - quoted_identifier: '"""""b"""""' - dot: . - quoted_identifier: '"""""c"""""' sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_dynamic_table.sql000066400000000000000000000001051503426445100263130ustar00rootroot00000000000000DROP DYNAMIC TABLE my_table; DROP DYNAMIC TABLE IF EXISTS my_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_dynamic_table.yml000066400000000000000000000014641503426445100263260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 768773afafbf00637796a35f27f6e969b650b9b1c040332f15cab4dc776facb4 file: - statement: drop_dynamic_table_segment: - keyword: DROP - keyword: DYNAMIC - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: drop_dynamic_table_segment: - keyword: DROP - keyword: DYNAMIC - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_iceberg_table.sql000066400000000000000000000001051503426445100262670ustar00rootroot00000000000000DROP ICEBERG TABLE my_table; DROP ICEBERG TABLE IF EXISTS my_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_iceberg_table.yml000066400000000000000000000014701503426445100262770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9f5643694cd0e3139eb9f6daec5452d1916d4a5b947aa93fcf5520299640ae22 file: - statement: drop_iceberg_table_statement: - keyword: DROP - keyword: ICEBERG - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: drop_iceberg_table_statement: - keyword: DROP - keyword: ICEBERG - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_password_policy.sql000066400000000000000000000000631503426445100267440ustar00rootroot00000000000000DROP PASSWORD POLICY password_policy_production_1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_password_policy.yml000066400000000000000000000011571503426445100267530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e194ef99ff40734a8b1ec6f81d95194358bc9a657ad1f2fa422c05797e4cedf8 file: statement: drop_password_policy_statement: - keyword: DROP - keyword: PASSWORD - keyword: POLICY - password_policy_reference: naked_identifier: password_policy_production_1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_resource_monitor.sql000066400000000000000000000000471503426445100271230ustar00rootroot00000000000000DROP RESOURCE MONITOR IF EXISTS my_rm; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_resource_monitor.yml000066400000000000000000000011711503426445100271240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0cca54937edbc89731dc4b40d75f884adfbf0c51d98ce4b88adb3ae15448d150 file: statement: drop_resource_monitor_statement: - keyword: DROP - keyword: RESOURCE - keyword: MONITOR - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_rm statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_statements.sql000066400000000000000000000043731503426445100257220ustar00rootroot00000000000000DROP CONNECTION MY_SCHEMA.T1; DROP CONNECTION IF EXISTS MY_SCHEMA.T1; DROP DATABASE MYTESTDB2; DROP DATABASE IF EXISTS MYTESTDB2; DROP DATABASE IF EXISTS MYTESTDB2 CASCADE; DROP EXTERNAL TABLE MY_SCHEMA.T1; DROP EXTERNAL TABLE IF EXISTS MY_SCHEMA.T1; DROP EXTERNAL TABLE IF EXISTS MY_SCHEMA.T1 RESTRICT; DROP FILE FORMAT MY_SCHEMA.MY_FORMAT; DROP FILE FORMAT IF EXISTS MY_SCHEMA.MY_FORMAT; DROP FUNCTION MY_SCHEMA.MY_FUNCTION(NUMBER, NUMBER); DROP FUNCTION IF EXISTS MY_SCHEMA.MY_FUNCTION(NUMBER, NUMBER); DROP INTEGRATION T2; DROP API INTEGRATION IF EXISTS T2; DROP MANAGED ACCOUNT READER_ACCT1; DROP MASKING POLICY SSN_MASK; DROP MATERIALIZED VIEW MY_SCHEMA.MV1; DROP MATERIALIZED VIEW IF EXISTS MY_SCHEMA.MV1; DROP NETWORK POLICY MY_POLICY; DROP NETWORK POLICY IF EXISTS MY_POLICY; DROP PIPE MY_SCHEMA.MYPIPE; DROP PIPE IF EXISTS MY_SCHEMA.MYPIPE; DROP PROCEDURE MY_SCHEMA.ADD_ACCOUNTING_USER(VARCHAR); DROP PROCEDURE IF EXISTS MY_SCHEMA.ADD_ACCOUNTING_USER(VARCHAR); DROP RESOURCE MONITOR MY_MONITOR_RESOURCE; DROP ROLE MYROLE; DROP ROLE IF EXISTS MYROLE; DROP ROW ACCESS POLICY RAP_TABLE_EMPLOYEE_INFO; DROP ROW ACCESS POLICY IF EXISTS RAP_TABLE_EMPLOYEE_INFO; DROP SCHEMA MY_SCHEMA; DROP SCHEMA IF EXISTS MY_SCHEMA; DROP SCHEMA IF EXISTS MY_SCHEMA CASCADE; DROP SEQUENCE MY_SCHEMA.INVOICE_SEQUENCE_NUMBER; DROP SEQUENCE IF EXISTS MY_SCHEMA.INVOICE_SEQUENCE_NUMBER; DROP SEQUENCE IF EXISTS MY_SCHEMA.INVOICE_SEQUENCE_NUMBER CASCADE; DROP SESSION POLICY SESSION_POLICY_PRODUCTION_1; DROP SESSION POLICY IF EXISTS SESSION_POLICY_PRODUCTION_1; DROP SHARE SALES_S; DROP STAGE MY_SCHEMA.MY_STAGE; DROP STAGE IF EXISTS MY_SCHEMA.MY_STAGE; DROP STREAM MY_SCHEMA.T2; DROP STREAM IF EXISTS MY_SCHEMA.T2; DROP TABLE MY_SCHEMA.T2; DROP TABLE IF EXISTS MY_SCHEMA.T2; DROP TABLE IF EXISTS MY_SCHEMA.T2 CASCADE; DROP TAG COST_CENTER; DROP TAG IF EXISTS COST_CENTER; DROP TASK MY_SCHEAM.T2; DROP TASK IF EXISTS MY_SCHEAM.T2; DROP USER USER1; DROP USER IF EXISTS USER1; DROP VIEW MY_SCHEMA.MY_VIEW; DROP VIEW IF EXISTS MY_SCHEMA.MY_VIEW; DROP WAREHOUSE MY_WAREHOUSE; DROP WAREHOUSE IF EXISTS MY_WAREHOUSE; DROP EXTERNAL VOLUME my_volume; DROP EXTERNAL VOLUME IF EXISTS foo; DROP CORTEX SEARCH SERVICE mysvc; DROP CORTEX SEARCH SERVICE IF EXISTS mysvc; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/drop_statements.yml000066400000000000000000000340061503426445100257200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fb7e50ac0bf24502555bba92a61b34db04008dba322c5cc2e739c50a51f4adc9 file: - statement: drop_object_statement: - keyword: DROP - keyword: CONNECTION - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: T1 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: MYTESTDB2 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: MYTESTDB2 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: MYTESTDB2 - keyword: CASCADE - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - keyword: RESTRICT - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: FILE - keyword: FORMAT - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_FORMAT - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_FORMAT - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: MY_FUNCTION - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: MY_FUNCTION - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: INTEGRATION - object_reference: naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: API - keyword: INTEGRATION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: MANAGED - keyword: ACCOUNT - naked_identifier: READER_ACCT1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: MASKING - keyword: POLICY - naked_identifier: SSN_MASK - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MV1 - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: MV1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: NETWORK - keyword: POLICY - naked_identifier: MY_POLICY - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: NETWORK - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: MY_POLICY - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: PIPE - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MYPIPE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: PIPE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MYPIPE - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: ADD_ACCOUNTING_USER - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: ADD_ACCOUNTING_USER - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: RESOURCE - keyword: MONITOR - object_reference: naked_identifier: MY_MONITOR_RESOURCE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - naked_identifier: MYROLE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - naked_identifier: MYROLE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: RAP_TABLE_EMPLOYEE_INFO - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: RAP_TABLE_EMPLOYEE_INFO - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: MY_SCHEMA - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: MY_SCHEMA - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: MY_SCHEMA - keyword: CASCADE - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: INVOICE_SEQUENCE_NUMBER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: INVOICE_SEQUENCE_NUMBER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: INVOICE_SEQUENCE_NUMBER - keyword: CASCADE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SESSION - keyword: POLICY - naked_identifier: SESSION_POLICY_PRODUCTION_1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SESSION - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: SESSION_POLICY_PRODUCTION_1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SHARE - object_reference: naked_identifier: SALES_S - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STAGE - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_STAGE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STAGE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_STAGE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STREAM - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STREAM - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - keyword: CASCADE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TAG - object_reference: naked_identifier: COST_CENTER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TAG - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: COST_CENTER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TASK - object_reference: - naked_identifier: MY_SCHEAM - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TASK - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEAM - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: USER1 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - role_reference: naked_identifier: USER1 - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_VIEW - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: MY_VIEW - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: WAREHOUSE - naked_identifier: MY_WAREHOUSE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: WAREHOUSE - keyword: IF - keyword: EXISTS - naked_identifier: MY_WAREHOUSE - statement_terminator: ; - statement: drop_external_volume_statement: - keyword: DROP - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: my_volume - statement_terminator: ; - statement: drop_external_volume_statement: - keyword: DROP - keyword: EXTERNAL - keyword: VOLUME - keyword: IF - keyword: EXISTS - external_volume_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: mysvc - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mysvc - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/escape.sql000066400000000000000000000001351503426445100237370ustar00rootroot00000000000000-- Backslash escapes work in snowflake select 'c\' ' as escaped, 'c\' '' ' as escaped_double sqlfluff-3.4.2/test/fixtures/dialects/snowflake/escape.yml000066400000000000000000000015551503426445100237500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
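#
# Background for this fixture: Snowflake string literals accept both
# backslash escapes and doubled single quotes, and the paired SQL mixes the
# two styles in one literal. For example, both of these evaluate to the
# string it's:
#
#   select 'it\'s' as a, 'it''s' as b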
_hash: 9abeaf5b6fa5464641a885da264102a6f0980af3db0b2911466cceed0d9a292e file: statement: select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'c\\' '" alias_expression: alias_operator: keyword: as naked_identifier: escaped - comma: ',' - select_clause_element: quoted_literal: "'c\\' '' '" alias_expression: alias_operator: keyword: as naked_identifier: escaped_double sqlfluff-3.4.2/test/fixtures/dialects/snowflake/exception.sql000066400000000000000000000033721503426445100245030ustar00rootroot00000000000000EXCEPTION WHEN EXCEPTION_2 THEN RETURN SQLERRM; WHEN EXCEPTION_1 THEN RETURN SQLERRM; EXCEPTION WHEN MY_FIRST_EXCEPTION OR MY_SECOND_EXCEPTION THEN RETURN 123; WHEN MY_FOURTH_EXCEPTION THEN RETURN 4; WHEN OTHER THEN RETURN 99; EXCEPTION WHEN STATEMENT_ERROR THEN RETURN OBJECT_CONSTRUCT('Error type', 'STATEMENT_ERROR', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); WHEN EXPRESSION_ERROR THEN RETURN OBJECT_CONSTRUCT('Error type', 'EXPRESSION_ERROR', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); WHEN OTHER THEN RETURN OBJECT_CONSTRUCT('Error type', 'Other error', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); EXCEPTION WHEN STATEMENT_ERROR THEN RETURN OBJECT_CONSTRUCT('Error type', 'STATEMENT_ERROR', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); WHEN EXPRESSION_ERROR THEN RETURN OBJECT_CONSTRUCT('Error type', 'EXPRESSION_ERROR', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); WHEN OTHER THEN RETURN OBJECT_CONSTRUCT('Error type', 'Other error', 'SQLCODE', SQLCODE, 'SQLERRM', SQLERRM, 'SQLSTATE', SQLSTATE); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/exception.yml000066400000000000000000000245461503426445100245130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
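#
# Background for this fixture: a Snowflake Scripting EXCEPTION block may
# chain several WHEN <exception> THEN handlers, with WHEN OTHER THEN as the
# catch-all, and the built-ins SQLCODE, SQLERRM and SQLSTATE expose the
# error details. The dialect parses those built-ins as plain column
# references, as in the tree below. The minimal shape of a handler:
#
#   exception
#     when other then
#       return sqlerrm;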
_hash: 3f88ed4788e2dd7a08a8fe8f57bcd38f0d9b03effb440eacfa9bb183661dcb5c file: - statement: exception_block_statement: - keyword: EXCEPTION - keyword: WHEN - object_reference: naked_identifier: EXCEPTION_2 - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: SQLERRM - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: EXCEPTION_1 - keyword: THEN - statement: return_statement: keyword: RETURN expression: column_reference: naked_identifier: SQLERRM - statement_terminator: ; - statement: exception_block_statement: - keyword: EXCEPTION - keyword: WHEN - object_reference: naked_identifier: MY_FIRST_EXCEPTION - keyword: OR - object_reference: naked_identifier: MY_SECOND_EXCEPTION - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '123' - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: MY_FOURTH_EXCEPTION - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '4' - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: OTHER - keyword: THEN - statement: return_statement: keyword: RETURN expression: numeric_literal: '99' - statement_terminator: ; - statement: exception_block_statement: - keyword: EXCEPTION - keyword: WHEN - object_reference: naked_identifier: STATEMENT_ERROR - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'STATEMENT_ERROR'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: EXPRESSION_ERROR - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'EXPRESSION_ERROR'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: OTHER - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'Other error'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: 
quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; - statement: exception_block_statement: - keyword: EXCEPTION - keyword: WHEN - object_reference: naked_identifier: STATEMENT_ERROR - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'STATEMENT_ERROR'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: EXPRESSION_ERROR - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'EXPRESSION_ERROR'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; - keyword: WHEN - object_reference: naked_identifier: OTHER - keyword: THEN - statement: return_statement: keyword: RETURN expression: function: function_name: function_name_identifier: OBJECT_CONSTRUCT function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'Error type'" - comma: ',' - expression: quoted_literal: "'Other error'" - comma: ',' - expression: quoted_literal: "'SQLCODE'" - comma: ',' - expression: column_reference: naked_identifier: SQLCODE - comma: ',' - expression: quoted_literal: "'SQLERRM'" - comma: ',' - expression: column_reference: naked_identifier: SQLERRM - comma: ',' - expression: quoted_literal: "'SQLSTATE'" - comma: ',' - expression: column_reference: naked_identifier: SQLSTATE - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/execute_immediate.sql000066400000000000000000000006321503426445100261610ustar00rootroot00000000000000EXECUTE IMMEDIATE 'select 1'; EXECUTE IMMEDIATE $$ SELECT PI(); $$; SET pie = $$ SELECT PI(); $$ ; SET one = 1; SET two = 2; EXECUTE IMMEDIATE $pie; EXECUTE IMMEDIATE $pie USING (one, two); SET three = 'select ? + ?'; EXECUTE IMMEDIATE :three; EXECUTE IMMEDIATE :three USING (one, two); EXECUTE IMMEDIATE FROM './insert-inventory.sql'; EXECUTE IMMEDIATE FROM @my_stage/scripts/create-inventory.sql; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/execute_immediate.yml000066400000000000000000000054661503426445100261750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 280a0b56fc2de37f71a8653e8a069e5567f49e74de2018efd9df8149d29300a3 file: - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - quoted_literal: "'select 1'" - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - quoted_literal: "$$\n SELECT PI();\n$$" - statement_terminator: ; - statement: set_statement: keyword: SET variable: pie comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "$$\n SELECT PI();\n$$" - statement_terminator: ; - statement: set_statement: keyword: SET variable: one comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: two comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '2' - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - variable: $pie - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - variable: $pie - keyword: USING - bracketed: - start_bracket: ( - variable: one - comma: ',' - variable: two - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: three comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'select ? + ?'" - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - colon: ':' - variable: three - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - colon: ':' - variable: three - keyword: USING - bracketed: - start_bracket: ( - variable: one - comma: ',' - variable: two - end_bracket: ) - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - keyword: FROM - quoted_literal: "'./insert-inventory.sql'" - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - keyword: FROM - storage_location: stage_path: '@my_stage/scripts/create-inventory.sql' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/execute_task.sql000066400000000000000000000000661503426445100251660ustar00rootroot00000000000000EXECUTE TASK my_task; EXECUTE TASK myschema.my_task; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/execute_task.yml000066400000000000000000000014061503426445100251670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dd10bd2ab1e3898dc8e3124691d2007bbdc93d92d1a00b098cbfd5e3e10430a2 file: - statement: execute_task_clause: - keyword: EXECUTE - keyword: TASK - object_reference: naked_identifier: my_task - statement_terminator: ; - statement: execute_task_clause: - keyword: EXECUTE - keyword: TASK - object_reference: - naked_identifier: myschema - dot: . 
- naked_identifier: my_task - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/explain.sql000066400000000000000000000001571503426445100241430ustar00rootroot00000000000000explain using tabular select 1; explain using json select 1; explain using text select 1; explain select 1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/explain.yml000066400000000000000000000026101503426445100241410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bca49ca75c902d69398ecb6af0c0692a1d9fa6d181a81349205f544d0e5091d7 file: - statement: explain_statement: - keyword: explain - keyword: using - keyword: tabular - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: using - keyword: json - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: using - keyword: text - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/first_value_ignore_nulls.sql000066400000000000000000000001741503426445100276050ustar00rootroot00000000000000select a, coalesce(first_value(case when a then b else null end) ignore nulls over (order by e), false) as c from d sqlfluff-3.4.2/test/fixtures/dialects/snowflake/first_value_ignore_nulls.yml000066400000000000000000000054131503426445100276100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
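#
# Background for this fixture: Snowflake puts the null-handling specifier
# between the function call and the OVER clause, and RESPECT NULLS (the
# default) is accepted in the same position as IGNORE NULLS. For example:
#
#   first_value(col) ignore nulls over (order by ts)
#
# The specifier is parsed as bare keywords inside the window function, as
# recorded below.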
_hash: e0e011475f60b6c63766af192fd0795497acb770bd400e777951ae7a1cd02efa file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: coalesce function_contents: bracketed: - start_bracket: ( - expression: function: - function_name: function_name_identifier: first_value - function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: a - keyword: then - expression: column_reference: naked_identifier: b - else_clause: keyword: else expression: null_literal: 'null' - keyword: end end_bracket: ) - keyword: ignore - keyword: nulls - over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: e end_bracket: ) - comma: ',' - expression: boolean_literal: 'false' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: d sqlfluff-3.4.2/test/fixtures/dialects/snowflake/for_in_loop.sql000066400000000000000000000005761503426445100250150ustar00rootroot00000000000000-- Simple LET LET CUR_TABLE CURSOR FOR SELECT * FROM THE_TABLE; -- Simple LOOP BEGIN SELECT 1; SELECT 2; END; -- Simple FOR FOR THE_ROW IN CUR_TABLE DO LET NEW_VAR := THE_ROW.COL1 || "_SUFFIX"; END FOR; -- FOR and LET in LOOP BEGIN LET CUR_TABLE CURSOR FOR SELECT * FROM THE_TABLE; FOR THE_ROW IN CUR_TABLE DO LET NEW_VAR := THE_ROW.COL1 || "_SUFFIX"; END FOR; END; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/for_in_loop.yml000066400000000000000000000071441503426445100250150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99f9afd02394f408f5b951832a492a0964ca2fadb73c3a23fae3886c4c4d7ad3 file: - statement: scripting_let_statement: - keyword: LET - variable: CUR_TABLE - keyword: CURSOR - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: THE_TABLE - statement_terminator: ; - statement: scripting_block_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: for_in_statement: - keyword: FOR - variable: THE_ROW - keyword: IN - variable: CUR_TABLE - keyword: DO - statement: scripting_let_statement: keyword: LET variable: NEW_VAR assignment_operator: := expression: - column_reference: - naked_identifier: THE_ROW - dot: . 
- naked_identifier: COL1 - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"_SUFFIX"' - statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - statement: scripting_block_statement: - keyword: BEGIN - statement: scripting_let_statement: - keyword: LET - variable: CUR_TABLE - keyword: CURSOR - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: THE_TABLE - statement_terminator: ; - statement: for_in_statement: - keyword: FOR - variable: THE_ROW - keyword: IN - variable: CUR_TABLE - keyword: DO - statement: scripting_let_statement: keyword: LET variable: NEW_VAR assignment_operator: := expression: - column_reference: - naked_identifier: THE_ROW - dot: . - naked_identifier: COL1 - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"_SUFFIX"' - statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/frame_clause.sql000066400000000000000000000006121503426445100251250ustar00rootroot00000000000000SELECT a, LAST_VALUE(foo) IGNORE NULLS OVER ( PARTITION BY bar ORDER BY baz ASC ROWS BETWEEN $my_var PRECEDING AND CURRENT ROW ) AS vehicle_type_id_last_value FROM foo ; SELECT account_id , SUM(amount) OVER (ORDER BY date_created RANGE BETWEEN INTERVAL '7 DAYS' PRECEDING AND CURRENT ROW) AS trailing_7d_sum_amount FROM my_database.my_schema.my_table ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/frame_clause.yml000066400000000000000000000076641503426445100251450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 919d4b9b185787a42189481ae34af1e85d360171abe11a7352ff5f163ade188e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: LAST_VALUE - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) - keyword: IGNORE - keyword: NULLS - over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: bar orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: baz - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - variable: $my_var - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: vehicle_type_id_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: account_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: date_created frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: INTERVAL - quoted_literal: "'7 DAYS'" - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: trailing_7d_sum_amount from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/get_statement.sql000066400000000000000000000003331503426445100253420ustar00rootroot00000000000000get @%mytable file://C:\temp\load; get @~/myfiles file:///tmp/data/; get @~/myfiles file:///tmp/data/ PATTERN = '.*foo.*'; get @~/myfiles file:///tmp/data/ PATTERN = $foo; get @~/myfiles file:///tmp/data/ PARALLEL = 1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/get_statement.yml000066400000000000000000000027331503426445100253520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: adea77f32786b1f1d840b06189fe103d5a5180699de9bf6907f0b058afad6630 file: - statement: get_statement: keyword: get stage_path: '@%mytable' unquoted_file_path: file://C:\temp\load - statement_terminator: ; - statement: get_statement: keyword: get stage_path: '@~/myfiles' unquoted_file_path: file:///tmp/data/ - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*foo.*'" - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - variable: $foo - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PARALLEL - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/grant_revoke.sql000066400000000000000000000151531503426445100251730ustar00rootroot00000000000000GRANT OWNERSHIP ON SCHEMA MY_DATABASE.MY_SCHEMA TO ROLE MY_ROLE; GRANT ROLE MY_ROLE TO ROLE MY_OTHER_ROLE; GRANT USE_ANY_ROLE ON INTEGRATION EXTERNAL_OAUTH_1 TO ROLE1; GRANT OWNERSHIP ON TABLE MYSCHEMA.MYTABLE TO ROLE ANALYST; GRANT OWNERSHIP ON ALL TABLES IN SCHEMA PUBLIC TO ROLE ANALYST; GRANT OWNERSHIP ON ALL TABLES IN SCHEMA MYDB.PUBLIC TO ROLE ANALYST; GRANT OWNERSHIP ON ALL TABLES IN SCHEMA MYDB.PUBLIC TO ROLE ANALYST COPY CURRENT GRANTS; GRANT ROLE ROLENAME TO ROLE IDENTIFIER($THIS_ROLE); GRANT OWNERSHIP ON ROLE TEST_ROLE TO ROLE DIFFERENT_ROLE; GRANT ALL ON ALL MATERIALIZED VIEWS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL FILE FORMATS IN DATABASE MY_DB TO ROLE ANALYST; GRANT CREATE TEMPORARY TABLE ON SCHEMA MY_DB.MY_SCHEMA TO ROLE ANALYST; GRANT ALL ON FUTURE PIPES IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON FUTURE FILE FORMATS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON FUTURE MATERIALIZED VIEWS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON FUTURE PIPES IN DATABASE MY_DB TO ROLE ANALYST; GRANT USAGE ON ALL SEQUENCES IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL MATERIALIZED VIEWS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL SEQUENCES IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL FUNCTIONS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL FILE FORMATS IN DATABASE MY_DB TO ROLE ANALYST; GRANT ALL ON ALL STAGES IN DATABASE MY_DB TO ROLE ANALYST; GRANT SELECT ON ALL VIEWS IN DATABASE MY_DB TO ROLE ANALYST; REVOKE ROLE ANALYST FROM ROLE SYSADMIN; REVOKE SELECT, INSERT ON FUTURE TABLES IN SCHEMA MYDB.MYSCHEMA FROM ROLE ROLE1; REVOKE ALL PRIVILEGES ON FUNCTION add5(number) FROM ROLE ANALYST; REVOKE GRANT OPTION FOR OPERATE ON WAREHOUSE REPORT_WH FROM ROLE ANALYST; REVOKE SELECT ON ALL TABLES IN SCHEMA MYDB.MYSCHEMA FROM ROLE ANALYST; REVOKE OPERATE ON WAREHOUSE REPORT_WH FROM ROLE ANALYST; REVOKE REFERENCE_USAGE ON DATABASE DATABASE2 FROM SHARE SHARE1; REVOKE OWNERSHIP ON ROLE TEST_ROLE FROM ROLE DIFFERENT_ROLE; GRANT IMPORTED PRIVILEGES ON SCHEMA MYDB.MYSCHEMA TO APPLICATION MY_APP; GRANT OPERATE ON WAREHOUSE REPORT_WH TO ROLE ANALYST; GRANT OPERATE ON WAREHOUSE REPORT_WH TO ROLE ANALYST WITH GRANT OPTION; GRANT OPERATE ON FUTURE DYNAMIC TABLES IN SCHEMA MYDB.MYSCHEMA TO ROLE ANALYST; GRANT OPERATE ON ALL DYNAMIC TABLES IN SCHEMA MYDB.MYSCHEMA TO ROLE ANALYST; GRANT SELECT ON ALL TABLES IN 
SCHEMA MYDB.MYSCHEMA TO ROLE ANALYST; GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.add5(number) TO ROLE ANALYST; GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.add5(string) TO ROLE ANALYST; GRANT USAGE ON PROCEDURE mydb.myschema.myprocedure(number) TO ROLE ANALYST; GRANT CREATE MATERIALIZED VIEW ON SCHEMA MYDB.MYSCHEMA TO ROLE MYROLE; GRANT SELECT, INSERT ON FUTURE TABLES IN SCHEMA MYDB.MYSCHEMA TO ROLE ROLE1; GRANT USAGE ON FUTURE SCHEMAS IN DATABASE MYDB TO ROLE ROLE1; GRANT USAGE ON DATABASE DATABASE1 TO SHARE SHARE1; GRANT USAGE ON SCHEMA DATABASE1.SCHEMA1 TO SHARE SHARE1; GRANT REFERENCE_USAGE ON DATABASE DATABASE2 TO SHARE SHARE1; GRANT SELECT ON VIEW VIEW2 TO SHARE SHARE1; GRANT USAGE ON DATABASE MYDB TO SHARE SHARE1; GRANT USAGE ON SCHEMA MYDB.PUBLIC TO SHARE SHARE1; GRANT USAGE ON FUNCTION MYDB.SHARED_SCHEMA.FUNCTION1 TO SHARE SHARE1; GRANT SELECT ON ALL TABLES IN SCHEMA MYDB.PUBLIC TO SHARE SHARE1; GRANT USAGE ON SCHEMA MYDB.SHARED_SCHEMA TO SHARE SHARE1; GRANT SELECT ON VIEW MYDB.SHARED_SCHEMA.VIEW1 TO SHARE SHARE1; GRANT SELECT ON VIEW MYDB.SHARED_SCHEMA.VIEW3 TO SHARE SHARE1; GRANT ROLE ANALYST TO USER USER1; REVOKE ALL PRIVILEGES ON PROCEDURE clean_schema(string) FROM ROLE ANALYST; REVOKE ALL PRIVILEGES ON FUNCTION add5(string) FROM ROLE ANALYST; REVOKE SELECT ON VIEW MYDB.SHARED_SCHEMA.VIEW1 FROM SHARE SHARE1; REVOKE USAGE ON SCHEMA MYDB.SHARED_SCHEMA FROM SHARE SHARE1; REVOKE SELECT ON ALL TABLES IN SCHEMA MYDB.PUBLIC FROM SHARE SHARE1; REVOKE USAGE ON SCHEMA MYDB.PUBLIC FROM SHARE SHARE1; REVOKE USAGE ON DATABASE MYDB FROM SHARE SHARE1; GRANT APPLY MASKING POLICY ON ACCOUNT TO ROLE MY_ROLE; GRANT APPLY ROW ACCESS POLICY ON ACCOUNT TO ROLE MY_ROLE; GRANT APPLY SESSION POLICY ON ACCOUNT TO ROLE MY_ROLE; GRANT APPLY TAG ON ACCOUNT TO ROLE MY_ROLE; GRANT ATTACH POLICY ON ACCOUNT TO ROLE MY_ROLE; GRANT EXECUTE ALERT ON ACCOUNT TO ROLE MY_ROLE; GRANT EXECUTE TASK ON ACCOUNT TO ROLE MY_ROLE; GRANT EXECUTE MANAGED TASK ON ACCOUNT TO ROLE MY_ROLE; GRANT IMPORT SHARE ON ACCOUNT TO ROLE MY_ROLE; GRANT MANAGE GRANTS ON ACCOUNT TO ROLE MY_ROLE; GRANT MONITOR EXECUTION ON ACCOUNT TO ROLE MY_ROLE; GRANT MONITOR USAGE ON ACCOUNT TO ROLE MY_ROLE; GRANT MONITOR ON USER SOME_USER TO ROLE MY_ROLE; REVOKE MONITOR ON USER SOME_USER FROM ROLE MY_ROLE; GRANT OVERRIDE SHARE RESTRICTIONS ON ACCOUNT TO ROLE MY_ROLE; GRANT CREATE ACCOUNT ON ACCOUNT TO ROLE MY_ROLE; GRANT CREATE SHARE ON ACCOUNT TO ROLE MY_ROLE; GRANT CREATE NETWORK POLICY ON ACCOUNT TO ROLE MY_ROLE; GRANT CREATE TAG ON SCHEMA MY_SCHEMA TO ROLE MY_ROLE; GRANT CREATE DATA EXCHANGE LISTING ON ACCOUNT TO ROLE MY_ROLE; GRANT CREATE CORTEX SEARCH SERVICE ON SCHEMA MY_SCHEMA TO ROLE MY_ROLE; GRANT USAGE ON CORTEX SEARCH SERVICE MY_SERVICE TO ROLE MY_ROLE; GRANT MANAGE ACCOUNT SUPPORT CASES ON ACCOUNT TO ROLE MY_ROLE; GRANT MANAGE ORGANIZATION SUPPORT CASES ON ACCOUNT TO ROLE MY_ROLE; GRANT MANAGE USER SUPPORT CASES ON ACCOUNT TO ROLE MY_ROLE; GRANT ADD SEARCH OPTIMIZATION ON SCHEMA MY_SCHEMA TO ROLE MY_ROLE; GRANT DATABASE ROLE DBNAME.ROLENAME TO ROLE PUBLIC; GRANT DATABASE ROLE DBROLENAME TO ROLE PUBLIC; REVOKE DATABASE ROLE DBNAME.ROLENAME FROM ROLE PUBLIC; REVOKE DATABASE ROLE DBROLENAME FROM ROLE PUBLIC; GRANT SELECT ON TABLE DBNAME.SCHEMANAME.TABLENAME TO DATABASE ROLE DBNAME.ROLENAME; GRANT SELECT ON TABLE DBNAME.SCHEMANAME.TABLENAME TO DATABASE ROLE DBROLENAME; REVOKE SELECT ON TABLE DBNAME.SCHEMANAME.TABLENAME FROM DATABASE ROLE DBNAME.ROLENAME; REVOKE SELECT ON TABLE DBNAME.SCHEMANAME.TABLENAME FROM DATABASE ROLE DBROLENAME; 
GRANT APPLICATION ROLE DBROLENAME TO ROLE PUBLIC; GRANT CREATE NOTEBOOK ON SCHEMA MY_DB.MYSCHEMA TO ROLE MYROLE; GRANT OWNERSHIP ON NOTEBOOK MY_DB.MYSCHEMA.MYNOTEBOOK TO ROLE MYROLE; GRANT USAGE ON NOTEBOOK MY_DB.MYSCHEMA.MYNOTEBOOK TO ROLE MYROLE; REVOKE CREATE NOTEBOOK ON SCHEMA MY_DB.MYSCHEMA FROM ROLE MYROLE; REVOKE USAGE ON NOTEBOOK MY_DB.MYSCHEMA.MYNOTEBOOK FROM ROLE MYROLE; GRANT CREATE MODEL ON SCHEMA MY_DB.MYSCHEMA TO ROLE MYROLE; GRANT OWNERSHIP ON MODEL MY_DB.MYSCHEMA.MYMODEL TO ROLE MYROLE; GRANT USAGE ON MODEL MY_DB.MYSCHEMA.MYMODEL TO ROLE MYROLE; REVOKE CREATE MODEL ON SCHEMA MY_DB.MYSCHEMA FROM ROLE MYROLE; REVOKE USAGE ON MODEL MY_DB.MYSCHEMA.MYMODEL FROM ROLE MYROLE; GRANT USAGE ON EXTERNAL VOLUME ext_vol TO ROLE MY_ROLE; REVOKE USAGE ON EXTERNAL VOLUME ext_vol FROM ROLE MY_ROLE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/grant_revoke.yml000066400000000000000000001144541503426445100252010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f194fd275c52915911a0c68a8322bfc7deba51306993ae0074b44dba97ec7159 file: - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DATABASE - dot: . - naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ROLE - object_reference: naked_identifier: MY_ROLE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_OTHER_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USE_ANY_ROLE - keyword: 'ON' - keyword: INTEGRATION - object_reference: naked_identifier: EXTERNAL_OAUTH_1 - keyword: TO - role_reference: naked_identifier: ROLE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: TABLE - object_reference: - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYTABLE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: PUBLIC - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: PUBLIC - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . 
- naked_identifier: PUBLIC - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - keyword: COPY - keyword: CURRENT - keyword: GRANTS - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ROLE - object_reference: naked_identifier: ROLENAME - keyword: TO - keyword: ROLE - role_reference: keyword: IDENTIFIER bracketed: start_bracket: ( variable: $THIS_ROLE end_bracket: ) - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: TEST_ROLE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: DIFFERENT_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: MATERIALIZED - keyword: VIEWS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: FILE - keyword: FORMATS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: FUTURE - keyword: PIPES - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: FUTURE - keyword: FILE - keyword: FORMATS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: FUTURE - keyword: MATERIALIZED - keyword: VIEWS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: FUTURE - keyword: PIPES - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: ALL - keyword: SEQUENCES - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: MATERIALIZED - keyword: VIEWS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - 
keyword: ALL - keyword: SEQUENCES - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: FUNCTIONS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: FILE - keyword: FORMATS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: ALL - keyword: STAGES - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: ALL - keyword: VIEWS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MY_DB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ROLE - object_reference: naked_identifier: ANALYST - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: SYSADMIN - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - comma: ',' - keyword: INSERT - keyword: 'ON' - keyword: FUTURE - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ROLE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: GRANT - keyword: OPTION - keyword: FOR - keyword: OPERATE - keyword: 'ON' - keyword: WAREHOUSE - object_reference: naked_identifier: REPORT_WH - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . 
- naked_identifier: MYSCHEMA - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: OPERATE - keyword: 'ON' - keyword: WAREHOUSE - object_reference: naked_identifier: REPORT_WH - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: REFERENCE_USAGE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: DATABASE2 - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: OWNERSHIP - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: TEST_ROLE - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: DIFFERENT_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: IMPORTED - keyword: PRIVILEGES - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: APPLICATION - role_reference: naked_identifier: MY_APP - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OPERATE - keyword: 'ON' - keyword: WAREHOUSE - object_reference: naked_identifier: REPORT_WH - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OPERATE - keyword: 'ON' - keyword: WAREHOUSE - object_reference: naked_identifier: REPORT_WH - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OPERATE - keyword: 'ON' - keyword: FUTURE - keyword: DYNAMIC - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OPERATE - keyword: 'ON' - keyword: ALL - keyword: DYNAMIC - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: FUNCTION - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: FUNCTION - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . 
- function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: PROCEDURE - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - function_name_identifier: myprocedure - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - comma: ',' - keyword: INSERT - keyword: 'ON' - keyword: FUTURE - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ROLE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FUTURE - keyword: SCHEMAS - keyword: IN - keyword: DATABASE - object_reference: naked_identifier: MYDB - keyword: TO - keyword: ROLE - role_reference: naked_identifier: ROLE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: DATABASE1 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: DATABASE1 - dot: . - naked_identifier: SCHEMA1 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: REFERENCE_USAGE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: DATABASE2 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: naked_identifier: VIEW2 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: MYDB - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: PUBLIC - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FUNCTION - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - dot: . 
- naked_identifier: FUNCTION1 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: PUBLIC - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - dot: . - naked_identifier: VIEW1 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - dot: . - naked_identifier: VIEW3 - keyword: TO - keyword: SHARE - role_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ROLE - object_reference: naked_identifier: ANALYST - keyword: TO - keyword: USER - role_reference: naked_identifier: USER1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: PROCEDURE - function_name: function_name_identifier: clean_schema - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: ANALYST - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - dot: . - naked_identifier: VIEW1 - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: SHARED_SCHEMA - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . - naked_identifier: PUBLIC - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MYDB - dot: . 
- naked_identifier: PUBLIC - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: MYDB - keyword: FROM - keyword: SHARE - object_reference: naked_identifier: SHARE1 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: APPLY - keyword: MASKING - keyword: POLICY - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: APPLY - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: APPLY - keyword: SESSION - keyword: POLICY - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: APPLY - keyword: TAG - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ATTACH - keyword: POLICY - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: ALERT - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: TASK - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: MANAGED - keyword: TASK - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: IMPORT - keyword: SHARE - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: GRANTS - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MONITOR - keyword: EXECUTION - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MONITOR - keyword: USAGE - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MONITOR - keyword: 'ON' - keyword: USER - object_reference: naked_identifier: SOME_USER - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: MONITOR - keyword: 'ON' - keyword: USER - object_reference: naked_identifier: SOME_USER - keyword: FROM - keyword: ROLE - object_reference: 
naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OVERRIDE - keyword: SHARE - keyword: RESTRICTIONS - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: ACCOUNT - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: SHARE - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: NETWORK - keyword: POLICY - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: TAG - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: DATA - keyword: EXCHANGE - keyword: LISTING - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: CORTEX - keyword: SEARCH - keyword: SERVICE - object_reference: naked_identifier: MY_SERVICE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: ACCOUNT - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: ORGANIZATION - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: USER - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ADD - keyword: SEARCH - keyword: OPTIMIZATION - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: DATABASE - keyword: ROLE - database_role_reference: - naked_identifier: DBNAME - dot: . 
- naked_identifier: ROLENAME - keyword: TO - keyword: ROLE - role_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: DATABASE - keyword: ROLE - database_role_reference: naked_identifier: DBROLENAME - keyword: TO - keyword: ROLE - role_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: DATABASE - keyword: ROLE - database_role_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: ROLENAME - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: DATABASE - keyword: ROLE - database_role_reference: naked_identifier: DBROLENAME - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLE - object_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: SCHEMANAME - dot: . - naked_identifier: TABLENAME - keyword: TO - keyword: DATABASE - keyword: ROLE - database_role_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: ROLENAME - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLE - object_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: SCHEMANAME - dot: . - naked_identifier: TABLENAME - keyword: TO - keyword: DATABASE - keyword: ROLE - role_reference: naked_identifier: DBROLENAME - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: TABLE - object_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: SCHEMANAME - dot: . - naked_identifier: TABLENAME - keyword: FROM - keyword: DATABASE - keyword: ROLE - object_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: ROLENAME - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: TABLE - object_reference: - naked_identifier: DBNAME - dot: . - naked_identifier: SCHEMANAME - dot: . - naked_identifier: TABLENAME - keyword: FROM - keyword: DATABASE - keyword: ROLE - object_reference: naked_identifier: DBROLENAME - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: APPLICATION - keyword: ROLE - object_reference: naked_identifier: DBROLENAME - keyword: TO - keyword: ROLE - role_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: NOTEBOOK - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: NOTEBOOK - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYNOTEBOOK - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: NOTEBOOK - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . 
- naked_identifier: MYNOTEBOOK - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: CREATE - keyword: NOTEBOOK - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: NOTEBOOK - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYNOTEBOOK - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CREATE - keyword: MODEL - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: MODEL - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYMODEL - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: MODEL - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYMODEL - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: CREATE - keyword: MODEL - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: MODEL - object_reference: - naked_identifier: MY_DB - dot: . - naked_identifier: MYSCHEMA - dot: . - naked_identifier: MYMODEL - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: MYROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: EXTERNAL - keyword: VOLUME - object_reference: naked_identifier: ext_vol - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: USAGE - keyword: 'ON' - keyword: EXTERNAL - keyword: VOLUME - object_reference: naked_identifier: ext_vol - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: MY_ROLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/group_by_all.sql000066400000000000000000000001471503426445100251600ustar00rootroot00000000000000select state, city, sum(retail_price * quantity) as gross_revenue from sales group by all; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/group_by_all.yml000066400000000000000000000031241503426445100251600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bbfa1a56dce4d6d42065a5991b91a1b479a3ac34635697837a879a7effb776f0 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: state - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: retail_price - binary_operator: '*' - column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: gross_revenue from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales groupby_clause: - keyword: group - keyword: by - keyword: all statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/identifier_pseudo_function.sql000066400000000000000000000016601503426445100301110ustar00rootroot00000000000000-- https://docs.snowflake.com/en/sql-reference/identifier-literal.html -- Although IDENTIFIER(...) uses the syntax of a function, it is not a true function and is not returned by commands such as SHOW FUNCTIONS. USE SCHEMA identifier('my_schema'); USE SCHEMA identifier('{{ params.schema_name }}'); create or replace database identifier('my_db'); create or replace schema identifier('my_schema'); create or replace table identifier('my_db.my_schema.my_table') (c1 number); create or replace table identifier('"my_table"') (c1 number); show tables in schema identifier('my_schema'); use schema identifier($schema_name); insert into identifier($table_name) values (1), (2), (3); select * from identifier($table_name) order by 1; select * from identifier('my_table') order by 1; select speed_of_light(); select identifier($my_function_name)(); select identifier('my_function_name')(); select identifier('my_function_name')(1, 2, 3); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/identifier_pseudo_function.yml000066400000000000000000000161011503426445100301070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ef04f6116268b1bcc68b85e7471e01acf8b2786766f050db2099a487b5952c08 file: - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'{{ params.schema_name }}'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: database - object_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_db'" end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: or - keyword: replace - keyword: schema - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_db.my_schema.my_table'" end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'\"my_table\"'" end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: show - keyword: tables - keyword: in - keyword: schema - object_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: schema - schema_reference: keyword: identifier bracketed: start_bracket: ( variable: $schema_name end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: keyword: identifier bracketed: start_bracket: ( variable: $table_name end_bracket: ) - values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: keyword: identifier bracketed: start_bracket: ( variable: $table_name end_bracket: ) orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_table'" end_bracket: ) orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: speed_of_light function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( variable: $my_function_name end_bracket: ) function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_function_name'" end_bracket: ) function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_function_name'" end_bracket: ) function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/inline_comment.sql000066400000000000000000000004101503426445100254730ustar00rootroot00000000000000# Classic Inline Comment SELECT 1; -- Classic Inline Comment SELECT 1; # Classic Inline Comment SELECT 1; //Snowflake Inline Comment SELECT 1;-- Classic Inline Comment No Space SELECT 1;# Classic Inline Comment No Space SELECT 1//Snowflake Inline Comment No Space sqlfluff-3.4.2/test/fixtures/dialects/snowflake/inline_comment.yml000066400000000000000000000025401503426445100255030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 64034404eb64dded43239909619cb8fc6801998aac47c42eeb646d053fb5c467
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '1'
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/insert.sql000066400000000000000000000024751503426445100240130ustar00rootroot00000000000000-- Single table INSERT INTO
INSERT INTO foo (bar) VALUES(current_timestamp);

INSERT OVERWRITE INTO foo (bar) VALUES(current_timestamp);

INSERT INTO foo (bar, baz) VALUES(1, 2), (3, 4);

INSERT INTO foo (bar) VALUES(DEFAULT);

INSERT INTO foo (bar) VALUES(NULL);

INSERT INTO films SELECT * FROM tmp_films WHERE date_prod < '2004-05-07';

-- Unconditional multi-table INSERT INTO
insert all
    into t1
    into t1 (c1, c2, c3) values (n2, n1, default)
    into t2 (c1, c2, c3)
    into t2 values (n3, n2, n1)
select n1, n2, n3 from src;

insert overwrite all
    into t1
    into t1 (c1, c2, c3) values (n2, n1, default)
    into t2 (c1, c2, c3)
    into t2 values (n3, n2, n1)
select n1, n2, n3 from src;

insert all
    into t1 values ($1, an_alias, "10 + 20")
select 1, 50 as an_alias, 10 + 20;

insert all
    into t1 values (key, a)
select src1.key as key, src1.a as a
from src1, src2
where src1.key = src2.key;

-- Conditional multi-table INSERT INTO
insert all
    when n1 > 100 then
        into t1
    when n1 > 10 then
        into t1
        into t2
    else
        into t2
select n1 from src;

insert first
    when n1 > 100 then
        into t1
    when n1 > 10 then
        into t1
        into t2
    else
        into t2
select n1 from src;

insert all
    when c > 10 then
        into t1 (col1, col2) values (a, b)
select a, b, c from src;

INSERT INTO foo.bar (
    SELECT foo.bar
);
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/insert.yml000066400000000000000000000373721503426445100240160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 03408a7c773ecc58b99e2d8ce959fdcecb37c5f9c2a1d7c3426f106641aa635a file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: films - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_films where_clause: keyword: WHERE expression: column_reference: naked_identifier: date_prod comparison_operator: raw_comparison_operator: < quoted_literal: "'2004-05-07'" - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - comma: ',' - keyword: default - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: 
naked_identifier: n3 - comma: ',' - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: n1 - comma: ',' - select_clause_element: column_reference: naked_identifier: n2 - comma: ',' - select_clause_element: column_reference: naked_identifier: n3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: overwrite - keyword: all - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - comma: ',' - keyword: default - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n3 - comma: ',' - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: n1 - comma: ',' - select_clause_element: column_reference: naked_identifier: n2 - comma: ',' - select_clause_element: column_reference: naked_identifier: n3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: column_index_identifier_segment: $1 - comma: ',' - expression: column_reference: naked_identifier: an_alias - comma: ',' - expression: column_reference: quoted_identifier: '"10 + 20"' - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '50' alias_expression: alias_operator: keyword: as naked_identifier: an_alias - comma: ',' - select_clause_element: expression: - numeric_literal: '10' - binary_operator: + - numeric_literal: '20' - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: key - comma: ',' - expression: column_reference: naked_identifier: a - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: src1 - dot: . 
- naked_identifier: key alias_expression: alias_operator: keyword: as naked_identifier: key - comma: ',' - select_clause_element: column_reference: - naked_identifier: src1 - dot: . - naked_identifier: a alias_expression: alias_operator: keyword: as naked_identifier: a from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src1 - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src2 where_clause: keyword: where expression: - column_reference: - naked_identifier: src1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t2 - keyword: else - keyword: into - table_reference: naked_identifier: t2 - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: n1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: first - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t2 - keyword: else - keyword: into - table_reference: naked_identifier: t2 - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: n1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: when - expression: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: foo - dot: . - naked_identifier: bar end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/json_underscore_key.sql000066400000000000000000000000351503426445100265500ustar00rootroot00000000000000select x.y:_z::string from x sqlfluff-3.4.2/test/fixtures/dialects/snowflake/json_underscore_key.yml000066400000000000000000000021151503426445100265530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a5489149e51eeeb71a5fed628312d609c54af92e23a45bd9c812272df2568f5 file: statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: - naked_identifier: x - dot: . - naked_identifier: y semi_structured_expression: colon: ':' semi_structured_element: _z casting_operator: '::' data_type: data_type_identifier: string from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x sqlfluff-3.4.2/test/fixtures/dialects/snowflake/lateral_flatten_after_join.sql000066400000000000000000000004151503426445100300410ustar00rootroot00000000000000select value as p_id, name, iff( rank() over ( partition by id order by t_id desc ) = 1 , true, false ) as most_recent from a inner join b on (b.c_id = a.c_id) , lateral flatten (input => b.cool_ids) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/lateral_flatten_after_join.yml000066400000000000000000000101751503426445100300470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6c9aa8d27592eba6eb381f95f93eec863ddd0d4af605d5d60defd9e14f8ebbb2 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: value alias_expression: alias_operator: keyword: as naked_identifier: p_id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: iff function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: t_id - keyword: desc end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - expression: boolean_literal: 'true' - comma: ',' - expression: boolean_literal: 'false' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: most_recent from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: inner - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: b - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: c_id end_bracket: ) - comma: ',' - from_expression: from_expression_element: keyword: lateral table_expression: function: function_name: function_name_identifier: flatten function_contents: bracketed: start_bracket: ( snowflake_keyword_expression: parameter: input parameter_assigner: => column_reference: - naked_identifier: b - dot: . - naked_identifier: cool_ids end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/let.sql000066400000000000000000000011451503426445100232650ustar00rootroot00000000000000begin -- variable based let somevariable := 5; let somevariable number(38, 0) := 5; let somevariable number(38, 0) default 5; let somevariable default 5; -- variable reassignment somevariable := 5; -- cursor based let somevariable cursor for select some_col from some_database.schema.some_table; let somevariable cursor for somevariable; let someresult resultset := (select some_col from some_database.schema.some_table); -- resultset reassignment someresult := (select SOME_COL from some_database.schema.some_table); end; LET VARIABLE := SOME_VALUE; SELECT :variable FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/let.yml000066400000000000000000000134101503426445100232650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1ca3842cfc1260c844ba4fb9c33b0b9bfe0a36b89bcd7194d999b1e2e37d6c1e file: - statement: scripting_block_statement: - keyword: begin - statement: scripting_let_statement: keyword: let variable: somevariable assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: keyword: let variable: somevariable data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) - keyword: default - expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: default - expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: variable: somevariable assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: cursor - keyword: for - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: some_col from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . - naked_identifier: some_table - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: cursor - keyword: for - variable: somevariable - statement_terminator: ; - statement: scripting_let_statement: keyword: let variable: someresult data_type: data_type_identifier: resultset assignment_operator: := expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: some_col from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . - naked_identifier: some_table end_bracket: ) - statement_terminator: ; - statement: scripting_let_statement: variable: someresult assignment_operator: := expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: SOME_COL from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . 
- naked_identifier: some_table end_bracket: ) - statement_terminator: ; - keyword: end - statement_terminator: ; - statement: scripting_let_statement: keyword: LET variable: VARIABLE assignment_operator: := expression: column_reference: naked_identifier: SOME_VALUE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: variable: ':' alias_expression: naked_identifier: variable from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/limit.sql000066400000000000000000000014431503426445100236200ustar00rootroot00000000000000select c1 from testtable order by c1 limit 3; select c1 from testtable order by c1 limit 3 offset 3; select * from demo1 order by i limit null offset null; select * from demo1 order by i limit '' offset ''; select * from demo1 order by i limit $$$$ offset $$$$; select c1 from testtable order by c1 fetch 3; select c1 from testtable order by c1 fetch first 3; select c1 from testtable order by c1 fetch next 3; select c1 from testtable order by c1 fetch 1 row; select c1 from testtable order by c1 fetch 3 rows; select c1 from testtable order by c1 fetch 3 only; select c1 from testtable order by c1 offset 3 fetch 3; select c1 from testtable order by c1 offset 1 row fetch 1 row; select c1 from testtable order by c1 offset 3 rows fetch 3 rows; select c1 from testtable offset 3 fetch 3; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/limit.yml000066400000000000000000000233501503426445100236230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 392c73ba9a02de8da0cb27f35adf0791cfbd8a821a51ae336deb86cda2394fb3 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: keyword: limit numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: limit - numeric_literal: '3' - keyword: offset - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - keyword: 'null' - keyword: offset - keyword: 'null' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - quoted_literal: "''" - keyword: offset - quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - quoted_literal: $$$$ - keyword: offset - quoted_literal: $$$$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: keyword: fetch numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - keyword: first - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - 
column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - keyword: next - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '1' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '3' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '3' - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: offset - numeric_literal: '3' - keyword: fetch - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 offset_clause: - keyword: offset - numeric_literal: '1' - keyword: row limit_clause: - keyword: fetch - numeric_literal: '1' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 offset_clause: - keyword: offset - numeric_literal: '3' - keyword: rows limit_clause: - keyword: fetch - numeric_literal: '3' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable limit_clause: - keyword: offset - numeric_literal: '3' - keyword: fetch - numeric_literal: '3' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/list_statement.sql000066400000000000000000000002101503426445100255300ustar00rootroot00000000000000list @%mytable; list @mystage/path1; list @%mytable pattern='.*data_0.*'; list 
@my_csv_stage/analysis/ pattern='.*data_0.*';
ls @~;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/list_statement.yml000066400000000000000000000022551503426445100255450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 92b5f1cce819011c953728fbab10f031069f3866d83c8543da01f783b9aa86c4
file:
- statement:
    list_statement:
      keyword: list
      stage_path: '@%mytable'
- statement_terminator: ;
- statement:
    list_statement:
      keyword: list
      stage_path: '@mystage/path1'
- statement_terminator: ;
- statement:
    list_statement:
    - keyword: list
    - stage_path: '@%mytable'
    - keyword: pattern
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'.*data_0.*'"
- statement_terminator: ;
- statement:
    list_statement:
    - keyword: list
    - stage_path: '@my_csv_stage/analysis/'
    - keyword: pattern
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'.*data_0.*'"
- statement_terminator: ;
- statement:
    list_statement:
      keyword: ls
      stage_path: '@~'
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/match_recognize.sql000066400000000000000000000121251503426445100256420ustar00rootroot00000000000000-- Examples from snowflake docs.
select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as match_number,
            first(price_date) as start_date,
            last(price_date) as end_date,
            count(*) as rows_in_sequence,
            count(row_with_price_decrease.*) as num_decreases,
            count(row_with_price_increase.*) as num_increases
        one row per match
        after match skip to last row_with_price_increase
        pattern(row_before_decrease row_with_price_decrease+ row_with_price_increase+)
        define
            row_with_price_decrease as price < lag(price),
            row_with_price_increase as price > lag(price)
    )
order by company, match_number;

select price_date, match_number, msq, price, cl
from (select * from stock_price_history where company='ABCD')
    match_recognize(
        order by price_date
        measures
            match_number() as "MATCH_NUMBER",
            match_sequence_number() as msq,
            classifier() as cl
        all rows per match
        pattern(any_row up+)
        define
            any_row as true,
            up as price > lag(price)
    )
order by match_number, msq;

select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as "MATCH_NUMBER"
        all rows per match
        omit empty matches
        pattern(overavg*)
        define
            overavg as price > avg(price) over (rows between unbounded preceding and unbounded following)
    )
order by company, price_date;

select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as "MATCH_NUMBER",
            classifier() as cl
        all rows per match
        with unmatched rows
        pattern(overavg+)
        define
            overavg as price > avg(price) over (rows between unbounded preceding and unbounded following)
    )
order by company, price_date;

select company, price_date, price, "FINAL FIRST(LT45.price)", "FINAL LAST(LT45.price)"
from stock_price_history
    match_recognize (
        partition by company
        order by price_date
        measures
            final first(lt45.price) as "FINAL FIRST(LT45.price)",
            final last(lt45.price) as "FINAL LAST(LT45.price)"
        all rows per match
        after match skip past last row
        pattern (lt45 lt45)
        define
            lt45 as price < 45.00
    )
where company = 'ABCD'
order by price_date;

-- Testing all quantifiers.
select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as match_number,
            first(price_date) as start_date,
            last(price_date) as end_date,
            count(*) as rows_in_sequence,
            count(row_with_price_decrease.*) as num_decreases,
            count(row_with_price_increase.*) as num_increases
        one row per match
        after match skip to last row_with_price_increase
        pattern(^ S1+ S2* S3? S4{1} S5{1,} S6{,1} S7{1,1} S8*? $)
        define
            row_with_price_decrease as price < lag(price),
            row_with_price_increase as price > lag(price)
    )
order by company, match_number;

-- Testing operators.
select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as match_number,
            first(price_date) as start_date,
            last(price_date) as end_date,
            count(*) as rows_in_sequence,
            count(row_with_price_decrease.*) as num_decreases,
            count(row_with_price_increase.*) as num_increases
        one row per match
        after match skip to last row_with_price_increase
        pattern(^ ( S1 | S2* )? S3 PERMUTE(S4+, S5*?) {- S6 -}+ $)
        define
            row_with_price_decrease as price < lag(price),
            row_with_price_increase as price > lag(price)
    )
order by company, match_number;

select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as match_number,
            first(price_date) as start_date,
            last(price_date) as end_date,
            count(*) as rows_in_sequence,
            count(row_with_price_decrease.*) as num_decreases,
            count(row_with_price_increase.*) as num_increases
        one row per match
        after match skip to last row_with_price_increase
        pattern((A {- B+ C+ -} D+))
        define
            row_with_price_decrease as price < lag(price),
            row_with_price_increase as price > lag(price)
    )
order by company, match_number;

select * from stock_price_history
    match_recognize(
        partition by company
        order by price_date
        measures
            match_number() as match_number,
            first(price_date) as start_date,
            last(price_date) as end_date,
            count(*) as rows_in_sequence,
            count(row_with_price_decrease.*) as num_decreases,
            count(row_with_price_increase.*) as num_increases
        one row per match
        after match skip to last row_with_price_increase
        pattern((A | B){5} C+)
        define
            row_with_price_decrease as price < lag(price),
            row_with_price_increase as price > lag(price)
    )
order by company, match_number;
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/match_recognize.yml000066400000000000000000001440761503426445100256550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ce10bb5484b8bbd9a672ef2983dd6b3db6f37aae6800b37b1d6b95b930b8595f file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . 
star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - naked_identifier: row_before_decrease - naked_identifier: row_with_price_decrease - sign_indicator: + - naked_identifier: row_with_price_increase - sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: price_date - comma: ',' - select_clause_element: column_reference: naked_identifier: match_number - comma: ',' - select_clause_element: column_reference: naked_identifier: msq - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: naked_identifier: cl from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history where_clause: keyword: where expression: column_reference: naked_identifier: company comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ABCD'" end_bracket: ) match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: '"MATCH_NUMBER"' - comma: ',' - expression: function: function_name: function_name_identifier: match_sequence_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: msq - comma: ',' - expression: function: function_name: function_name_identifier: classifier function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: cl - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - naked_identifier: any_row - naked_identifier: up - sign_indicator: + end_bracket: ) - 
keyword: define - naked_identifier: any_row - keyword: as - expression: boolean_literal: 'true' - comma: ',' - naked_identifier: up - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: match_number - comma: ',' - column_reference: naked_identifier: msq - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: '"MATCH_NUMBER"' - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: omit - keyword: empty - keyword: matches - keyword: pattern - bracketed: start_bracket: ( pattern_expression: naked_identifier: overavg star: '*' end_bracket: ) - keyword: define - naked_identifier: overavg - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: '"MATCH_NUMBER"' - comma: ',' - expression: function: function_name: function_name_identifier: classifier function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: cl - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: 
with - keyword: unmatched - keyword: rows - keyword: pattern - bracketed: start_bracket: ( pattern_expression: naked_identifier: overavg sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: overavg - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: company - comma: ',' - select_clause_element: column_reference: naked_identifier: price_date - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"FINAL FIRST(LT45.price)"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"FINAL LAST(LT45.price)"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - keyword: final - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: lt45 - dot: . - naked_identifier: price end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: '"FINAL FIRST(LT45.price)"' - comma: ',' - keyword: final - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: lt45 - dot: . 
- naked_identifier: price end_bracket: ) - alias_expression: alias_operator: keyword: as quoted_identifier: '"FINAL LAST(LT45.price)"' - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: past - keyword: last - keyword: row - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - naked_identifier: lt45 - naked_identifier: lt45 end_bracket: ) - keyword: define - naked_identifier: lt45 - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < numeric_literal: '45.00' - end_bracket: ) where_clause: keyword: where expression: column_reference: naked_identifier: company comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ABCD'" orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - caret: ^ - naked_identifier: S1 - sign_indicator: + - naked_identifier: S2 - star: '*' - naked_identifier: S3 - question_mark: '?' 
- naked_identifier: S4 - start_curly_bracket: '{' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S5 - start_curly_bracket: '{' - numeric_literal: '1' - comma: ',' - end_curly_bracket: '}' - naked_identifier: S6 - start_curly_bracket: '{' - comma: ',' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S7 - start_curly_bracket: '{' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S8 - star: '*' - question_mark: '?' - dollar: $ end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . 
star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - caret: ^ - bracketed: - start_bracket: ( - naked_identifier: S1 - binary_operator: pipe: '|' - naked_identifier: S2 - star: '*' - end_bracket: ) - question_mark: '?' - naked_identifier: S3 - keyword: PERMUTE - bracketed: - start_bracket: ( - naked_identifier: S4 - sign_indicator: + - comma: ',' - naked_identifier: S5 - star: '*' - question_mark: '?' - end_bracket: ) - bracketed: start_exclude_bracket: '{-' naked_identifier: S6 end_exclude_bracket: -} - sign_indicator: + - dollar: $ end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: 
row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: bracketed: - start_bracket: ( - naked_identifier: A - bracketed: - start_exclude_bracket: '{-' - naked_identifier: B - sign_indicator: + - naked_identifier: C - sign_indicator: + - end_exclude_bracket: -} - naked_identifier: D - sign_indicator: + - end_bracket: ) end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number function_contents: bracketed: start_bracket: ( end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: 
count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: bracketed: - start_bracket: ( - naked_identifier: A - binary_operator: pipe: '|' - naked_identifier: B - end_bracket: ) start_curly_bracket: '{' numeric_literal: '5' end_curly_bracket: '}' naked_identifier: C sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/merge_into.sql000066400000000000000000000007561503426445100246400ustar00rootroot00000000000000ALTER TABLE xxxx.example_table MODIFY COLUMN employeeCode SET MASKING POLICY example_MASKING_POLICY; merge into target_table using source_table on target_table.id = source_table.id when matched then update set target_table.description = source_table.description; merge into t1 using t2 on t1.t1key = t2.t2key when matched and t2.marked = 1 then delete; merge into t1 using t2 on t1.t1key = t2.t2key when not matched and t2.marked = 1 then insert (marked) values (1); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/merge_into.yml000066400000000000000000000112021503426445100246260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4fc9381cf19925291a0cb4b9a028ac88f1518d8d63395677b1d2a1cbfdd6da1a file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: xxxx - dot: . 
- naked_identifier: example_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: employeeCode - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: example_MASKING_POLICY - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: target_table - keyword: using - table_reference: naked_identifier: source_table - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: target_table - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source_table - dot: . - naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: keyword: set set_clause: - column_reference: - naked_identifier: target_table - dot: . - naked_identifier: description - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source_table - dot: . - naked_identifier: description - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: t1 - keyword: using - table_reference: naked_identifier: t2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: t1key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: t2key - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: and - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: marked comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: then - merge_delete_clause: keyword: delete - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: t1 - keyword: using - table_reference: naked_identifier: t2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: t1key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: t2key - merge_match: merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: and - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: marked comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: then - merge_insert_clause: keyword: insert bracketed: start_bracket: ( column_reference: naked_identifier: marked end_bracket: ) values_clause: keyword: values bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/multiple_shorthand_casts.sql000066400000000000000000000002041503426445100275760ustar00rootroot00000000000000select '1'::int::boolean as bool; update table_name set col1 = CURRENT_TIMESTAMP::TIMESTAMP_TZ, col2 = '1'::int::boolean ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/multiple_shorthand_casts.yml000066400000000000000000000036651503426445100276160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8cca7ae444f035b3ca4a0c7264b9723a14de8f6d69a4b7ed6b226545b6719b7e file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: - quoted_literal: "'1'" - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: boolean alias_expression: alias_operator: keyword: as naked_identifier: bool - statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: table_name set_clause_list: - keyword: set - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: cast_expression: bare_function: CURRENT_TIMESTAMP casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_TZ - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' expression: cast_expression: - quoted_literal: "'1'" - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: boolean - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/non_reserved_keywords.sql000066400000000000000000000002141503426445100271150ustar00rootroot00000000000000SELECT account FROM foo; CREATE TABLE IF NOT EXISTS table_name( organization VARCHAR ); with pivot as (select 1) select * from pivot; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/non_reserved_keywords.yml000066400000000000000000000037671503426445100271370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 239f6fcece51a5dbd7ce49cb7468efeb74f8a4d75d4daf47bd322281e85db407 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: account from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_name - bracketed: start_bracket: ( column_definition: naked_identifier: organization data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: pivot keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pivot - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/object_literals.sql000066400000000000000000000001071503426445100256430ustar00rootroot00000000000000SELECT {'a': 1, 'b': 'foo', 'c': 4 + 5, 'd': some_column_ref} FROM foo sqlfluff-3.4.2/test/fixtures/dialects/snowflake/object_literals.yml000066400000000000000000000030161503426445100256470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 705d0818661a99e55855c94f1ed4b2467d30be5f7b1fde48cb4264311dfbd9c3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: quoted_literal: "'a'" colon: ':' numeric_literal: '1' - comma: ',' - object_literal_element: - quoted_literal: "'b'" - colon: ':' - quoted_literal: "'foo'" - comma: ',' - object_literal_element: quoted_literal: "'c'" colon: ':' expression: - numeric_literal: '4' - binary_operator: + - numeric_literal: '5' - comma: ',' - object_literal_element: quoted_literal: "'d'" colon: ':' column_reference: naked_identifier: some_column_ref - end_curly_bracket: '}' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sqlfluff-3.4.2/test/fixtures/dialects/snowflake/pivot.sql000066400000000000000000000026671503426445100236520ustar00rootroot00000000000000-- NB This is a pivot expression with an alias. The alias should be parsed separately from the pivot.
SELECT * FROM my_tbl PIVOT (min(f_val) FOR f_id IN (1, 2)) AS f (a, b); SELECT * FROM my_tbl UNPIVOT (val FOR col_name IN (a, b)); SELECT * FROM my_tbl UNPIVOT INCLUDE NULLS (val FOR col_name IN (a, b)); SELECT * FROM my_tbl UNPIVOT EXCLUDE NULLS (val FOR col_name IN (a, b)); select * from table_a unpivot (a for b in (col_1, col_2, col_3)) unpivot (c for d in (col_a, col_b, col_c)) ; -- from Snowflake's PIVOT docs SELECT * FROM quarterly_sales PIVOT(SUM(amount) FOR quarter IN (ANY ORDER BY quarter)) ORDER BY empid; -- from Snowflake's PIVOT docs SELECT * FROM quarterly_sales PIVOT(SUM(amount) FOR quarter IN ( SELECT DISTINCT quarter FROM ad_campaign_types_by_quarter WHERE television = TRUE ORDER BY quarter) ) ORDER BY empid; -- from Snowflake's PIVOT docs SELECT * FROM quarterly_sales PIVOT(SUM(amount) FOR quarter IN ( '2023_Q1', '2023_Q2', '2023_Q3', '2023_Q4') ) AS p (empid_renamed, Q1, Q2, Q3, Q4) ORDER BY empid_renamed; -- from Snowflake's PIVOT docs SELECT * FROM quarterly_sales PIVOT(SUM(amount) FOR quarter IN ( '2023_Q1', '2023_Q2', '2023_Q3', '2023_Q4', '2024_Q1') DEFAULT ON NULL (0) ) ORDER BY empid; -- https://github.com/sqlfluff/sqlfluff/issues/5876 select * from to_pivot pivot(sum(val) for col in (any order by col)) order by id; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/pivot.yml000066400000000000000000000357411503426445100236550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 28448a1b8614cf12045b62dcc3677302ea04cc3e8264e7fffd2eafce3c96c703 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: f_val end_bracket: ) - keyword: FOR - naked_identifier: f_id - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: f bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: val - keyword: FOR - naked_identifier: col_name - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_unpivot_expression: - 
keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - naked_identifier: val - keyword: FOR - naked_identifier: col_name - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_unpivot_expression: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - naked_identifier: val - keyword: FOR - naked_identifier: col_name - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table_a - from_unpivot_expression: keyword: unpivot bracketed: - start_bracket: ( - naked_identifier: a - keyword: for - naked_identifier: b - keyword: in - bracketed: - start_bracket: ( - naked_identifier: col_1 - comma: ',' - naked_identifier: col_2 - comma: ',' - naked_identifier: col_3 - end_bracket: ) - end_bracket: ) - from_unpivot_expression: keyword: unpivot bracketed: - start_bracket: ( - naked_identifier: c - keyword: for - naked_identifier: d - keyword: in - bracketed: - start_bracket: ( - naked_identifier: col_a - comma: ',' - naked_identifier: col_b - comma: ',' - naked_identifier: col_c - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: quarterly_sales from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: start_bracket: ( keyword: ANY orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: quarter end_bracket: ) - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: empid - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: quarterly_sales from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: ad_campaign_types_by_quarter where_clause: keyword: WHERE expression: column_reference: naked_identifier: television comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: quarter end_bracket: ) - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: empid - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: quarterly_sales from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'2023_Q1'" - comma: ',' - quoted_literal: "'2023_Q2'" - comma: ',' - quoted_literal: "'2023_Q3'" - comma: ',' - quoted_literal: "'2023_Q4'" - end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: p bracketed: start_bracket: ( identifier_list: - naked_identifier: empid_renamed - comma: ',' - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: empid_renamed - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: quarterly_sales from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'2023_Q1'" - comma: ',' - quoted_literal: "'2023_Q2'" - comma: ',' - quoted_literal: "'2023_Q3'" - comma: ',' - quoted_literal: "'2023_Q4'" - comma: ',' - quoted_literal: "'2024_Q1'" - end_bracket: ) - keyword: DEFAULT - keyword: 'ON' - keyword: 'NULL' - bracketed: start_bracket: ( numeric_literal: '0' end_bracket: ) - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: empid - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: to_pivot from_pivot_expression: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: val end_bracket: ) - keyword: for - naked_identifier: col - keyword: in - bracketed: start_bracket: ( keyword: any orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - 
keyword: by - column_reference: naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/put_statement.sql000066400000000000000000000006551503426445100254020ustar00rootroot00000000000000put file:///tmp/data/mydata.csv @my_int_stage; put file:///tmp/data/orders_001.csv @%orderstiny_ext auto_compress=false; put file:///tmp/data/orders_*01.csv @%orderstiny_ext auto_compress=false; put file://c:\temp\data\mydata.csv @~ auto_compress=true; put file://c:\temp\data\mydata.csv @~ parallel=1; put file://c:\temp\data\mydata.csv @~ source_compression='auto_detect'; put file://c:\temp\data\mydata.csv @~ overwrite=true; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/put_statement.yml000066400000000000000000000043421503426445100254010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bae6678af8eb21430e9d1f396b6b314bb5c01f7be94905de0dca1ff2acd501d4 file: - statement: put_statement: keyword: put unquoted_file_path: file:///tmp/data/mydata.csv stage_path: '@my_int_stage' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file:///tmp/data/orders_001.csv - stage_path: '@%orderstiny_ext' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file:///tmp/data/orders_*01.csv - stage_path: '@%orderstiny_ext' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: parallel - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: source_compression - comparison_operator: raw_comparison_operator: '=' - compression_type: "'auto_detect'" - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/qualify.sql000066400000000000000000000001521503426445100241500ustar00rootroot00000000000000select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 sqlfluff-3.4.2/test/fixtures/dialects/snowflake/qualify.yml000066400000000000000000000035141503426445100241570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ec88999dc71fbf852d9a640e8ffca25861598a686620a05994a083b91c15a87d file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/snowflake/qualify_union.sql000066400000000000000000000003361503426445100253640ustar00rootroot00000000000000select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 union all select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 sqlfluff-3.4.2/test/fixtures/dialects/snowflake/qualify_union.yml000066400000000000000000000070211503426445100253640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8bd1f8892243194c2c7b59437daf47119352608fa0fed465f05dba50c0d5b7d file: statement: set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 
end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/snowflake/remove_statement.sql000066400000000000000000000002631503426445100260620ustar00rootroot00000000000000rm @%mytable/myobject; rm @%mytable/myobject/; remove @mystage/path1/subpath2; remove @%orders; rm @~ pattern='.*jun.*'; REMOVE @foo.bar PATTERN = '\w'; RM @foo.foo PATTERN=$bar; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/remove_statement.yml000066400000000000000000000030021503426445100260560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d9ae71bdcceebb122c6f96be57f94d2ac34f69e866957c1ee56238c8709d83cd file: - statement: remove_statement: keyword: rm stage_path: '@%mytable/myobject' - statement_terminator: ; - statement: remove_statement: keyword: rm stage_path: '@%mytable/myobject/' - statement_terminator: ; - statement: remove_statement: keyword: remove stage_path: '@mystage/path1/subpath2' - statement_terminator: ; - statement: remove_statement: keyword: remove stage_path: '@%orders' - statement_terminator: ; - statement: remove_statement: - keyword: rm - stage_path: '@~' - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*jun.*'" - statement_terminator: ; - statement: remove_statement: - keyword: REMOVE - stage_path: '@foo.bar' - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\w'" - statement_terminator: ; - statement: remove_statement: - keyword: RM - stage_path: '@foo.foo' - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - variable: $bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/return.sql000066400000000000000000000000511503426445100240130ustar00rootroot00000000000000begin select 1; select 2; return 5; end; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/return.yml000066400000000000000000000020321503426445100240160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 178ee6e8aa5e9d9c76c7df3e02bfe9331dff1cc658b964558210b72a4b1939e3 file: statement: scripting_block_statement: - keyword: begin - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: return_statement: keyword: return expression: numeric_literal: '5' - statement_terminator: ; - keyword: end statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/sample.sql000066400000000000000000000004561503426445100237660ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/547 select * -- 20% sample from real_data sample (20) ; SET sample_size = 10; WITH dummy_data AS ( SELECT SEQ4() AS row_number FROM TABLE(GENERATOR(rowcount => 1000)) ORDER BY row_number ) SELECT * FROM dummy_data SAMPLE ($sample_size ROWS); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/sample.yml000066400000000000000000000074461503426445100237760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a00ae7ae766b2c3c8bb45b62cb098dc6f2b8a3d331e91e1545e7793fcd4da01 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: real_data sample_expression: keyword: sample bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: sample_size comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: dummy_data keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SEQ4 function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: row_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: TABLE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GENERATOR function_contents: bracketed: start_bracket: ( snowflake_keyword_expression: parameter: rowcount parameter_assigner: => numeric_literal: '1000' end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: row_number end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dummy_data sample_expression: keyword: SAMPLE bracketed: start_bracket: ( variable: $sample_size keyword: ROWS end_bracket: ) - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select.sql000066400000000000000000000006041503426445100237570ustar00rootroot00000000000000SELECT a FROM b; SELECT view FROM foo; SELECT view FROM case; SELECT issue FROM issue; SELECT customer_id, TRIM(value:cross) AS cross FROM my_table; SELECT customer_id FROM my_table cross join my_table2; select notify from foo; select coalesce(do.a, do.b) as value from delivery_override as do ; SELECT t.id , TRUE AS test FROM mytable t ORDER BY TRUE ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select.yml000066400000000000000000000136451503426445100237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e91a615e51a9371171b3d61b6a68065aa569a024e7cd76ce5c91d2256b0f623 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: view from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: view from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: case - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: issue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: issue - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRIM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: cross end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: cross from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: cross - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: my_table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: notify from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: select 
select_clause_element: function: function_name: function_name_identifier: coalesce function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: do - dot: . - naked_identifier: a - comma: ',' - expression: column_reference: - naked_identifier: do - dot: . - naked_identifier: b - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: value from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: delivery_override alias_expression: alias_operator: keyword: as naked_identifier: do - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: boolean_literal: 'TRUE' alias_expression: alias_operator: keyword: AS naked_identifier: test from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t orderby_clause: - keyword: ORDER - keyword: BY - boolean_literal: 'TRUE' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_asof_join.sql000066400000000000000000000022751503426445100260140ustar00rootroot00000000000000SELECT t.stock_symbol, t.trade_time, t.quantity, q.quote_time, q.price FROM trades t ASOF JOIN quotes q MATCH_CONDITION(t.trade_time >= quote_time) ON t.stock_symbol=q.stock_symbol ORDER BY t.stock_symbol; SELECT t.stock_symbol, c.company_name, t.trade_time, t.quantity, q.quote_time, q.price FROM trades t ASOF JOIN quotes q MATCH_CONDITION(t.trade_time >= quote_time) ON t.stock_symbol=q.stock_symbol INNER JOIN companies c ON c.stock_symbol=t.stock_symbol ORDER BY t.stock_symbol; SELECT * FROM trades_unixtime tu ASOF JOIN quotes_unixtime qu MATCH_CONDITION(tu.trade_time>=qu.quote_time); SELECT * FROM preciptime p ASOF JOIN snowtime s MATCH_CONDITION(p.observed>=s.observed); SELECT * FROM snowtime s ASOF JOIN raintime r MATCH_CONDITION(s.observed>=r.observed) ON s.state=r.state ASOF JOIN preciptime p MATCH_CONDITION(s.observed>=p.observed) ON s.state=p.state ORDER BY s.observed; SELECT * FROM snowtime s ASOF JOIN raintime r MATCH_CONDITION(s.observed>r.observed) ON s.state=r.state ASOF JOIN preciptime p MATCH_CONDITION(s.observed' - raw_comparison_operator: '=' - column_reference: naked_identifier: quote_time end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: q - dot: . - naked_identifier: stock_symbol orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol - comma: ',' - select_clause_element: column_reference: - naked_identifier: c - dot: . - naked_identifier: company_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: trade_time - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: - naked_identifier: q - dot: . 
- naked_identifier: quote_time - comma: ',' - select_clause_element: column_reference: - naked_identifier: q - dot: . - naked_identifier: price from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: trades alias_expression: naked_identifier: t - join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: quotes alias_expression: naked_identifier: q - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: trade_time - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: quote_time end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: q - dot: . - naked_identifier: stock_symbol - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: companies alias_expression: naked_identifier: c - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: stock_symbol - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: stock_symbol - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: trades_unixtime alias_expression: naked_identifier: tu join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: quotes_unixtime alias_expression: naked_identifier: qu - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tu - dot: . - naked_identifier: trade_time - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: - naked_identifier: qu - dot: . - naked_identifier: quote_time end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: preciptime alias_expression: naked_identifier: p join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: snowtime alias_expression: naked_identifier: s - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: p - dot: . - naked_identifier: observed - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: observed end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: snowtime alias_expression: naked_identifier: s - join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: raintime alias_expression: naked_identifier: r - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: observed - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: - naked_identifier: r - dot: . - naked_identifier: observed end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: r - dot: . - naked_identifier: state - join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: preciptime alias_expression: naked_identifier: p - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: observed - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: observed end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: state orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: s - dot: . - naked_identifier: observed - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: snowtime alias_expression: naked_identifier: s - join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: raintime alias_expression: naked_identifier: r - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: observed - comparison_operator: raw_comparison_operator: '>' - column_reference: - naked_identifier: r - dot: . - naked_identifier: observed end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: r - dot: . - naked_identifier: state - join_clause: - keyword: ASOF - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: preciptime alias_expression: naked_identifier: p - match_condition: keyword: MATCH_CONDITION bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: observed - comparison_operator: raw_comparison_operator: < - column_reference: - naked_identifier: p - dot: . - naked_identifier: observed end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: state orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: s - dot: . - naked_identifier: observed - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_clause_modifiers.sql000066400000000000000000000007301503426445100273540ustar00rootroot00000000000000SELECT *, col1, col2, my_table.col1, my_table.* FROM my_table; SELECT DISTINCT * FROM my_table; SELECT DISTINCT col1 FROM my_table; SELECT ALL my_table.* FROM my_table; SELECT TOP 1 * FROM my_table; SELECT TOP 2 col1 FROM my_table; SELECT TOP 3 col1, my_table.* FROM my_table; SELECT ALL TOP 10 col1 FROM my_table; SELECT DISTINCT TOP 20 my_table.col1 FROM my_table; SELECT DISTINCT TOP 30 * FROM my_table; SELECT DISTINCT TOP 40 col1, my_table.* FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_clause_modifiers.yml000066400000000000000000000155031503426445100273620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8258a3eb788c43beddbbe82c824dffe3e0ea9a0d6bd048ea3f6760a9ade2d1fe file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: ALL select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . 
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '1' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '2' select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP numeric_literal: '3' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: ALL - keyword: TOP - numeric_literal: '10' select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '20' select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '30' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '40' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_except.sql000066400000000000000000000000631503426445100253260ustar00rootroot00000000000000select * from table1 EXCEPT (select * from table1) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_except.yml000066400000000000000000000025661503426445100253420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1168e16f50bc0b751155f77f5d7ab6479913404503c8886bd22653cf4fa2daf1 file: statement: set_expression: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 set_operator: keyword: EXCEPT bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_exclude.sql000066400000000000000000000002471503426445100254730ustar00rootroot00000000000000select * exclude col1 from table1; select * exclude (col1) from table1; select * exclude (col1, col2) from table1; select * exclude (col1, col2, coln) from table1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_exclude.yml000066400000000000000000000056551503426445100255050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 715ebad5a3924bf92c80d7bcc4d1a2116ab81a6115381e685b077c9b22f2a302 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude naked_identifier: col1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: start_bracket: ( naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: coln - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_exclude_rename.sql000066400000000000000000000003411503426445100270150ustar00rootroot00000000000000select * exclude col1 rename (col1 as alias1, col2 as alias2) from table1; select 
* exclude (col1, col2) rename col1 as alias1 from table1; select * exclude (col1, col2) rename (col1 as alias1, col2 as alias2) from table1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_exclude_rename.yml000066400000000000000000000062731503426445100270310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ce51f7f9bc723be4c5e395ec41a9b20c7d1a314b704226037000c5e3693618cb file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude naked_identifier: col1 select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) select_rename_clause: - keyword: rename - naked_identifier: col1 - keyword: as - naked_identifier: alias1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_group_by_cube_rollup.sql000066400000000000000000000003771503426445100302670ustar00rootroot00000000000000-- CUBE within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY CUBE (name, age); -- ROLLUP within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY ROLLUP (name, age); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_group_by_cube_rollup.yml000066400000000000000000000054561503426445100302740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 935928ae7e17c4455f72f492823e264a5128c2ebc46d3a981db139869aba2f38 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - keyword: CUBE - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - keyword: ROLLUP - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_grouping_sets.sql000066400000000000000000000004601503426445100267270ustar00rootroot00000000000000SELECT foo, bar FROM baz GROUP BY GROUPING SETS (foo, bar); select count(*), medical_license, radio_license from nurses group by grouping sets (medical_license, radio_license); select count(*), medical_license, radio_license from nurses group by grouping sets (medical_license, radio_license); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_grouping_sets.yml000066400000000000000000000070021503426445100267300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ebb65b3bbf3f5f2924ad469ba97ad940ac89d440b90b03acd69eca3cb6b3aba file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz groupby_clause: - keyword: GROUP - keyword: BY - keyword: GROUPING - keyword: SETS - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comma: ',' - column_reference: naked_identifier: bar - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: medical_license - comma: ',' - select_clause_element: column_reference: naked_identifier: radio_license from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nurses groupby_clause: - keyword: group - keyword: by - keyword: grouping - keyword: sets - bracketed: - start_bracket: ( - column_reference: naked_identifier: medical_license - comma: ',' - column_reference: naked_identifier: radio_license - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: medical_license - comma: ',' - select_clause_element: column_reference: naked_identifier: radio_license from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nurses groupby_clause: - keyword: group - keyword: by - keyword: grouping - keyword: sets - bracketed: - start_bracket: ( - column_reference: naked_identifier: medical_license - comma: ',' - column_reference: naked_identifier: radio_license - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_higher_order_function.sql000066400000000000000000000010261503426445100304040ustar00rootroot00000000000000SELECT FILTER(ident, i -> i:value > 0) as sample_filter, TRANSFORM(ident, j -> j:value) as sample_transform, TRANSFORM(ident, k variant -> k:val) as sample_transform_with_type FROM ref; SELECT FILTER("ident", (i INT, j VARIANT) -> (i:value is not null and j:value = 'some_literal')) as sample_filter, TRANSFORM("ident", j -> j) as sample_transform, some_other_function('unusual arguments', x -> 'still a lambda expression', true) as sample_other FROM ref; SELECT REDUCE([1,2,3], 0, (acc, val) -> acc + val); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_higher_order_function.yml000066400000000000000000000206061503426445100304130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d2b1ef7069b1ce9b4ec653bff0e7ce75be9e473529914583cb114852a6f13d1b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: FILTER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ident comma: ',' lambda_function: parameter: i lambda_arrow: -> expression: column_reference: naked_identifier: i semi_structured_expression: colon: ':' semi_structured_element: value comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_filter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRANSFORM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ident comma: ',' lambda_function: parameter: j lambda_arrow: -> expression: column_reference: naked_identifier: j semi_structured_expression: colon: ':' semi_structured_element: value end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_transform - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRANSFORM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ident comma: ',' lambda_function: parameter: k data_type: data_type_identifier: variant lambda_arrow: -> expression: column_reference: naked_identifier: k semi_structured_expression: colon: ':' semi_structured_element: val end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_transform_with_type from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ref - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: FILTER function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '"ident"' comma: ',' lambda_function: bracketed: - start_bracket: ( - parameter: i - data_type: data_type_identifier: INT - comma: ',' - parameter: j - data_type: data_type_identifier: VARIANT - end_bracket: ) lambda_arrow: -> expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: i - semi_structured_expression: colon: ':' semi_structured_element: value - keyword: is - keyword: not - null_literal: 'null' - binary_operator: and - column_reference: naked_identifier: j - semi_structured_expression: colon: ':' semi_structured_element: value - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_literal'" end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_filter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRANSFORM function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '"ident"' comma: ',' lambda_function: parameter: j lambda_arrow: -> expression: column_reference: naked_identifier: j end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_transform - comma: ',' - select_clause_element: function: function_name: function_name_identifier: some_other_function function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'unusual arguments'" - comma: ',' - lambda_function: parameter: x lambda_arrow: -> expression: quoted_literal: "'still a 
lambda expression'" - comma: ',' - expression: boolean_literal: 'true' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sample_other from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ref - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: REDUCE function_contents: bracketed: - start_bracket: ( - expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - lambda_function: bracketed: - start_bracket: ( - parameter: acc - comma: ',' - parameter: val - end_bracket: ) lambda_arrow: -> expression: - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: val - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_like_clause.sql000066400000000000000000000013341503426445100263200ustar00rootroot00000000000000SELECT a, b FROM person WHERE name LIKE 'M%'; SELECT a, b FROM person WHERE name NOT ILIKE 'M_ry'; SELECT a, b FROM person WHERE name RLIKE 'M+'; SELECT a, b FROM person WHERE name REGEXP 'M+'; SELECT a, b FROM person WHERE name LIKE '%$_%' ESCAPE '$'; SELECT a, b FROM person WHERE name LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name ILIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT ILIKE ANY ('%an%', '%an'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_like_clause.yml000066400000000000000000000217031503426445100263240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9dbd50db9217ce81f466c2e38d9e4091da4e8b7c1f830aac2c2edb5c404e1002 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: ILIKE - quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: RLIKE quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: REGEXP quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - quoted_literal: "'%$_%'" - keyword: ESCAPE - quoted_literal: "'$'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: 
NOT - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_rename.sql000066400000000000000000000002301503426445100253010ustar00rootroot00000000000000select * rename col1 as alias from table1; select * rename (col1 as alias) from table1; select * rename (col1 as alias1, col2 as alias2) from table1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_rename.yml000066400000000000000000000047031503426445100253140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 87de1a02545a55820d8cdf2dbcd84308a29eb689f10d14667be31c8c55f41872 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: - keyword: rename - naked_identifier: col1 - keyword: as - naked_identifier: alias from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_replace.sql000066400000000000000000000002441503426445100254520ustar00rootroot00000000000000select * replace ('DEPT-' || department_id as department_id) from table1; select * replace ('prefix1' || col1 as alias1, 'prefix2' || col2 as alias2) from table1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_replace.yml000066400000000000000000000047701503426445100254640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0a4c4ecf1700c3cea499d37cf09386b137b97a2a777424732585c202f759c9df file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: start_bracket: ( expression: quoted_literal: "'DEPT-'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: department_id keyword: as naked_identifier: department_id end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: - start_bracket: ( - expression: quoted_literal: "'prefix1'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - expression: quoted_literal: "'prefix2'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_stages_files.sql000066400000000000000000000005501503426445100265070ustar00rootroot00000000000000SELECT t.$1, t.$2 FROM @mystage1 (file_format => myformat) t ; select t.$1, t.$2 from @mystage1 (file_format => 'myformat', pattern=>'.*data.*[.]csv.gz') t; select t.$1, t.$2 from @mystage1 (pattern=>'.*data.*[.]csv.gz', file_format => 'myformat') t; select t.$1, t.$2 from @mystage1 (pattern=>'.*data.*[.]csv.gz') t; select t.$1, t.$2 from @mystage1 t; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_stages_files.yml000066400000000000000000000117551503426445100265220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 314e7e8442d63152ce4e97805c9f271aec8d2019510fb7489827ebd9739837ab file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: start_bracket: ( keyword: file_format parameter_assigner: => file_format_segment: object_reference: naked_identifier: myformat end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . 
column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: - start_bracket: ( - keyword: file_format - parameter_assigner: => - file_format_segment: quoted_literal: "'myformat'" - comma: ',' - keyword: pattern - parameter_assigner: => - quoted_literal: "'.*data.*[.]csv.gz'" - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: - start_bracket: ( - keyword: pattern - parameter_assigner: => - quoted_literal: "'.*data.*[.]csv.gz'" - comma: ',' - keyword: file_format - parameter_assigner: => - file_format_segment: quoted_literal: "'myformat'" - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: start_bracket: ( keyword: pattern parameter_assigner: => quoted_literal: "'.*data.*[.]csv.gz'" end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@mystage1' alias_expression: naked_identifier: t - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_system_function.sql000066400000000000000000000002451503426445100272710ustar00rootroot00000000000000SELECT SYSTEM$STREAM_HAS_DATA('SCH.MY_STREAM'); SELECT SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS('MY_TASK'); SELECT SYSTEM$SHOW_STREAMLITS_IN_ACCOUNT(true, false); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_system_function.yml000066400000000000000000000026061503426445100272760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6284f2f98c2749b670977fcba03ddd2d813d020225a0b34b8521beca19a95110 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_function_name: SYSTEM$STREAM_HAS_DATA bracketed: start_bracket: ( quoted_literal: "'SCH.MY_STREAM'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_function_name: SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS bracketed: start_bracket: ( quoted_literal: "'MY_TASK'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_function_name: SYSTEM$SHOW_STREAMLITS_IN_ACCOUNT bracketed: - start_bracket: ( - boolean_literal: 'true' - comma: ',' - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_transient_table.sql000066400000000000000000000000771503426445100272210ustar00rootroot00000000000000CREATE OR REPLACE TRANSIENT TABLE new_tab AS SELECT * FROM tab sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_transient_table.yml000066400000000000000000000020111503426445100272110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ff289a22d04fe83a31450eace665ffa9db6ea0bdb1e5f3fc987512a00b33296 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRANSIENT - keyword: TABLE - table_reference: naked_identifier: new_tab - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_union.sql000066400000000000000000000002131503426445100251630ustar00rootroot00000000000000SELECT 1 UNION SELECT 2 ORDER BY 1 ; SELECT 1 AS foo UNION ALL BY NAME SELECT 2 AS foo ; SELECT 1 AS foo UNION BY NAME SELECT 2 AS foo ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_union.yml000066400000000000000000000042651503426445100252000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e1ab69b4177cfa5b47b9f671f10058fa9845ed715ec16860b26973e9493bdf33 file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: foo - set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: foo - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: foo - set_operator: - keyword: UNION - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: foo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_values.sql000066400000000000000000000001711503426445100253350ustar00rootroot00000000000000select * from (values (1, 'one'), (2, 'two'), (3, 'three')); select * from values (1, 'one'), (2, 'two'), (3, 'three'); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_values.yml000066400000000000000000000060231503426445100253410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ab33181187116aa89a4c02f261cbc2b4cc3c8cf256f3ea054bb772b98b87b134 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_where_is_distinct_from.sql000066400000000000000000000001751503426445100305730ustar00rootroot00000000000000SELECT a, b FROM person where a IS DISTINCT FROM b; SELECT a, b FROM person where a IS NOT DISTINCT FROM b; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/select_where_is_distinct_from.yml000066400000000000000000000036371503426445100306030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 41fdee8d74dfd8b584cfc398396a214dbcc8f09be2edf68e0fdfe2332a0ebfa3 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: where expression: - column_reference: naked_identifier: a - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: where expression: - column_reference: naked_identifier: a - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured.sql000066400000000000000000000010301503426445100257130ustar00rootroot00000000000000-- tests parsing of table functions and semi structured accessing. SELECT ticket_id, value:value AS uncasted, value:id::bigint AS field_id, value:value::STRING AS field_val, value:thing[4].foo AS another_val, value:thing[4].bar.baz[0].foo::bigint AS another_val, array_field[0].array_element_property as test_array_access FROM raw_tickets, lateral flatten(INPUT => custom_fields); SELECT value:point:from:latitude::NUMBER(10, 6) AS lat, value:point:from:longitude::NUMBER(10, 6) AS lng FROM table1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured.yml000066400000000000000000000157011503426445100257270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0fe13d614c4812dcff87c863d704cb3a6222971088f10e4ddde48f8c217d72f8 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ticket_id - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: value alias_expression: alias_operator: keyword: AS naked_identifier: uncasted - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: alias_operator: keyword: AS naked_identifier: field_id - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: value casting_operator: '::' data_type: data_type_identifier: STRING alias_expression: alias_operator: keyword: AS naked_identifier: field_val - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: thing - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - dot: . - semi_structured_element: foo alias_expression: alias_operator: keyword: AS naked_identifier: another_val - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: thing - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - dot: . - semi_structured_element: bar - dot: . - semi_structured_element: baz - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - dot: . - semi_structured_element: foo casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: alias_operator: keyword: AS naked_identifier: another_val - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: array_field array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . 
semi_structured_element: array_element_property alias_expression: alias_operator: keyword: as naked_identifier: test_array_access from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_tickets - comma: ',' - from_expression: from_expression_element: keyword: lateral table_expression: function: function_name: function_name_identifier: flatten function_contents: bracketed: start_bracket: ( snowflake_keyword_expression: parameter: INPUT parameter_assigner: => column_reference: naked_identifier: custom_fields end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: point - colon: ':' - semi_structured_element: from - colon: ':' - semi_structured_element: latitude casting_operator: '::' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '6' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: lat - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: point - colon: ':' - semi_structured_element: from - colon: ':' - semi_structured_element: longitude casting_operator: '::' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '6' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: lng from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_2.sql000066400000000000000000000001751503426445100261450ustar00rootroot00000000000000select value:data:to::string AS TO_PHONE_NUMBER, value:data:from::string AS FROM_PHONE_NUMBER FROM a.b.ticket_audits sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_2.yml000066400000000000000000000036671503426445100261600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6000c3363f7bd831c2c020950ca04721d60fa595ef47ffd842fb399f8817ef7c file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: data - colon: ':' - semi_structured_element: to casting_operator: '::' data_type: data_type_identifier: string alias_expression: alias_operator: keyword: AS naked_identifier: TO_PHONE_NUMBER - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: data - colon: ':' - semi_structured_element: from casting_operator: '::' data_type: data_type_identifier: string alias_expression: alias_operator: keyword: AS naked_identifier: FROM_PHONE_NUMBER from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dot: . - naked_identifier: b - dot: . - naked_identifier: ticket_audits sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_3.sql000066400000000000000000000002051503426445100261400ustar00rootroot00000000000000SELECT PARSE_JSON(t.metadata)['names'][0] AS first_name, PARSE_JSON(t.metadata):customer_id AS customer_id FROM tickets AS t sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_3.yml000066400000000000000000000046131503426445100261510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bae42bbb3def0dc54eebfd2ef3c5c36180b4175315309ba55d41ef491961ad5b file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: metadata end_bracket: ) - array_accessor: start_square_bracket: '[' expression: quoted_literal: "'names'" end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: first_name - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: metadata end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: customer_id alias_expression: alias_operator: keyword: AS naked_identifier: customer_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tickets alias_expression: alias_operator: keyword: AS naked_identifier: t sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_4.sql000066400000000000000000000011101503426445100261350ustar00rootroot00000000000000SELECT SEMI_STRUCTURED_OBJECT:ID_FIELD::VARCHAR AS ID_FIELD, SEMI_STRUCTURED_OBJECT:METADATA$FILENAME::VARCHAR AS METADATA$FILENAME, PARSE_JSON($1):FILE_CONTENT_KEY FILE_CONTENT_KEY, DATEADD(MS, PARSE_JSON($1):EVENT_TIME '1970-01-01') EVENT_TIME, parse_json($1):METADATA$FILENAME METADATA$FILENAME, DATEADD(MS, PARSE_JSON($1):METADATA$INSERTION_TIME, '1970-01-01') METADATA$INSERTION_TIME, DATEADD(MS, PARSE_JSON($1):METADATA$LAST_EVENT_INSERTION_TIME, '1970-01-01') METADATA$LAST_EVENT_INSERTION_TIME FROM @staging_area_root/ingest/data/file_name.ndjson sqlfluff-3.4.2/test/fixtures/dialects/snowflake/semi_structured_4.yml000066400000000000000000000142501503426445100261500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: afa5e178d25ab8454dea8c33d10b9cffef0003598a5b89391159391a17276ffd file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: SEMI_STRUCTURED_OBJECT semi_structured_expression: colon: ':' semi_structured_element: ID_FIELD casting_operator: '::' data_type: data_type_identifier: VARCHAR alias_expression: alias_operator: keyword: AS naked_identifier: ID_FIELD - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: SEMI_STRUCTURED_OBJECT semi_structured_expression: colon: ':' semi_structured_element: METADATA$FILENAME casting_operator: '::' data_type: data_type_identifier: VARCHAR alias_expression: alias_operator: keyword: AS naked_identifier: METADATA$FILENAME - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: column_index_identifier_segment: $1 end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: FILE_CONTENT_KEY alias_expression: naked_identifier: FILE_CONTENT_KEY - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: MS - comma: ',' - expression: function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: column_index_identifier_segment: $1 end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: EVENT_TIME - expression: quoted_literal: "'1970-01-01'" - end_bracket: ) alias_expression: naked_identifier: EVENT_TIME - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: parse_json function_contents: bracketed: start_bracket: ( expression: column_reference: column_index_identifier_segment: $1 end_bracket: ) 
semi_structured_expression: colon: ':' semi_structured_element: METADATA$FILENAME alias_expression: naked_identifier: METADATA$FILENAME - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: MS - comma: ',' - expression: function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: column_index_identifier_segment: $1 end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: METADATA$INSERTION_TIME - comma: ',' - expression: quoted_literal: "'1970-01-01'" - end_bracket: ) alias_expression: naked_identifier: METADATA$INSERTION_TIME - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: MS - comma: ',' - expression: function: function_name: function_name_identifier: PARSE_JSON function_contents: bracketed: start_bracket: ( expression: column_reference: column_index_identifier_segment: $1 end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: METADATA$LAST_EVENT_INSERTION_TIME - comma: ',' - expression: quoted_literal: "'1970-01-01'" - end_bracket: ) alias_expression: naked_identifier: METADATA$LAST_EVENT_INSERTION_TIME from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: stage_path: '@staging_area_root/ingest/data/file_name.ndjson' sqlfluff-3.4.2/test/fixtures/dialects/snowflake/set_call_variable.sql000066400000000000000000000000651503426445100261340ustar00rootroot00000000000000SET _VARIABLE1 = 'Hello World'; SELECT $_VARIABLE1; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/set_call_variable.yml000066400000000000000000000014251503426445100261370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 03718652add1e3ad5b83ae9aaccf7215f8068f940bcbb2a0cc261e79aaaae665 file: - statement: set_statement: keyword: SET variable: _VARIABLE1 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: variable: $_VARIABLE1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/set_command.sql000066400000000000000000000003271503426445100247730ustar00rootroot00000000000000set v1 = 10; set v2 = 'example'; set (v1, v2) = (10, 'example'); set id_threshold = (select count(*) from table1) / 2; set (min, max) = (40, 70); set (min, max) = (50, 2 * $min); SET THIS_ROLE=CURRENT_ROLE(); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/set_command.yml000066400000000000000000000071221503426445100247750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8406aaf776583428fb10e8c4ffb8d33d7ad1d0ab00908336e579669bd1419ae7 file: - statement: set_statement: keyword: set variable: v1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '10' - statement_terminator: ; - statement: set_statement: keyword: set variable: v2 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'example'" - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: v1 - comma: ',' - variable: v2 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'example'" - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: set variable: id_threshold comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) binary_operator: / numeric_literal: '2' - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: min - comma: ',' - variable: max - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '70' - end_bracket: ) - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: min - comma: ',' - variable: max - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '50' - comma: ',' - expression: numeric_literal: '2' binary_operator: '*' variable: $min - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: THIS_ROLE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: CURRENT_ROLE function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/show.sql000066400000000000000000000001151503426445100234550ustar00rootroot00000000000000SHOW PASSWORD POLICIES; SHOW CORTEX SEARCH SERVICES; SHOW RESOURCE MONITORS; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/show.yml000066400000000000000000000014331503426445100234630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e9a6c51fbfd28e298bbce017d102b6c51de4d7e4167dfd7c39eeea9bb234cd04 file: - statement: show_statement: - keyword: SHOW - keyword: PASSWORD - keyword: POLICIES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CORTEX - keyword: SEARCH - keyword: SERVICES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: RESOURCE - keyword: MONITORS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/string_literal.sql000066400000000000000000000004061503426445100255220ustar00rootroot00000000000000-- In snowflake, a double single quote resolves as a single quote in the string. -- https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants SELECT '['']'; -- Snowflake allows dollar quoted string literals select $$abc$$; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/string_literal.yml000066400000000000000000000013411503426445100255230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 98d9d41cc947ca918f4a0483ada8f10fc990694a3ee25072c01a92abc2f8c321 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'['']'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: $$abc$$ - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/transactions.sql000066400000000000000000000002611503426445100252070ustar00rootroot00000000000000begin; begin work; begin transaction; begin name t4; begin work name t4; begin transaction name t4; start transaction; start transaction name t4; rollback; commit; commit work; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/transactions.yml000066400000000000000000000034301503426445100252120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6550f6f5929d6d869bf393fbef0c42b2a484fb882175303e39c534b4ac54b510 file: - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: keyword: rollback - statement_terminator: ; - statement: transaction_statement: keyword: commit - statement_terminator: ; - statement: transaction_statement: - keyword: commit - keyword: work - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/truncate_table.sql000066400000000000000000000001231503426445100254700ustar00rootroot00000000000000truncate table temp; truncate table if exists temp; truncate table something.temp; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/truncate_table.yml000066400000000000000000000016771503426445100255110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4efc78fdb1e3d21be500ec3c7eb1efb94708abf5cd62c35683a2395f46d8a766 file: - statement: truncate_table: - keyword: truncate - keyword: table - table_reference: naked_identifier: temp - statement_terminator: ; - statement: truncate_table: - keyword: truncate - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: temp - statement_terminator: ; - statement: truncate_table: - keyword: truncate - keyword: table - table_reference: - naked_identifier: something - dot: . - naked_identifier: temp - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/undrop.sql000066400000000000000000000001571503426445100240120ustar00rootroot00000000000000UNDROP database mytestdb2; undrop schema myschema; undrop table t2; UNDROP EXTERNAL VOLUME my_external_volume; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/undrop.yml000066400000000000000000000021021503426445100240040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c47d8836ce8905ed2c4d306b94fd4a67c05d8968dc55da83ce18f543281f1688 file: - statement: undrop_statement: - keyword: UNDROP - keyword: database - database_reference: naked_identifier: mytestdb2 - statement_terminator: ; - statement: undrop_statement: - keyword: undrop - keyword: schema - schema_reference: naked_identifier: myschema - statement_terminator: ; - statement: undrop_statement: - keyword: undrop - keyword: table - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: undrop_statement: - keyword: UNDROP - keyword: EXTERNAL - keyword: VOLUME - external_volume_reference: naked_identifier: my_external_volume - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/unset.sql000066400000000000000000000000461503426445100236360ustar00rootroot00000000000000unset v1; unset v2; unset (v1, v2); sqlfluff-3.4.2/test/fixtures/dialects/snowflake/unset.yml000066400000000000000000000014411503426445100236400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 17c30e3033979c450023ffe9cb916954587005749f7d32c4841c4b91bd1adde0 file: - statement: unset_statement: keyword: unset variable: v1 - statement_terminator: ; - statement: unset_statement: keyword: unset variable: v2 - statement_terminator: ; - statement: unset_statement: keyword: unset bracketed: - start_bracket: ( - variable: v1 - comma: ',' - variable: v2 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/use.sql000066400000000000000000000005331503426445100232750ustar00rootroot00000000000000use role my_role; use warehouse my_warehouse; use database my_database; use schema my_schema; USE ROLE "MY_ROLE"; USE WAREHOUSE "MY_WAREHOUSE"; USE DATABASE "MY_DATABASE"; USE "MY_DATABASE"; USE SCHEMA "MY_DATABASE"."MY_SCHEMA"; USE SCHEMA "MY_SCHEMA"; USE "MY_DATABASE"."MY_SCHEMA"; USE SECONDARY ROLES ALL; USE SECONDARY ROLES NONE; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/use.yml000066400000000000000000000047751503426445100233130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ccec964f5d1d6a5487cf768b3f963f6a47fe9b86abda53240e605d8e8580a8e1 file: - statement: use_statement: - keyword: use - keyword: role - object_reference: naked_identifier: my_role - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: warehouse - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: database - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: schema - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: ROLE - object_reference: quoted_identifier: '"MY_ROLE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: WAREHOUSE - object_reference: quoted_identifier: '"MY_WAREHOUSE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: DATABASE - database_reference: quoted_identifier: '"MY_DATABASE"' - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: quoted_identifier: '"MY_DATABASE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: - quoted_identifier: '"MY_DATABASE"' - dot: . - quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: - quoted_identifier: '"MY_DATABASE"' - dot: . - quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SECONDARY - keyword: ROLES - keyword: ALL - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SECONDARY - keyword: ROLES - keyword: NONE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/window_function_ignore_nulls.sql000066400000000000000000000003321503426445100304720ustar00rootroot00000000000000 SELECT FIRST_VALUE(foo) IGNORE NULLS over ( PARTITION BY buzz ORDER BY bar ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS bat from some_table sqlfluff-3.4.2/test/fixtures/dialects/snowflake/window_function_ignore_nulls.yml000066400000000000000000000040431503426445100304770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 22802403eaf8fe4aade4efce5598e9d1ac92de0812e6f5b642977c98d3e63224 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: function_name_identifier: FIRST_VALUE - function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) - keyword: IGNORE - keyword: NULLS - over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: buzz orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: bar frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: bat from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-3.4.2/test/fixtures/dialects/snowflake/within_group.sql000066400000000000000000000031411503426445100252150ustar00rootroot00000000000000-- Snowflake style WITHIN GROUP window functions with favourite_fruits as ( select column1 as name, column2 as colour from (values ('apple', 'green'), ('unripe banana', 'green'), ('kiwi', 'green'), ('blueberry', 'blue'), ('strawberry', 'red'), ('grape', 'red') ) ) select colour, listagg(name, ', ') within group (order by name) as fruits from favourite_fruits group by colour; SELECT ARRAY_AGG(o_orderkey) WITHIN GROUP (ORDER BY o_orderkey ASC) FROM orders; select array_agg(o_orderkey) within group (order by o_orderkey asc) from orders where o_totalprice > 450000; select array_agg(distinct o_orderstatus) within group (order by o_orderstatus asc) from orders where o_totalprice > 450000 order by o_orderstatus asc; select o_orderstatus, array_agg(o_clerk) within group (order by o_totalprice desc) from orders where o_totalprice > 450000 group by o_orderstatus order by o_orderstatus desc; select listagg(o_orderkey, ' ') from orders where o_totalprice > 450000; select listagg(distinct o_orderstatus, '|') from orders where o_totalprice > 450000; select o_orderstatus, listagg(o_clerk, ', ') within group (order by o_totalprice desc) from orders where o_totalprice > 450000 group by o_orderstatus; select listagg(spanish_phrase, '|') within group (order by collate(spanish_phrase, 'sp')) from collation_demo group by english_phrase; select listagg(spanish_phrase, '|') within group (order by collate(spanish_phrase, 'utf8')) from collation_demo group by english_phrase; sqlfluff-3.4.2/test/fixtures/dialects/snowflake/within_group.yml000066400000000000000000000434201503426445100252230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 19ee3a44e0810323ecff24de553c563195970ba643356896f0c48d2aae2ac6dc file: - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: favourite_fruits keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column1 alias_expression: alias_operator: keyword: as naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 alias_expression: alias_operator: keyword: as naked_identifier: colour from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: quoted_literal: "'apple'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'unripe banana'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'kiwi'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'blueberry'" - comma: ',' - expression: quoted_literal: "'blue'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'strawberry'" - comma: ',' - expression: quoted_literal: "'red'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'grape'" - comma: ',' - expression: quoted_literal: "'red'" - end_bracket: ) end_bracket: ) end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: colour - comma: ',' - select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: name end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: fruits from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: favourite_fruits groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: colour - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o_orderkey - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: array_agg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: 
start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderkey - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: array_agg function_contents: bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: o_orderstatus end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: asc - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: o_orderstatus - comma: ',' - select_clause_element: function: function_name: function_name_identifier: array_agg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_clerk end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_totalprice - keyword: desc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: o_orderstatus orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: desc - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: o_orderkey - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - keyword: distinct - expression: column_reference: naked_identifier: o_orderstatus - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) from_clause: keyword: from from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: o_orderstatus - comma: ',' - select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: o_clerk - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_totalprice - keyword: desc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: o_orderstatus - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - expression: function: function_name: function_name_identifier: collate function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'sp'" - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: collation_demo groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: english_phrase - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - expression: function: function_name: function_name_identifier: collate function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'utf8'" - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: collation_demo groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: english_phrase - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/soql/000077500000000000000000000000001503426445100207445ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/soql/.sqlfluff000066400000000000000000000000321503426445100225620ustar00rootroot00000000000000[sqlfluff] dialect = soql sqlfluff-3.4.2/test/fixtures/dialects/soql/date_literals.sql000066400000000000000000000001661503426445100243040ustar00rootroot00000000000000SELECT * FROM LiveChatTranscript WHERE LastModifiedDate >= 2022-08-28T00:00:00Z AND LastModifiedDate < 2023-01-01 sqlfluff-3.4.2/test/fixtures/dialects/soql/date_literals.yml000066400000000000000000000024531503426445100243070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a2a975041ccad2721f7111224ee87a04580072232caaebb4419c3e79557ba39c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: LiveChatTranscript where_clause: keyword: WHERE expression: - column_reference: naked_identifier: LastModifiedDate - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - datetime_literal: '2022-08-28T00:00:00Z' - binary_operator: AND - column_reference: naked_identifier: LastModifiedDate - comparison_operator: raw_comparison_operator: < - date_literal: '2023-01-01' sqlfluff-3.4.2/test/fixtures/dialects/soql/select_where_date_literals.sql000066400000000000000000000000701503426445100270270ustar00rootroot00000000000000SELECT Id FROM Account WHERE CreatedDate = NEXT_90_DAYS sqlfluff-3.4.2/test/fixtures/dialects/soql/select_where_date_literals.yml000066400000000000000000000017651503426445100270450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db28f921856ff81069b50adffd26b6355e3611d71897aa8f86aa4e2d2eaaa9f5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Account where_clause: keyword: WHERE expression: column_reference: naked_identifier: CreatedDate comparison_operator: raw_comparison_operator: '=' bare_function: NEXT_90_DAYS sqlfluff-3.4.2/test/fixtures/dialects/soql/select_where_date_n_literals.sql000066400000000000000000000000721503426445100273460ustar00rootroot00000000000000SELECT Id FROM Account WHERE CreatedDate = LAST_N_WEEKS:5 sqlfluff-3.4.2/test/fixtures/dialects/soql/select_where_date_n_literals.yml000066400000000000000000000021031503426445100273450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a902fc817e4b3517a9998c2a8f9f2d7d30b6a511b4d18933c25dab439932d2d6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Account where_clause: keyword: WHERE expression: column_reference: naked_identifier: CreatedDate comparison_operator: raw_comparison_operator: '=' date_n_literal: keyword: LAST_N_WEEKS colon: ':' numeric_literal: '5' sqlfluff-3.4.2/test/fixtures/dialects/sparksql/000077500000000000000000000000001503426445100216265ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/sparksql/.sqlfluff000066400000000000000000000000361503426445100234500ustar00rootroot00000000000000[sqlfluff] dialect = sparksql sqlfluff-3.4.2/test/fixtures/dialects/sparksql/add_file.sql000066400000000000000000000003431503426445100240760ustar00rootroot00000000000000ADD FILE "/path/to/file/abc.txt"; ADD FILE '/another/test.txt'; ADD FILE "/path with space/abc.txt"; ADD FILE "/path/to/some/directory"; ADD FILES "/path with space/cde.txt" '/path with space/fgh.txt'; ADD FILE /tmp/test; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/add_file.yml000066400000000000000000000026371503426445100241100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9c62f710bfa3ebc86ec9b61d388f66ff88fa3d4f58d8ed0345c4af0c1057843 file: - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path/to/file/abc.txt"' - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: "'/another/test.txt'" - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path with space/abc.txt"' - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path/to/some/directory"' - statement_terminator: ; - statement: add_file_statement: - keyword: ADD - file_keyword: FILES - quoted_literal: '"/path with space/cde.txt"' - quoted_literal: "'/path with space/fgh.txt'" - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE file_literal: - slash: / - path_segment: tmp - slash: / - path_segment: test - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/add_jar.sql000066400000000000000000000011731503426445100237350ustar00rootroot00000000000000ADD JAR "/path/to/some.jar"; ADD JAR '/some/other.jar'; ADD JAR "/path with space/abc.jar"; ADD JARS "/path with space/def.jar" '/path with space/ghi.jar'; ADD JAR "ivy://group:module:version"; ADD JAR "ivy://group:module:version?transitive=false"; ADD JAR "ivy://group:module:version?transitive=true"; ADD JAR "ivy://group:module:version?exclude=group:module&transitive=true"; ADD JAR ivy://group:module:version?exclude=group:module&transitive=true; ADD JAR /path/to/some.jar; ADD JAR path/to/some.jar; ADD JAR ivy://path/to/some.jar; -- NB: Non-quoted paths do not currently support whitespaces -- e.g. 
/path to/some.jar sqlfluff-3.4.2/test/fixtures/dialects/sparksql/add_jar.yml000066400000000000000000000052161503426445100237410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f4d8b977e9c4218926f0256036b4bca8e2378b7470065bcca0cbfd694fe6fec file: - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"/path/to/some.jar"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: "'/some/other.jar'" - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"/path with space/abc.jar"' - statement_terminator: ; - statement: add_jar_statement: - keyword: ADD - file_keyword: JARS - quoted_literal: '"/path with space/def.jar"' - quoted_literal: "'/path with space/ghi.jar'" - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=false"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=true"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?exclude=group:module&transitive=true"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: literal: ivy://group:module:version?exclude=group:module&transitive=true - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: - slash: / - path_segment: path - slash: / - path_segment: to - slash: / - path_segment: some - dot: . - path_segment: jar - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: - path_segment: path - slash: / - path_segment: to - slash: / - path_segment: some - dot: . - path_segment: jar - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: literal: ivy://path/to/some.jar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_database.sql000066400000000000000000000010101503426445100252720ustar00rootroot00000000000000ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John', 'Edit-date' = '01/01/2001' ); ALTER SCHEMA inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); ALTER SCHEMA inventory SET DBPROPERTIES ( 'Edited-by' = 'John', 'Edit-date' = '01/01/2001' ); ALTER DATABASE inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db'; ALTER SCHEMA inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_database.yml000066400000000000000000000062441503426445100253120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8aafb289a530d36bd48e8ddfaac798d58e539a2b74f5fa586873092693017ae file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'Edited-by'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - comma: ',' - property_name_identifier: quoted_identifier: "'Edit-date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01/01/2001'" - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'Edited-by'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - comma: ',' - property_name_identifier: quoted_identifier: "'Edit-date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01/01/2001'" - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: LOCATION - quoted_literal: "'file:/temp/spark-warehouse/new_inventory.db'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: inventory - keyword: SET - keyword: LOCATION - quoted_literal: "'file:/temp/spark-warehouse/new_inventory.db'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_table.sql000066400000000000000000000035211503426445100246260ustar00rootroot00000000000000---- RENAME table ALTER TABLE Student RENAME TO StudentInfo; ---- RENAME partition ALTER TABLE Default.StudentInfo PARTITION ( Age = '10' ) RENAME TO PARTITION ( Age = '15' ); -- Add new columns to a table ALTER TABLE StudentInfo ADD COLUMNS (LastName STRING, DOB TIMESTAMP); -- ALTER OR CHANGE COLUMNS ALTER TABLE StudentInfo ALTER COLUMN Name COMMENT "new comment"; ALTER TABLE StudentInfo CHANGE COLUMN Name COMMENT "new comment"; ---- Add a new partition to a table ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (Age = 18); -- Adding multiple partitions to the table ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION ( Age = 18 ) PARTITION (Age = 20); -- Drop a partition from the table ALTER TABLE StudentInfo DROP IF EXISTS PARTITION (Age = 18); -- SET TABLE PROPERTIES ALTER TABLE Dbx.Tab1 SET TBLPROPERTIES ('winner' = 'loser'); -- SET TABLE COMMENT Using SET PROPERTIES ALTER TABLE 
Dbx.Tab1 SET TBLPROPERTIES ('comment' = 'A table comment.'); -- Alter TABLE COMMENT Using SET PROPERTIES ALTER TABLE Dbx.Tab1 SET TBLPROPERTIES ('comment' = 'This is a new comment.'); -- DROP TABLE PROPERTIES ALTER TABLE Dbx.Tab1 UNSET TBLPROPERTIES ('winner'); -- SET SERDE/ SERDE Properties ALTER TABLE Table_Identifier SET SERDEPROPERTIES ( "key1" = "val1", "key2" = "val2"); ALTER TABLE Test_Tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; ALTER TABLE Dbx.Tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee'); -- Change the fileformat ALTER TABLE Loc_Orc SET FILEFORMAT ORC; ALTER TABLE P1 PARTITION (Month = 2, Day = 2) SET FILEFORMAT PARQUET; -- Change the file Location ALTER TABLE Dbx.Tab1 SET LOCATION '/path/to/part/ways'; ALTER TABLE Dbx.Tab1 PARTITION (A = '1', B = '2') SET LOCATION '/path/to/part/ways'; -- Recover Partitions ALTER TABLE Dbx.Tab1 RECOVER PARTITIONS; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_table.yml000066400000000000000000000237771503426445100246470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8ab010709e40c560a464020e4c67bb7ee1ab08342c383ed3ed51eb6ff6a90bbb file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Student - keyword: RENAME - keyword: TO - table_reference: naked_identifier: StudentInfo - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Default - dot: . 
- naked_identifier: StudentInfo - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10'" end_bracket: ) - keyword: RENAME - keyword: TO - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'15'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: LastName data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: DOB data_type: primitive_type: keyword: TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: Name - keyword: COMMENT - quoted_literal: '"new comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: Name - keyword: COMMENT - quoted_literal: '"new comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: DROP - keyword: IF - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'loser'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . 
- naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'A table comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'This is a new comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Table_Identifier - keyword: SET - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Test_Tab - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'k'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'v'" - comma: ',' - property_name_identifier: quoted_identifier: "'kay'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'vee'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Loc_Orc - keyword: SET - keyword: FILEFORMAT - data_source_format: keyword: ORC - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: P1 - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: Month - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: Day - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - end_bracket: ) - keyword: SET - keyword: FILEFORMAT - data_source_format: keyword: PARQUET - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: LOCATION - quoted_literal: "'/path/to/part/ways'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . 
- naked_identifier: Tab1 - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - comma: ',' - column_reference: naked_identifier: B - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2'" - end_bracket: ) - keyword: SET - keyword: LOCATION - quoted_literal: "'/path/to/part/ways'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: RECOVER - keyword: PARTITIONS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_view.sql000066400000000000000000000013501503426445100245070ustar00rootroot00000000000000-- RENAME View ALTER VIEW view_identifier RENAME TO view_identifier; ALTER VIEW tempdb1.v1 RENAME TO tempdb1.v2; --SET View Properties ALTER VIEW view_identifier SET TBLPROPERTIES ( "property_key" = "property_val"); ALTER VIEW tempdb1.v2 SET TBLPROPERTIES ( 'created.by.user' = "John", 'created.date' = '01-01-2001' ); --UNSET View Properties ALTER VIEW view_identifier UNSET TBLPROPERTIES ( "property_key"); ALTER VIEW view_identifier UNSET TBLPROPERTIES IF EXISTS ( "property_key"); ALTER VIEW tempdb1.v2 UNSET TBLPROPERTIES ('created.by.user', 'created.date'); --ALTER View AS SELECT ALTER VIEW view_identifier AS ( SELECT a, b FROM tempdb1.v1 ); ALTER VIEW tempdb1.v2 AS SELECT a, b FROM tempdb1.v1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/alter_view.yml000066400000000000000000000120161503426445100245120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f67fec9e7dfdde2df2601aa70249fa1791b81b87d99917515dc3535f6d91901 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: RENAME - keyword: TO - table_reference: naked_identifier: view_identifier - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 - keyword: RENAME - keyword: TO - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_val"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . 
- naked_identifier: v2 - keyword: SET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'created.by.user'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John"' - comma: ',' - property_name_identifier: quoted_identifier: "'created.date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01-01-2001'" - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: UNSET - keyword: TBLPROPERTIES - keyword: IF - keyword: EXISTS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'created.by.user'" - comma: ',' - property_name_identifier: quoted_identifier: "'created.date'" - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/analyze_table.sql000066400000000000000000000005031503426445100251570ustar00rootroot00000000000000ANALYZE TABLE students COMPUTE STATISTICS NOSCAN; ANALYZE TABLE students COMPUTE STATISTICS; ANALYZE TABLE students PARTITION (student_id = 111111) COMPUTE STATISTICS; ANALYZE TABLE students COMPUTE STATISTICS FOR COLUMNS name; ANALYZE TABLES IN school_db COMPUTE STATISTICS NOSCAN; ANALYZE TABLES COMPUTE STATISTICS; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/analyze_table.yml000066400000000000000000000040301503426445100251600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 056710a8f94006a6da745c58745d9f8198f33ed161a700216108d294b2de9b04 file: - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - keyword: NOSCAN - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '111111' end_bracket: ) - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - keyword: FOR - keyword: COLUMNS - column_reference: naked_identifier: name - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLES - keyword: IN - database_reference: naked_identifier: school_db - keyword: COMPUTE - keyword: STATISTICS - keyword: NOSCAN - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLES - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/bytes_literal.sql000066400000000000000000000000621503426445100252070ustar00rootroot00000000000000SELECT X'123456' AS col; SELECT X"123456" AS col; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/bytes_literal.yml000066400000000000000000000017321503426445100252160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 44113dd6cc5106fe7000939aa995bc2ea87a834ed41959f6f7821679de8d2236 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bytes_quoted_literal: "X'123456'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bytes_quoted_literal: X"123456" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/cache_table.sql000066400000000000000000000005771503426445100245720ustar00rootroot00000000000000CACHE TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') SELECT A, B FROM TESTDATA; CACHE LAZY TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') AS SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE OPTIONS ('storageLevel' = 'DISK_ONLY') AS SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/cache_table.yml000066400000000000000000000073311503426445100245670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95f34ac1be5768ab605eec92c82499535d67bf28559d8e9462549b9eef6bfd85 file: - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: A - comma: ',' - select_clause_element: column_reference: naked_identifier: B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: LAZY - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DISK_ONLY'" end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/case_clause.sql000066400000000000000000000010151503426445100246130ustar00rootroot00000000000000SELECT id, CASE WHEN id > 200 THEN 'bigger' ELSE 'small' END FROM person; SELECT id, CASE WHEN id > 200 THEN 'bigger' ELSE 'small' END AS id_size FROM person; SELECT id, CASE id WHEN 100 THEN 'bigger' WHEN id > 300 THEN '300' ELSE 'small' END FROM person; SELECT id FROM person WHERE CASE 1 = 1 WHEN 100 THEN 'big' WHEN 200 THEN 'bigger' WHEN 300 THEN 'biggest' ELSE 'small' END = 'small'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/case_clause.yml000066400000000000000000000130431503426445100246210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c0e744d5cba1910c6d4a276e9a3efbd62599cf64ea53c748f6461537c9be892 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: id_size from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - expression: column_reference: naked_identifier: id - when_clause: - keyword: WHEN - expression: numeric_literal: '100' - keyword: THEN - expression: quoted_literal: "'bigger'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '300' - keyword: THEN - expression: quoted_literal: "'300'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: case_expression: - keyword: CASE - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - when_clause: - keyword: WHEN - expression: numeric_literal: '100' - keyword: THEN - expression: quoted_literal: "'big'" - when_clause: - keyword: WHEN - expression: numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - when_clause: - keyword: WHEN - expression: numeric_literal: '300' - keyword: THEN - expression: quoted_literal: "'biggest'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END comparison_operator: raw_comparison_operator: '=' quoted_literal: "'small'" - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/clear_cache.sql000066400000000000000000000000151503426445100245540ustar00rootroot00000000000000CLEAR CACHE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/clear_cache.yml000066400000000000000000000007541503426445100245700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 88187c4784eba3d8ed01791d6bb354c7e716f93c2526abf5e4b76d77e47499c9 file: statement: clear_cache: - keyword: CLEAR - keyword: CACHE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/common_table_expressions.sql000066400000000000000000000016331503426445100274530ustar00rootroot00000000000000-- CTE with multiple column aliases WITH t(x, y) AS ( SELECT 1, 2 ) SELECT * FROM t WHERE x = 1 AND y = 2; -- CTE in CTE definition WITH t AS ( WITH t2 AS (SELECT 1) SELECT * FROM t2 ) SELECT * FROM t; -- CTE in subquery SELECT max(c) FROM ( WITH t(c) AS (SELECT 1) SELECT * FROM t ); -- CTE in subquery expression SELECT ( WITH t AS (SELECT 1) SELECT * FROM t ); -- CTE in CREATE VIEW statement CREATE VIEW v AS WITH t(a, b, c, d) AS ( SELECT 1, 2, 3, 4 ) SELECT * FROM t; SELECT * FROM v; -- If name conflict is detected in nested CTE, then AnalysisException is thrown by default. -- SET spark.sql.legacy.ctePrecedencePolicy = CORRECTED (which is recommended), -- inner CTE definitions take precedence over outer definitions. WITH t AS ( SELECT 1 ), t2 AS ( WITH t AS (SELECT 2) SELECT * FROM t ) SELECT * FROM t2; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/common_table_expressions.yml000066400000000000000000000251521503426445100274570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 19b6645165be70ef581581387fcbd95c5fe6a14ac711567a4f5ec721146de027 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: x - comma: ',' - naked_identifier: y end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: y - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: c end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: 
table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - comma: ',' - naked_identifier: d end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' - comma: ',' - select_clause_element: numeric_literal: '3' - comma: ',' - select_clause_element: numeric_literal: '4' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: t2 keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_database.sql000066400000000000000000000016371503426445100254450ustar00rootroot00000000000000-- Create database with all optional syntax CREATE DATABASE IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value"); -- Create schema with all optional syntax CREATE SCHEMA IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value" ); -- Create database `customer_db`. CREATE DATABASE customer_db; -- Create database `customer_db` only if database with same name doesn't exist. CREATE DATABASE IF NOT EXISTS customer_db; -- `Comments`,`Specific Location` and `Database properties`. 
CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES ("ID" = "001", "Name" = 'John'); -- Create `inventory_db` Database CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_database.yml000066400000000000000000000063461503426445100254510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c9b4502cfb54af63e849c29e0f20e55006fd531b18889c055e1cc02eb821983 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - keyword: COMMENT - quoted_literal: "'This is customer database'" - keyword: LOCATION - quoted_literal: "'/user'" - keyword: WITH - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"ID"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"001"' - comma: ',' - property_name_identifier: quoted_identifier: '"Name"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: inventory_db - keyword: COMMENT - quoted_literal: "'This database is used to maintain Inventory'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_function.sql000066400000000000000000000014561503426445100255250ustar00rootroot00000000000000-- Create FUNCTION with all optional syntax CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS function_name AS "class_name" USING FILE "resource_locations"; -- Create a permanent function called 
`simple_udf`. CREATE FUNCTION simple_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Created a temporary function. CREATE TEMPORARY FUNCTION simple_temp_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Replace the implementation of `simple_udf` CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR' USING JAR '/tmp/SimpleUdfR.jar'; -- Create a permanent function `test_avg` CREATE FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; ---- Create Temporary function `test_avg` CREATE TEMPORARY FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_function.yml000066400000000000000000000044561503426445100255300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 741b41dba0f066e0ce3e0580e49d32d9b701a9fb3dc371ee17844b1021e6512e file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name_identifier: function_name - keyword: AS - quoted_literal: '"class_name"' - keyword: USING - file_keyword: FILE - quoted_literal: '"resource_locations"' - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: simple_temp_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdfR'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdfR.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_complex_datatypes.sql000066400000000000000000000024121503426445100305650ustar00rootroot00000000000000--Create Table with complex datatypes CREATE TABLE table_identifier ( a STRUCT<b: STRING, c: BOOLEAN>, d MAP<STRING, BOOLEAN>, e ARRAY<STRING>); --Create Table with complex datatypes without : in struct CREATE TABLE table_identifier ( a STRUCT<b STRING, c BOOLEAN>, d MAP<STRING, BOOLEAN>, e ARRAY<STRING>); --Create Table with complex datatypes and comments CREATE TABLE table_identifier ( a STRUCT<b: STRING COMMENT 'struct_comment', c: BOOLEAN> COMMENT 'col_comment', d MAP<STRING, BOOLEAN> COMMENT 'col_comment', e ARRAY<STRING> COMMENT 'col_comment'); --Create Table with nested complex datatypes CREATE TABLE table_identifier ( a STRUCT<b: STRING, c: MAP<STRING, BOOLEAN>>, d MAP<STRING, STRUCT<e: STRING, f: MAP<STRING, BOOLEAN>>>, g ARRAY<STRUCT<h: STRING, i: MAP<STRING, BOOLEAN>>>); --Create Table with nested complex datatypes without : in struct CREATE TABLE table_identifier ( a STRUCT<b STRING, c MAP<STRING, BOOLEAN>>, d MAP<STRING, STRUCT<e STRING, f MAP<STRING, BOOLEAN>>>, g ARRAY<STRUCT<h STRING, i MAP<STRING, BOOLEAN>>>); --Create Table with complex datatypes and quoted identifiers CREATE TABLE table_identifier ( a STRUCT<`b`: STRING, c: BOOLEAN>, `d` MAP<STRING, BOOLEAN>, e ARRAY<STRING>); CREATE TABLE my_table ( field_a STRING, field_b VARIANT ); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml000066400000000000000000000347071503426445100306020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fcbf733feff8487db5937413091aed2a901c59c4f36316f78e54e2a8a568a9fc file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: "'struct_comment'" - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - comma: ',' - column_definition: column_reference:
naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - comma: ',' - column_definition: column_reference: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: e - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: f - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: g data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: h - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: i - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: e - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: f - data_type: - keyword: MAP - start_angle_bracket: < - 
data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: g data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: h - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: i - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - quoted_identifier: '`b`' - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: quoted_identifier: '`d`' data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: field_a data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: field_b data_type: primitive_type: keyword: VARIANT - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_datasource.sql000066400000000000000000000031221503426445100271710ustar00rootroot00000000000000--Create Datasource Table with all optional syntax CREATE TABLE IF NOT EXISTS table_identifier ( test STRING COMMENT "col_comment1" ) USING PARQUET OPTIONS ( "key1" = "val1", "key2" = "val2") PARTITIONED BY ( col_name1, col_name2 ) CLUSTERED BY ( col_name3, col_name4) SORTED BY ( col_name3 ASC, col_name4 DESC) INTO 4 BUCKETS LOCATION 'root/database/table' COMMENT 'this is a comment' TBLPROPERTIES ( "key1" = "val1", "key2" = "val2" ) AS SELECT id, name FROM student; --Use data source CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV; --Use data from another table CREATE TABLE student_copy USING CSV AS SELECT id, name FROM student; --Omit the USING clause --uses the default data source (parquet by default) CREATE TABLE student (id INT, student_name STRING, age INT); --Specify table comment and properties CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV COMMENT 'this is a comment' TBLPROPERTIES ('foo' = 'bar'); --Create partitioned and bucketed table CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV PARTITIONED BY (age) CLUSTERED BY (id) INTO 4 BUCKETS; CREATE EXTERNAL TABLE IF NOT EXISTS student (id INT, student_name STRING, 
age INT) USING iceberg PARTITIONED BY (age); CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV COMMENT "this is a comment" PARTITIONED BY (age) STORED AS PARQUET; create table if not exists my_table_space.my_test_table ( test_value string, activity_date_partition date ) using DELTA location 's3://some-bucket/test-data/' partitioned by (activity_date_partition); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_datasource.yml000066400000000000000000000271251503426445100272040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bb8bac809b921a0af431495cde06dbee35a463b361706fbc816fcea754b52792 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: test data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - comma: ',' - column_reference: naked_identifier: col_name2 - end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name3 - comma: ',' - column_reference: naked_identifier: col_name4 - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name3 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col_name4 - keyword: DESC - end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - keyword: LOCATION - quoted_literal: "'root/database/table'" - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - 
column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_copy - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: 
student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: COMMENT - quoted_literal: '"this is a comment"' - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - keyword: if - keyword: not - keyword: exists - table_reference: - naked_identifier: my_table_space - dot: . - naked_identifier: my_test_table - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: test_value data_type: primitive_type: keyword: string - comma: ',' - column_definition: column_reference: naked_identifier: activity_date_partition data_type: primitive_type: keyword: date - end_bracket: ) - using_clause: keyword: using data_source_format: keyword: DELTA - keyword: location - quoted_literal: "'s3://some-bucket/test-data/'" - keyword: partitioned - keyword: by - bracketed: start_bracket: ( column_reference: naked_identifier: activity_date_partition end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_hiveformat.sql000066400000000000000000000063751503426445100272200ustar00rootroot00000000000000--Create Hiveformat Table with all optional syntax CREATE EXTERNAL TABLE IF NOT EXISTS table_identifier ( col_name1 STRING COMMENT "col_comment1") COMMENT "table_comment" PARTITIONED BY ( col_name2 STRING COMMENT "col_comment2" ) CLUSTERED BY ( col_name1, col_name2) SORTED BY ( col_name1 ASC, col_name2 DESC ) INTO 3 BUCKETS ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS PARQUET LOCATION "path/to/files" TBLPROPERTIES ( "key1" = "val1", "key2" = "val2") AS (SELECT * FROM table_identifier); --Use hive format CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC; --Use data from another table CREATE TABLE student_copy STORED AS ORC AS SELECT * FROM student; --Specify table comment and properties CREATE TABLE student (id INT, student_name STRING, age INT) COMMENT 'this is a comment' STORED AS ORC TBLPROPERTIES ('foo' = 'bar'); --Specify table comment and properties with different clauses order CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC TBLPROPERTIES ('foo' = 'bar') COMMENT 'this is a comment'; --Create partitioned table CREATE TABLE student (id INT, student_name STRING) PARTITIONED BY (age INT) STORED AS ORC; --Create partitioned table with different clauses order CREATE TABLE student (id INT, student_name STRING) STORED AS ORC PARTITIONED BY (age INT); --Use Row Format and file 
format CREATE TABLE student (id INT, student_name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE; --Use complex datatype CREATE EXTERNAL TABLE family( student_name STRING, friends ARRAY<STRING>, children MAP<STRING, INT>, address STRUCT<street: STRING, city: STRING> ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' COLLECTION ITEMS TERMINATED BY '_' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '\n' NULL DEFINED AS 'foonull' STORED AS TEXTFILE LOCATION '/tmp/family/'; --Use predefined custom SerDe CREATE TABLE avroexample ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ( 'avro.schema.literal' = '{ "namespace": "org.apache.hive", "name": "first_schema", "type": "record", "fields": [ { "name":"string1", "type":"string" }, { "name":"string2", "type":"string" }] }' ); --Use personalized custom SerDe --(we may need to `ADD JAR xxx.jar` first to ensure we can find the serde_class, --or you may run into `CLASSNOTFOUND` exception) ADD JAR '/tmp/hive_serde_example.jar'; CREATE EXTERNAL TABLE family (id INT, family_name STRING) ROW FORMAT SERDE 'com.ly.spark.serde.SerDeExample' STORED AS INPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleInputFormat' OUTPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleOutputFormat' LOCATION '/tmp/family/'; --Use `CLUSTERED BY` clause to create bucket table without `SORTED BY` CREATE TABLE clustered_by_test1 (id INT, age STRING) CLUSTERED BY (id) INTO 4 BUCKETS STORED AS ORC; --Use `CLUSTERED BY` clause to create bucket table with `SORTED BY` CREATE TABLE clustered_by_test2 (id INT, test_name STRING) PARTITIONED BY (test_year STRING) CLUSTERED BY (id, name) SORTED BY (id ASC) INTO 3 BUCKETS STORED AS PARQUET; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_hiveformat.yml000066400000000000000000000417061503426445100272170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 0073c013e8762f326f646d2dd0633b1fc2f287f1a7ccd4e5ba80c24ff67d544c file: - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: col_name1 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - keyword: COMMENT - quoted_literal: '"table_comment"' - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: col_name2 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment2"' end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - comma: ',' - column_reference: naked_identifier: col_name2 - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col_name2 - keyword: DESC - end_bracket: ) - keyword: INTO - numeric_literal: '3' - keyword: BUCKETS - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: '"path/to/files"' - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_identifier end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_copy - keyword: STORED - keyword: AS - keyword: ORC - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: 
keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: STORED - keyword: AS - keyword: ORC - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: age data_type: primitive_type: keyword: INT end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: age data_type: primitive_type: keyword: INT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: family - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: student_name 
data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: friends data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: children data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: INT - end_angle_bracket: '>' - comma: ',' - column_definition: column_reference: naked_identifier: address data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: street - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: city - colon: ':' - data_type: primitive_type: keyword: STRING - end_angle_bracket: '>' - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: COLLECTION - keyword: ITEMS - keyword: TERMINATED - keyword: BY - quoted_literal: "'_'" - keyword: MAP - keyword: KEYS - keyword: TERMINATED - keyword: BY - quoted_literal: "':'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'foonull'" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: LOCATION - quoted_literal: "'/tmp/family/'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: avroexample - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.avro.AvroSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'" - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'avro.schema.literal'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{\n \"namespace\": \"org.apache.hive\",\n \ \ \"name\": \"first_schema\",\n \"type\": \"record\",\n \"\ fields\": [ { \"name\":\"string1\", \"type\":\"string\" }, { \"name\":\"\ string2\", \"type\":\"string\" }]\n }'" end_bracket: ) - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: "'/tmp/hive_serde_example.jar'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: family - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: family_name data_type: primitive_type: keyword: STRING - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'com.ly.spark.serde.SerDeExample'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'com.ly.spark.example.serde.io.SerDeExampleInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'com.ly.spark.example.serde.io.SerDeExampleOutputFormat'" - keyword: LOCATION - quoted_literal: "'/tmp/family/'" - statement_terminator: ; - statement: 
create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clustered_by_test1 - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clustered_by_test2 - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: test_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: test_year data_type: primitive_type: keyword: STRING end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id keyword: ASC end_bracket: ) - keyword: INTO - numeric_literal: '3' - keyword: BUCKETS - keyword: STORED - keyword: AS - keyword: PARQUET - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_like.sql000066400000000000000000000013641503426445100257710ustar00rootroot00000000000000-- Create Table Like with all optional syntax CREATE TABLE IF NOT EXISTS table_identifier LIKE source_table_identifier USING PARQUET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS PARQUET TBLPROPERTIES ( "key1" = "val1", "key2" = "val2") LOCATION "path/to/files"; -- Create table using an existing table CREATE TABLE student_dupli LIKE student; -- Create table like using a data source CREATE TABLE student_dupli LIKE student USING CSV; -- Table is created as external table at the location specified CREATE TABLE student_dupli LIKE student LOCATION '/root1/home'; -- Create table like using a rowformat CREATE TABLE student_dupli LIKE student ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE TBLPROPERTIES ('owner' = 'xxxx'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_like.yml000066400000000000000000000063401503426445100257720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
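# For example, after adding or altering a fixture such as
# test/fixtures/dialects/sparksql/create_table_like.sql, the expected parse
# tree in this file (including the "_hash" value below) is regenerated from
# the repository root with:
#
#   python test/generate_parse_fixture_yml.py
#
# A hand-edited tree fails the test suite because the recorded hash no longer
# matches the one SQLFluff computes.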
_hash: 8368690cff9c90cbacdc16c5856e36f403ee59c19d845641d768402b2588b62a file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - keyword: LIKE - table_reference: naked_identifier: source_table_identifier - using_clause: keyword: USING data_source_format: keyword: PARQUET - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: LOCATION - quoted_literal: '"path/to/files"' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - keyword: LOCATION - quoted_literal: "'/root1/home'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'owner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.sql000066400000000000000000000000741503426445100311300ustar00rootroot00000000000000CREATE TABLE table_identifier ( a LONG, b INT, c SMALLINT); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.yml000066400000000000000000000022201503426445100311250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
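# Each primitive column type in the SQL fixture above (LONG, INT, SMALLINT)
# parses to a column_definition whose data_type wraps a primitive_type
# keyword node, as the tree below shows. A hypothetical fixture extending the
# same pattern, e.g.
#
#   CREATE TABLE table_identifier (d DOUBLE, e DECIMAL(10, 2));
#
# would be expected to yield analogous primitive_type nodes, with DECIMAL
# additionally carrying bracketed_arguments for its precision and scale.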
_hash: 4728eebedbf594199cd71916ba2bdf37d0732d7643cfd8ec7d52e63ab409950d file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: primitive_type: keyword: LONG - comma: ',' - column_definition: column_reference: naked_identifier: b data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: c data_type: primitive_type: keyword: SMALLINT - end_bracket: ) statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_select.sql000066400000000000000000000002461503426445100263220ustar00rootroot00000000000000-- create table select without `AS` keyword CREATE TABLE tab1 SELECT * FROM tab2; -- create table select with `AS` keyword CREATE TABLE tab1 AS SELECT * FROM tab2;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_table_select.yml000066400000000000000000000030421503426445100263210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c638fbc846086000ecd6a0d36354551fe8ed4377abce53076ab6ab93b98bd53 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tab1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab2 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tab1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab2 - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_view.sql000066400000000000000000000015641503426445100246510ustar00rootroot00000000000000-- Create view basic syntax CREATE VIEW experienced_employee_extended AS SELECT * from experienced_employee ; -- Create VIEW with all optional syntax CREATE OR REPLACE GLOBAL TEMPORARY VIEW IF NOT EXISTS experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' TBLPROPERTIES ( "key1" = "val1", "key2" = "val2" ) AS SELECT ID, Name from temp2 ; -- Create a temporary view with TEMP. CREATE TEMP VIEW experienced_employee_temp AS SELECT * from experienced_employee limit 2 ; -- Replace the view `experienced_employee_rep` CREATE OR REPLACE VIEW experienced_employee_rep AS SELECT * from experienced_employee limit 2 ; CREATE TEMPORARY VIEW pulse_article_search_data USING org.apache.spark.sql.jdbc OPTIONS ( url "jdbc:postgresql:dbserver", dbtable "schema.tablename", user 'username', password 'password' )
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_view.yml000066400000000000000000000122721503426445100246520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 24353b258842f0c68d24fd668afcd735f287f5e32e5a53fc53d679562d428796 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_extended - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: GLOBAL - keyword: TEMPORARY - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: experienced_employee - bracketed: - start_bracket: ( - column_reference: naked_identifier: ID - keyword: COMMENT - quoted_literal: "'Unique identification number'" - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: COMMENT - quoted_literal: "'View for experienced employees'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: temp2 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - table_reference: naked_identifier: experienced_employee_temp - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee limit_clause: keyword: limit numeric_literal: '2' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_rep - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee limit_clause: keyword: limit numeric_literal: '2' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: pulse_article_search_data - keyword: USING - data_source_format: object_reference: - naked_identifier: org - dot: . - naked_identifier: apache - dot: . - naked_identifier: spark - dot: . - naked_identifier: sql - dot: . 
- naked_identifier: jdbc - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: url - quoted_literal: '"jdbc:postgresql:dbserver"' - comma: ',' - property_name_identifier: properties_naked_identifier: dbtable - quoted_literal: '"schema.tablename"' - comma: ',' - property_name_identifier: properties_naked_identifier: user - quoted_literal: "'username'" - comma: ',' - property_name_identifier: properties_naked_identifier: password - quoted_literal: "'password'" - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_widget.sql000066400000000000000000000003571503426445100251620ustar00rootroot00000000000000-- Create dropdown widget CREATE WIDGET DROPDOWN state DEFAULT "CA" CHOICES SELECT * FROM (VALUES ("CA"), ("IL"), ("MI"), ("NY"), ("OR"), ("VA")); -- Create text widget CREATE WIDGET TEXT database DEFAULT "customers_dev"; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/create_widget.yml000066400000000000000000000051371503426445100251650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: deac0f7076a50c434b37acd68ecc449ad30920e19cd822fe5675f3f20f5fb8eb file: - statement: create_widget_statement: - keyword: CREATE - keyword: WIDGET - keyword: DROPDOWN - widget_name_identifier: state - keyword: DEFAULT - quoted_literal: '"CA"' - keyword: CHOICES - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: quoted_literal: '"CA"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"IL"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"MI"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"NY"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"OR"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"VA"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_widget_statement: - keyword: CREATE - keyword: WIDGET - keyword: TEXT - widget_name_identifier: database - keyword: DEFAULT - quoted_literal: '"customers_dev"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_apply_changes_into.sql000066400000000000000000000045731503426445100312600ustar00rootroot00000000000000-- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1; -- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (userid) APPLY AS DELETE WHEN operation = "DELETE" SEQUENCE BY sequencenum COLUMNS * EXCEPT (operation, sequencenum) STORED AS SCD TYPE 2; -- Create and populate the target table. 
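# The fixtures below exercise the Databricks Delta Live Tables CDC statement
# in several variants: APPLY AS DELETE/TRUNCATE WHEN, IGNORE NULL UPDATES,
# SCD TYPE 1 and 2, TRACK HISTORY ON, and a form with no COLUMNS clause. That
# minimal form, taken verbatim from the SQL above, is:
#
#   APPLY CHANGES INTO live.target
#   FROM STREAM(cdc_data.users)
#   KEYS (user_id)
#   SEQUENCE BY sequence_num;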
CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (userid) SEQUENCE BY sequencenum COLUMNS * EXCEPT (operation, sequencenum); -- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) IGNORE NULL UPDATES WHERE state = "NY" APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1; -- Create and populate the target table. -- "APPLY CHANGES INTO" without a "COLUMNS" clause CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) SEQUENCE BY sequence_num; -- Create and populate the target table. -- "APPLY CHANGES INTO" with a "TRACK HISTORY" clause CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) IGNORE NULL UPDATES WHERE state = "NY" APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1 TRACK HISTORY ON user_id; -- Create and populate the target table. -- "APPLY CHANGES INTO" with a "TRACK HISTORY ON * EXCEPT" clause CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) IGNORE NULL UPDATES WHERE state = "NY" APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1 TRACK HISTORY ON * EXCEPT (state); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_apply_changes_into.yml000066400000000000000000000361041503426445100312550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b045e6faf0f74c581457e3f8d919938a8ddbe610f4454b06c1b1d2cd06c29ad1 file: - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . - naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: userid end_bracket: ) - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequencenum - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequencenum - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '2' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: userid end_bracket: ) - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequencenum - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequencenum - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . - naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: IGNORE - keyword: 'NULL' - keyword: UPDATES - where_clause: keyword: WHERE expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NY"' - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . - naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: IGNORE - keyword: 'NULL' - keyword: UPDATES - where_clause: keyword: WHERE expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NY"' - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - keyword: TRACK - keyword: HISTORY - keyword: 'ON' - column_reference: naked_identifier: user_id - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: IGNORE - keyword: 'NULL' - keyword: UPDATES - where_clause: keyword: WHERE expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NY"' - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - keyword: TRACK - keyword: HISTORY - keyword: 'ON' - star: '*' - keyword: EXCEPT - bracketed: start_bracket: ( column_reference: naked_identifier: state end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_constraint.sql000066400000000000000000000004061503426445100275650ustar00rootroot00000000000000CONSTRAINT valid_timestamp EXPECT (event_ts > '2012-01-01'); CONSTRAINT valid_current_page EXPECT ( current_page_id IS NOT NULL AND current_page_title IS NOT NULL ) ON VIOLATION DROP ROW; CONSTRAINT valid_count EXPECT (count > 0) ON VIOLATION FAIL UPDATE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_constraint.yml000066400000000000000000000037651503426445100276020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
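# The three statements above cover the DLT expectation syntax: a bare
# CONSTRAINT ... EXPECT, EXPECT ... ON VIOLATION DROP ROW, and EXPECT ... ON
# VIOLATION FAIL UPDATE, for example:
#
#   CONSTRAINT valid_count EXPECT (count > 0) ON VIOLATION FAIL UPDATE;
#
# Each parses below to a constraint_statement whose EXPECT condition is kept
# as a bracketed expression.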
_hash: 3d65668c4738f96a0ac7dbcf556d5b2370c89aff6094acfbf485aa44b1d9fc22 file: - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_timestamp - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: event_ts comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2012-01-01'" end_bracket: ) - statement_terminator: ; - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_current_page - keyword: EXPECT - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: current_page_id - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: current_page_title - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - keyword: 'ON' - keyword: VIOLATION - keyword: DROP - keyword: ROW - statement_terminator: ; - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_count - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: count comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - keyword: 'ON' - keyword: VIOLATION - keyword: FAIL - keyword: UPDATE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_create_table.sql000066400000000000000000000016631503426445100300210ustar00rootroot00000000000000CREATE OR REFRESH LIVE TABLE taxi_raw AS SELECT a, b FROM JSON.`/databricks-datasets/nyctaxi/sample/json/`; CREATE OR REFRESH LIVE TABLE filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE OR REFRESH STREAMING LIVE TABLE customers_bronze AS SELECT a, b FROM CLOUD_FILES("/databricks-datasets/retail-org/customers/", "csv"); CREATE OR REFRESH STREAMING LIVE TABLE customers_silver AS SELECT a, b FROM STREAM(live.customers_bronze); CREATE OR REFRESH TEMPORARY LIVE TABLE filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE OR REFRESH TEMPORARY STREAMING LIVE TABLE customers_silver AS SELECT a, b FROM STREAM(live.customers_bronze); CREATE OR REFRESH LIVE TABLE taxi_raw( a STRING COMMENT 'a', b INT COMMENT 'b', CONSTRAINT valid_a EXPECT (a IS NOT NULL), CONSTRAINT valid_b EXPECT (b > 0) ) AS SELECT a, b FROM JSON.`/databricks-datasets/nyctaxi/sample/json/`; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_create_table.yml000066400000000000000000000213341503426445100300200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6fda6b7c5e5b1148e8f10df18802ebc12876c3e6ffe484b14fbcd444208a33ba file: - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: taxi_raw - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . 
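# These fixtures cover CREATE OR REFRESH [TEMPORARY] [STREAMING] LIVE TABLE,
# sourcing from another live table, from CLOUD_FILES(...), or from
# STREAM(...), plus inline column COMMENTs and CONSTRAINT ... EXPECT clauses.
# Note the file_reference form used to select straight from files:
#
#   SELECT a, b FROM JSON.`/databricks-datasets/nyctaxi/sample/json/`;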
quoted_identifier: '`/databricks-datasets/nyctaxi/sample/json/`' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_bronze - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: CLOUD_FILES function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: '"/databricks-datasets/retail-org/customers/"' - comma: ',' - expression: quoted_literal: '"csv"' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . - naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: TEMPORARY - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: TEMPORARY - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . 
- naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: taxi_raw - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: a data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'a'" - comma: ',' - column_definition: column_reference: naked_identifier: b data_type: primitive_type: keyword: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'b'" - comma: ',' - constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_a - keyword: EXPECT - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - comma: ',' - constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_b - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . quoted_identifier: '`/databricks-datasets/nyctaxi/sample/json/`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_create_view.sql000066400000000000000000000013701503426445100276770ustar00rootroot00000000000000-- https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-sql-ref.html#create-view CREATE TEMPORARY LIVE VIEW filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE TEMPORARY STREAMING LIVE VIEW customers_silver AS SELECT a, b FROM stream(live.customers_bronze); CREATE TEMPORARY LIVE VIEW filtered_data( a COMMENT 'a', b COMMENT 'b', CONSTRAINT valid_a EXPECT (a IS NOT NULL), CONSTRAINT valid_b EXPECT (b > 0) ) AS SELECT a, b FROM live.taxi_raw; CREATE OR REFRESH MATERIALIZED VIEW temp_table AS SELECT 1 AS ID; CREATE OR REFRESH MATERIALIZED VIEW dlt_view ( a STRING COMMENT 'a', b TIMESTAMP COMMENT 'b' ) COMMENT 'DLT materialized view' AS SELECT a, b FROM live.dlt_bronze; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_dlt_create_view.yml000066400000000000000000000141321503426445100277010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
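# As with the live tables above, these views accept an optional column list
# with COMMENT and CONSTRAINT ... EXPECT entries; the materialized variants
# use CREATE OR REFRESH, the simplest being:
#
#   CREATE OR REFRESH MATERIALIZED VIEW temp_table AS SELECT 1 AS ID;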
_hash: 662d898d3182641df898b2ce6e2764fddffcbeb3003ad42317b6830c756b48ca file: - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: LIVE - keyword: VIEW - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STREAMING - keyword: LIVE - keyword: VIEW - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: stream function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . - naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: LIVE - keyword: VIEW - table_reference: naked_identifier: filtered_data - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - keyword: COMMENT - quoted_literal: "'a'" - comma: ',' - column_reference: naked_identifier: b - keyword: COMMENT - quoted_literal: "'b'" - comma: ',' - constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_a - keyword: EXPECT - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - comma: ',' - constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_b - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . 
- naked_identifier: taxi_raw - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: temp_table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: ID - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: dlt_view - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: "'a'" - comma: ',' - column_reference: naked_identifier: b - data_type: primitive_type: keyword: TIMESTAMP - keyword: COMMENT - quoted_literal: "'b'" - end_bracket: ) - keyword: COMMENT - quoted_literal: "'DLT materialized view'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: dlt_bronze - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.sql000066400000000000000000000002041503426445100305770ustar00rootroot00000000000000SELECT c1:price FROM VALUES('{ "price": 5 }') AS T(c1); SELECT c1:['price']::DECIMAL(5, 2) FROM VALUES('{ "price": 5 }') AS T(c1); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml000066400000000000000000000056271503426445100306170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
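# The ':' operator extracts fields from semi-structured values (here a JSON
# string literal) and parses to a semi_structured_expression node; the second
# fixture statement combines it with a '::' cast:
#
#   SELECT c1:['price']::DECIMAL(5, 2) FROM VALUES('{ "price": 5 }') AS T(c1);
#
# Both the bare field form (c1:price) and the bracketed path form are
# represented as a colon followed by a semi_structured_element in the tree
# below.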
_hash: 65708814a09d082cf513eb10c1609ee3a80f9f5e4b1f0b4a8e88df3b18015832 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: c1 semi_structured_expression: colon: ':' semi_structured_element: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'{ \"price\": 5 }'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: T bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: c1 semi_structured_expression: colon: ':' start_square_bracket: '[' semi_structured_element: "'price'" end_square_bracket: ']' casting_operator: '::' data_type: primitive_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'{ \"price\": 5 }'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: T bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/date_functions.sql000066400000000000000000000012461503426445100253570ustar00rootroot00000000000000SELECT my_table.a, other_table.b FROM my_table LEFT JOIN other_table ON DATEDIFF(SECOND, my_table.timestamp_a, other_table.timestamp_b) > 1; SELECT DATE_ADD(MICROSECOND, 5, start_dt) AS date_add_micro, DATE_DIFF(MILLISECOND, start_dt, end_dt) AS datediff_milli, DATEADD(MINUTE, 5, start_dt) AS dateadd_min, DATEDIFF(HOUR, start_dt, end_dt) AS datediff_hr, TIMESTAMPADD(DAYOFYEAR, 5, start_dt) AS ts_add_day_of_yr, TIMESTAMPDIFF(WEEK, start_dt, end_dt) AS ts_diff_week, DATE_ADD(MONTH, 5, start_dt) AS date_add_month, DATE_ADD(QUARTER, 5, start_dt) AS date_add_quarter, DATE_ADD(YEAR, 5, start_dt) AS date_add_year FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/date_functions.yml000066400000000000000000000206531503426445100253640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 063fba4b712b12ad5bb99afdd000b4b12992f9830c257b62a272311d56e77776 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: other_table - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: other_table - join_on_condition: keyword: 'ON' expression: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: SECOND - comma: ',' - expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: timestamp_a - comma: ',' - expression: column_reference: - naked_identifier: other_table - dot: . - naked_identifier: timestamp_b - end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: MICROSECOND - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_micro - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_DIFF function_contents: bracketed: - start_bracket: ( - date_part: MILLISECOND - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: datediff_milli - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: MINUTE - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dateadd_min - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: HOUR - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: datediff_hr - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIMESTAMPADD function_contents: bracketed: - start_bracket: ( - date_part: DAYOFYEAR - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ts_add_day_of_yr - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIMESTAMPDIFF function_contents: bracketed: - start_bracket: ( - date_part: WEEK - comma: ',' - expression: column_reference: naked_identifier: start_dt - comma: ',' - expression: column_reference: naked_identifier: end_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ts_diff_week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: MONTH - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) 
alias_expression: alias_operator: keyword: AS naked_identifier: date_add_month - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: QUARTER - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_quarter - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_ADD function_contents: bracketed: - start_bracket: ( - date_part: YEAR - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: column_reference: naked_identifier: start_dt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: date_add_year from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_change_data_feed.sql000066400000000000000000000004271503426445100267240ustar00rootroot00000000000000CREATE TABLE student (id INT, student_name STRING, age INT) TBLPROPERTIES (delta.enableChangeDataFeed = true); ALTER TABLE my_delta_table SET TBLPROPERTIES (delta.enableChangeDataFeed = true); SET spark.databricks.delta.properties.defaults.enableChangeDataFeed = true; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_change_data_feed.yml000066400000000000000000000051011503426445100267200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 58818cf3ad6aa778426e618fb04ce906ea04b50191cfb3754008291b9e06c0f0 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: - properties_naked_identifier: delta - dot: . - properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_delta_table - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: - properties_naked_identifier: delta - dot: . - properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: databricks - dot: . - properties_naked_identifier: delta - dot: . - properties_naked_identifier: properties - dot: . - properties_naked_identifier: defaults - dot: . 
- properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_constraints.sql000066400000000000000000000005151503426445100260700ustar00rootroot00000000000000ALTER TABLE default.people10m CHANGE COLUMN middle_name DROP NOT NULL; ALTER TABLE default.people10m ADD CONSTRAINT date_within_range CHECK (birthDate > '1900-01-01'); ALTER TABLE default.people10m DROP CONSTRAINT date_within_range; ALTER TABLE default.people10m ADD CONSTRAINT valid_ids CHECK (id > 1 and id < 99999999); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_constraints.yml000066400000000000000000000047221503426445100260760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3e26f083e9d43fe177c07a49432da679103dc74d85a462d5dac426cfb32bea3f file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: middle_name - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: ADD - keyword: CONSTRAINT - column_reference: naked_identifier: date_within_range - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: birthDate comparison_operator: raw_comparison_operator: '>' quoted_literal: "'1900-01-01'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: DROP - keyword: CONSTRAINT - column_reference: naked_identifier: date_within_range - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: ADD - keyword: CONSTRAINT - column_reference: naked_identifier: valid_ids - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - binary_operator: and - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: < - numeric_literal: '99999999' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_convert_to.sql000066400000000000000000000010611503426445100257000ustar00rootroot00000000000000-- Convert unpartitioned Parquet table at path '' CONVERT TO DELTA PARQUET.`/data/events/`; -- Convert partitioned Parquet table at path '' -- and partitioned by integer columns named 'part' and 'part2' CONVERT TO DELTA PARQUET.`/data/events/` PARTITIONED BY (part int, part2 int); -- Convert the Iceberg table in the path . 
CONVERT TO DELTA ICEBERG.`/data/events/`; -- Convert the Iceberg table in the path <path-to-table> -- without collecting statistics CONVERT TO DELTA ICEBERG.`/data/events/` NO STATISTICS; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_convert_to.yml000066400000000000000000000034121503426445100257040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a3d34caef12a18197d02c733c3cce7611de20948b410c4900b9995a6c479b36c file: - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: PARQUET dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: PARQUET dot: . quoted_identifier: '`/data/events/`' - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_definition: naked_identifier: part data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: part2 data_type: primitive_type: keyword: int - end_bracket: ) - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: ICEBERG dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: ICEBERG dot: . quoted_identifier: '`/data/events/`' - keyword: 'NO' - keyword: STATISTICS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_create_table.sql000066400000000000000000000026161503426445100261370ustar00rootroot00000000000000-- Create table if not exists CREATE TABLE IF NOT EXISTS default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Create or replace table CREATE OR REPLACE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Create or replace table with path CREATE OR REPLACE TABLE DELTA.`/delta/people10m` ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Partition data CREATE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA PARTITIONED BY (gender); -- Control data location CREATE TABLE default.people10m USING DELTA LOCATION '/delta/people10m'; -- Generated columns CREATE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, date_of_birth DATE GENERATED ALWAYS AS (CAST(birth_date AS DATE)), ssn STRING, salary INT ) USING DELTA PARTITIONED BY (gender); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_create_table.yml000066400000000000000000000263451503426445100261430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f194d07abe5e5ffd4006283fb04007e1ab1c1d9a854f5e5fb58b43d66f5d4e30 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: column_reference: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: column_reference: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/delta/people10m`' - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: column_reference: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: column_reference: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: gender end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: LOCATION - quoted_literal: "'/delta/people10m'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - generated_column_definition: - naked_identifier: date_of_birth - data_type: primitive_type: keyword: DATE - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: birth_date keyword: AS data_type: primitive_type: keyword: DATE end_bracket: ) end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: gender end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_delete_from.sql000066400000000000000000000007151503426445100260100ustar00rootroot00000000000000DELETE FROM events WHERE date < '2017-01-01'; DELETE FROM DELTA.`/data/events/` WHERE date < '2017-01-01'; DELETE FROM all_events WHERE session_time < ( SELECT min(session_time) FROM good_events ); DELETE FROM orders AS t1 WHERE EXISTS ( SELECT returned_orders.oid FROM returned_orders WHERE t1.oid = returned_orders.oid ); DELETE FROM events WHERE category NOT IN ( SELECT category FROM events2 WHERE date > '2001-01-01' ); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_delete_from.yml000066400000000000000000000133541503426445100260150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 25e9f3c540f46477dae33271f1f03fa35a0054211ed1cd07a3b84ef28b4d85c1 file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: < quoted_literal: "'2017-01-01'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: < quoted_literal: "'2017-01-01'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: all_events where_clause: keyword: WHERE expression: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: session_time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: good_events end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders alias_expression: alias_operator: keyword: AS naked_identifier: t1 where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events where_clause: keyword: WHERE expression: - column_reference: naked_identifier: category - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: category from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2001-01-01'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_describe_detail.sql000066400000000000000000000001471503426445100266240ustar00rootroot00000000000000DESCRIBE DETAIL '/data/events/'; DESCRIBE DETAIL eventstable; DESCRIBE DETAIL DELTA.`/data/events/`; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_describe_detail.yml000066400000000000000000000016641503426445100266330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b25d107aa7b9f0bda368b8470b77a0a79fd5d52e643dfb8ced556aded18afa1c file: - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - table_reference: naked_identifier: eventstable - statement_terminator: ; - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_describe_history.sql000066400000000000000000000003311503426445100270560ustar00rootroot00000000000000-- get the full history of the table DESCRIBE HISTORY '/data/events/'; DESCRIBE HISTORY DELTA.`/data/events/`; -- get the last operation only DESCRIBE HISTORY '/data/events/' LIMIT 1; DESCRIBE HISTORY EVENTSTABLE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_describe_history.yml000066400000000000000000000022401503426445100270610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f99fb5a337816b3a7fb490100e7d41efaf3f18687c49d9e3f7125b13d17e809 file: - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - quoted_literal: "'/data/events/'" - limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - table_reference: naked_identifier: EVENTSTABLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_drop_column_metadata_change.sql000066400000000000000000000001541503426445100312060ustar00rootroot00000000000000ALTER TABLE table_name DROP COLUMN col_name; ALTER TABLE table_name DROP COLUMNS (col_name_1, col_name_2); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_drop_column_metadata_change.yml000066400000000000000000000021001503426445100312010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ab0871335f7960fea1a362e0173897fcf4447bacee1fc60e1e85e2be3eab39e6 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: DROP - keyword: COLUMNS - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name_1 - comma: ',' - column_reference: naked_identifier: col_name_2 - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_generate_manifest.sql000066400000000000000000000002631503426445100272010ustar00rootroot00000000000000GENERATE symlink_format_manifest FOR TABLE DELTA.`/data/events/`; GENERATE symlink_format_manifest FOR TABLE '/data/events/'; GENERATE symlink_format_manifest FOR TABLE events; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_generate_manifest.yml000066400000000000000000000022371503426445100272060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7a45223f7fd57196698d81e39222b7c6bbc7e08a43888f27b110fb41ac5449a6 file: - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - table_reference: naked_identifier: events - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_merge.sql000066400000000000000000000057151503426445100246270ustar00rootroot00000000000000-- upsert into a table MERGE INTO people10m USING people10mupdates ON people10m.id = people10mupdates.id WHEN MATCHED THEN UPDATE SET id = people10mupdates.id, first_name = people10mupdates.first_name, middle_name = people10mupdates.middle_name, last_name = people10mupdates.last_name, gender = people10mupdates.gender, birth_date = people10mupdates.birth_date, ssn = people10mupdates.ssn, salary = people10mupdates.salary WHEN NOT MATCHED THEN INSERT ( id, first_name, middle_name, last_name, gender, birth_date, ssn, salary ) VALUES ( people10mupdates.id, people10mupdates.first_name, people10mupdates.middle_name, people10mupdates.last_name, people10mupdates.gender, people10mupdates.birth_date, people10mupdates.ssn, people10mupdates.salary ); -- data deduplication MERGE INTO logs USING new_deduped_logs ON logs.unique_id = new_deduped_logs.unique_id WHEN NOT MATCHED THEN INSERT *; -- data deduplication with additional predicate MERGE INTO logs USING new_deduped_logs ON logs.unique_id = new_deduped_logs.unique_id AND logs.date > current_date() - INTERVAL 7 DAYS WHEN NOT MATCHED AND new_deduped_logs.date > current_date() - INTERVAL 7 DAYS THEN INSERT *; -- SCD Type 2 using MERGE MERGE INTO 
customers USING ( SELECT updates.customer_id AS merge_unique_key, updates.* FROM updates UNION ALL SELECT NULL AS merge_unique_key, updates.* FROM updates INNER JOIN customers ON updates.customer_id = customers.customer_id WHERE customers.current = TRUE AND updates.address != customers.address ) staged_updates ON customers.customer_id = merge_unique_key WHEN MATCHED AND customers.current = TRUE AND customers.address != staged_updates.address THEN UPDATE SET current = FALSE, end_date = staged_updates.effective_date WHEN NOT MATCHED THEN INSERT( customer_id, address, current, effective_date, end_date ) VALUES( staged_updates.customer_id, staged_updates.address, TRUE, staged_updates.effective_date, NULL ); -- ingest CDC using MERGE MERGE INTO target t USING ( SELECT changes.unique_key, changes.latest.new_value AS new_value, changes.latest.deleted AS deleted FROM ( SELECT unique_key, max(struct(change_time, new_value, deleted)) AS latest FROM changes GROUP BY unique_key ) ) s ON s.unique_key = t.unique_key WHEN MATCHED AND s.deleted = TRUE THEN DELETE WHEN MATCHED THEN UPDATE SET unique_key = s.unique_key, record_value = s.new_value WHEN NOT MATCHED AND s.deleted = FALSE THEN INSERT ( unique_key, record_value ) VALUES ( unique_key, new_value ); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_merge.yml000066400000000000000000000600741503426445100246300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8ccf6bc5f00c697fe4ccc0dfb790d4fe997c5da83b3595b2c28583be696a103e file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: people10m - keyword: USING - table_reference: naked_identifier: people10mupdates - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: people10m - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - comma: ',' - set_clause: - column_reference: naked_identifier: first_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: first_name - comma: ',' - set_clause: - column_reference: naked_identifier: middle_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: middle_name - comma: ',' - set_clause: - column_reference: naked_identifier: last_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: last_name - comma: ',' - set_clause: - column_reference: naked_identifier: gender - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . 
- naked_identifier: gender - comma: ',' - set_clause: - column_reference: naked_identifier: birth_date - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: birth_date - comma: ',' - set_clause: - column_reference: naked_identifier: ssn - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: ssn - comma: ',' - set_clause: - column_reference: naked_identifier: salary - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: salary merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: middle_name - comma: ',' - column_reference: naked_identifier: last_name - comma: ',' - column_reference: naked_identifier: gender - comma: ',' - column_reference: naked_identifier: birth_date - comma: ',' - column_reference: naked_identifier: ssn - comma: ',' - column_reference: naked_identifier: salary - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: first_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: middle_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: last_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: gender - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: birth_date - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: ssn - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: salary - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: logs - keyword: USING - table_reference: naked_identifier: new_deduped_logs - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: logs - dot: . - naked_identifier: unique_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new_deduped_logs - dot: . - naked_identifier: unique_id - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: logs - keyword: USING - table_reference: naked_identifier: new_deduped_logs - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: logs - dot: . - naked_identifier: unique_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new_deduped_logs - dot: . 
- naked_identifier: unique_id - binary_operator: AND - column_reference: - naked_identifier: logs - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '>' - function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) - binary_operator: '-' - interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '7' date_part: DAYS - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: new_deduped_logs - dot: . - naked_identifier: date comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) binary_operator: '-' interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '7' date_part: DAYS - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: customers - keyword: USING - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: updates - dot: . - naked_identifier: customer_id alias_expression: alias_operator: keyword: AS naked_identifier: merge_unique_key - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: updates dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: updates - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: null_literal: 'NULL' alias_expression: alias_operator: keyword: AS naked_identifier: merge_unique_key - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: updates dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: updates join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: customers - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: updates - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: current - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - binary_operator: AND - column_reference: - naked_identifier: updates - dot: . - naked_identifier: address - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: address end_bracket: ) - alias_expression: naked_identifier: staged_updates - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: customers - dot: . 
- naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: merge_unique_key - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: current - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - binary_operator: AND - column_reference: - naked_identifier: customers - dot: . - naked_identifier: address - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: address - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: current comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - set_clause: - column_reference: naked_identifier: end_date - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: effective_date merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: current - comma: ',' - column_reference: naked_identifier: effective_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: customer_id - comma: ',' - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: address - comma: ',' - expression: boolean_literal: 'TRUE' - comma: ',' - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: effective_date - comma: ',' - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: target - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: unique_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: latest - dot: . - naked_identifier: new_value alias_expression: alias_operator: keyword: AS naked_identifier: new_value - comma: ',' - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: latest - dot: . 
- naked_identifier: deleted alias_expression: alias_operator: keyword: AS naked_identifier: deleted from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: unique_key - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: change_time - comma: ',' - expression: column_reference: naked_identifier: new_value - comma: ',' - expression: column_reference: naked_identifier: deleted - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: latest from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: changes groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: unique_key end_bracket: ) end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: unique_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t - dot: . - naked_identifier: unique_key - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: deleted comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: unique_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: unique_key - comma: ',' - set_clause: - column_reference: naked_identifier: record_value - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: new_value - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: deleted comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: unique_key - comma: ',' - column_reference: naked_identifier: record_value - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: unique_key - comma: ',' - expression: column_reference: naked_identifier: new_value - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_read_table.sql000066400000000000000000000017051503426445100256050ustar00rootroot00000000000000-- query table in the metastore SELECT a, b FROM default.people10m; -- query table by path SELECT a, b FROM DELTA.`/delta/people10m`; -- query old snapshot by timestamp SELECT a, b FROM default.people10m@20190101000000000; SELECT count(*) FROM DELTA.`/delta/people10m@20190101000000000`; SELECT count(*) FROM DELTA.`/delta/people10m` TIMESTAMP AS OF "2019-01-01"; SELECT count(*) FROM default.people10m TIMESTAMP AS OF "2019-01-01"; SELECT count(*) FROM default.people10m TIMESTAMP AS OF date_sub(current_date(), 1); SELECT count(*) FROM default.people10m TIMESTAMP AS OF "2019-01-01 01:30:00.000"; -- query old snapshot by version SELECT a, b FROM default.people10m@v123; SELECT count(*) FROM default.people10m VERSION AS OF 5238; SELECT count(*) FROM default.people10m@v5238; SELECT count(*) FROM DELTA.`/delta/people10m@v5238`; SELECT count(*) FROM DELTA.`/delta/people10m` VERSION AS OF 5238; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_read_table.yml000066400000000000000000000235641503426445100256160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b34c9265de799012319e8433fbae9c22e7904cbb2b895d145e5548d4b2c411c8 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m at_sign_literal: '@20190101000000000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m@20190101000000000`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - function: function_name: function_name_identifier: date_sub function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01 01:30:00.000"' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m at_sign_literal: '@v123' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m at_sign_literal: '@v5238' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m@v5238`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_restore.sql000066400000000000000000000006411503426445100252040ustar00rootroot00000000000000RESTORE TABLE DELTA.`/delta/people10m` TO TIMESTAMP AS OF "2019-01-01"; RESTORE TABLE default.people10m TO TIMESTAMP AS OF "2019-01-01"; RESTORE TABLE default.people10m TO TIMESTAMP AS OF date_sub(current_date(), 1); RESTORE TABLE default.people10m TO TIMESTAMP AS OF "2019-01-01 01:30:00.000"; RESTORE TABLE DELTA.`/delta/people10m` TO VERSION AS OF 5238; RESTORE TABLE default.people10m TO VERSION AS OF 5238; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_restore.yml000066400000000000000000000055361503426445100252160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7299bd43a17c91f5b044900e37bd0aa7d171cc22ef2a3f8476c92c2a2e1ea7b file: - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - function: function_name: function_name_identifier: date_sub function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_date function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01 01:30:00.000"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TO - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_update_table.sql000066400000000000000000000011041503426445100261450ustar00rootroot00000000000000UPDATE events SET event_type = 'click' WHERE event_type = 'clck'; UPDATE DELTA.`/data/events/` SET event_type = 'click' WHERE event_type = 'clck'; UPDATE all_events SET session_time = 0, ignored = true WHERE session_time < ( SELECT min(session_time) FROM good_events ); UPDATE orders AS t1 SET order_status = 'returned' WHERE EXISTS ( SELECT returned_orders.oid FROM returned_orders WHERE t1.oid = returned_orders.oid ); UPDATE events SET category = 'undefined' WHERE category NOT IN ( SELECT category FROM events2 WHERE date > '2001-01-01' ); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_update_table.yml000066400000000000000000000145651503426445100261660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1638106ddb2bfa2c05ba3b9583266fee08ffdedca0bdd8ca3ac6779e48aa6ad2 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: events set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'click'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'clck'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'click'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'clck'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: all_events set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - comma: ',' - set_clause: column_reference: naked_identifier: ignored comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' where_clause: keyword: WHERE expression: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: session_time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: good_events end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: orders alias_expression: alias_operator: keyword: AS naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: order_status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'returned'" where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_orders - dot: . 
- naked_identifier: oid end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: events set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'undefined'" where_clause: keyword: WHERE expression: - column_reference: naked_identifier: category - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: category from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2001-01-01'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_update_table_schema.sql000066400000000000000000000035421503426445100274750ustar00rootroot00000000000000-- add columns ALTER TABLE table_name ADD COLUMNS col_name STRING; ALTER TABLE table_name ADD COLUMNS (col_name STRING); ALTER TABLE table_name ADD COLUMNS col_name STRING, col_name2 INT; ALTER TABLE table_name ADD COLUMNS col_name STRING COMMENT "col_comment" FIRST; ALTER TABLE table_name ADD COLUMNS col_name STRING COMMENT "col_comment" FIRST, col_name2 INT COMMENT "col_2_comment" AFTER col_b_name; -- change column comment/ordering ALTER TABLE table_name CHANGE col_name_old col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment"; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment" FIRST; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment" AFTER col_a_name; ---- change column comment/ordering in a nested field ALTER TABLE table_name CHANGE col_name_1.nested_col_name nested_col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment"; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment" FIRST; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment" AFTER col_a_name; ALTER TABLE boxes CHANGE COLUMN col_b.a_key_name a_new_key_name STRING FIRST; -- replace columns ALTER TABLE table_name REPLACE COLUMNS ( col_name1 STRING COMMENT "col_comment1" ); ALTER TABLE boxes REPLACE COLUMNS ( col_c STRING, col_b STRUCT, col_a STRING ); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_update_table_schema.yml000066400000000000000000000244521503426445100275020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 209c3612244aa11fa9c18b0eea34c906d0f38105c9ae65330fa8867fdf2d23dd file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: col_name2 data_type: primitive_type: keyword: INT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment"' - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment"' - keyword: FIRST - comma: ',' - column_definition: column_reference: naked_identifier: col_name2 data_type: primitive_type: keyword: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_2_comment"' - keyword: AFTER - column_reference: naked_identifier: col_b_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - 
keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: AFTER - column_reference: naked_identifier: col_a_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: AFTER - column_reference: naked_identifier: col_a_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: boxes - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_b - dot: . 
- naked_identifier: a_key_name - column_reference: naked_identifier: a_new_key_name - data_type: primitive_type: keyword: STRING - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: REPLACE - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: naked_identifier: col_name1 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: boxes - keyword: REPLACE - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: col_c data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col_b data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: key2 - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: nested - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: key1 - colon: ':' - data_type: primitive_type: keyword: STRING - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: col_a data_type: primitive_type: keyword: STRING - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_vacuum.sql000066400000000000000000000006021503426445100250160ustar00rootroot00000000000000-- vacuum files not required by versions older than the default retention period VACUUM EVENTSTABLE; -- vacuum files in path-based table VACUUM '/data/events'; VACUUM DELTA.`/data/events/`; -- vacuum files not required by versions more than 100 hours old VACUUM DELTA.`/data/events/` RETAIN 100 HOURS; -- do dry run to get the list of files to be deleted VACUUM EVENTSTABLE DRY RUN; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_vacuum.yml000066400000000000000000000024141503426445100250230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dbff817cd1a620001851a8a94132627618bec4c4dc1c274a410d5686c0e73996 file: - statement: vacuum_statement: keyword: VACUUM table_reference: naked_identifier: EVENTSTABLE - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM quoted_literal: "'/data/events'" - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' - keyword: RETAIN - numeric_literal: '100' - date_part: HOURS - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - table_reference: naked_identifier: EVENTSTABLE - keyword: DRY - keyword: RUN - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_write_table.sql000066400000000000000000000005271503426445100260250ustar00rootroot00000000000000-- append INSERT INTO default.people10m SELECT * FROM more_people; -- overwrite INSERT OVERWRITE TABLE default.people10m SELECT * FROM more_people; -- with user-defined commit metadata SET spark.databricks.delta.commitInfo.userMetadata = "overwritten-for-fixing-incorrect-data"; INSERT OVERWRITE default.people10m SELECT * FROM more_people; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/delta_write_table.yml000066400000000000000000000054451503426445100260330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3701321590713305bb67b6c867b1d19318a4f46c706df55d2269bfdf8e1ae68b file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: databricks - dot: . - properties_naked_identifier: delta - dot: . - properties_naked_identifier: commitInfo - dot: . - properties_naked_identifier: userMetadata comparison_operator: raw_comparison_operator: '=' quoted_literal: '"overwritten-for-fixing-incorrect-data"' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_database.sql000066400000000000000000000002601503426445100257510ustar00rootroot00000000000000DESCRIBE DATABASE employees; DESCRIBE DATABASE EXTENDED employees; DESC DATABASE deployment; -- Keywords SCHEMA and DATABASE are interchangeable. 
DESCRIBE SCHEMA employees; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_database.yml000066400000000000000000000021211503426445100257510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e519773c48713b10590187317f664206adf50309a956a1b182dbd5bc46b34917 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - database_reference: naked_identifier: employees - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - keyword: EXTENDED - database_reference: naked_identifier: employees - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: DATABASE - database_reference: naked_identifier: deployment - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - database_reference: naked_identifier: employees - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_function.sql000066400000000000000000000001751503426445100260370ustar00rootroot00000000000000DESC FUNCTION abs; DESC FUNCTION EXTENDED abs; DESC FUNCTION max; DESC FUNCTION EXTENDED explode; DESCRIBE FUNCTION max; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_function.yml000066400000000000000000000024001503426445100260320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3891454a62d3b2442c3a3c9e281ef184011606ac2a54974bbba7ed19e69b4bcc file: - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: abs - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - keyword: EXTENDED - function_name: function_name_identifier: abs - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: max - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - keyword: EXTENDED - function_name: function_name_identifier: explode - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FUNCTION - function_name: function_name_identifier: max - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_query.sql000066400000000000000000000004701503426445100253550ustar00rootroot00000000000000DESCRIBE QUERY SELECT age, sum(age) AS sum_age FROM person GROUP BY age; DESCRIBE QUERY WITH all_names_cte AS (SELECT name FROM person) SELECT name FROM all_names_cte; DESC QUERY VALUES(100, 'John', 10000.20D) AS employee(id, name, salary); DESC QUERY TABLE person; DESCRIBE FROM person SELECT age; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_query.yml000066400000000000000000000102271503426445100253600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 660a139643479a998850c9a939599ba535d578c1d2b8a7c594ee512a67c0dd29 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: QUERY - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: QUERY - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: all_names_cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: all_names_cte - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: QUERY - statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '100' - comma: ',' - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: 10000.20D - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: employee bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: salary end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: QUERY - keyword: TABLE - table_reference: naked_identifier: person - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FROM - table_reference: naked_identifier: person - keyword: SELECT - column_reference: naked_identifier: age - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_table.sql000066400000000000000000000005171503426445100253010ustar00rootroot00000000000000DESCRIBE TABLE customer; DESCRIBE customer; DESCRIBE TABLE salesdb.customer; DESCRIBE TABLE EXTENDED customer; DESCRIBE TABLE EXTENDED customer PARTITION (state = 'AR'); DESCRIBE customer salesdb.customer.name; DESCRIBE TABLE customer salesdb.customer.name; DESCRIBE TABLE customer customer.name; DESCRIBE TABLE customer name; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/describe_table.yml000066400000000000000000000047501503426445100253060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 37851c6bdee0d627670cc081f33fa25b5a868d7381595ce389951905265f2487 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: keyword: DESCRIBE table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: - naked_identifier: salesdb - dot: . - naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - keyword: EXTENDED - table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - keyword: EXTENDED - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'AR'" end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - table_reference: naked_identifier: customer - naked_identifier: salesdb - dot: . - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: salesdb - dot: . - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: name - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/division_operator.sql000066400000000000000000000002201503426445100261000ustar00rootroot00000000000000select t1.year_earn/t1.avg_cost/t1.rcp_cnt as fourth_cnt from dw.test t1; select t1.year_earn/t1.avg_cost/t1.jar as fourth_cnt from dw.test t1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/division_operator.yml000066400000000000000000000046741503426445100261230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 311cae3d2250dde4c242e1586159ba83f13bef4b02a6d3cb956a9ec9a48003a7 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: year_earn - binary_operator: / - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: avg_cost - binary_operator: / - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: rcp_cnt alias_expression: alias_operator: keyword: as naked_identifier: fourth_cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dw - dot: . - naked_identifier: test alias_expression: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: year_earn - binary_operator: / - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: avg_cost - binary_operator: / - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: jar alias_expression: alias_operator: keyword: as naked_identifier: fourth_cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dw - dot: . - naked_identifier: test alias_expression: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_database.sql000066400000000000000000000004311503426445100251350ustar00rootroot00000000000000-- Drop DATABASE with all optional syntax DROP DATABASE IF EXISTS dbname RESTRICT; DROP DATABASE IF EXISTS dbname CASCADE; -- Drop the database and its tables DROP DATABASE inventory_db CASCADE; -- Drop the database using IF EXISTS DROP DATABASE IF EXISTS inventory_db CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_database.yml000066400000000000000000000024271503426445100251460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43308924f6aa5436ed6da39b87145968be7b8145199a2e025055588d2cf5f90f file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - keyword: RESTRICT - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - keyword: CASCADE - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_function.sql000066400000000000000000000003631503426445100252220ustar00rootroot00000000000000-- Drop FUNCTION with all optional syntax DROP TEMPORARY FUNCTION IF EXISTS function_name; -- Try to drop a permanent function which is not present DROP FUNCTION test_avg; -- Drop Temporary function DROP TEMPORARY FUNCTION IF EXISTS test_avg; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_function.yml000066400000000000000000000020411503426445100252170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 3a4bdb250ba955d87c4d9b617ba8b3b561b5701423e49f8fea6e679eb05ca8d3 file: - statement: drop_function_statement: - keyword: DROP - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function_name - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: test_avg - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: test_avg - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_schema.sql000066400000000000000000000004171503426445100246350ustar00rootroot00000000000000-- Drop schema with all optional syntax DROP SCHEMA IF EXISTS dbname RESTRICT; DROP SCHEMA IF EXISTS dbname CASCADE; -- Drop the database and its tables DROP SCHEMA inventory_db CASCADE; -- Drop the database using IF EXISTS DROP SCHEMA IF EXISTS inventory_db CASCADE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_schema.yml000066400000000000000000000023771503426445100246440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 763435cb4e9ae153438594ebc352ed261a944aa2bcb99f482894dd5541f500f5 file: - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: dbname - keyword: RESTRICT - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: dbname - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_table.sql000066400000000000000000000006211503426445100244610ustar00rootroot00000000000000-- Drop TABLE with all optional syntax DROP TABLE IF EXISTS table_identifier; -- Assumes a table named `employeetable` exists. DROP TABLE employeetable; -- Assumes a table named `employeetable` exists in the `userdb` database DROP TABLE userdb.employeetable; -- Assumes a table named `employeetable` does not exist. Try with IF EXISTS -- will not throw exception DROP TABLE IF EXISTS employeetable; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_table.yml000066400000000000000000000022611503426445100244650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: d098e81483f0e94616b601124f1e3ae0737dd24ffa4e3b314f160b8ac0bc6c92 file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: employeetable - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: userdb - dot: . - naked_identifier: employeetable - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: employeetable - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_view.sql000066400000000000000000000006061503426445100243470ustar00rootroot00000000000000-- Drop VIEW with all optional syntax DROP VIEW IF EXISTS view_identifier; -- Assumes a view named `employeeView` exists. DROP VIEW employeeview; -- Assumes a view named `employeeView` exists in the `userdb` database DROP VIEW userdb.employeeview; -- Assumes a view named `employeeView` does not exist. Try with IF EXISTS -- will not throw exception DROP VIEW IF EXISTS employeeview; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/drop_view.yml000066400000000000000000000022451503426445100243520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab where_clause: keyword: WHERE expression: not_operator: '!' bracketed: start_bracket: ( expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/explain.sql000066400000000000000000000030261503426445100240100ustar00rootroot00000000000000EXPLAIN SELECT a, b FROM person; EXPLAIN SELECT TRANSFORM (zip_code, name, age) USING 'cat' AS (a, b, c) FROM person WHERE zip_code > 94511; EXPLAIN ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); EXPLAIN ALTER TABLE student RENAME TO studentinfo; EXPLAIN ALTER VIEW view_identifier RENAME TO view_identifier; EXPLAIN CREATE DATABASE IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value"); EXPLAIN CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS function_name AS "class_name" USING FILE "resource_locations"; EXPLAIN CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV; EXPLAIN CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC; EXPLAIN CREATE TABLE student_dupli LIKE student ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE TBLPROPERTIES ('owner' = 'xxxx'); EXPLAIN CREATE VIEW experienced_employee_extended AS SELECT a FROM experienced_employee; EXPLAIN DROP DATABASE IF EXISTS dbname; EXPLAIN DROP FUNCTION test_avg; EXPLAIN USE database_name; EXPLAIN TRUNCATE TABLE student PARTITION(age = 10); EXPLAIN MSCK REPAIR TABLE table_identifier ADD PARTITIONS; EXPLAIN REFRESH TABLE tbl1; EXPLAIN REFRESH FUNCTION func1; EXPLAIN LOAD DATA LOCAL INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; EXPLAIN INSERT INTO TABLE students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111111); EXPLAIN DROP VIEW IF EXISTS view_identifier; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/explain.yml000066400000000000000000000302511503426445100240120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 17716d4fe5cd0261401bbc935d9e7ad8df5ad3186eb565213f311b4ec17682ea file: - statement: explain_statement: keyword: EXPLAIN statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: student - keyword: RENAME - keyword: TO - table_reference: naked_identifier: studentinfo - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: RENAME - keyword: TO - table_reference: naked_identifier: view_identifier - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name_identifier: function_name - keyword: AS - quoted_literal: '"class_name"' - keyword: USING - file_keyword: FILE - quoted_literal: '"resource_locations"' - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id 
data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: column_reference: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'owner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_extended - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: test_avg - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: use_statement: keyword: USE database_reference: naked_identifier: database_name - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: student - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: naked_identifier: tbl1 - statement_terminator: ; - 
statement: explain_statement: keyword: EXPLAIN statement: refresh_statement: - keyword: REFRESH - keyword: FUNCTION - function_name: function_name_identifier: func1 - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: students - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - comma: ',' - expression: numeric_literal: '111111' - end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: view_identifier - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/from_supported_tvf.sql000066400000000000000000000007301503426445100262760ustar00rootroot00000000000000--TVFs that are supported in a `FROM` clause -- -- range call with end SELECT id FROM range(6 + cos(3)); SELECT id FROM range(5); -- range call with start and end SELECT id FROM range(5, 10); -- range call with start, end and step SELECT id FROM range(5, 10, 2); -- range call with start, end, step, and numPartitions SELECT id FROM range(0, 10, 2, 200); -- range call with a table alias SELECT test.id FROM range(5, 8) AS test; SELECT test.id FROM range(5, 8) test; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/from_supported_tvf.yml000066400000000000000000000145311503426445100263040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 57f595d5e9b6dc3cc41f15cd6ab02211d98b48ab693fc1ba66d7f8f9016603ca file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: start_bracket: ( expression: numeric_literal: '6' binary_operator: + function: function_name: function_name_identifier: cos function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '200' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: test - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) alias_expression: naked_identifier: test - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_alter_table.sql000066400000000000000000000076131503426445100263140ustar00rootroot00000000000000-- Iceberg Spark DDL Alter Statements https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table ALTER TABLE prod.db.sample RENAME TO prod.db.new_name; ALTER TABLE prod.db.sample SET TBLPROPERTIES ( 'read.split.target-size'='268435456' ); ALTER TABLE prod.db.sample UNSET TBLPROPERTIES ('read.split.target-size'); ALTER TABLE prod.db.sample SET TBLPROPERTIES ( 'comment' = 'A table comment.' ); ALTER TABLE prod.db.sample ADD COLUMNS ( new_column string comment 'new_column docs' ); -- create a struct column ALTER TABLE prod.db.sample ADD COLUMN point struct<x: double, y: double>; -- add a field to the struct ALTER TABLE prod.db.sample ADD COLUMN point.z double; -- create a nested array column of struct ALTER TABLE prod.db.sample ADD COLUMN points array<struct<x: double, y: double>>; -- add a field to the struct within an array. Using keyword 'element' to access the array's element column. ALTER TABLE prod.db.sample ADD COLUMN points.element.z double; -- create a map column of struct key and struct value ALTER TABLE prod.db.sample ADD COLUMN points map<struct<x: int>, struct<a: int>>; -- add a field to the value struct in a map. Using keyword 'value' to access the map's value column. ALTER TABLE prod.db.sample ADD COLUMN points.value.b int; ALTER TABLE prod.db.sample ADD COLUMN new_column bigint AFTER other_column; ALTER TABLE prod.db.sample ADD COLUMN nested.new_column bigint FIRST; ALTER TABLE prod.db.sample RENAME COLUMN data TO payload; ALTER TABLE prod.db.sample RENAME COLUMN location.lat TO latitude; ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double; ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double COMMENT 'unit is bytes per second'; ALTER TABLE prod.db.sample ALTER COLUMN measurement COMMENT 'unit is kilobytes per second'; ALTER TABLE prod.db.sample ALTER COLUMN col FIRST; ALTER TABLE prod.db.sample ALTER COLUMN nested.col AFTER other_col; ALTER TABLE prod.db.sample ALTER COLUMN id DROP NOT NULL; ALTER TABLE prod.db.sample DROP COLUMN id; ALTER TABLE prod.db.sample DROP COLUMN point.z; ALTER TABLE prod.db.sample ADD PARTITION FIELD catalog; -- identity transform ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id); ALTER TABLE prod.db.sample ADD PARTITION FIELD truncate(4, data); ALTER TABLE prod.db.sample ADD PARTITION FIELD years(ts); -- use optional AS keyword to specify a custom name for the partition field ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id) AS shard; ALTER TABLE prod.db.sample DROP PARTITION FIELD catalog; ALTER TABLE prod.db.sample DROP PARTITION FIELD bucket(16, id); ALTER TABLE prod.db.sample DROP PARTITION FIELD truncate(4, data); ALTER TABLE prod.db.sample DROP PARTITION FIELD years(ts); ALTER TABLE prod.db.sample DROP PARTITION FIELD shard; ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts); -- use optional AS keyword to specify a custom name for the new partition field ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts) AS day_of_ts; ALTER TABLE prod.db.sample WRITE ORDERED BY category, id; -- use optional ASC/DESC
keyword to specify sort order of each field (default ASC) ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC, id DESC; -- use optional NULLS FIRST/NULLS LAST keyword to specify null order of each field (default FIRST) ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC NULLS LAST, id DESC NULLS FIRST; ALTER TABLE prod.db.sample WRITE LOCALLY ORDERED BY category, id; ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION; ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION LOCALLY ORDERED BY category, id; -- single column ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id; -- multiple columns ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id, data; -- single column ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id; -- multiple columns ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id, data sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_alter_table.yml000066400000000000000000000565671503426445100263320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 774d35657222c080220d9defa262d46afe2d820f54be34fdb054573213520698 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: RENAME - keyword: TO - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'read.split.target-size'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'268435456'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'read.split.target-size'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'A table comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - keyword: ADD - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: new_column data_type: primitive_type: keyword: string column_constraint_segment: comment_clause: keyword: comment quoted_literal: "'new_column docs'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: point data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: x - colon: ':' - data_type: primitive_type: keyword: double - comma: ',' - naked_identifier: y - colon: ':' - data_type: primitive_type: keyword: double - end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: point - dot: . - naked_identifier: z data_type: primitive_type: keyword: double - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: points data_type: array_type: keyword: array start_angle_bracket: < data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: x - colon: ':' - data_type: primitive_type: keyword: double - comma: ',' - naked_identifier: y - colon: ':' - data_type: primitive_type: keyword: double - end_angle_bracket: '>' end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: points - dot: . - naked_identifier: element - dot: . - naked_identifier: z data_type: primitive_type: keyword: double - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: points data_type: - keyword: map - start_angle_bracket: < - data_type: struct_type: keyword: struct struct_type_schema: start_angle_bracket: < naked_identifier: x colon: ':' data_type: primitive_type: keyword: int end_angle_bracket: '>' - comma: ',' - data_type: struct_type: keyword: struct struct_type_schema: start_angle_bracket: < naked_identifier: a colon: ':' data_type: primitive_type: keyword: int end_angle_bracket: '>' - end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: points - dot: . - naked_identifier: value - dot: . 
- naked_identifier: b data_type: primitive_type: keyword: int - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: new_column data_type: primitive_type: keyword: bigint - keyword: AFTER - column_reference: naked_identifier: other_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: nested - dot: . - naked_identifier: new_column data_type: primitive_type: keyword: bigint - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: data - keyword: TO - column_reference: naked_identifier: payload - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: RENAME - keyword: COLUMN - column_reference: - naked_identifier: location - dot: . - naked_identifier: lat - keyword: TO - column_reference: naked_identifier: latitude - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: measurement - keyword: TYPE - data_type: primitive_type: keyword: double - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: measurement - keyword: TYPE - data_type: primitive_type: keyword: double - keyword: COMMENT - quoted_literal: "'unit is bytes per second'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: measurement - keyword: COMMENT - quoted_literal: "'unit is kilobytes per second'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: - naked_identifier: nested - dot: . 
- naked_identifier: col - keyword: AFTER - column_reference: naked_identifier: other_col - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: id - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: COLUMN - column_reference: - naked_identifier: point - dot: . - naked_identifier: z - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: PARTITION - keyword: FIELD - column_reference: naked_identifier: catalog - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: bucket bracketed: start_bracket: ( numeric_literal: '16' comma: ',' column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: truncate bracketed: start_bracket: ( numeric_literal: '4' comma: ',' column_reference: naked_identifier: data end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: years bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: bucket bracketed: start_bracket: ( numeric_literal: '16' comma: ',' column_reference: naked_identifier: id end_bracket: ) - keyword: AS - naked_identifier: shard - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: PARTITION - keyword: FIELD - column_reference: naked_identifier: catalog - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - keyword: DROP - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: bucket bracketed: start_bracket: ( numeric_literal: '16' comma: ',' column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: truncate bracketed: start_bracket: ( numeric_literal: '4' comma: ',' column_reference: naked_identifier: data end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: PARTITION - keyword: FIELD - iceberg_transformation: keyword: years bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: PARTITION - keyword: FIELD - column_reference: naked_identifier: shard - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: REPLACE - keyword: PARTITION - keyword: FIELD - column_reference: naked_identifier: ts_day - keyword: WITH - iceberg_transformation: keyword: days bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: REPLACE - keyword: PARTITION - keyword: FIELD - column_reference: naked_identifier: ts_day - keyword: WITH - iceberg_transformation: keyword: days bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - keyword: AS - naked_identifier: day_of_ts - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: WRITE - keyword: ORDERED - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: WRITE - keyword: ORDERED - keyword: BY - column_reference: naked_identifier: category - keyword: ASC - comma: ',' - column_reference: naked_identifier: id - keyword: DESC - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - keyword: WRITE - keyword: ORDERED - keyword: BY - column_reference: naked_identifier: category - keyword: ASC - keyword: NULLS - keyword: LAST - comma: ',' - column_reference: naked_identifier: id - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: WRITE - keyword: LOCALLY - keyword: ORDERED - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: WRITE - keyword: DISTRIBUTED - keyword: BY - keyword: PARTITION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: WRITE - keyword: DISTRIBUTED - keyword: BY - keyword: PARTITION - keyword: LOCALLY - keyword: ORDERED - keyword: BY - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: IDENTIFIER - keyword: FIELDS - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: IDENTIFIER - keyword: FIELDS - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: data - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: DROP - keyword: IDENTIFIER - keyword: FIELDS - column_reference: naked_identifier: id - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - keyword: DROP - keyword: IDENTIFIER - keyword: FIELDS - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: data sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_create_table.sql000066400000000000000000000011241503426445100264370ustar00rootroot00000000000000-- Iceberg Spark DDL Create Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#create-table CREATE TABLE prod.db.sample ( id bigint COMMENT 'unique id', data string) USING iceberg; CREATE TABLE prod.db.sample ( id bigint, data string, category string) USING iceberg PARTITIONED BY (category); CREATE TABLE prod.db.sample ( id bigint, data string, category string, ts timestamp) USING iceberg PARTITIONED BY (bucket(16, id), days(ts), category); CREATE TABLE prod.db.sample USING iceberg PARTITIONED BY (part) TBLPROPERTIES ('key'='value'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_create_table.yml000066400000000000000000000117021503426445100264440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc5ed8f11c2fee1bddd19be2b9c59a0e1519b63496b9d9f54e9418b18b4abb23 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: bigint column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'unique id'" - comma: ',' - column_definition: column_reference: naked_identifier: data data_type: primitive_type: keyword: string - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: iceberg - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: bigint - comma: ',' - column_definition: column_reference: naked_identifier: data data_type: primitive_type: keyword: string - comma: ',' - column_definition: column_reference: naked_identifier: category data_type: primitive_type: keyword: string - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: category end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: id data_type: primitive_type: keyword: bigint - comma: ',' - column_definition: column_reference: naked_identifier: data data_type: primitive_type: keyword: string - comma: ',' - column_definition: column_reference: naked_identifier: category data_type: primitive_type: keyword: string - comma: ',' - column_definition: column_reference: naked_identifier: ts data_type: primitive_type: keyword: timestamp - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - iceberg_transformation: keyword: bucket bracketed: start_bracket: ( numeric_literal: '16' comma: ',' column_reference: naked_identifier: id end_bracket: ) - comma: ',' - iceberg_transformation: keyword: days bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - comma: ',' - column_reference: naked_identifier: category - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: part end_bracket: ) - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'key'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_replace_table.sql000066400000000000000000000004751503426445100266170ustar00rootroot00000000000000-- Iceberg Spark DDL Replace Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select REPLACE TABLE prod.db.sample USING iceberg; REPLACE TABLE prod.db.sample USING iceberg PARTITIONED BY (part) TBLPROPERTIES ('key'='value'); CREATE OR REPLACE TABLE prod.db.sample USING iceberg; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/iceberg_replace_table.yml000066400000000000000000000036211503426445100266150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 30aed909021d9a27f017ab07cbb50a8b9fc6ef1a2552fbba657455b43ddebaaf file: - statement: replace_table_statement: - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - using_clause: keyword: USING data_source_format: keyword: iceberg - statement_terminator: ; - statement: replace_table_statement: - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: .
- naked_identifier: sample - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: part end_bracket: ) - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'key'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - using_clause: keyword: USING data_source_format: keyword: iceberg - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_overwrite_directory.sql000066400000000000000000000014521503426445100300470ustar00rootroot00000000000000INSERT OVERWRITE DIRECTORY '/tmp/destination' USING PARQUET OPTIONS (col1 = "1", col2 = "2", col3 = 'test', "user" = "a person") SELECT a FROM test_table; INSERT OVERWRITE DIRECTORY USING PARQUET OPTIONS ( path = '/tmp/destination', col1 = "1", col2 = "2", col3 = 'test' ) SELECT a FROM test_table; INSERT OVERWRITE DIRECTORY USING PARQUET OPTIONS (path '/tmp/destination', col1 1, col2 2, col3 'test') SELECT a FROM test_table; INSERT OVERWRITE DIRECTORY '/tmp/destination' USING PARQUET OPTIONS (col1 1, col2 2, col3 'test') SELECT a FROM test_table; WITH cte AS ( SELECT * FROM test_table ) INSERT OVERWRITE DIRECTORY 'destination_dir/path_to' USING CSV OPTIONS ( sep '\t', header 'true', compression 'none', emptyValue '' ) SELECT /*+ COALESCE(1) */ * FROM cte; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml000066400000000000000000000200331503426445100300450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 19bb961922b50927c64f2236dd3634ef20b195c69dfa091278d51d33374d3fb3 file: - statement: insert_overwrite_directory_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: DIRECTORY - quoted_literal: "'/tmp/destination'" - keyword: USING - data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"1"' - comma: ',' - property_name_identifier: properties_naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"2"' - comma: ',' - property_name_identifier: properties_naked_identifier: col3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test'" - comma: ',' - property_name_identifier: quoted_identifier: '"user"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"a person"' - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: insert_overwrite_directory_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: DIRECTORY - keyword: USING - data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: path - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/tmp/destination'" - comma: ',' - property_name_identifier: properties_naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"1"' - comma: ',' - property_name_identifier: properties_naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"2"' - comma: ',' - property_name_identifier: properties_naked_identifier: col3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test'" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: insert_overwrite_directory_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: DIRECTORY - keyword: USING - data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: path - quoted_literal: "'/tmp/destination'" - comma: ',' - property_name_identifier: properties_naked_identifier: col1 - numeric_literal: '1' - comma: ',' - property_name_identifier: properties_naked_identifier: col2 - numeric_literal: '2' - comma: ',' - property_name_identifier: properties_naked_identifier: col3 - quoted_literal: "'test'" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: insert_overwrite_directory_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: DIRECTORY - quoted_literal: "'/tmp/destination'" - keyword: USING - data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: 
col1 - numeric_literal: '1' - comma: ',' - property_name_identifier: properties_naked_identifier: col2 - numeric_literal: '2' - comma: ',' - property_name_identifier: properties_naked_identifier: col3 - quoted_literal: "'test'" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table end_bracket: ) insert_overwrite_directory_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: DIRECTORY - quoted_literal: "'destination_dir/path_to'" - keyword: USING - data_source_format: keyword: CSV - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: sep - quoted_literal: "'\\t'" - comma: ',' - property_name_identifier: properties_naked_identifier: header - quoted_literal: "'true'" - comma: ',' - property_name_identifier: properties_naked_identifier: compression - quoted_literal: "'none'" - comma: ',' - property_name_identifier: properties_naked_identifier: emptyValue - quoted_literal: "''" - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: COALESCE function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_hint: '*/' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_overwrite_directory_hive.sql000066400000000000000000000003311503426445100310550ustar00rootroot00000000000000INSERT OVERWRITE LOCAL DIRECTORY '/tmp/destination' STORED AS ORC SELECT * FROM test_table; INSERT OVERWRITE LOCAL DIRECTORY '/tmp/destination' ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' SELECT * FROM test_table; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_overwrite_directory_hive.yml000066400000000000000000000036321503426445100310660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ae8495d1e0bc05b141ca33ffbca299a332118c02725468fad98933f43c9215d file: - statement: insert_overwrite_directory_hive_fmt_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: LOCAL - keyword: DIRECTORY - quoted_literal: "'/tmp/destination'" - keyword: STORED - keyword: AS - keyword: ORC - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: insert_overwrite_directory_hive_fmt_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: LOCAL - keyword: DIRECTORY - quoted_literal: "'/tmp/destination'" - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_table.sql000066400000000000000000000034401503426445100250230ustar00rootroot00000000000000-- Single Row Insert Using a VALUES Clause INSERT INTO TABLE students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111111); INSERT INTO students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111111); INSERT OVERWRITE students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111111); -- Multi-Row Insert Using a VALUES Clause INSERT INTO students VALUES ('Bob Brown', '456 Taylor St, Cupertino', 222222), ('Cathy Johnson', '789 Race Ave, Palo Alto', 333333); INSERT OVERWRITE students VALUES ('Bob Brown', '456 Taylor St, Cupertino', 222222), ('Cathy Johnson', '789 Race Ave, Palo Alto', 333333); -- Insert Using a SELECT Statement INSERT INTO students PARTITION (student_id = 444444) SELECT name, address FROM persons WHERE name = "Dora Williams"; INSERT OVERWRITE students PARTITION (student_id = 444444) SELECT name, address FROM persons WHERE name = "Dora Williams"; -- Insert Using a TABLE Statement INSERT INTO students TABLE visiting_students; INSERT OVERWRITE students TABLE visiting_students; -- Insert Using a FROM Statement INSERT INTO students FROM applicants SELECT name, address, id WHERE qualified = TRUE; INSERT OVERWRITE students FROM applicants SELECT name, address, id WHERE qualified = TRUE; -- Insert Using a Typed Date Literal for a Partition Column Value INSERT INTO students PARTITION (birthday = DATE '2019-01-02') VALUES ('Amy Smith', '123 Park Ave, San Jose'); INSERT OVERWRITE students PARTITION (birthday = DATE '2019-01-02') VALUES ('Amy Smith', '123 Park Ave, San Jose'); -- Insert with both a partition spec and a column list INSERT INTO students PARTITION (student_id = 11215017) (address, name) VALUES ('Hangzhou, China', 'Kent Yao Jr.'); INSERT OVERWRITE students PARTITION (student_id = 11215017) (address, name) VALUES ('Hangzhou, China', 'Kent Yao Jr.'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/insert_table.yml000066400000000000000000000260061503426445100250300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 968d5d3c702a1448221fa10c22f49b99596abfe28865007f84ec6b6eb8398fdb file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: students - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - comma: ',' - expression: numeric_literal: '111111' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - comma: ',' - expression: numeric_literal: '111111' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - comma: ',' - expression: numeric_literal: '111111' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'Bob Brown'" - comma: ',' - expression: quoted_literal: "'456 Taylor St, Cupertino'" - comma: ',' - expression: numeric_literal: '222222' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Cathy Johnson'" - comma: ',' - expression: quoted_literal: "'789 Race Ave, Palo Alto'" - comma: ',' - expression: numeric_literal: '333333' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'Bob Brown'" - comma: ',' - expression: quoted_literal: "'456 Taylor St, Cupertino'" - comma: ',' - expression: numeric_literal: '222222' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Cathy Johnson'" - comma: ',' - expression: quoted_literal: "'789 Race Ave, Palo Alto'" - comma: ',' - expression: numeric_literal: '333333' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '444444' end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: address from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: persons where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Dora Williams"' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: 
students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '444444' end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: address from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: persons where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: '"Dora Williams"' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - keyword: TABLE - table_reference: naked_identifier: visiting_students - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - keyword: TABLE - table_reference: naked_identifier: visiting_students - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - keyword: FROM - table_reference: naked_identifier: applicants - keyword: SELECT - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: id - where_clause: keyword: WHERE expression: column_reference: naked_identifier: qualified comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - keyword: FROM - table_reference: naked_identifier: applicants - keyword: SELECT - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: id - where_clause: keyword: WHERE expression: column_reference: naked_identifier: qualified comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: birthday comparison_operator: raw_comparison_operator: '=' keyword: DATE date_constructor_literal: "'2019-01-02'" end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: birthday comparison_operator: raw_comparison_operator: '=' keyword: DATE date_constructor_literal: "'2019-01-02'" end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: 
raw_comparison_operator: '=' numeric_literal: '11215017' end_bracket: ) - bracketed: - start_bracket: ( - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Hangzhou, China'" - comma: ',' - expression: quoted_literal: "'Kent Yao Jr.'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '11215017' end_bracket: ) - bracketed: - start_bracket: ( - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Hangzhou, China'" - comma: ',' - expression: quoted_literal: "'Kent Yao Jr.'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/issue_3484.sql000066400000000000000000000002371503426445100241630ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/3484 WITH cte AS ( SELECT * FROM source WHERE col1 = 0 DISTRIBUTE BY col1 ), SELECT * FROM cte sqlfluff-3.4.2/test/fixtures/dialects/sparksql/issue_3484.yml000066400000000000000000000036421503426445100241700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ddf3f513580f1ad645ae912a5d1707ed6e1c1a84086d532e5476cde35670a293 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 end_bracket: ) comma: ',' select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte sqlfluff-3.4.2/test/fixtures/dialects/sparksql/join_types.sql000066400000000000000000000052711503426445100245370ustar00rootroot00000000000000-- inner join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee INNER JOIN department ON employee.deptno = department.deptno; -- left join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT JOIN department ON employee.deptno = department.deptno; -- right join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT JOIN department ON employee.deptno = department.deptno; -- full join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL OUTER JOIN department ON employee.deptno = department.deptno; -- cross join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee CROSS JOIN department; -- semi join SELECT employee.id -- noqa: L031 FROM employee SEMI JOIN department ON employee.deptno = department.deptno; SELECT employee.id FROM employee LEFT SEMI JOIN department ON employee.deptno = department.deptno; -- anti join SELECT employee.id FROM employee ANTI JOIN department ON employee.deptno = department.deptno; SELECT employee.id FROM employee LEFT ANTI JOIN department ON employee.deptno = department.deptno; -- natural joins SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL INNER JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL LEFT JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL RIGHT JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL FULL JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL FULL OUTER JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL CROSS JOIN department; SELECT employee.id FROM employee NATURAL SEMI JOIN department; SELECT employee.id FROM employee NATURAL LEFT SEMI JOIN department; SELECT employee.id FROM employee NATURAL ANTI JOIN department; SELECT employee.id FROM employee NATURAL LEFT ANTI JOIN department; -- Multiple join SELECT table1.a, table2.b, table3.c FROM table1 INNER JOIN table2 ON 
table1.a = table2.a INNER JOIN table3 ON table1.a = table3.a sqlfluff-3.4.2/test/fixtures/dialects/sparksql/join_types.yml000066400000000000000000000647171503426445100245530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e7134cf8b6e1843c7a784f6ca4de6637a21009117c00cd69bbcec9c8d25e306b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . 
- naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . 
- naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: LEFT - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: table2 - dot: . 
- naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: table3 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: a - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table3 - dot: . - naked_identifier: a sqlfluff-3.4.2/test/fixtures/dialects/sparksql/list_file.sql000066400000000000000000000003511503426445100243200ustar00rootroot00000000000000LIST FILE "/path/to/file/abc.txt"; LIST FILE '/another/test.txt'; LIST FILE "/path with space/abc.txt"; LIST FILE "/path/to/some/directory"; LIST FILES "/path with space/cde.txt" '/path with space/fgh.txt'; LIST FILE /tmp/test; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/list_file.yml000066400000000000000000000026531503426445100243310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1df40570a22c1428d23345cd18d9af0b2f13a8c0f0359bf88c62c8f7bb126263 file: - statement: list_file_statement: keyword: LIST file_keyword: FILE quoted_literal: '"/path/to/file/abc.txt"' - statement_terminator: ; - statement: list_file_statement: keyword: LIST file_keyword: FILE quoted_literal: "'/another/test.txt'" - statement_terminator: ; - statement: list_file_statement: keyword: LIST file_keyword: FILE quoted_literal: '"/path with space/abc.txt"' - statement_terminator: ; - statement: list_file_statement: keyword: LIST file_keyword: FILE quoted_literal: '"/path/to/some/directory"' - statement_terminator: ; - statement: list_file_statement: - keyword: LIST - file_keyword: FILES - quoted_literal: '"/path with space/cde.txt"' - quoted_literal: "'/path with space/fgh.txt'" - statement_terminator: ; - statement: list_file_statement: keyword: LIST file_keyword: FILE file_literal: - slash: / - path_segment: tmp - slash: / - path_segment: test - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/list_jar.sql000066400000000000000000000007441503426445100241630ustar00rootroot00000000000000LIST JAR "/path/to/some.jar"; LIST JAR '/some/other.jar'; LIST JAR "/path with space/abc.jar"; LIST JARS "/path with space/def.jar" '/path with space/ghi.jar'; LIST JAR "ivy://group:module:version"; LIST JAR "ivy://group:module:version?transitive=false"; LIST JAR "ivy://group:module:version?transitive=true"; LIST JAR "ivy://group:module:version?exclude=group:module&transitive=true"; -- NB: Non-quoted paths are not supported in SQLFluff currently --LIST JAR /tmp/test.jar; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/list_jar.yml000066400000000000000000000033541503426445100241650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 790b0bd95ba548ca47a18181ef96f7ff231a61e686fa3226fd5ea78ea43b1d19 file: - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"/path/to/some.jar"' - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: "'/some/other.jar'" - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"/path with space/abc.jar"' - statement_terminator: ; - statement: list_jar_statement: - keyword: LIST - file_keyword: JARS - quoted_literal: '"/path with space/def.jar"' - quoted_literal: "'/path with space/ghi.jar'" - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"ivy://group:module:version"' - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=false"' - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=true"' - statement_terminator: ; - statement: list_jar_statement: keyword: LIST file_keyword: JAR quoted_literal: '"ivy://group:module:version?exclude=group:module&transitive=true"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/literals.sql000066400000000000000000000025141503426445100241700ustar00rootroot00000000000000-- string literals SELECT 'Hello, World!' 
AS col; SELECT 'SPARK SQL' AS col; SELECT "it's $10." AS col; -- binary literal SELECT X'123456' AS col; -- null literal SELECT NULL AS col; -- boolean literal SELECT TRUE AS col; SELECT FALSE AS col; -- numeric literal SELECT -2147483648 AS col; SELECT 9223372036854775807l AS col; SELECT -32Y AS col; SELECT 482S AS col; -- fractional literals SELECT 12.578 AS col; SELECT -0.1234567 AS col; SELECT -.1234567 AS col; SELECT 123. AS col; SELECT 123.BD AS col; SELECT 5E2 AS col; SELECT 5D AS col; SELECT -5BD AS col; SELECT 12.578e-2d AS col; SELECT -.1234567E+2BD AS col; SELECT +3.e+3 AS col; SELECT -3.E-3D AS col; -- datetime literal SELECT DATE '1997' AS col; SELECT DATE '1997-01' AS col; SELECT DATE '2011-11-11' AS col; SELECT TIMESTAMP '1997-01-31 09:26:56.123' AS col; SELECT TIMESTAMP '1997-01-31 09:26:56.66666666UTC+08:00' AS col; SELECT TIMESTAMP '1997-01' AS col; -- ansi interval literal SELECT INTERVAL '2-3' YEAR TO MONTH AS col; SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col; -- multi-units interval syntax SELECT INTERVAL 3 YEAR AS col; SELECT INTERVAL -2 HOUR '3' MINUTE AS col; SELECT INTERVAL '1 YEAR 2 DAYS 3 HOURS'; SELECT INTERVAL 1 YEARS 2 MONTH 3 WEEK 4 DAYS 5 HOUR 6 MINUTES 7 SECOND 8 MILLISECOND 9 MICROSECONDS AS col; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/literals.yml000066400000000000000000000312021503426445100241660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da5ca886f22fab6e6980f8be6e0f744ad5c02822ec2194e5e2a1010e3919e824 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Hello, World!'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'SPARK SQL'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"it's $10.\"" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bytes_quoted_literal: "X'123456'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: null_literal: 'NULL' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: boolean_literal: 'TRUE' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: boolean_literal: 'FALSE' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '2147483648' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: 
SELECT select_clause_element: numeric_literal: 9223372036854775807l alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: 32Y alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 482S alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '12.578' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '0.1234567' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '.1234567' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '123.' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 123.BD alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 5E2 alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 5D alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: 5BD alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 12.578e-2d alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: .1234567E+2BD alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: + numeric_literal: '3.e+3' alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: 3.E-3D alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATE date_constructor_literal: "'1997'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: 
SELECT select_clause_element: keyword: DATE date_constructor_literal: "'1997-01'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATE date_constructor_literal: "'2011-11-11'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01-31 09:26:56.123'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01-31 09:26:56.66666666UTC+08:00'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: - signed_quoted_literal: "'2-3'" - date_part: YEAR - keyword: TO - date_part: MONTH alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: - sign_indicator: '-' - signed_quoted_literal: "'20 15:40:32.99899999'" - date_part: DAY - keyword: TO - date_part: SECOND alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '3' date_part: YEAR alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - interval_literal: sign_indicator: '-' numeric_literal: '2' date_part: HOUR - interval_literal: signed_quoted_literal: "'3'" date_part: MINUTE alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: INTERVAL date_constructor_literal: "'1 YEAR 2 DAYS 3 HOURS'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - interval_literal: numeric_literal: '1' date_part: YEARS - interval_literal: numeric_literal: '2' date_part: MONTH - interval_literal: numeric_literal: '3' date_part: WEEK - interval_literal: numeric_literal: '4' date_part: DAYS - interval_literal: numeric_literal: '5' date_part: HOUR - interval_literal: numeric_literal: '6' date_part: MINUTES - interval_literal: numeric_literal: '7' date_part: SECOND - interval_literal: numeric_literal: '8' date_part: MILLISECOND - interval_literal: numeric_literal: '9' date_part: MICROSECONDS alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/load_data.sql000066400000000000000000000015711503426445100242630ustar00rootroot00000000000000-- Assuming the students table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH '/user/hive/warehouse/test_partition/c2=2/c3=3' OVERWRITE INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); -- Assuming the students table is in '/user/hive/warehouse/' LOAD DATA INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH '/user/hive/warehouse/test_partition/c2=2/c3=3' INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA INPATH '/user/hive/warehouse/test_partition/c2=2/c3=3' INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/load_data.yml000066400000000000000000000063071503426445100242670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2cab5a10b6c4d69777e2d5041e104a2b2679b209b69193b9eb3904fff44fef97 file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: 
test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/merge_into.sql000066400000000000000000000014221503426445100244760ustar00rootroot00000000000000-- Merge using Table MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Select MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Delete MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN MATCHED THEN DELETE; -- Merge using multiple operations MERGE INTO t USING u ON (a = b) WHEN MATCHED AND a > b THEN UPDATE SET a = b WHEN MATCHED AND ( a < b AND c < d ) THEN DELETE WHEN NOT MATCHED THEN INSERT (a, c) VALUES (b, d); -- Merge using sparksql specific matched clause MERGE INTO t USING u ON (a = b) WHEN MATCHED AND ( a < b AND c < d ) THEN UPDATE SET * WHEN NOT MATCHED THEN INSERT *; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/merge_into.yml000066400000000000000000000237151503426445100245110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4473f5824a0b4e5e2a78c4d301266f8d2ceb8b81ae35cf2d2ef4a8cb3102c06b file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: 
naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: b - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: d - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: 
keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_update_clause: - keyword: UPDATE - keyword: SET - wildcard_identifier: star: '*' merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/numeric_literal.sql000066400000000000000000000012331503426445100255240ustar00rootroot00000000000000SELECT foo FROM bar WHERE baz > -2147483648 AND baz > 9223372036854775807l AND baz > 9223372036854775807L AND baz > -32y AND baz > -32Y AND baz > 482s AND baz > 482S AND baz > 12.578 AND baz > -0.1234567 AND baz > -.1234567 AND baz > -123. AND baz > 123.bd AND baz > 123.BD AND baz > 5e2 AND baz > 5E2 AND baz > 5d AND baz > 5D AND baz > -5bd AND baz > -5BD AND baz > 12.578e-2d AND baz > 12.578E-2D AND baz > -.1234567e+2bd AND baz > -.1234567E+2BD AND baz > +3.e+3 AND baz > +3.E+3 AND baz > -3.E-3D AND baz > -3.e-3d AND baz > -+-1 AND baz > -+- 1 sqlfluff-3.4.2/test/fixtures/dialects/sparksql/numeric_literal.yml000066400000000000000000000170131503426445100255310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d06536672a4e28d97dd08f5456cf9201d7712426ae8ac0458d0083569d997c3e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar where_clause: keyword: WHERE expression: - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '2147483648' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 9223372036854775807l - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 9223372036854775807L - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 32y - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 32Y - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 482s - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 482S - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '12.578' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '0.1234567' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '.1234567' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '123.' 
- binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 123.bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 123.BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5e2 - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5E2 - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 5bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 5BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 12.578e-2d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 12.578E-2D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: .1234567e+2bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: .1234567E+2BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: + numeric_literal: '3.e+3' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: + numeric_literal: '3.E+3' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 3.E-3D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 3.e-3d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - sign_indicator: '-' - sign_indicator: + - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - sign_indicator: '-' - sign_indicator: + - numeric_literal: sign_indicator: '-' numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/sparksql/parse_integer_type.sql000066400000000000000000000000521503426445100262340ustar00rootroot00000000000000SELECT 123 AS INTEGER, 123 AS INT sqlfluff-3.4.2/test/fixtures/dialects/sparksql/parse_integer_type.yml000066400000000000000000000015311503426445100262410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fd21e68eeac61f8d94f3a334a79dc032cbb6f3e0a54e3615a73bbd6ef200da9e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '123' alias_expression: alias_operator: keyword: AS naked_identifier: INTEGER - comma: ',' - select_clause_element: numeric_literal: '123' alias_expression: alias_operator: keyword: AS naked_identifier: INT sqlfluff-3.4.2/test/fixtures/dialects/sparksql/parse_timestamp_ltz_ntz_type.sql000066400000000000000000000002301503426445100303640ustar00rootroot00000000000000select timestamp_ntz '1970-01-01', TIMESTAMP_LTZ '1970-01-01', cast('1970-01-01' as TIMESTAMP_NTZ), cast('1970-01-01' as timestamp_ltz) sqlfluff-3.4.2/test/fixtures/dialects/sparksql/parse_timestamp_ltz_ntz_type.yml000066400000000000000000000032011503426445100303670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 33fd8a03943433a4bc8a67be982cabebbf92c0fd8ee690c0325c92b8b4fedbb5 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: keyword: timestamp_ntz date_constructor_literal: "'1970-01-01'" - comma: ',' - select_clause_element: keyword: TIMESTAMP_LTZ date_constructor_literal: "'1970-01-01'" - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" keyword: as data_type: primitive_type: keyword: TIMESTAMP_NTZ end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" keyword: as data_type: primitive_type: keyword: timestamp_ltz end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/sparksql/pivot_clause.sql000066400000000000000000000032541503426445100250500ustar00rootroot00000000000000SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR (name) IN ('John' AS john, 'Mike' AS mike) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name, age IN (('John', 30) AS c1, ('Mike', 40) AS c2) ); SELECT p.a, p.c FROM person AS p PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name, age IN (('John', 30) AS c1, ('Mike', 40) AS c2) ); -- Will throw error when executed but should parse SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR (name, age) IN ('John' AS c1, ('Mike', 40) AS c2) ); SELECT * FROM ( some_table ) PIVOT ( min(timestamp_ns) / 1e9 as min_timestamp_s -- this is the offending line FOR run_id in ( test_run_id as test, ctrl_run_id as ctrl ) ); 
-- double pivot SELECT * FROM (select year, quarter, sales from sales) AS s PIVOT (sum(sales) AS total, avg(sales) AS avg FOR quarter IN (1 AS q1, 2 AS q2, 3 AS q3, 4 AS q4)) PIVOT (sum(q1_avg) AS total FOR year IN (2018, 2019)); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/pivot_clause.yml000066400000000000000000000633341503426445100250570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f8095ce9c2394531c7ec9c7db4334131ebedc6aa84c37d510efc512fc158152a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: start_bracket: ( expression: quoted_literal: "'John'" alias_expression: alias_operator: keyword: AS naked_identifier: john end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: alias_operator: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - keyword: FOR - bracketed: start_bracket: ( naked_identifier: name end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: alias_operator: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: 
column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: alias_operator: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: alias_operator: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: alias_operator: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: 
naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - comma: ',' - naked_identifier: age - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - comma: ',' - naked_identifier: age - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: alias_operator: keyword: AS 
naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c - keyword: FOR - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: alias_operator: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: some_table end_bracket: ) pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp_ns end_bracket: ) binary_operator: / numeric_literal: 1e9 - alias_expression: alias_operator: keyword: as naked_identifier: min_timestamp_s - keyword: FOR - naked_identifier: run_id - keyword: in - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: test_run_id - alias_expression: alias_operator: keyword: as naked_identifier: test - comma: ',' - expression: column_reference: naked_identifier: ctrl_run_id - alias_expression: alias_operator: keyword: as naked_identifier: ctrl - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: year - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter - comma: ',' - select_clause_element: column_reference: naked_identifier: sales from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: s - pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total - comma: ',' - function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: avg - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: alias_operator: keyword: AS naked_identifier: q1 - comma: ',' - expression: numeric_literal: '2' - alias_expression: alias_operator: keyword: AS 
naked_identifier: q2 - comma: ',' - expression: numeric_literal: '3' - alias_expression: alias_operator: keyword: AS naked_identifier: q3 - comma: ',' - expression: numeric_literal: '4' - alias_expression: alias_operator: keyword: AS naked_identifier: q4 - end_bracket: ) - end_bracket: ) - pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: q1_avg end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: total - keyword: FOR - naked_identifier: year - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '2018' - comma: ',' - expression: numeric_literal: '2019' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/quoted_literal.sql000066400000000000000000000001231503426445100253600ustar00rootroot00000000000000SELECT result FROM student WHERE name = "John Smith" OR name = 'Jane Doe'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/quoted_literal.yml000066400000000000000000000023331503426445100253670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: acd713bf42ec344d138645070f021bde1c8e258469e4e2fce30f3b20acb9b17c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: result from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John Smith"' - binary_operator: OR - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Jane Doe'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/raw_literal.sql000066400000000000000000000001541503426445100246540ustar00rootroot00000000000000SELECT r'foo\nbar' AS col; SELECT r"foo\nbar" AS col; SELECT R'foo\nbar' AS col; SELECT R"foo\nbar" AS col; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/raw_literal.yml000066400000000000000000000030541503426445100246600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ec35af73edf8f05e8afabd877d094642f4ab4f93605504d08ac06cf3482d048 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: raw_quoted_literal: "r'foo\\nbar'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: raw_quoted_literal: r"foo\nbar" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: raw_quoted_literal: "R'foo\\nbar'" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: raw_quoted_literal: R"foo\nbar" alias_expression: alias_operator: keyword: AS naked_identifier: col - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh.sql000066400000000000000000000001331503426445100240020ustar00rootroot00000000000000-- The Path is resolved using the datasource's File Index. REFRESH "hdfs://path/to/table"; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh.yml000066400000000000000000000010161503426445100240050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 970706fdaea7911cf0c0b819d83665cdbeb973323dd78d19cf9624cf1d3ebeb3 file: statement: refresh_statement: keyword: REFRESH quoted_literal: '"hdfs://path/to/table"' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh_function.sql000066400000000000000000000005321503426445100257120ustar00rootroot00000000000000-- The cached entry of the function will be refreshed -- The function is resolved from the current database -- as the function name is unqualified. REFRESH FUNCTION func1; -- The cached entry of the function will be refreshed -- The function is resolved from the db1 database as the -- function name is qualified. REFRESH FUNCTION db1.func1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh_function.yml000066400000000000000000000014131503426445100257130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 73c2d61b75bd316b3062a10b9dffa1709c124f3a0c7971c607c8319e60d4015c file: - statement: refresh_statement: - keyword: REFRESH - keyword: FUNCTION - function_name: function_name_identifier: func1 - statement_terminator: ; - statement: refresh_statement: - keyword: REFRESH - keyword: FUNCTION - function_name: naked_identifier: db1 dot: . function_name_identifier: func1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh_table.sql000066400000000000000000000005711503426445100251570ustar00rootroot00000000000000-- The cached entries of the table will be refreshed -- The table is resolved from the current database as -- the table name is unqualified.
REFRESH TABLE tbl1; REFRESH tbl1; -- The cached entries of the view will be refreshed or invalidated -- The view is resolved from the tempdb database, as the view -- name is qualified. REFRESH TABLE tempdb.view1; REFRESH tempdb.view1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/refresh_table.yml000066400000000000000000000021021503426445100251610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ca4dd1564256b67c94d7bff54c52bd2f1ec227aafe2ebe7c17fdd6ddda30d41a file: - statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: refresh_statement: keyword: REFRESH table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: - naked_identifier: tempdb - dot: . - naked_identifier: view1 - statement_terminator: ; - statement: refresh_statement: keyword: REFRESH table_reference: - naked_identifier: tempdb - dot: . - naked_identifier: view1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/remove_widget.sql000066400000000000000000000000561503426445100252100ustar00rootroot00000000000000REMOVE WIDGET state; REMOVE WIDGET database; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/remove_widget.yml000066400000000000000000000012651503426445100252150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1948d24ba464cafe5f0aab52377ee29c4738fed8a52e997c246548b045b912e0 file: - statement: remove_widget_statement: - keyword: REMOVE - keyword: WIDGET - widget_name_identifier: state - statement_terminator: ; - statement: remove_widget_statement: - keyword: REMOVE - keyword: WIDGET - widget_name_identifier: database - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/repair_table.sql000066400000000000000000000005401503426445100247770ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK REPAIR TABLE table_identifier ADD PARTITIONS; MSCK REPAIR TABLE table_identifier DROP PARTITIONS; MSCK REPAIR TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK REPAIR TABLE table_identifier; -- Run MSCK REPAIR TABLE to recover all the partitions MSCK REPAIR TABLE t1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/repair_table.yml000066400000000000000000000027621503426445100250070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
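For context on the repair_table fixture above: MSCK REPAIR TABLE reconciles the metastore with the partition directories found at the table's storage location — ADD PARTITIONS registers directories missing from the metastore, DROP PARTITIONS removes entries whose directories are gone, and SYNC PARTITIONS does both. A hand-written sketch of the typical workflow, assuming a hypothetical sales table partitioned by ds (not one of the fixtures):

-- Create a partitioned table; its partitions are tracked in the metastore.
CREATE TABLE sales (id INT, amount DOUBLE) USING PARQUET PARTITIONED BY (ds STRING);
-- After new ds=... directories are written to storage outside of Spark,
-- register them so queries can see them:
MSCK REPAIR TABLE sales ADD PARTITIONS;
SHOW PARTITIONS sales;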
_hash: ad69dfd163a37ca9b5693d986cfedd860fe067e937f93faa2718af5dc7ee6f9c file: - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/reset.sql000066400000000000000000000000311503426445100234630ustar00rootroot00000000000000RESET; RESET spark.abc; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/reset.yml000066400000000000000000000011631503426445100234740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3331ebbbdfebac532a05c372437bc04e0f1dfb59d3c22d61acf1d3efa217a5b5 file: - statement: reset_statement: keyword: RESET - statement_terminator: ; - statement: reset_statement: - keyword: RESET - naked_identifier: spark - dot: . - naked_identifier: abc - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_aggregate.sql000066400000000000000000000002201503426445100256260ustar00rootroot00000000000000SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x); -- 6 SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10); -- 60 sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_aggregate.yml000066400000000000000000000076461503426445100256530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
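For context on the select_aggregate fixture above: aggregate(expr, start, merge [, finish]) folds an array left to right with the merge lambda and then optionally applies the finish lambda to the accumulated value — a functional reduce, which is why 0 + 1 + 2 + 3 gives 6 and the finish lambda acc -> acc * 10 turns that into 60. A hand-written variant over a column, assuming a hypothetical orders table with an amounts ARRAY<DOUBLE> column:

SELECT
    id,
    aggregate(amounts, CAST(0 AS DOUBLE), (acc, x) -> acc + x) AS total
FROM orders;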
_hash: bd0e2f13b9bee38a83e6661281bae64e1dfe18014dbbab966576472084ed2f01 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: aggregate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: aggregate function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: '*' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_cluster_by.sql000066400000000000000000000016571503426445100260660ustar00rootroot00000000000000-- Produces rows clustered by age. Persons with the same age are clustered together. -- In the query below, persons with age 18 and 25 are in the first partition and the -- persons with age 16 are in the second partition. The rows are sorted based -- on age within each partition. SELECT age, name FROM person CLUSTER BY age; SELECT age, name FROM person CLUSTER BY 1; SELECT age, name FROM person CLUSTER BY name, age; SELECT age, name FROM person CLUSTER BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 CLUSTER BY age; SELECT age, name FROM person GROUP BY age CLUSTER BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 CLUSTER BY age; SELECT age, name FROM person UNION ALL SELECT age, name FROM person_cold CLUSTER BY age; SELECT CURRENT_DATE() AS p_data_date CLUSTER BY p_data_date; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_cluster_by.yml000066400000000000000000000212651503426445100260710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
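For context on the select_cluster_by fixture above: CLUSTER BY x is shorthand for DISTRIBUTE BY x plus SORT BY x — rows are repartitioned on x and sorted within each partition, with no total ordering across partitions as ORDER BY would give. A hand-written illustration of the equivalence (not one of the fixtures):

SELECT age, name FROM person CLUSTER BY age;
-- ...should behave the same as:
SELECT age, name FROM person DISTRIBUTE BY age SORT BY age;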
_hash: 70a78dbff426ebbacd8b0f13d0d18f82f01f8880505a8d1319a2e8f9101e406e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - expression: function: function_name: function_name_identifier: LEFT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age cluster_by_clause: - keyword: CLUSTER - keyword: BY - 
column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person_cold cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: p_data_date cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: p_data_date - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_delimited_identifier.sql000066400000000000000000000002511503426445100300460ustar00rootroot00000000000000select 1 as `delimited_but_regular_identifer_0`; select 2 as `100% unruly-identifier`; select `questionable identifier?` `still-questionable` from `(delimited)-table!`; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_delimited_identifier.yml000066400000000000000000000030141503426445100300500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
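For context on the select_delimited_identifier fixture above: backticks allow identifiers to contain spaces, punctuation, or reserved words that a naked identifier cannot. A small hand-written example using hypothetical names (not one of the fixtures):

SELECT `unit price` * `qty ordered` AS `line total`
FROM `order-items`;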
_hash: f77333be0951ed17dc327491f3ad4727b9ccc2f6e9d1ecf7025ce4b7bb9a8680 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as quoted_identifier: '`delimited_but_regular_identifer_0`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as quoted_identifier: '`100% unruly-identifier`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`questionable identifier?`' alias_expression: quoted_identifier: '`still-questionable`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`(delimited)-table!`' - statement_terminator: ; select_delimited_identifier_with_escaped_backticks.sql000066400000000000000000000006521503426445100345310ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/sparksqlselect `delimited``with escaped` from `some``table` `some``alias`; select `delimited``with escaped` from `some``table` as `some``alias`; select ```delimited` `alias` FROM `some``table` `some````alias`; select `delimited``` `alias` FROM `some``table` ```some````alias```; SELECT `delimited ``identifier` `alias` FROM `some``table` `some``alias`; SELECT `delimited ``identifier` AS `alias` FROM `some``table` AS `some``alias`; select_delimited_identifier_with_escaped_backticks.yml000066400000000000000000000075161503426445100345410ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/sparksql# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
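For context on the escaped-backticks fixture above: a literal backtick inside a backtick-quoted identifier is written by doubling it, just as a single quote is doubled inside a string literal. A hand-written example (table t is hypothetical; the column's actual name is weird`name):

SELECT `weird``name` FROM t;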
_hash: 103c51338e753bab594cfb3bef6a89a64ff84f568c57cc019cda0019d4250555 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited``with escaped`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited``with escaped`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: alias_operator: keyword: as quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '```delimited`' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some````alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited```' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '```some````alias```' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '`delimited ``identifier`' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '`delimited ``identifier`' alias_expression: alias_operator: keyword: AS quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: alias_operator: keyword: AS quoted_identifier: '`some``alias`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_distribute_by.sql000066400000000000000000000017111503426445100265560ustar00rootroot00000000000000-- Produces rows clustered by age. Persons with the same age are clustered together. -- Unlike the `CLUSTER BY` clause, the rows are not sorted within a partition.
SELECT age, name FROM person DISTRIBUTE BY age; SELECT age, name FROM person DISTRIBUTE BY 1; SELECT age, name FROM person DISTRIBUTE BY name, age; SELECT age, name FROM person DISTRIBUTE BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 DISTRIBUTE BY age SORT BY age; SELECT age, name FROM person UNION ALL SELECT age, name FROM person_cold DISTRIBUTE BY age SORT BY age; SELECT CURRENT_DATE() AS p_data_date DISTRIBUTE BY p_data_date; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_distribute_by.yml000066400000000000000000000243241503426445100265650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bca3ada640b147729379f01c9cea56095d3f7a57c93c7f3ae5922d8261b88cef file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - expression: function: function_name: function_name_identifier: LEFT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: 
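For context on the select_distribute_by fixture above: DISTRIBUTE BY only controls which partition each row is shuffled to; within a partition rows remain unordered unless an explicit SORT BY is added, which is the pairing the last fixtures exercise. A hand-written illustration (not one of the fixtures):

-- Co-locate all rows for each age, then order them within each partition:
SELECT age, name FROM person DISTRIBUTE BY age SORT BY age, name;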
numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - 
comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person_cold distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: p_data_date distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: p_data_date - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_div.sql000066400000000000000000000000201503426445100244600ustar00rootroot00000000000000SELECT 3 DIV 2; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_div.yml000066400000000000000000000012431503426445100244720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 51cb10bd9421e08eb8915768a600cb5424ff64f19b8ecc513413f82ea2da8a15 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '3' - binary_operator: keyword: DIV - numeric_literal: '2' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_file.sql000066400000000000000000000036651503426445100256620ustar00rootroot00000000000000-- PARQUET file SELECT a, b, c FROM PARQUET.`examples/src/main/resources/users.parquet`; -- Directory of Parquet Files SELECT a, b, c FROM PARQUET.`examples/src/main/resources/users`; -- ORC file SELECT a, b, c FROM ORC.`examples/src/main/resources/users.orc`; -- JSON file SELECT a, b, c FROM JSON.`examples/src/main/resources/people.json`; -- Directory of JSON files SELECT a, b, c FROM JSON.`examples/src/main/resources/people`; -- Text File SELECT a, b, c FROM TEXT.`examples/src/main/resources/people.txt`; -- Tests for Inline Path Glob Filter -- https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter --noqa: LT05 -- Inline Path Filter using Asterisk (*) SELECT a, b, c FROM TEXT.`//root/*.txt`; -- Inline Path Filter using Question mark (?) 
SELECT a, b, c FROM TEXT.`//root/200?.txt`; -- Inline Path Filter using Character Class ([ab]) SELECT a, b, c FROM TEXT.`//root/200[23].txt`; -- Inline Path Filter using Negated Character Class ([^ab]) SELECT a, b, c FROM TEXT.`//root/200[^23].txt`; -- Inline Path Filter using Character Range ([a-b]) SELECT a, b, c FROM TEXT.`//root/200[2-5].txt`; -- Inline Path Filter using Negated Character Range ([^a-b]) SELECT a, b, c FROM TEXT.`//root/200[^2-5].txt`; -- Inline Path Filter using Alternation ({a,b}) SELECT a, b, c FROM TEXT.`//root/20{04, 05}.txt`; -- JSON treated as Text File SELECT a, b, c FROM TEXT.`examples/src/main/resources/people.json`; -- BinaryFile SELECT a, b, c FROM BINARYFILE.`/events/events-kafka.json`; -- Directory of BinaryFiles SELECT a, b, c FROM BINARYFILE.`/events/events-kafka`; -- CSV File SELECT a, b, c FROM CSV.`/sales/sales.csv`; -- Delta File; test for Issue #602 SELECT a, b, c FROM DELTA.`/mnt/datalake/table`; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_file.yml000066400000000000000000000312421503426445100256540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 515e6a9134ebb0c984be726ff540b20dc6ae7cef764794ee195f210b1c992383 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: PARQUET dot: . quoted_identifier: '`examples/src/main/resources/users.parquet`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: PARQUET dot: . quoted_identifier: '`examples/src/main/resources/users`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: ORC dot: . quoted_identifier: '`examples/src/main/resources/users.orc`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . 
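For context on the select_from_file fixture above: Spark can query files in place with the format.`path` form — the keyword before the dot names the data source (PARQUET, ORC, JSON, CSV, TEXT, BINARYFILE, DELTA) and the backtick-quoted path may use the glob patterns the fixture enumerates. A hand-written example with a hypothetical path, combining a character range with a directory read (not one of the fixtures):

-- All Parquet data for the first week of the month:
SELECT a, b FROM PARQUET.`/data/events/ds=2024-01-0[1-7]`;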
quoted_identifier: '`examples/src/main/resources/people.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . quoted_identifier: '`examples/src/main/resources/people`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`examples/src/main/resources/people.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/*.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200?.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[23].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[^23].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . 
quoted_identifier: '`//root/200[2-5].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[^2-5].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/20{04, 05}.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`examples/src/main/resources/people.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: file_format: BINARYFILE dot: . quoted_identifier: '`/events/events-kafka.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: file_format: BINARYFILE dot: . quoted_identifier: '`/events/events-kafka`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: CSV dot: . quoted_identifier: '`/sales/sales.csv`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/mnt/datalake/table`' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_lateral_view.sql000066400000000000000000000041331503426445100274100ustar00rootroot00000000000000SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) tbl_name AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age; SELECT c_age, COUNT(*) AS record_count FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age GROUP BY c_age; SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) c_age; SELECT person.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) AS name, age, state; SELECT t1.column1, CAST(GET_JSON_OBJECT(things, '$.percentage') AS DECIMAL(16, 8) ) AS ptc FROM table1 AS t1 LEFT JOIN table2 AS t2 ON c.column1 = p.column1 AND t2.type = 'SOMETHING' LATERAL VIEW OUTER EXPLODE(t2.column2) AS things; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_lateral_view.yml000066400000000000000000000615031503426445100274160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
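For context on the select_from_lateral_view fixture above: LATERAL VIEW EXPLODE(arr) t AS c joins each source row to one output row per array element, exposing the element as column c of the generated table t, while OUTER keeps source rows whose array is empty or NULL. A hand-written illustration, assuming a hypothetical hobbies ARRAY<STRING> column on person:

SELECT p.id, h.hobby
FROM person AS p
LATERAL VIEW OUTER EXPLODE(p.hobbies) h AS hobby;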
_hash: 67f53a11fc46f46f8aff10b4787811a1dc41dd5ac766b751a19395f52fb5603d file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: d_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: d_age groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: 
',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: 
column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: person - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . 
- naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: alias_operator: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: column1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GET_JSON_OBJECT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: things - comma: ',' - expression: quoted_literal: "'$.percentage'" - end_bracket: ) keyword: AS data_type: primitive_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '16' - comma: ',' - numeric_literal: '8' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ptc from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: alias_operator: keyword: AS naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: column1 - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SOMETHING'" lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: column2 end_bracket: ) - keyword: AS - naked_identifier: things - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_multiple_values_clauses.sql
select * from values 1, values 2;
select * from (values 1, 2), (values 2, 3);
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_multiple_values_clauses.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9c1640e3e298203563aa4d91fdcd0176c7882e17f7fb1399af63ab20fd2a6eb7
file:
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
      - keyword: from
      - from_expression:
          from_expression_element:
            table_expression:
              values_clause:
                keyword: values
                expression:
                  numeric_literal: '1'
      - comma: ','
      - from_expression:
          from_expression_element:
            table_expression:
              values_clause:
                keyword: values
                expression:
                  numeric_literal: '2'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
      - keyword: from
      - from_expression:
          bracketed:
            start_bracket: (
            from_expression_element:
              table_expression:
                values_clause:
                - keyword: values
                - expression:
                    numeric_literal: '1'
                - comma: ','
                - expression:
                    numeric_literal: '2'
            end_bracket: )
      - comma: ','
      - from_expression:
          bracketed:
            start_bracket: (
            from_expression_element:
              table_expression:
                values_clause:
                - keyword: values
                - expression:
                    numeric_literal: '2'
                - comma: ','
                - expression:
                    numeric_literal: '3'
            end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_values.sql
select * from values 1;
select * from values (1);
select * from values (1,2);
select * from (values 1,2,3);
select * from (values (1),(2),(3));
select * from (values (1,2), (3,4));
select * from values 1, values 2;
select * from (values (1,2), (3,4)), (values (1,2), (3,4));
select * from (values 1, least(2,3), greatest(4,5));
select * from values 1 as t;
select * from values (1,2) as t(a, b);
select * from (values (1,2), (3,4)) as t (a, b);
select * from (values (1,2), (3,4)) as (a, b);
select * from values 1 t;
select * from values (1,2) t(a, b);
select * from (values (1,2), (3,4)) t (a, b);
select * from (values (1,2), (3,4)) (a, b);
select * from values 1 , 2;
select * from values ( 1 , 2 ) , ( 3 , 4 );
select * from values 1 , 2 , values 3 , 4;
select * from values (1) , (2);
select * from values 1 , 2 , values 3 , 4;
select 1 + 2 == 3 from values 1;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_values.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 419e87742bfb76efab772081a48f64ecacbc24ecd162b44e9d9f82ec7e6f3b43 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - 
bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - comma: ',' - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: function: function_name: function_name_identifier: least function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: 
start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: 
values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - comparison_operator: == - numeric_literal: '3' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_where_clause.sql
-- Comparison operator in `WHERE` clause.
SELECT name, age
FROM person
WHERE id > 200
ORDER BY id;

-- Comparison and logical operators in `WHERE` clause.
SELECT name, age
FROM person
WHERE id = 200 OR id = 300
ORDER BY id;

-- Function expression in `WHERE` clause.
SELECT name, age
FROM person
WHERE length(name) > 3
ORDER BY id;

-- `BETWEEN` expression in `WHERE` clause.
SELECT name, age
FROM person
WHERE id BETWEEN 200 AND 300
ORDER BY id;

-- Scalar Subquery in `WHERE` clause.
SELECT name, age
FROM person
WHERE age > (SELECT avg(age) FROM person);

-- Correlated Subquery in `WHERE` clause.
SELECT name, age
FROM person
WHERE EXISTS (
    SELECT 1 FROM person WHERE person.id = person.id AND person.age IS NULL
);

SELECT name, age
FROM person
WHERE person.id is distinct from person.age;

SELECT name, age
FROM person
WHERE person.id is not distinct from person.age
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_from_where_clause.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 80e74ea077fbe390734f811dae5188f03bfed89927a630aff64616e782787cb0
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: id
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '200'
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: id
        - comparison_operator:
            raw_comparison_operator: '='
        - numeric_literal: '200'
        - binary_operator: OR
        - column_reference:
            naked_identifier: id
        - comparison_operator:
            raw_comparison_operator: '='
        - numeric_literal: '300'
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
          function:
            function_name:
              function_name_identifier: length
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: name
                end_bracket: )
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '3'
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: id
        - keyword: BETWEEN
        - numeric_literal: '200'
        - keyword: AND
        - numeric_literal: '300'
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: age
          comparison_operator:
            raw_comparison_operator: '>'
          bracketed:
            start_bracket: (
            expression:
              select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    function:
                      function_name:
                        function_name_identifier: avg
                      function_contents:
                        bracketed:
                          start_bracket: (
                          expression:
                            column_reference:
                              naked_identifier: age
                          end_bracket: )
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: person
            end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
          keyword: EXISTS
          bracketed:
            start_bracket: (
            select_statement:
              select_clause:
                keyword: SELECT
                select_clause_element:
                  numeric_literal: '1'
              from_clause:
                keyword: FROM
                from_expression:
                  from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: person
              where_clause:
                keyword: WHERE
                expression:
                - column_reference:
                  - naked_identifier: person
                  - dot: .
                  - naked_identifier: id
                - comparison_operator:
                    raw_comparison_operator: '='
                - column_reference:
                  - naked_identifier: person
                  - dot: .
                  - naked_identifier: id
                - binary_operator: AND
                - column_reference:
                  - naked_identifier: person
                  - dot: .
                  - naked_identifier: age
                - keyword: IS
                - null_literal: 'NULL'
            end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
          - naked_identifier: person
          - dot: .
          - naked_identifier: id
        - keyword: is
        - keyword: distinct
        - keyword: from
        - column_reference:
          - naked_identifier: person
          - dot: .
          - naked_identifier: age
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
          - naked_identifier: person
          - dot: .
          - naked_identifier: id
        - keyword: is
        - keyword: not
        - keyword: distinct
        - keyword: from
        - column_reference:
          - naked_identifier: person
          - dot: .
          - naked_identifier: age
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_group_by.sql
-- Sum of quantity per dealership. Group by `id`.
SELECT id, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY id
ORDER BY id;

-- Use column position in GROUP by clause.
SELECT id, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY 1
ORDER BY 1;

-- Multiple aggregations.
-- 1. Sum of quantity per dealership.
-- 2. Max quantity per dealership.
SELECT id, sum(quantity) AS sum_quantity, max(quantity) AS max_quantity
FROM dealer
GROUP BY id
ORDER BY id;

-- Count the number of distinct dealer cities per car_model.
SELECT car_model, count(DISTINCT city) AS count_distinct_city
FROM dealer
GROUP BY car_model;

-- Sum of only 'Honda Civic' and 'Honda CRV' quantities per dealership.
SELECT id, sum(quantity) FILTER (
    WHERE car_model IN ('Honda Civic', 'Honda CRV')
) AS `sum(quantity)`
FROM dealer
GROUP BY id
ORDER BY id;

-- Aggregations using multiple sets of grouping columns in a single statement.
-- Following performs aggregations based on four sets of grouping columns.
-- 1. city, car_model
-- 2. city
-- 3. car_model
-- 4. Empty grouping set. Returns quantities for all city and car models.
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model, GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

-- Group by processing with `ROLLUP` clause.
-- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), ())
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model WITH ROLLUP
ORDER BY city, car_model;

-- Group by processing with `CUBE` clause.
-- Equivalent GROUP BY:
-- GROUPING SETS ((city, car_model), (city), (car_model), ())
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model WITH CUBE
ORDER BY city, car_model;

-- Select the first row in column age
-- Implicit GROUP BY
SELECT first(age) FROM person;

-- Implicit GROUP BY
SELECT first(age IGNORE NULLS) AS first_age, last(id) AS last_id, sum(id) AS sum_id
FROM person;

-- CUBE within GROUP BY clause
SELECT name, age, count(*) AS record_count
FROM people
GROUP BY cube(name, age);

-- CUBE within GROUP BY clause with single clause on newline
SELECT name, count(*) AS record_count
FROM people
GROUP BY cube(
    name
);

-- CUBE within GROUP BY clause with multiple clauses on newline
SELECT name, age, count(*) AS record_count
FROM people
GROUP BY cube(
    name,
    age
);

-- ROLLUP within GROUP BY clause
SELECT name, age, count(*) AS record_count
FROM people
GROUP BY rollup(name, age);
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_group_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e48d313c611a5c62fe32a3b2687b0858ccd9529ccd25acfc0eeeafc4cfadfae5 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: max_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: city end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: count_distinct_city from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: 
column_reference: naked_identifier: quantity end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: car_model keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'Honda Civic'" - comma: ',' - quoted_literal: "'Honda CRV'" - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '`sum(quantity)`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: 
) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - comma: ',' - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: ROLLUP orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: CUBE orderby_clause: - keyword: ORDER - keyword: 
BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: first function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: first function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: age - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: first_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: last_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: sum_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: column_reference: naked_identifier: name end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: 
function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_having.sql
-- `HAVING` clause referring to column in `GROUP BY`.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING city = 'Fremont';

-- `HAVING` clause referring to aggregate function.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING sum(quantity) > 15;

-- `HAVING` clause referring to aggregate function
-- by its alias.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING sum_quantity > 15;

-- `HAVING` clause referring to a different aggregate
-- function than what is present in `SELECT` list.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING max(quantity) > 15;

-- `HAVING` clause referring to constant expression.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING 1 > 0
ORDER BY city;

-- `HAVING` clause without a `GROUP BY` clause.
SELECT sum(quantity) AS sum_quantity
FROM dealer
HAVING sum(quantity) > 10;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_having.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1922e135b93b8a06e8906af4cb21c0f2772a294843cb1c841dd0e341ee18d3ca
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: city
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      groupby_clause:
      - keyword: GROUP
      - keyword: BY
      - column_reference:
          naked_identifier: city
      having_clause:
        keyword: HAVING
        expression:
          column_reference:
            naked_identifier: city
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: "'Fremont'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: city
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      groupby_clause:
      - keyword: GROUP
      - keyword: BY
      - column_reference:
          naked_identifier: city
      having_clause:
        keyword: HAVING
        expression:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '15'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: city
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      groupby_clause:
      - keyword: GROUP
      - keyword: BY
      - column_reference:
          naked_identifier: city
      having_clause:
        keyword: HAVING
        expression:
          column_reference:
            naked_identifier: sum_quantity
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '15'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: city
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      groupby_clause:
      - keyword: GROUP
      - keyword: BY
      - column_reference:
          naked_identifier: city
      having_clause:
        keyword: HAVING
        expression:
          function:
            function_name:
              function_name_identifier: max
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '15'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: city
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      groupby_clause:
      - keyword: GROUP
      - keyword: BY
      - column_reference:
          naked_identifier: city
      having_clause:
        keyword: HAVING
        expression:
        - numeric_literal: '1'
        - comparison_operator:
            raw_comparison_operator: '>'
        - numeric_literal: '0'
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: city
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          alias_expression:
            alias_operator:
              keyword: AS
            naked_identifier: sum_quantity
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dealer
      having_clause:
        keyword: HAVING
        expression:
          function:
            function_name:
              function_name_identifier: sum
            function_contents:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: quantity
                end_bracket: )
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '10'
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_hints.sql
SELECT /*+ COALESCE(3) */ a, b, c FROM t;

SELECT /*+ REPARTITION(3) */ a, b, c FROM t;

SELECT /*+ REPARTITION(c) */ a, b, c FROM t;

SELECT /*+ REPARTITION(3, c) */ a, b, c FROM t;

SELECT /*+ REPARTITION_BY_RANGE(c) */ a, b, c FROM t;

SELECT /*+ REPARTITION_BY_RANGE(3, c) */ a, b, c FROM t;

SELECT /*+ REBALANCE */ a, b, c FROM t;

SELECT /*+ REBALANCE(c) */ a, b, c FROM t;

-- multiple partitioning hints
SELECT /*+ REPARTITION(100), COALESCE(500), REPARTITION_BY_RANGE(3, c) */ a, b, c
FROM t;

-- Join Hints for broadcast join
SELECT /*+ BROADCAST(t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCASTJOIN(t1) */ t1.a, t1.b, t2.c
FROM t1 LEFT JOIN t2 ON t1.key = t2.key;

SELECT /*+ MAPJOIN(t2) */ t1.a, t1.b, t2.c
FROM t1 LEFT JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle sort merge join
SELECT /*+ SHUFFLE_MERGE(t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ MERGEJOIN(t2) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ MERGE(t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle hash join
SELECT /*+ SHUFFLE_HASH(t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle-and-replicate nested loop join
SELECT /*+ SHUFFLE_REPLICATE_NL(t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCAST(t1), MERGE(t1, t2) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCAST(db.t1) */ t1.a, t1.b, t2.c
FROM t1 INNER JOIN t2 ON t1.key = t2.key;
sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_hints.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b7a5f1a6d838148caf30c0057a70f0d07683f9d092086886c7001c28048731e3
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: COALESCE function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: column_reference: naked_identifier: c - end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE function_contents: bracketed: start_bracket: ( expression: column_reference:
naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: column_reference: naked_identifier: c - end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REBALANCE end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REBALANCE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: - start_hint: /*+ - hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: COALESCE function_contents: bracketed: start_bracket: ( expression: numeric_literal: '500' end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: column_reference: naked_identifier: c - end_bracket: ) - end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - 
keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCASTJOIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MAPJOIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t2 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_MERGE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MERGEJOIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t2 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MERGE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_HASH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_REPLICATE_NL function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: - start_hint: /*+ - hint_function: function_name: function_name_identifier: BROADCAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t1 end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: MERGE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: t1 - comma: ',' - expression: column_reference: naked_identifier: t2 - end_bracket: ) - end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: db - dot: . - naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.sql000066400000000000000000000040771503426445100313600ustar00rootroot00000000000000-- TVFs that can be specified in SELECT/LATERAL VIEW clauses -- explode in a SELECT SELECT explode(array(10, 20)); -- explode_outer in a SELECT SELECT explode_outer(array(10, 20)); -- explode in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW explode(array(3, 4)) AS c2; -- explode_outer in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW explode_outer(array(3, 4)) AS c2; -- inline in a SELECT SELECT inline(array(struct(1, 'a'), struct(2, 'b'))); -- inline_outer in a SELECT SELECT inline_outer(array(struct(1, 'a'), struct(2, 'b'))); -- inline in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW inline(array(struct(1, 'a'), struct(2, 'b'))) AS c1, c2; -- inline_outer in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW inline_outer(array(struct(1, 'a'), struct(2, 'b'))) AS c1, c2; -- posexplode in a SELECT SELECT posexplode(array(10, 20)); -- posexplode_outer in a SELECT SELECT posexplode_outer(array(10, 20)); -- posexplode in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW posexplode(array(10, 20)) AS c1; -- posexplode_outer in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW posexplode_outer(array(10, 20)) AS c1; -- stack in a SELECT SELECT stack(2, 1, 2, 3); -- stack in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW stack(2, 1, 2, 3) AS c1, c2; -- json_tuple in a SELECT SELECT json_tuple('{"a":1, "b":2}', 'a', 'b'); -- json_tuple in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW json_tuple('{"a":1, "b":2}', 'a', 'b') AS c1, c2; -- parse_url in a SELECT SELECT 
parse_url('http://spark.apache.org/path?query=1', 'HOST'); -- parse_url in a LATERAL VIEW clause SELECT test.a, test.b FROM test LATERAL VIEW parse_url( 'http://spark.apache.org/path?query=1', 'HOST' ) AS c1; -- explode in a LATERAL CLAUSE SELECT a.id, b.col FROM range(10) as test, LATERAL explode(array('a', 'b', 'c')) as b; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml000066400000000000000000000643161503426445100313640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b5932744feff93a7b2a8c7baac793affe60f0dcccd4bea3703e9945b2c65dc41 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: explode_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: inline function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: inline_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: inline function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: inline_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: posexplode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: posexplode_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: posexplode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: posexplode_outer function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: stack function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: stack function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: json_tuple function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"a\":1, \"b\":2}'" - comma: ',' - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: json_tuple function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"a\":1, \"b\":2}'" - comma: ',' - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: parse_url function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'http://spark.apache.org/path?query=1'" - comma: ',' - expression: quoted_literal: "'HOST'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: parse_url function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'http://spark.apache.org/path?query=1'" - comma: ',' - expression: quoted_literal: "'HOST'" - end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: b - dot: . 
- naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range function_contents: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: test lateral_view_clause: - comma: ',' - keyword: LATERAL - function: function_name: function_name_identifier: explode function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: quoted_literal: "'c'" - end_bracket: ) end_bracket: ) - keyword: as - naked_identifier: b - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_like_clause.sql000066400000000000000000000017601503426445100261720ustar00rootroot00000000000000SELECT a, b FROM person WHERE name LIKE 'M%'; SELECT a, b FROM person WHERE name LIKE 'M_ry'; SELECT a, b FROM person WHERE name NOT LIKE 'M_ry'; SELECT a, b FROM person WHERE name RLIKE 'M+'; SELECT a, b FROM person WHERE name REGEXP 'M+'; SELECT a, b FROM person WHERE name LIKE '%\_%'; SELECT a, b FROM person WHERE name LIKE '%$_%' ESCAPE '$'; SELECT a, b FROM person WHERE name LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE SOME ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE SOME ('%an%', '%an'); SELECT company FROM ilike_all_table WHERE company ILIKE ALL ('%oO%', '%Go%'); SELECT company FROM ilike_any_table WHERE company ILIKE ANY ('%oo%', '%IN', 'fA%'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_like_clause.yml000066400000000000000000000275471503426445100262070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 28152351496d8b6ec3296cc1d052f6ee4f7cff10f090bee1e9e5a404ce4244ae file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: RLIKE quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: REGEXP quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'%\\_%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - quoted_literal: "'%$_%'" - keyword: ESCAPE - quoted_literal: "'$'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: 
SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: SOME - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: SOME - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: company from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ilike_all_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: company - keyword: ILIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%oO%'" - comma: ',' - quoted_literal: "'%Go%'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: company from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ilike_any_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: company - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%oo%'" - comma: ',' - quoted_literal: "'%IN'" - comma: ',' - quoted_literal: "'fA%'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_limit_clause.sql000066400000000000000000000005131503426445100263570ustar00rootroot00000000000000-- Select the first two rows. SELECT name, age FROM person ORDER BY name LIMIT 2; -- Specifying ALL option on LIMIT returns all the rows. SELECT name, age FROM person ORDER BY name LIMIT ALL; -- A function expression as an input to LIMIT. SELECT name, age FROM person ORDER BY name LIMIT length('SPARK'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_limit_clause.yml000066400000000000000000000053061503426445100263660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5aeea32998b316c417a71f78e78062ae4255dcf05105df0168f70dfb8e87b604 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name limit_clause: keyword: LIMIT numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name limit_clause: - keyword: LIMIT - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name limit_clause: keyword: LIMIT function: function_name: function_name_identifier: length function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'SPARK'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_order_by.sql000066400000000000000000000013211503426445100255100ustar00rootroot00000000000000-- Sort rows SELECT name, age FROM person ORDER BY age; -- Sort rows in ascending manner keeping null values to be last. SELECT name, age FROM person ORDER BY age NULLS LAST; -- Sort rows in descending manner, which defaults to NULL LAST. SELECT name, age FROM person ORDER BY age DESC; -- Sort rows in descending manner keeping null values to be first. SELECT name, age FROM person ORDER BY age DESC NULLS FIRST; -- Sort rows based on more than one column with each column having different -- sort direction. SELECT name, age FROM person ORDER BY name ASC, age DESC; -- Sort rows using complex expression. SELECT name, age FROM person ORDER BY SUM(age)/SUM(age) DESC; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_order_by.yml000066400000000000000000000120051503426445100255130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 5e3ba161795728dd1b34a70b468257fd2a89c00185ffffbe3dec4eba2c98ef72 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - expression: - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - binary_operator: / - function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - keyword: DESC - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_qualify.sql000066400000000000000000000024461503426445100253660ustar00rootroot00000000000000SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank 
FROM Produce QUALIFY rank <= 3; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) QUALIFY rank <= 3 ORDER BY item; SELECT CURRENT_DATE() AS p_data_date QUALIFY ROW_NUMBER() OVER (ORDER BY p_data_date) = 1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_qualify.yml000066400000000000000000000405411503426445100253660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54077a4cd5d683769f8485f5a991ccef620eaf3df32299fa53348a05c16165df file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce 
where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: p_data_date qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: p_data_date end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_reduce.sql000066400000000000000000000005421503426445100251560ustar00rootroot00000000000000SELECT reduce(array(1, 2, 3), 0, (acc, x) -> acc + x); -- 6 SELECT reduce(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10); -- 60 SELECT reduce(array(1, 2, 3, 4), -- 2.5 named_struct('sum', 0, 'cnt', 0), (acc, x) -> named_struct('sum', acc.sum + x, 'cnt', acc.cnt + 1), acc -> acc.sum / acc.cnt) AS avg; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_reduce.yml000066400000000000000000000176141503426445100251700ustar00rootroot00000000000000# YML 
test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c328f82f79cf03d39b696d237969b1e89e30c62a360a7518b0abe1d6957da637 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: '*' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: named_struct function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'sum'" - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: quoted_literal: "'cnt'" - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: named_struct function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'sum'" - comma: ',' - expression: - column_reference: - naked_identifier: acc - dot: . 
- naked_identifier: sum - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: quoted_literal: "'cnt'" - comma: ',' - expression: column_reference: - naked_identifier: acc - dot: . - naked_identifier: cnt binary_operator: + numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: - naked_identifier: acc - dot: . - naked_identifier: sum - binary_operator: / - column_reference: - naked_identifier: acc - dot: . - naked_identifier: cnt - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: avg - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_set_operators.sql000066400000000000000000000013331503426445100265770ustar00rootroot00000000000000-- EXCEPT SELECT c FROM number1 EXCEPT SELECT c FROM number2; -- EXCEPT ALL SELECT c FROM number1 EXCEPT ALL (SELECT c FROM number2); -- MINUS SELECT c FROM number1 MINUS SELECT c FROM number2; -- MINUS ALL SELECT c FROM number1 MINUS ALL (SELECT c FROM number2); -- INTERSECT (SELECT c FROM number1) INTERSECT (SELECT c FROM number2); -- INTERSECT DISTINCT (SELECT c FROM number1) INTERSECT DISTINCT (SELECT c FROM number2); -- INTERSECT ALL (SELECT c FROM number1) INTERSECT ALL (SELECT c FROM number2); -- UNION (SELECT c FROM number1) UNION (SELECT c FROM number2); -- UNION DISTINCT (SELECT c FROM number1) UNION DISTINCT (SELECT c FROM number2); -- UNION ALL SELECT c FROM number1 UNION ALL (SELECT c FROM number2); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_set_operators.yml000066400000000000000000000244131503426445100266050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
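# A brief usage note (a sketch based on the comment above; no command-line
# flags are shown because any specific flag would be an assumption): after
# adding or editing a .sql fixture such as select_set_operators.sql, this
# file and the "_hash" field that the tests verify are regenerated from the
# repository root with:
#
#     python test/generate_parse_fixture_yml.py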
_hash: 49ae4b69f8e87602726882b414a050071d3a47819eb47ff5449a03aa28bcdebf file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: EXCEPT - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: keyword: MINUS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: MINUS - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: keyword: INTERSECT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: 
select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: UNION - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_sort_by.sql000066400000000000000000000037631503426445100254000ustar00rootroot00000000000000-- Sort rows within each partition in ascending manner SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name; SELECT name, age, zip_code FROM person SORT BY name; -- Sort rows within each partition using column position. SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY 1; SELECT name, age, zip_code FROM person SORT BY 1; -- Sort rows within partition in ascending -- manner keeping null values to be last. 
SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age NULLS LAST; SELECT age, name, zip_code FROM person SORT BY age NULLS LAST; -- Sort rows by age within each partition in -- descending manner, which defaults to NULL LAST. SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC; SELECT age, name, zip_code FROM person SORT BY age DESC; -- Sort rows by age within each partition in -- descending manner keeping null values to be first. SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC NULLS FIRST; SELECT age, name, zip_code FROM person SORT BY age DESC NULLS FIRST; -- Sort rows within each partition based on more -- than one column with each column having different -- sort direction. SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name ASC, age DESC; SELECT name, age, zip_code FROM person SORT BY name ASC, age DESC; -- Sort rows within each partition based on result of a function. SELECT age, name FROM person SORT BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 SORT BY age; SELECT age, name FROM person GROUP BY age SORT BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 SORT BY age; SELECT CURRENT_DATE() AS p_data_date SORT BY p_data_date; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_sort_by.yml000066400000000000000000000427351503426445100254040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cf84a712a11651efaf5033bd0a38ff96cacf0de17df4472a9a6922c540764df5 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: 
select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION function_contents: bracketed: start_bracket: ( 
expression: column_reference: naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - expression: function: function_name: function_name_identifier: LEFT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: 
naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: p_data_date sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: p_data_date - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_star_except.sql000066400000000000000000000003441503426445100262300ustar00rootroot00000000000000select * except (col) from table_name where row_no = 1; select * except (col) from table_name where row_no = 1; select * except (col1, col2, col3, col4, col5) from table_name where row_no = 1; select a.* except (a.b) from a; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_star_except.yml000066400000000000000000000102171503426445100262320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: be4429aa76f902654b502847e39a9679f51e7a8dd4a0e622d75ab8486962b5c6 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - comma: ',' - column_reference: naked_identifier: col5 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( column_reference: - naked_identifier: a - dot: . - naked_identifier: b end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_star_in_multiparameter_function.sql000066400000000000000000000001261503426445100323640ustar00rootroot00000000000000SELECT my_function(*, col2) FROM my_table; SELECT my_function(col1, *) FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_star_in_multiparameter_function.yml000066400000000000000000000034541503426445100323750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c43460c3d5e45879e576e2e6530d83b42a6963d60cacaf7748ade75a6cee21bc file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( star: '*' comma: ',' expression: column_reference: naked_identifier: col2 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_tablesample.sql000066400000000000000000000012241503426445100261760ustar00rootroot00000000000000SELECT a, b FROM test TABLESAMPLE(50 PERCENT); SELECT t.a, t.b FROM test TABLESAMPLE(50 PERCENT) t; SELECT t.a, t.b FROM test TABLESAMPLE(50 PERCENT) AS t; SELECT a, b FROM test TABLESAMPLE(5 ROWS); SELECT a, b FROM test TABLESAMPLE(BUCKET 4 OUT OF 10); SELECT test_1.a, test_1.b FROM test_1 TABLESAMPLE(5 ROWS) NATURAL JOIN test_2 TABLESAMPLE(BUCKET 4 OUT OF 10); SELECT t1.a, t2.b FROM test_1 TABLESAMPLE(5 ROWS) t1 NATURAL JOIN test_2 TABLESAMPLE(BUCKET 4 OUT OF 10) t2; SELECT t1.a, t2.b FROM test_1 TABLESAMPLE(5 ROWS) AS t1 NATURAL JOIN test_2 TABLESAMPLE(BUCKET 4 OUT OF 10) AS t2; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_tablesample.yml000066400000000000000000000216551503426445100262120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f09761e889c886491b737cfa6a4c0ae9b099fe041dd26de40095bb8d6119924f file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test_1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test_1 - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) alias_expression: naked_identifier: t1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) alias_expression: naked_identifier: t2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_transform_clause.sql000066400000000000000000000022211503426445100272520ustar00rootroot00000000000000-- With specified output without data type SELECT TRANSFORM (zip_code, name, age) USING 'cat' AS (a, b, c) FROM person WHERE zip_code > 94511; -- With specified output with data type SELECT TRANSFORM(zip_code, name, age) USING 'cat' AS (a string, b string, c string) FROM person WHERE zip_code > 94511; -- Using ROW FORMAT DELIMITED SELECT TRANSFORM(name, age) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' USING 'cat' AS (name_age string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '@' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' FROM person; -- Using Hive Serde SELECT TRANSFORM(zip_code, name, age) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'field.delim' = '\t' ) USING 'cat' AS (a string, b string, c string) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'field.delim' = '\t' ) FROM person WHERE zip_code > 94511; -- Schema-less mode SELECT TRANSFORM(zip_code, name, age) USING 'cat' FROM person WHERE zip_code > 94500; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_transform_clause.yml000066400000000000000000000170241503426445100272630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7d7b9702f08ab837c631adcbeadb71b03f4cec3bb98cf3b22314abf81532de51 file: - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - naked_identifier: string - comma: ',' - naked_identifier: b - naked_identifier: string - comma: ',' - naked_identifier: c - naked_identifier: string - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'NULL'" - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: name_age - naked_identifier: string - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'@'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'NULL'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'field.delim'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'\\t'" end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( 
- naked_identifier: a - naked_identifier: string - comma: ',' - naked_identifier: b - naked_identifier: string - comma: ',' - naked_identifier: c - naked_identifier: string - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'field.delim'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'\\t'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94500' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_union.sql000066400000000000000000000000431503426445100250330ustar00rootroot00000000000000SELECT 'a' AS col UNION SELECT 'b' sqlfluff-3.4.2/test/fixtures/dialects/sparksql/select_union.yml000066400000000000000000000015521503426445100250430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2cf4a25f87e17c61fce9178ed34791bacbdd8ff171d991253423825eb591fa5c file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'a'" alias_expression: alias_operator: keyword: AS naked_identifier: col - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'b'" sqlfluff-3.4.2/test/fixtures/dialects/sparksql/set.sql000066400000000000000000000003041503426445100231370ustar00rootroot00000000000000SET spark.sql.variable.substitute = FALSE; SET -v; SET; SET spark.sql.variable.substitute; SET spark.sql.cache.serializer=org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/set.yml000066400000000000000000000041601503426445100231450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1d054a4501b36dd0006dc6e682760051d612bf5ad29ee4f907259886819b57d5 file: - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: sql - dot: . 
- properties_naked_identifier: variable - dot: . - properties_naked_identifier: substitute comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - statement_terminator: ; - statement: set_statement: keyword: SET sql_conf_option: dash: '-' sql_conf_option: v - statement_terminator: ; - statement: set_statement: keyword: SET - statement_terminator: ; - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: sql - dot: . - properties_naked_identifier: variable - dot: . - properties_naked_identifier: substitute - statement_terminator: ; - statement: set_statement: - keyword: SET - property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: sql - dot: . - properties_naked_identifier: cache - dot: . - properties_naked_identifier: serializer - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: org - dot: . - properties_naked_identifier: apache - dot: . - properties_naked_identifier: spark - dot: . - properties_naked_identifier: sql - dot: . - properties_naked_identifier: execution - dot: . - properties_naked_identifier: columnar - dot: . - properties_naked_identifier: DefaultCachedBatchSerializer - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/set_variable.sql000066400000000000000000000005341503426445100250110ustar00rootroot00000000000000-- simple assignment SET VAR var1 = 5; -- A complex expression assignment SET VARIABLE var1 = (SELECT max(c1) FROM VALUES(1), (2) AS t(c1)); -- resetting the variable to DEFAULT (set in declare) SET VAR var1 = DEFAULT; -- A multi variable assignment SET VAR (var1, var2) = (SELECT max(c1), CAST(min(c1) AS STRING) FROM VALUES(1), (2) AS t(c1)); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/set_variable.yml000066400000000000000000000127031503426445100250140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 71c02a8f9b68d177148d660c2d288429ae80793a049057e9a4d06d2710ab68a2 file: - statement: set_variable_statement: - keyword: SET - keyword: VAR - naked_identifier: var1 - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '5' - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VARIABLE - naked_identifier: var1 - comparison_operator: raw_comparison_operator: '=' - expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VAR - naked_identifier: var1 - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: set_variable_statement: - keyword: SET - keyword: VAR - bracketed: - start_bracket: ( - naked_identifier: var1 - comma: ',' - naked_identifier: var2 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: min function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) keyword: AS data_type: primitive_type: keyword: STRING end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_columns.sql000066400000000000000000000004351503426445100250710ustar00rootroot00000000000000-- List the columns of `customer` table in current database. SHOW COLUMNS IN customer; -- List the columns of `customer` table in `salesdb` database. SHOW COLUMNS IN salesdb.customer; -- List the columns of `customer` table in `salesdb` database SHOW COLUMNS IN customer IN salesdb; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_columns.yml000066400000000000000000000021671503426445100250770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 67e5fe11f6cfbc707a143431f2a1c59578833cbd0d59aa06f589f92c4e413f59 file: - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: IN - table_expression: table_reference: naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: IN - table_expression: table_reference: - naked_identifier: salesdb - dot: . - naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: IN - table_expression: table_reference: naked_identifier: customer - keyword: IN - database_reference: naked_identifier: salesdb - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_create_table.sql000066400000000000000000000001471503426445100260230ustar00rootroot00000000000000SHOW CREATE TABLE test; --Generates Hive DDL for a Hive SerDe table. SHOW CREATE TABLE test AS SERDE; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_create_table.yml000066400000000000000000000015111503426445100260210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f18bea7eabb52edaab7ee8bb25937de657a9f286cabda424e1a61b9642ade00 file: - statement: show_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - table_expression: table_reference: naked_identifier: test - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - table_expression: table_reference: naked_identifier: test - keyword: AS - keyword: SERDE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_databases.sql000066400000000000000000000003441503426445100253370ustar00rootroot00000000000000-- Lists all the databases. SHOW DATABASES; -- Lists databases with name starting with string pattern `pay` SHOW DATABASES LIKE 'pay*'; -- Lists all databases. Keywords SCHEMAS and DATABASES are interchangeable. SHOW SCHEMAS; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_databases.yml000066400000000000000000000013631503426445100253430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0131d2ffd9cbd02a9a79f34d740d5817f34d6cd55c6b4ea3c8640dcd3a9f4f7c file: - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: LIKE - quoted_literal: "'pay*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_functions.sql000066400000000000000000000013731503426445100254230ustar00rootroot00000000000000-- List a system function `trim` by searching both user defined and system -- defined functions. SHOW FUNCTIONS trim; SHOW ALL FUNCTIONS trim; -- List a system function `concat` by searching system defined functions. 
SHOW SYSTEM FUNCTIONS concat; -- List a user function `concat_user` by searching user defined functions. SHOW USER FUNCTIONS concat_user; -- List a qualified function `max` from database `salesdb`. SHOW SYSTEM FUNCTIONS salesdb.max; -- List all functions starting with `t` SHOW FUNCTIONS LIKE 't*'; -- List all functions starting with `yea` or `windo` SHOW FUNCTIONS LIKE 'yea*|windo*'; -- Use a normal regex pattern to list function names that have 4 characters -- with `t` as the starting character. SHOW FUNCTIONS LIKE 't[a-z][a-z][a-z]'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_functions.yml000066400000000000000000000034541503426445100254270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6400a9136975e904445e94cd5d0c834004f6bcfe7bce3a9ee4d14e7b482fcdad file: - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - function_name: function_name_identifier: trim - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: ALL - keyword: FUNCTIONS - function_name: function_name_identifier: trim - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SYSTEM - keyword: FUNCTIONS - function_name: function_name_identifier: concat - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: USER - keyword: FUNCTIONS - function_name: function_name_identifier: concat_user - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SYSTEM - keyword: FUNCTIONS - function_name: naked_identifier: salesdb dot: . function_name_identifier: max - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'yea*|windo*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t[a-z][a-z][a-z]'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_partitions.sql000066400000000000000000000010151503426445100256010ustar00rootroot00000000000000-- Lists all partitions for table `customer` SHOW PARTITIONS customer; -- Lists all partitions for the qualified table `customer` SHOW PARTITIONS salesdb.customer; -- Specify a full partition spec to list specific partition SHOW PARTITIONS customer PARTITION (state = 'CA', city = 'Fremont'); -- Specify a partial partition spec to list the specific partitions SHOW PARTITIONS customer PARTITION (state = 'CA'); -- Specify a partial spec to list specific partition SHOW PARTITIONS customer PARTITION (city = 'San Jose'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_partitions.yml000066400000000000000000000042201503426445100256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 1851a55396f3a7398589408a2778f7dbb75d3f43117d2da7315b73dc81e6f712 file: - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: - naked_identifier: salesdb - dot: . - naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CA'" - comma: ',' - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Fremont'" - end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'CA'" end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: city comparison_operator: raw_comparison_operator: '=' quoted_literal: "'San Jose'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_table_extended.sql000066400000000000000000000006251503426445100263610ustar00rootroot00000000000000-- Show the details of the table SHOW TABLE EXTENDED LIKE 'employee'; -- Show the details of multiple tables with pattern matching SHOW TABLE EXTENDED LIKE 'employe*'; -- show partition file system details SHOW TABLE EXTENDED IN default LIKE 'employee' PARTITION (grade = 1); -- show partition file system details with pattern matching SHOW TABLE EXTENDED IN default LIKE 'empl*' PARTITION (grade = 1); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_table_extended.yml000066400000000000000000000033351503426445100263640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 5a84033fcdc2271279fbfa806416330e3ac8fed747f8b7aa5dce7525a93c9d69 file: - statement: show_statement: - keyword: SHOW - keyword: TABLE - keyword: EXTENDED - keyword: LIKE - quoted_literal: "'employee'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLE - keyword: EXTENDED - keyword: LIKE - quoted_literal: "'employe*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLE - keyword: EXTENDED - keyword: IN - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'employee'" - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: grade comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLE - keyword: EXTENDED - keyword: IN - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'empl*'" - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: grade comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_tables.sql000066400000000000000000000005611503426445100246630ustar00rootroot00000000000000-- List all tables in default database SHOW TABLES; -- List all tables from userdb database SHOW TABLES FROM userdb; -- List all tables in userdb database SHOW TABLES IN userdb; -- List all tables from default database matching the pattern `sam*` SHOW TABLES FROM default LIKE 'sam*'; -- List all tables matching the pattern `sam*|suj` SHOW TABLES LIKE 'sam*|suj'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_tables.yml000066400000000000000000000023311503426445100246620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7fec471a6c2d583df29ff3c1a5c3b174ce067b0df284ce6f21a055135ee2d8b5 file: - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: IN - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: LIKE - quoted_literal: "'sam*|suj'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_tblproperties.sql000066400000000000000000000007061503426445100263100ustar00rootroot00000000000000-- show all the user specified properties for table `customer` SHOW TBLPROPERTIES customer; -- show all the user specified properties for a qualified table `customer` -- in database `salesdb` SHOW TBLPROPERTIES salesdb.customer; -- show value for unquoted property key `created.by.user` SHOW TBLPROPERTIES customer (created.by.user); -- show value for property `created.date` specified as a string literal SHOW TBLPROPERTIES customer ('created.date'); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_tblproperties.yml000066400000000000000000000027561503426445100263150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a97829e09da9209cb61bc53da897c3519f4ff40c8508b9984c2b464343e6077d file: - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: - naked_identifier: salesdb - dot: . - naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: customer - bracketed: start_bracket: ( property_name_identifier: - properties_naked_identifier: created - dot: . - properties_naked_identifier: by - dot: .
- properties_naked_identifier: user end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: customer - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'created.date'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_views.sql000066400000000000000000000006371503426445100245520ustar00rootroot00000000000000-- List all views in default database SHOW VIEWS; -- List all views from userdb database SHOW VIEWS FROM userdb; -- List all views in global temp view database SHOW VIEWS IN global_temp; -- List all views from default database matching the pattern `sam*` SHOW VIEWS FROM default LIKE 'sam*'; -- List all views from the current database -- matching the pattern `sam|suj|temp*` SHOW VIEWS LIKE 'sam|suj|temp*'; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/show_views.yml000066400000000000000000000023361503426445100245520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c6041bda03c717e343e3b381325cdd57376956c82eaaa6d0440587b3a701524b file: - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: IN - database_reference: naked_identifier: global_temp - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: LIKE - quoted_literal: "'sam|suj|temp*'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/structure_accessor.sql000066400000000000000000000002251503426445100262700ustar00rootroot00000000000000select struct_column.inner_array[0].foo as inner_array__foo, try_element_at(struct_column.inner_array, 1).foo as inner_array__foo2 from src; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/structure_accessor.yml000066400000000000000000000041011503426445100262670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4b8b7eed83086d3ed49b5bce7e2fd96d5aad2b385fd2aa71c7e17f835fe86511 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: column_reference: - naked_identifier: struct_column - dot: . - naked_identifier: inner_array array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . 
semi_structured_element: foo alias_expression: alias_operator: keyword: as naked_identifier: inner_array__foo - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: try_element_at function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: struct_column - dot: . - naked_identifier: inner_array - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) semi_structured_expression: dot: . semi_structured_element: foo alias_expression: alias_operator: keyword: as naked_identifier: inner_array__foo2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/table_alias.sql000066400000000000000000000003261503426445100246100ustar00rootroot00000000000000select * from u as t ( a, b ); select * from u as t (a, b); select * from u as t(a,b); select * from u as (a,b); select * from u t ( a, b ); select * from u t (a, b); select * from u t(a,b); select * from u (a,b); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/table_alias.yml000066400000000000000000000144561503426445100246230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42f347aec9a9663f00ae7ca2021a7bcae7c70d010fa83954e8bb1ea981d0b279 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: alias_operator: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: 
select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: u function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/truncate_table.sql000066400000000000000000000002721503426445100253440ustar00rootroot00000000000000-- Removes all rows from the table in the partition specified TRUNCATE TABLE Student PARTITION(Age = 10); -- Removes all rows from the table from all partitions TRUNCATE TABLE Student; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/truncate_table.yml000066400000000000000000000017021503426445100253450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5d64f9bdcb0ff1769f35a9e32cce65a2474a0a79602712a32e2b35fb25bc7e1 file: - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: Student - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: Student - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/uncache_table.sql000066400000000000000000000000571503426445100251260ustar00rootroot00000000000000UNCACHE TABLE t1; UNCACHE TABLE IF EXISTS t1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/uncache_table.yml000066400000000000000000000013461503426445100251320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d792b932b3b37301c279ea86aec744c42da10d2dcf6d2c17936b0980cb1c714 file: - statement: uncache_table: - keyword: UNCACHE - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: uncache_table: - keyword: UNCACHE - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/unpivot_clause.sql000066400000000000000000000022741503426445100254130ustar00rootroot00000000000000-- Examples from https://spark.apache.org/docs/3.5.0/sql-ref-syntax-qry-select-unpivot.html -- column names are used as unpivot columns SELECT * FROM sales_quarterly UNPIVOT ( sales FOR quarter IN (q1, q2, q3, q4) ); -- NULL values are excluded by default; they can be included -- unpivot columns can be aliased -- unpivot result can be referenced via its alias SELECT up.* FROM sales_quarterly UNPIVOT INCLUDE NULLS ( sales FOR quarter IN (q1 AS Q1, q2 AS Q2, q3 AS Q3, q4 AS Q4) ) AS up; -- multiple value columns can be unpivoted per row SELECT * FROM sales_quarterly UNPIVOT EXCLUDE NULLS ( (first_quarter, second_quarter) FOR half_of_the_year IN ( (q1, q2) AS H1, (q3, q4) AS H2 ) ); SELECT * FROM sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`, q2 AS `Apr-Jun`, q3 AS `Jul-Sep`, sales.q4 AS `Oct-Dec`)), sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`, q2 AS `Apr-Jun`, q3 AS `Jul-Sep`, sales.q4 AS `Oct-Dec`)) ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/unpivot_clause.yml000066400000000000000000000223161503426445100254150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a9148a9d2a8eef3094589c6bc3f657db5266bb7e48c493d308fe6b6ab6b21fe0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_quarterly unpivot_clause: keyword: UNPIVOT bracketed: start_bracket: ( unpivot_single_column: - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - comma: ',' - column_reference: naked_identifier: q2 - comma: ',' - column_reference: naked_identifier: q3 - comma: ',' - column_reference: naked_identifier: q4 - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: up dot: .
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_quarterly unpivot_clause: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: start_bracket: ( unpivot_single_column: - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - alias_expression: alias_operator: keyword: AS naked_identifier: Q1 - comma: ',' - column_reference: naked_identifier: q2 - alias_expression: alias_operator: keyword: AS naked_identifier: Q2 - comma: ',' - column_reference: naked_identifier: q3 - alias_expression: alias_operator: keyword: AS naked_identifier: Q3 - comma: ',' - column_reference: naked_identifier: q4 - alias_expression: alias_operator: keyword: AS naked_identifier: Q4 - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: up - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_quarterly unpivot_clause: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: start_bracket: ( unpivot_multi_column: - bracketed: - start_bracket: ( - naked_identifier: first_quarter - comma: ',' - naked_identifier: second_quarter - end_bracket: ) - keyword: FOR - naked_identifier: half_of_the_year - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - comma: ',' - column_reference: naked_identifier: q2 - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: H1 - comma: ',' - bracketed: - start_bracket: ( - column_reference: naked_identifier: q3 - comma: ',' - column_reference: naked_identifier: q4 - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: H2 - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales unpivot_clause: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: start_bracket: ( unpivot_single_column: - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jan-Mar`' - comma: ',' - column_reference: naked_identifier: q2 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Apr-Jun`' - comma: ',' - column_reference: naked_identifier: q3 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jul-Sep`' - comma: ',' - column_reference: - naked_identifier: sales - dot: . 
- naked_identifier: q4 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Oct-Dec`' - end_bracket: ) end_bracket: ) - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales unpivot_clause: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: start_bracket: ( unpivot_single_column: - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: q1 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jan-Mar`' - comma: ',' - column_reference: naked_identifier: q2 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Apr-Jun`' - comma: ',' - column_reference: naked_identifier: q3 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Jul-Sep`' - comma: ',' - column_reference: - naked_identifier: sales - dot: . - naked_identifier: q4 - alias_expression: alias_operator: keyword: AS quoted_identifier: '`Oct-Dec`' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/use_database.sql000066400000000000000000000001271503426445100247670ustar00rootroot00000000000000USE database_name; -- Use the 'userdb' USE userdb; -- Use the 'userdb1' USE userdb1; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/use_database.yml000066400000000000000000000014521503426445100247730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f3c7428b47771dbc65f522c134af5d7e26d17e80be2a6da405ea6afefab130b4 file: - statement: use_statement: keyword: USE database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/values.sql000066400000000000000000000002521503426445100236450ustar00rootroot00000000000000values (1, 2); values (1, 2), (3, 4); values (1, 2), (3, 4), (greatest(5, 6), least(7, 8)); values 1, 2; values 1; values 1 , 2 , 3 limit 1; values 3 , 2 , 1 order by 2; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/values.yml000066400000000000000000000066341503426445100236610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ec3dc1fe9c14c409f2080b419858302d1d5431a0e6bb8b82f2d380d72ae9cf4d file: - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: greatest function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: least function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - statement_terminator: ; - statement: values_clause: keyword: values expression: numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - limit_clause: keyword: limit numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '1' - orderby_clause: - keyword: order - keyword: by - numeric_literal: '2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/values_with_alias.sql000066400000000000000000000003771503426445100260610ustar00rootroot00000000000000values (1, 2) as t; values (1, 2) t; values (1, 2) as t (a, b); values (1, 2), (3, 4) as t(a,b); values (1, 2) as (a,b); values (1, 2) t(a,b); values (1, 2) (a,b); values (1, 2), (3, 4) as (a,b); values (1, 2), (3, 4) t(a,b); values (1, 2), (3, 4) (a,b); sqlfluff-3.4.2/test/fixtures/dialects/sparksql/values_with_alias.yml000066400000000000000000000135401503426445100260570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7ee0530bd712ee7e7d4126a018e9ad96c8c71a4da68c5808c08fb9df47a507d3 file: - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: alias_operator: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - 
statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/window_functions.sql000066400000000000000000000061501503426445100257500ustar00rootroot00000000000000SELECT name, dept, RANK() OVER ( PARTITION BY dept ORDER BY salary ) AS row_rank FROM employees; SELECT name, dept, DENSE_RANK() OVER ( PARTITION BY dept ORDER BY salary ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS row_dense_rank FROM employees; SELECT name, dept, age, CUME_DIST() OVER ( PARTITION BY dept ORDER BY age RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS data_cume_dist FROM employees; SELECT name, dept, salary, MIN(salary) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_min FROM employees; SELECT name, salary, LAG(salary) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT name, salary, LAG(salary) IGNORE NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) IGNORE NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT name, salary, LAG(salary) RESPECT NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) RESPECT NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT id, v, LEAD(v, 0) IGNORE NULLS OVER w AS v_lead, LAG(v, 0) IGNORE NULLS OVER w AS v_lag, NTH_VALUE(v, 2) IGNORE NULLS OVER w AS v_nth_value, FIRST_VALUE(v) IGNORE NULLS OVER w AS v_first_value, LAST_VALUE(v) IGNORE NULLS OVER w AS v_last_value FROM test_ignore_null WINDOW w AS (ORDER BY id) ORDER BY id; SELECT id, v, LEAD(v, 0) RESPECT NULLS OVER w AS v_lead, LAG(v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(v) RESPECT NULLS OVER w AS v_last_value FROM test_ignore_null WINDOW w AS (ORDER BY id) ORDER BY id; SELECT ignore_nulls.id, ignore_nulls.v, LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead, LAG(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(ignore_nulls.v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_last_value FROM test_ignore_null AS ignore_nulls WINDOW w AS (ORDER BY ignore_nulls.id) ORDER BY ignore_nulls.id; SELECT ignore_nulls.id, ignore_nulls.v, LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead, LAG(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(ignore_nulls.v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_last_value FROM test_ignore_null AS ignore_nulls WINDOW w AS (ORDER BY ignore_nulls.id range between interval 6 days preceding and current row) ORDER BY ignore_nulls.id; sqlfluff-3.4.2/test/fixtures/dialects/sparksql/window_functions.yml000066400000000000000000001066161503426445100257620ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dc476388efd344ea47f0a9fc2f657203b53be98e98c4b3edc87e118d17fec208 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: row_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DENSE_RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: row_dense_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CUME_DIST function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: data_cume_dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - 
comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_min from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary 
- comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: 
bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( 
expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . 
- naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null alias_expression: alias_operator: keyword: AS naked_identifier: ignore_nulls named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . 
- naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: alias_operator: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null alias_expression: alias_operator: keyword: AS naked_identifier: ignore_nulls named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id frame_clause: - keyword: range - keyword: between - interval_expression: keyword: interval interval_literal: numeric_literal: '6' date_part: days - keyword: preceding - keyword: and - keyword: current - keyword: row end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/000077500000000000000000000000001503426445100212675ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/sqlite/.sqlfluff000066400000000000000000000000341503426445100231070ustar00rootroot00000000000000[sqlfluff] dialect = sqlite sqlfluff-3.4.2/test/fixtures/dialects/sqlite/alter_table.sql000066400000000000000000000005631503426445100242720ustar00rootroot00000000000000ALTER TABLE users RENAME TO people; ALTER TABLE users RENAME COLUMN user_id TO person_id; ALTER TABLE users ADD COLUMN name TEXT UNIQUE; ALTER TABLE users ADD COLUMN credentials_last_changed INTEGER NOT NULL DEFAULT 0; ALTER TABLE users ADD COLUMN credentials_last_changed TEXT NOT NULL DEFAULT '0001-01-01T00:00:00.000000+00:00'; ALTER TABLE users DROP COLUMN age; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/alter_table.yml000066400000000000000000000052021503426445100242670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 752b7fc012b17e70ad36f108082b3eba1114fa8c60a90291ce1f6e1c0dc6020b
file:
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: RENAME
    - keyword: TO
    - naked_identifier: people
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        naked_identifier: user_id
    - keyword: TO
    - naked_identifier: person_id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        naked_identifier: name
        data_type:
          data_type_identifier: TEXT
        column_constraint_segment:
          keyword: UNIQUE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
      - naked_identifier: credentials_last_changed
      - data_type:
          data_type_identifier: INTEGER
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          expression:
            numeric_literal: '0'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
      - naked_identifier: credentials_last_changed
      - data_type:
          data_type_identifier: TEXT
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          expression:
            quoted_literal: "'0001-01-01T00:00:00.000000+00:00'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: users
    - keyword: DROP
    - keyword: COLUMN
    - column_reference:
        naked_identifier: age
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/arithmetric_a.sql000066400000000000000000000013561503426445100246300ustar00rootroot00000000000000
SELECT 1 + (2 * 3) >= 4 + 6+13 as val;
SELECT 1 + ~(~2 * 3) >= 4 + ~6+13 as val;
SELECT -1;
SELECT -1 + 5;
SELECT ~1;
SELECT -1 + ~5;
SELECT 4 & ~8 | 16;
SELECT 8 + ~(3);
SELECT 8 | ~ ~ ~4;
SELECT 1 * -(5);
SELECT 1 * -5;
SELECT 1 * - - - 5;
SELECT 1 * - - - (5);
SELECT 1 * + + (5);
SELECT 1 * - - - func(5);
SELECT 1 * ~ ~ ~ func(5);
SELECT 1 * +(5);
SELECT 1 * +5;
SELECT 1 * + + 5;
SELECT FALSE AND NOT (TRUE);
SELECT FALSE AND NOT NOT NOT (TRUE); -- parses middle NOT as column ref
SELECT FALSE AND NOT (TRUE);
SELECT FALSE AND NOT func(5);
SELECT 'abc' LIKE - - 5; -- PG can parse this ok, and then fail due to data type mismatch
SELECT 'abc' LIKE ~ ~ 5; -- PG can parse this ok, and then fail due to data type mismatch
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/arithmetric_a.yml000066400000000000000000000256061503426445100246360ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 14952d9d87f9b9ba951d6f57dcda97a4797a166002c72d1a1d8256d7a2207dd0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: alias_operator: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - tilde: '~' - bracketed: start_bracket: ( expression: - tilde: '~' - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - tilde: '~' - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: alias_operator: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: tilde: '~' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - tilde: '~' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '4' - binary_operator: ampersand: '&' - tilde: '~' - numeric_literal: '8' - binary_operator: pipe: '|' - numeric_literal: '16' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '8' binary_operator: + tilde: '~' bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '8' - binary_operator: pipe: '|' - tilde: '~' - tilde: '~' - tilde: '~' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: '-' bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - 
statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - sign_indicator: + - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - tilde: '~' - tilde: '~' - tilde: '~' - function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: + bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'FALSE' - binary_operator: AND - keyword: NOT - keyword: NOT - keyword: NOT - bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT function: function_name: function_name_identifier: func function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'abc'" keyword: LIKE sign_indicator: '-' numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT 
select_clause_element: expression: - quoted_literal: "'abc'" - keyword: LIKE - tilde: '~' - tilde: '~' - numeric_literal: '5' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/block_comment_end_of_input.sql000066400000000000000000000003531503426445100273560ustar00rootroot00000000000000/* According to https://www.sqlite.org/lang_comment.html, it is valid for a C-style comment to end at the "end-of-input", without being closed explicitly. This document is a valid SQLite file, but gives a parsing error in SQLFluff. sqlfluff-3.4.2/test/fixtures/dialects/sqlite/block_comment_end_of_input.yml000066400000000000000000000006171503426445100273630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdda373cd9cd649f82a9c5cf7ba9e290375c0ceae29477b0bad5a25f24a52ae3 file: null sqlfluff-3.4.2/test/fixtures/dialects/sqlite/conflict_clause.sql000066400000000000000000000005101503426445100251410ustar00rootroot00000000000000CREATE TABLE users ( user_id INTEGER PRIMARY KEY ON CONFLICT ROLLBACK, user_name TEXT NOT NULL ON CONFLICT ABORT ); ALTER TABLE users ADD COLUMN name TEXT UNIQUE ON CONFLICT FAIL; create table imap_boxes ( account_id integer not null, box_name text not null, unique (account_id, box_name) on conflict replace ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/conflict_clause.yml000066400000000000000000000055421503426445100251550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ad491a3736ca6192fa02974d8ebb4fecb73b54637c6299f2b8284dfe52b0e0b1 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - conflict_clause: - keyword: 'ON' - keyword: CONFLICT - keyword: ROLLBACK - comma: ',' - column_definition: naked_identifier: user_name data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - conflict_clause: - keyword: 'ON' - keyword: CONFLICT - keyword: ABORT - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: users - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: name data_type: data_type_identifier: TEXT column_constraint_segment: keyword: UNIQUE conflict_clause: - keyword: 'ON' - keyword: CONFLICT - keyword: FAIL - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: imap_boxes - bracketed: - start_bracket: ( - column_definition: naked_identifier: account_id data_type: data_type_identifier: integer column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: naked_identifier: box_name data_type: data_type_identifier: text column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - table_constraint: keyword: unique bracketed: - start_bracket: ( - column_reference: naked_identifier: account_id - comma: ',' - column_reference: naked_identifier: box_name - end_bracket: ) conflict_clause: - keyword: 'on' - keyword: conflict - keyword: replace - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_index.sql000066400000000000000000000006331503426445100244440ustar00rootroot00000000000000CREATE INDEX li1 ON entries_data(id, LENGTH(chunk)); CREATE INDEX acctchng_magnitude ON account_change(acct_no, abs(amt)); CREATE INDEX t2xy ON t2(x+y); CREATE UNIQUE INDEX team_leader ON person(team_id) WHERE is_team_leader; CREATE INDEX ex1 ON tab1(a,b) WHERE a=5 OR b=6; CREATE INDEX po_parent ON purchaseorder(parent_po) WHERE parent_po IS NOT NULL; CREATE INDEX ex2 ON tab2(b,c) WHERE c IS NOT NULL; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_index.yml000066400000000000000000000120401503426445100244410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ce6a99217dcc134718351aa04169d1207cf4df9343dada93f2e3107ac1316cd file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: li1 - keyword: 'ON' - table_reference: naked_identifier: entries_data - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: id - comma: ',' - index_column_definition: expression: function: function_name: function_name_identifier: LENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: chunk end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: acctchng_magnitude - keyword: 'ON' - table_reference: naked_identifier: account_change - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: acct_no - comma: ',' - index_column_definition: expression: function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: amt end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: t2xy - keyword: 'ON' - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( index_column_definition: expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: naked_identifier: y end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: team_leader - keyword: 'ON' - table_reference: naked_identifier: person - bracketed: start_bracket: ( index_column_definition: naked_identifier: team_id end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: naked_identifier: is_team_leader - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: ex1 - keyword: 'ON' - table_reference: naked_identifier: tab1 - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: a - comma: ',' - index_column_definition: naked_identifier: b - end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - binary_operator: OR - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: po_parent - keyword: 'ON' - table_reference: naked_identifier: purchaseorder - bracketed: start_bracket: ( index_column_definition: naked_identifier: parent_po end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: parent_po - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: ex2 - keyword: 'ON' - table_reference: naked_identifier: tab2 - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: b - comma: ',' - index_column_definition: naked_identifier: c - end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: c - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table.sql000066400000000000000000000006611503426445100244250ustar00rootroot00000000000000CREATE TABLE users ( user_id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT NOT NULL UNIQUE, password TEXT NOT NULL COLLATE NOCASE, email TEXT NOT NULL UNIQUE ); CREATE TABLE users ( user_id INTEGER PRIMARY KEY ASC AUTOINCREMENT ); CREATE TABLE users ( user_id INTEGER PRIMARY KEY DESC AUTOINCREMENT ); CREATE TABLE example ( id INTEGER PRIMARY KEY, description CHARACTER VARYING(32) NOT NULL ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table.yml000066400000000000000000000072041503426445100244270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 04b62dae86f86f9768a3f717a6d5c8c35b47513f9d1bc91b8f40c677f31932fd file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: AUTOINCREMENT - comma: ',' - column_definition: - naked_identifier: username - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_definition: - naked_identifier: password - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: NOCASE - comma: ',' - column_definition: - naked_identifier: email - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_definition: naked_identifier: user_id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: ASC - keyword: AUTOINCREMENT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_definition: naked_identifier: user_id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: DESC - keyword: AUTOINCREMENT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: description data_type: - keyword: CHARACTER - keyword: VARYING - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_autoincrement.sql000066400000000000000000000000761503426445100273620ustar00rootroot00000000000000
CREATE TABLE foo(
    id INTEGER PRIMARY KEY AUTOINCREMENT
);
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_autoincrement.yml000066400000000000000000000015451503426445100273660ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 35d22c9df0ef9cc9ea9c020085cf4f633f9ea53a48a92bfcd928bbe86df5fe0a
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: id
          data_type:
            data_type_identifier: INTEGER
          column_constraint_segment:
          - keyword: PRIMARY
          - keyword: KEY
          - keyword: AUTOINCREMENT
        end_bracket: )
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_check.sql000066400000000000000000000001021503426445100255500ustar00rootroot00000000000000
CREATE TABLE foo(
    num NUMBER NOT NULL,
    CHECK (num > 0)
);
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_check.yml000066400000000000000000000022451503426445100255640ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5465c2be405342b8e453437cd27526acc01a94128274719114d7fe0e3ae8a665
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: num
          data_type:
            data_type_identifier: NUMBER
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        comma: ','
        table_constraint:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: num
              comparison_operator:
                raw_comparison_operator: '>'
              numeric_literal: '0'
            end_bracket: )
        end_bracket: )
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_default.sql000066400000000000000000000006151503426445100303740ustar00rootroot00000000000000
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS "tbl" (
    "col" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
COMMIT;
CREATE TABLE t (
    id TEXT DEFAULT datetime('now')
);
CREATE TABLE t (
    id TEXT DEFAULT (datetime('now'))
);
CREATE TABLE t (
    id TEXT DEFAULT ('now')
);
CREATE TABLE t (
    id TEXT DEFAULT 'now'
);
CREATE TABLE t (
    id TEXT DEFAULT -1
);
CREATE TABLE t (
    id TEXT DEFAULT +1
);
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_default.yml000066400000000000000000000116701503426445100304010ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c742c44c2d976583d668efa4ed4106c167c6823c6af92b7ec26bafe97974815b file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '"tbl"' - bracketed: start_bracket: ( column_definition: - quoted_identifier: '"col"' - data_type: data_type_identifier: TIMESTAMP - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: datetime function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'now'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: datetime function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'now'" end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: quoted_literal: "'now'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: quoted_literal: "'now'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: TEXT column_constraint_segment: keyword: DEFAULT expression: numeric_literal: sign_indicator: + numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_generated.sql000066400000000000000000000004571503426445100307120ustar00rootroot00000000000000CREATE TABLE t1 ( a INTEGER PRIMARY KEY, b INT, c TEXT, d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL, e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED ); CREATE 
TABLE t1 ( a INTEGER PRIMARY KEY, b INT, c TEXT, d INT AS (a*abs(b)), e TEXT AS (substr(c,b,b+1)) STORED ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_generated.yml000066400000000000000000000131751503426445100307150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c5fa1418f80ca0b40baeccc96319bac5ca9e0ba0a5f45bc93813155b4fa13070 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - keyword: VIRTUAL - comma: ',' - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: keyword: AS bracketed: start_bracket: ( expression: column_reference: naked_identifier: a binary_operator: '*' function: function_name: function_name_identifier: abs function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) end_bracket: ) - comma: ',' - column_definition: naked_identifier: e data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: AS - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: substr function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: column_reference: naked_identifier: b - comma: ',' - expression: 
column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' - end_bracket: ) end_bracket: ) - keyword: STORED - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_regexp.sql000066400000000000000000000001341503426445100302360ustar00rootroot00000000000000CREATE TABLE colors ( css_name TEXT, rgb TEXT CHECK(rgb REGEXP '^#[0-9A-F]{6}$') ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_constraint_regexp.yml000066400000000000000000000023011503426445100302360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 596b1a2c55287d473971b098d2acc685c6fd7a29e34d9735367b2f8c19d9d7c5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: colors - bracketed: - start_bracket: ( - column_definition: naked_identifier: css_name data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: rgb data_type: data_type_identifier: TEXT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: rgb keyword: REGEXP quoted_literal: "'^#[0-9A-F]{6}$'" end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_deferrable.sql000066400000000000000000000033641503426445100266030ustar00rootroot00000000000000-- check deferrable in table constrain segment CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE INITIALLY IMMEDIATE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE INITIALLY IMMEDIATE ); -- check deferrable in column constrain segment CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE INITIALLY IMMEDIATE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) NOT DEFERRABLE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) NOT DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) NOT DEFERRABLE INITIALLY IMMEDIATE ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_deferrable.yml000066400000000000000000000311661503426445100266060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5ceb51beba6740a7f80613179a15a4d2856cefa05e3b9b34ca1e11b4727c6973 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: 
data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: 
trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_table_end.sql000066400000000000000000000005461503426445100264240ustar00rootroot00000000000000CREATE TABLE foo ( id INTEGER NOT NULL PRIMARY KEY ) WITHOUT ROWID; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) WITHOUT ROWID; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) STRICT; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) WITHOUT ROWID, STRICT; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_table_end.yml000066400000000000000000000061761503426445100264330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7a2b0510ff0246a3556ac6091c65fd281aa8097764e142abf459b299530d3456 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_definition: - naked_identifier: id - data_type: data_type_identifier: INTEGER - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: keyword: STRICT - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - comma: ',' - keyword: STRICT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_unsigned.sql000066400000000000000000000003131503426445100263130ustar00rootroot00000000000000CREATE TABLE "wellplated_format" ( "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "bottom_row" varchar(1) NOT NULL, "right_column" smallint unsigned NOT NULL CHECK ("right_column" >= 0) ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_table_unsigned.yml000066400000000000000000000040521503426445100263210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6072c4292a682a04c8bf5df58bfd0ede2d9faeb294c2e5f5eb8c877659c6c158
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        quoted_identifier: '"wellplated_format"'
    - bracketed:
      - start_bracket: (
      - column_definition:
        - quoted_identifier: '"id"'
        - data_type:
            data_type_identifier: integer
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
          - keyword: PRIMARY
          - keyword: KEY
          - keyword: AUTOINCREMENT
      - comma: ','
      - column_definition:
          quoted_identifier: '"bottom_row"'
          data_type:
            data_type_identifier: varchar
            bracketed_arguments:
              bracketed:
                start_bracket: (
                numeric_literal: '1'
                end_bracket: )
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
        - quoted_identifier: '"right_column"'
        - data_type:
            data_type_identifier: smallint
            keyword: unsigned
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  quoted_identifier: '"right_column"'
                comparison_operator:
                - raw_comparison_operator: '>'
                - raw_comparison_operator: '='
                numeric_literal: '0'
              end_bracket: )
      - end_bracket: )
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_trigger.sql000066400000000000000000000022571503426445100250030ustar00rootroot00000000000000
CREATE TRIGGER update_customer_address UPDATE OF address ON customers
BEGIN
    UPDATE orders SET address = new.address WHERE customer_name = old.name;
END;

CREATE TRIGGER cust_addr_chng INSTEAD OF UPDATE OF cust_addr ON customer_address
BEGIN
    UPDATE customer SET cust_addr=NEW.cust_addr WHERE cust_id=NEW.cust_id;
END;

CREATE TRIGGER validate_email_before_insert_leads BEFORE INSERT ON leads
BEGIN
    SELECT 1;
END;

CREATE TRIGGER log_contact_after_update AFTER UPDATE ON leads
BEGIN
    INSERT INTO lead_logs (
        old_id, new_id, old_phone, new_phone, old_email, new_email,
        user_action, created_at
    )
    VALUES (
        old.id, new.id, old.phone, new.phone, old.email, new.email, 'UPDATE'
    )
    ;
END;

CREATE TRIGGER aft_insert AFTER INSERT ON emp_details
BEGIN
    INSERT INTO emp_log(emp_id,salary,edittime)
    VALUES(NEW.employee_id,NEW.salary,current_date);
END;

CREATE TRIGGER x AFTER INSERT ON y
WHEN new.z IS NULL -- putting this expression in parens allows parsing
BEGIN
    UPDATE y SET z = TRUE WHERE rowid = new.rowid;
END;

CREATE TRIGGER trigger_name AFTER UPDATE ON table_name
BEGIN
    INSERT INTO table_name_history (action) VALUES ('UPDATE');
END;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_trigger.yml000066400000000000000000000221611503426445100250020ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 13aaf97dae29df6299a0e6162bba68dda476463abf453b36aedc777abb092c5d file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: update_customer_address - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: address - keyword: 'ON' - table_reference: naked_identifier: customers - keyword: BEGIN - update_statement: keyword: UPDATE table_reference: naked_identifier: orders set_clause_list: keyword: SET set_clause: naked_identifier: address comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: old - dot: . - naked_identifier: name - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: cust_addr_chng - keyword: INSTEAD - keyword: OF - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: cust_addr - keyword: 'ON' - table_reference: naked_identifier: customer_address - keyword: BEGIN - update_statement: keyword: UPDATE table_reference: naked_identifier: customer set_clause_list: keyword: SET set_clause: naked_identifier: cust_addr comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: NEW - dot: . - naked_identifier: cust_addr where_clause: keyword: WHERE expression: - column_reference: naked_identifier: cust_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: cust_id - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: validate_email_before_insert_leads - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: leads - keyword: BEGIN - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: log_contact_after_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: leads - keyword: BEGIN - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: lead_logs - bracketed: - start_bracket: ( - column_reference: naked_identifier: old_id - comma: ',' - column_reference: naked_identifier: new_id - comma: ',' - column_reference: naked_identifier: old_phone - comma: ',' - column_reference: naked_identifier: new_phone - comma: ',' - column_reference: naked_identifier: old_email - comma: ',' - column_reference: naked_identifier: new_email - comma: ',' - column_reference: naked_identifier: user_action - comma: ',' - column_reference: naked_identifier: created_at - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: phone - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . 
- naked_identifier: phone - comma: ',' - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: email - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: email - comma: ',' - expression: quoted_literal: "'UPDATE'" - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: aft_insert - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: emp_details - keyword: BEGIN - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: emp_log - bracketed: - start_bracket: ( - column_reference: naked_identifier: emp_id - comma: ',' - column_reference: naked_identifier: salary - comma: ',' - column_reference: naked_identifier: edittime - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: NEW - dot: . - naked_identifier: employee_id - comma: ',' - expression: column_reference: - naked_identifier: NEW - dot: . - naked_identifier: salary - comma: ',' - expression: bare_function: current_date - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: x - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: y - keyword: WHEN - expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: z keyword: IS null_literal: 'NULL' - keyword: BEGIN - update_statement: keyword: UPDATE table_reference: naked_identifier: y set_clause_list: keyword: SET set_clause: naked_identifier: z comparison_operator: raw_comparison_operator: '=' expression: boolean_literal: 'TRUE' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: rowid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new - dot: . - naked_identifier: rowid - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: trigger_name - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: table_name - keyword: BEGIN - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table_name_history - bracketed: start_bracket: ( column_reference: naked_identifier: action end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'UPDATE'" end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_view.sql000066400000000000000000000003131503426445100243020ustar00rootroot00000000000000CREATE TEMPORARY VIEW IF NOT EXISTS temp_table AS SELECT * FROM tab WHERE col = 'value'; CREATE VIEW Test.Data (id, name, age) AS SELECT id, name, age FROM temp_table WHERE age > 18 AND name = 'John'; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_view.yml000066400000000000000000000055511503426445100243150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae65d21936bdbcbd6b3967a113fa71fe4ce6dffff3fbc5f55be69c82d61d0d2f file: - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: temp_table - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab where_clause: keyword: WHERE expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: - naked_identifier: Test - dot: . - naked_identifier: Data - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: temp_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: age - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '18' - binary_operator: AND - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_virtual_table_check.sql000066400000000000000000000007411503426445100273270ustar00rootroot00000000000000CREATE VIRTUAL TABLE email USING fts5; CREATE VIRTUAL TABLE email USING fts5(sender, title, body); CREATE VIRTUAL TABLE IF NOT EXISTS email USING fts5(name, phone, email); CREATE VIRTUAL TABLE sample_schema.email USING fts3(content, date); CREATE VIRTUAL TABLE email USING fts5( 'email text', user_id, 100, "complex-field@!#" ); CREATE VIRTUAL TABLE IF NOT EXISTS sample_schema.email USING fts5( 'email text', user_id, 0, "complex-field@!#" ); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/create_virtual_table_check.yml000066400000000000000000000061661503426445100273400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a5d5c7202136c306e6a940f7bc582106d93cd8b02bac94fcb888c5efbcbf7d32 file: - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - table_reference: naked_identifier: email - keyword: USING - naked_identifier: fts5 - statement_terminator: ; - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - table_reference: naked_identifier: email - keyword: USING - naked_identifier: fts5 - bracketed: - start_bracket: ( - naked_identifier: sender - comma: ',' - naked_identifier: title - comma: ',' - naked_identifier: body - end_bracket: ) - statement_terminator: ; - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: email - keyword: USING - naked_identifier: fts5 - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: phone - comma: ',' - naked_identifier: email - end_bracket: ) - statement_terminator: ; - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - table_reference: - naked_identifier: sample_schema - dot: . - naked_identifier: email - keyword: USING - naked_identifier: fts3 - bracketed: - start_bracket: ( - naked_identifier: content - comma: ',' - naked_identifier: date - end_bracket: ) - statement_terminator: ; - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - table_reference: naked_identifier: email - keyword: USING - naked_identifier: fts5 - bracketed: - start_bracket: ( - quoted_literal: "'email text'" - comma: ',' - naked_identifier: user_id - comma: ',' - numeric_literal: '100' - comma: ',' - quoted_identifier: '"complex-field@!#"' - end_bracket: ) - statement_terminator: ; - statement: create_virtual_table_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: sample_schema - dot: . - naked_identifier: email - keyword: USING - naked_identifier: fts5 - bracketed: - start_bracket: ( - quoted_literal: "'email text'" - comma: ',' - naked_identifier: user_id - comma: ',' - numeric_literal: '0' - comma: ',' - quoted_identifier: '"complex-field@!#"' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/delete.sql000066400000000000000000000003201503426445100232450ustar00rootroot00000000000000DELETE FROM table_name WHERE a > 0; DELETE FROM table_name WHERE a > 0 RETURNING * ; DELETE FROM table_name WHERE a > 0 RETURNING *, id ; DELETE FROM table_name WHERE a > 0 RETURNING id foo, id_2 AS bar ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/delete.yml000066400000000000000000000062301503426445100232550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1ec3adc2a95b48e346be58f900f20168961c8697cdad267f56b70178e086815b file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' comma: ',' expression: column_reference: naked_identifier: id - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' returning_clause: - keyword: RETURNING - expression: column_reference: naked_identifier: id - alias_expression: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: id_2 - alias_expression: alias_operator: keyword: AS naked_identifier: bar - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/drop_trigger.sql000066400000000000000000000000461503426445100244770ustar00rootroot00000000000000DROP TRIGGER IF EXISTS MyTestTrigger; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/drop_trigger.yml000066400000000000000000000011271503426445100245020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 56de27b71563494c78c91fd576e749086a7271b6cefb3cb331727e560eaf8b23 file: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: MyTestTrigger statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/insert.sql000066400000000000000000000016431503426445100233200ustar00rootroot00000000000000INSERT INTO t1 VALUES (1, 2, 3), (4, 5, 6); INSERT INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR ABORT INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR FAIL INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR IGNORE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR REPLACE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); REPLACE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR ROLLBACK INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT INTO t1 SELECT * FROM (SELECT c, c + d AS e FROM t2) AS dt; INSERT INTO t1 DEFAULT VALUES; INSERT INTO t1 (a, b, c) DEFAULT VALUES; INSERT INTO t1 (a, b, c) DEFAULT VALUES RETURNING *; INSERT INTO t1 (a, b, c) DEFAULT VALUES RETURNING a foo; INSERT INTO t1 (a, b, c) DEFAULT VALUES RETURNING a AS foo, *; INSERT INTO t1 (a, b, c) DEFAULT VALUES RETURNING a AS foo; INSERT INTO users (name, email) VALUES (1, 2) RETURNING *; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/insert.yml000066400000000000000000000345071503426445100233270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f8514936a7469d9632fa8a551be77b6a186f6870a9b6e97abc85ffbd0565c40 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: ABORT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: 
numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: FAIL - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: IGNORE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: REPLACE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: REPLACE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: ROLLBACK - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - 
comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: alias_operator: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dt - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - returning_clause: keyword: RETURNING expression: column_reference: naked_identifier: a alias_expression: naked_identifier: foo - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - returning_clause: keyword: RETURNING expression: column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: foo comma: ',' wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; - statement: 
insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - returning_clause: keyword: RETURNING expression: column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: foo - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: email - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/is_clause.sql000066400000000000000000000004261503426445100237610ustar00rootroot00000000000000CREATE TABLE Repro ( col TEXT NOT NULL CHECK (col IS DATE(col)) ); CREATE TABLE Repro ( col TEXT NOT NULL CHECK (col IS NOT DATE(col)) ); SELECT * FROM Tab WHERE col1 IS NOT DATE(col2); SELECT * FROM Tab WHERE col1 IS col2; SELECT * FROM Tab WHERE col1 IS NOT col2; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/is_clause.yml000066400000000000000000000113211503426445100237570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ed91a7180d847c1a84f4496b29a7baed65db225254a9228e27a9fc9f69f78773 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Repro - bracketed: start_bracket: ( column_definition: - naked_identifier: col - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: col keyword: IS function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Repro - bracketed: start_bracket: ( column_definition: - naked_identifier: col - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col - keyword: IS - keyword: NOT - function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Tab where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: IS - keyword: NOT - function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: col2 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Tab where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: IS - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Tab where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col1 - keyword: IS - keyword: NOT - column_reference: naked_identifier: col2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/json_keys.sql000066400000000000000000000001441503426445100240130ustar00rootroot00000000000000SELECT j.key, j.value, j.type, j.atom, j.id, j.parent, j.fullkey, j.path FROM json_tree('{}') AS j; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/json_keys.yml000066400000000000000000000045031503426445100240200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7722a31bfc9f742c5b8b69b5a9367643a55c50ed3f859466be81fd9d8408536c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: key - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: value - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: type - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: atom - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: parent - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: fullkey - comma: ',' - select_clause_element: column_reference: - naked_identifier: j - dot: . - naked_identifier: path from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: json_tree function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{}'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: j statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/json_operators.sql000066400000000000000000000014741503426445100250650ustar00rootroot00000000000000SELECT value FROM '[11,22,33,44]' -> 3 WHERE '{"x": "y"}' ->> '$.x' = 'y'; SELECT value FROM '{"a":2,"c":[4,5,{"f":7}]}' -> 'c' WHERE Upper('{"x": "y"}') ->> '$.x' = 'y'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> '$'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> "$"; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> '$.c'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> 'c'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> '$.c[2]'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> '$.c[2].f'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' ->> '$.c[2].f'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> 'c' -> 2 ->> 'f'; SELECT '{"a":2,"c":[4,5],"f":7}' -> '$.c[#-1]'; SELECT '{"a":2,"c":[4,5,{"f":7}]}' -> '$.x'; SELECT '[11,22,33,44]' -> 3; SELECT '[11,22,33,44]' ->> 3; SELECT '{"a":"xyz"}' -> '$.a'; SELECT '{"a":"xyz"}' ->> '$.a'; SELECT '{"a":null}' -> '$.a'; SELECT '{"a":null}' ->> '$.a'; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/json_operators.yml000066400000000000000000000155631503426445100250730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fcf38b03dffc0c41aa7d7b82388214b89bbf69add1a96fbc22f6381275abf211 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: "'[11,22,33,44]'" column_path_operator: -> numeric_literal: '3' where_clause: keyword: WHERE expression: - quoted_literal: "'{\"x\": \"y\"}'" - column_path_operator: ->> - quoted_literal: "'$.x'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'y'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" column_path_operator: -> quoted_literal: "'c'" where_clause: keyword: WHERE expression: - function: function_name: function_name_identifier: Upper function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'{\"x\": \"y\"}'" end_bracket: ) - column_path_operator: ->> - quoted_literal: "'$.x'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'y'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'$'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" column_path_operator: -> column_reference: quoted_identifier: '"$"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'$.c'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'c'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'$.c[2]'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'$.c[2].f'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: ->> - quoted_literal: "'$.c[2].f'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'c'" - column_path_operator: -> - numeric_literal: '2' - column_path_operator: ->> - quoted_literal: "'f'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5],\"f\":7}'" - column_path_operator: -> - quoted_literal: "'$.c[#-1]'" - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":2,\"c\":[4,5,{\"f\":7}]}'" - column_path_operator: -> - quoted_literal: "'$.x'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'[11,22,33,44]'" column_path_operator: -> numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'[11,22,33,44]'" column_path_operator: ->> numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":\"xyz\"}'" - column_path_operator: -> - quoted_literal: "'$.a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":\"xyz\"}'" - column_path_operator: ->> - quoted_literal: "'$.a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":null}'" - column_path_operator: -> - quoted_literal: "'$.a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'{\"a\":null}'" - column_path_operator: ->> - quoted_literal: "'$.a'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/named_parameters.sql000066400000000000000000000004471503426445100253240ustar00rootroot00000000000000SELECT @variable FROM table1 WHERE @variable = 1; SELECT ?2 FROM table1 WHERE ?2 = 1; SELECT :variable FROM table1 WHERE :variable = 1; SELECT $variable FROM table1 WHERE $variable = 1; SELECT @variable FROM table1 GROUP BY @variable HAVING $variable = 1; SELECT ? from table1 where ? = 1; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/named_parameters.yml000066400000000000000000000105671503426445100253320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a4bcc7f0cb8c4e6282de1740a92de5e949266d50bf715196fb19b38a0c82a8ee file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: at_sign_literal: '@variable' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: parameterized_expression: at_sign_literal: '@variable' comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: question_literal: ?2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: parameterized_expression: question_literal: ?2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: colon_literal: :variable from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: parameterized_expression: colon_literal: :variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: dollar_literal: $variable from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: parameterized_expression: dollar_literal: $variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: at_sign_literal: '@variable' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 groupby_clause: - keyword: GROUP - keyword: BY - column_reference: parameterized_expression: at_sign_literal: '@variable' having_clause: keyword: HAVING expression: parameterized_expression: dollar_literal: $variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: parameterized_expression: question_mark: '?' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: where expression: parameterized_expression: question_mark: '?' 
comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/over_clause.sql000066400000000000000000000046371503426445100243310ustar00rootroot00000000000000SELECT x, y, row_number() OVER (ORDER BY y) AS row_number FROM t0 ORDER BY x; SELECT a, b, group_concat(b, '.') OVER ( ORDER BY a ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) AS group_concat FROM t1; SELECT c, a, b, group_concat(b, '.') OVER ( PARTITION BY c ORDER BY a RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING ) AS group_concat FROM t1 ORDER BY c, a; SELECT c, a, b, group_concat(b, '.') OVER ( PARTITION BY c ORDER BY a RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING ) AS group_concat FROM t1 ORDER BY a; SELECT a, b, c, group_concat(b, '.') OVER (ORDER BY c) AS group_concat FROM t1 ORDER BY a; SELECT c, a, b, group_concat(b, '.') OVER ( ORDER BY c, a ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING ) AS group_concat FROM t1 ORDER BY c, a; SELECT c, a, b, group_concat(b, '.') OVER ( ORDER BY c GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE NO OTHERS ) AS no_others, group_concat(b, '.') OVER ( ORDER BY c GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CURRENT ROW ) AS current_row, group_concat(b, '.') OVER ( ORDER BY c GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE GROUP ) AS grp, group_concat(b, '.') OVER ( ORDER BY c GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE TIES ) AS ties FROM t1 ORDER BY c, a; SELECT c, a, b, group_concat(b, '.') FILTER (WHERE c != 'two') OVER ( ORDER BY a ) AS group_concat FROM t1 ORDER BY a; SELECT a AS a, row_number() OVER win AS row_number, rank() OVER win AS rank, dense_rank() OVER win AS dense_rank, percent_rank() OVER win AS percent_rank, cume_dist() OVER win AS cume_dist FROM t2 WINDOW win AS (ORDER BY a); SELECT a AS a, b AS b, ntile(2) OVER win AS ntile_2, ntile(4) OVER win AS ntile_4 FROM t2 WINDOW win AS (ORDER BY a); SELECT b AS b, lead(b, 2, 'n/a') OVER win AS lead, lag(b) OVER win AS lag, first_value(b) OVER win AS first_value, last_value(b) OVER win AS last_value, nth_value(b, 3) OVER win AS nth_value_3 FROM t1 WINDOW win AS (ORDER BY b ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW); SELECT group_concat(b, '.') OVER ( win ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) FROM t1 WINDOW win AS (PARTITION BY a ORDER BY c); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/over_clause.yml000066400000000000000000000761771503426445100243430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9fb7d741a9cf2b232a9c501c05f182158f546944b70e2336564982211dffcc61 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: y end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: row_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a frame_clause: - keyword: ROWS - keyword: BETWEEN - expression: numeric_literal: '1' - keyword: PRECEDING - keyword: AND - expression: numeric_literal: '1' - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: c orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: CURRENT - keyword: ROW - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - 
select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: c orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: CURRENT - keyword: ROW - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: a frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: CURRENT - keyword: ROW - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - 
select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c frame_clause: - keyword: GROUPS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: 'NO' - keyword: OTHERS end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: no_others - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c frame_clause: - keyword: GROUPS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: current_row - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c frame_clause: - keyword: GROUPS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: GROUP end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: grp - comma: ',' - select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c frame_clause: - keyword: GROUPS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: TIES end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ties from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - 
select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: c comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' quoted_literal: "'two'" end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: group_concat from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: row_number - comma: ',' - select_clause_element: function: function_name: function_name_identifier: rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: rank - comma: ',' - select_clause_element: function: function_name: function_name_identifier: dense_rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: dense_rank - comma: ',' - select_clause_element: function: function_name: function_name_identifier: percent_rank function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: percent_rank - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cume_dist function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: cume_dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a alias_expression: alias_operator: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b alias_expression: alias_operator: keyword: AS naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ntile function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) over_clause: keyword: OVER naked_identifier: 
win alias_expression: alias_operator: keyword: AS naked_identifier: ntile_2 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ntile function_contents: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: ntile_4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: b alias_expression: alias_operator: keyword: AS naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: lead function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'n/a'" - end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: lag function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: first_value function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last_value function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: last_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: nth_value function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: nth_value_3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: group_concat function_contents: bracketed: - start_bracket: ( - expression: column_reference: 
naked_identifier: b - comma: ',' - expression: quoted_literal: "'.'" - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: win frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/pattern_matching.sql
CREATE TABLE IF NOT EXISTS task (
    id TEXT PRIMARY KEY CHECK (length(id) = 15),
    priority TEXT CHECK (priority GLOB '[A-Z]'),
    regex_col TEXT CHECK (priority REGEXP '[A-Z]'),
    match_col TEXT CHECK (priority MATCH 'tacos'),
    title TEXT NOT NULL,
    note TEXT,
    created_at DATETIME NOT NULL DEFAULT current_timestamp,
    updated_at DATETIME NOT NULL DEFAULT current_timestamp
);

SELECT col1 FROM tab_a WHERE this_col MATCH 'that';
SELECT col1 FROM tab_a WHERE this_col REGEXP '(that|other)';
SELECT col1 FROM tab_a WHERE this_col GLOB 'one*two';
SELECT col1 FROM tab_a WHERE this_col NOT MATCH 'that';
SELECT col1 FROM tab_a WHERE this_col NOT REGEXP '(that|other)';
SELECT col1 FROM tab_a WHERE this_col NOT GLOB 'one*two';
SELECT col1 FROM tab_a WHERE NOT this_col MATCH 'that';
SELECT col1 FROM tab_a WHERE NOT this_col REGEXP '(that|other)';
SELECT col1 FROM tab_a WHERE NOT this_col GLOB 'one*two';
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/pattern_matching.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e329f7a4d48f64eeede7c96263fce292dcf593644f4dcb9d6d025bb45c68ee35
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: task
    - bracketed:
      - start_bracket: (
      - column_definition:
        - naked_identifier: id
        - data_type:
            data_type_identifier: TEXT
        - column_constraint_segment:
          - keyword: PRIMARY
          - keyword: KEY
        - column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                function:
                  function_name:
                    function_name_identifier: length
                  function_contents:
                    bracketed:
                      start_bracket: (
                      expression:
                        column_reference:
                          naked_identifier: id
                      end_bracket: )
                comparison_operator:
                  raw_comparison_operator: '='
                numeric_literal: '15'
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: priority
          data_type:
            data_type_identifier: TEXT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: priority
                keyword: GLOB
                quoted_literal: "'[A-Z]'"
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: regex_col
          data_type:
            data_type_identifier: TEXT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: priority
                keyword: REGEXP
                quoted_literal: "'[A-Z]'"
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: match_col
          data_type:
            data_type_identifier: TEXT
          column_constraint_segment:
            keyword: CHECK
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: priority
                keyword: MATCH
                quoted_literal: "'tacos'"
              end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: title
          data_type:
            data_type_identifier: TEXT
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: note
          data_type:
            data_type_identifier: TEXT
      - comma: ','
      - column_definition:
        - naked_identifier: created_at
        - data_type:
            data_type_identifier: DATETIME
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: DEFAULT
            expression:
              bare_function: current_timestamp
      - comma: ','
      - column_definition:
        - naked_identifier: updated_at
        - data_type:
            data_type_identifier: DATETIME
        - column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
        - column_constraint_segment:
            keyword: DEFAULT
            expression:
              bare_function: current_timestamp
      - end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: this_col
          keyword: MATCH
          quoted_literal: "'that'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: this_col
          keyword: REGEXP
          quoted_literal: "'(that|other)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: this_col
          keyword: GLOB
          quoted_literal: "'one*two'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: this_col
        - keyword: NOT
        - keyword: MATCH
        - quoted_literal: "'that'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: this_col
        - keyword: NOT
        - keyword: REGEXP
        - quoted_literal: "'(that|other)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
            naked_identifier: this_col
        - keyword: NOT
        - keyword: GLOB
        - quoted_literal: "'one*two'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - keyword: NOT
        - column_reference:
            naked_identifier: this_col
        - keyword: MATCH
        - quoted_literal: "'that'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - keyword: NOT
        - column_reference:
            naked_identifier: this_col
        - keyword: REGEXP
        - quoted_literal: "'(that|other)'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tab_a
      where_clause:
        keyword: WHERE
        expression:
        - keyword: NOT
        - column_reference:
            naked_identifier: this_col
        - keyword: GLOB
        - quoted_literal: "'one*two'"
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/pragma.sql
PRAGMA analysis_limit = 7;
PRAGMA schema.application_id;
PRAGMA schema.auto_vacuum = INCREMENTAL;
PRAGMA automatic_index = TRUE;
PRAGMA schema.cache_size = -500;
PRAGMA collation_list;
PRAGMA data_store_directory = 'directory-name';
PRAGMA encoding = 'UTF-16be';
PRAGMA schema.foreign_key_check('table-name');
PRAGMA schema.journal_mode = WAL;
PRAGMA schema.locking_mode = NORMAL;
PRAGMA schema.secure_delete = FAST;
PRAGMA schema.synchronous = 0;
PRAGMA temp_store = DEFAULT;
PRAGMA schema.wal_checkpoint(FULL);
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/pragma.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3454a43f612f435c955852c9e3fa10911897024068bd7ad2a973494ea81ecfaf
file:
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
        naked_identifier: analysis_limit
      comparison_operator:
        raw_comparison_operator: '='
      numeric_literal: '7'
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: application_id
- statement_terminator: ;
- statement:
    pragma_statement:
    - keyword: PRAGMA
    - pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: auto_vacuum
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: INCREMENTAL
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
        naked_identifier: automatic_index
      comparison_operator:
        raw_comparison_operator: '='
      boolean_literal: 'TRUE'
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: cache_size
      comparison_operator:
        raw_comparison_operator: '='
      numeric_literal:
        sign_indicator: '-'
        numeric_literal: '500'
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
        naked_identifier: collation_list
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
        naked_identifier: data_store_directory
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "'directory-name'"
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
        naked_identifier: encoding
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "'UTF-16be'"
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: foreign_key_check
      bracketed:
        start_bracket: (
        quoted_literal: "'table-name'"
        end_bracket: )
- statement_terminator: ;
- statement:
    pragma_statement:
    - keyword: PRAGMA
    - pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: journal_mode
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: WAL
- statement_terminator: ;
- statement:
    pragma_statement:
    - keyword: PRAGMA
    - pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: locking_mode
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: NORMAL
- statement_terminator: ;
- statement:
    pragma_statement:
    - keyword: PRAGMA
    - pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: secure_delete
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: FAST
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: synchronous
      comparison_operator:
        raw_comparison_operator: '='
      numeric_literal: '0'
- statement_terminator: ;
- statement:
    pragma_statement:
    - keyword: PRAGMA
    - pragma_reference:
        naked_identifier: temp_store
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    pragma_statement:
      keyword: PRAGMA
      pragma_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: wal_checkpoint
      bracketed:
        start_bracket: (
        keyword: FULL
        end_bracket: )
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/quoted_identifiers.sql
SELECT `nih`.`userID` FROM `flight_notification_item_history` AS `nih`;

-- NOTE: Normally single quoted items are interpreted as strings rather than objects - but this does still run on SQLite.
SELECT 'nih'.'userID' FROM 'flight_notification_item_history' AS 'nih';

SELECT "nih"."userID" FROM "flight_notification_item_history" AS "nih";
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/quoted_identifiers.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ab532251b3d9600dcf60000aded025e23cede64062054b8e4bc8256a735a3008
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - quoted_identifier: '`nih`'
          - dot: .
          - quoted_identifier: '`userID`'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                quoted_identifier: '`flight_notification_item_history`'
            alias_expression:
              alias_operator:
                keyword: AS
              quoted_identifier: '`nih`'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - quoted_identifier: "'nih'"
          - dot: .
          - quoted_identifier: "'userID'"
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                quoted_identifier: "'flight_notification_item_history'"
            alias_expression:
              alias_operator:
                keyword: AS
              quoted_identifier: "'nih'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - quoted_identifier: '"nih"'
          - dot: .
          - quoted_identifier: '"userID"'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                quoted_identifier: '"flight_notification_item_history"'
            alias_expression:
              alias_operator:
                keyword: AS
              quoted_identifier: '"nih"'
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/raise_function.sql
CREATE TRIGGER x BEFORE UPDATE OF z ON y
BEGIN
SELECT RAISE (ROLLBACK, 'updating z not allowed');
END;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/raise_function.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: af3ada717521748d1f9bdb2c5c54c3c60b343b95d5777e2535f653800f369f27
file:
  statement:
    create_trigger:
    - keyword: CREATE
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: x
    - keyword: BEFORE
    - keyword: UPDATE
    - keyword: OF
    - column_reference:
        naked_identifier: z
    - keyword: 'ON'
    - table_reference:
        naked_identifier: y
    - keyword: BEGIN
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            function:
              function_name:
                function_name_identifier: RAISE
              function_contents:
                bracketed:
                  start_bracket: (
                  keyword: ROLLBACK
                  comma: ','
                  quoted_literal: "'updating z not allowed'"
                  end_bracket: )
    - statement_terminator: ;
    - keyword: END
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/select.sql
SELECT a FROM foo LIMIT 10;

SELECT
    survey_time
    , AVG(light) AS trips
FROM survey
GROUP BY survey_time;

WITH time_cte AS (
    SELECT
        branch,
        created_at,
        time,
        cast(time - LAG (time, 1, time) OVER (ORDER BY time) as real) AS time_spent
    FROM heartbeats h
    WHERE user_id = 1
        AND created_at >= DATE('now', 'start of day')
    ORDER BY id
    LIMIT 1 OFFSET 1
)
SELECT
    branch as name,
    cast(time_spent as real) as time_spent,
    cast(time_spent / (SELECT SUM(time_spent) FROM time_cte) as real) as time_percentage
FROM (
    SELECT
        branch,
        cast(SUM(time_spent) as real) AS time_spent
    FROM time_cte
    GROUP BY branch
    ORDER BY time_spent DESC
)
;
sqlfluff-3.4.2/test/fixtures/dialects/sqlite/select.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c02b4d2fad2154e410c9144512f7e2b1e114400b1a4f2ad43d36c832226c05c9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: survey_time - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: light end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: trips from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: survey groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: survey_time - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: time_cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch - comma: ',' - select_clause_element: column_reference: naked_identifier: created_at - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: time binary_operator: '-' function: function_name: function_name_identifier: LAG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: time - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time end_bracket: ) keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: time_spent from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: heartbeats alias_expression: naked_identifier: h where_clause: keyword: WHERE expression: - column_reference: naked_identifier: user_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: created_at - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - function: function_name: function_name_identifier: DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'now'" - comma: ',' - expression: quoted_literal: "'start of day'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id limit_clause: - keyword: LIMIT - numeric_literal: '1' - keyword: OFFSET - numeric_literal: '1' end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch alias_expression: alias_operator: keyword: as naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: 
start_bracket: ( expression: column_reference: naked_identifier: time_spent keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: time_spent - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent binary_operator: / bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: time_cte end_bracket: ) keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: time_percentage from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent end_bracket: ) keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: time_spent from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: time_cte groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: branch orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time_spent - keyword: DESC end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/update.sql000066400000000000000000000013271503426445100232750ustar00rootroot00000000000000UPDATE table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE table_name SET column1 = value1, column2 = value2 WHERE a=1 RETURNING *; UPDATE table_name SET column1 = value1, column2 = value2 WHERE a=1 RETURNING id foo, id_2 AS bar; UPDATE OR IGNORE table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE OR ABORT table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE OR FAIL table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE OR REPLACE table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE OR ROLLBACK table_name SET column1 = value1, column2 = value2 WHERE a=1; UPDATE table_name SET (context, country) = (SELECT context, country FROM Nations); sqlfluff-3.4.2/test/fixtures/dialects/sqlite/update.yml000066400000000000000000000222251503426445100232770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7a53756792abfb59b7429a13eae865b11fa845f82395ef9509a805e8f09d4356 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' returning_clause: - keyword: RETURNING - expression: column_reference: naked_identifier: id - alias_expression: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: id_2 - alias_expression: alias_operator: keyword: AS naked_identifier: bar - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: OR - keyword: IGNORE - table_reference: naked_identifier: table_name - set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: OR - keyword: ABORT - table_reference: naked_identifier: table_name - set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: 
update_statement: - keyword: UPDATE - keyword: OR - keyword: FAIL - table_reference: naked_identifier: table_name - set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: OR - keyword: REPLACE - table_reference: naked_identifier: table_name - set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: OR - keyword: ROLLBACK - table_reference: naked_identifier: table_name - set_clause_list: - keyword: SET - set_clause: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value1 - comma: ',' - set_clause: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: value2 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: keyword: SET set_clause: bracketed: - start_bracket: ( - column_reference: naked_identifier: context - comma: ',' - column_reference: naked_identifier: country - end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: context - comma: ',' - select_clause_element: column_reference: naked_identifier: country from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Nations end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/upsert.sql000066400000000000000000000006501503426445100233330ustar00rootroot00000000000000INSERT INTO t1 (a, b) VALUES (1, 2) ON CONFLICT DO NOTHING; INSERT INTO t1 (a, b) VALUES (1, 2) ON CONFLICT (a, b) DO NOTHING; INSERT INTO t1 (a, b) VALUES (1, 2) ON CONFLICT (a, b) DO UPDATE SET a = excluded.a; INSERT INTO t1 (a, b) VALUES (1, 2) ON CONFLICT (a, b) DO UPDATE SET a = excluded.a WHERE a < 10; INSERT INTO t1 (a, b) VALUES (1, 2) ON CONFLICT (a, b) DO UPDATE SET a = excluded.a WHERE a < 10 RETURNING *; sqlfluff-3.4.2/test/fixtures/dialects/sqlite/upsert.yml000066400000000000000000000147531503426445100233460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f20fb63140dbf48d70b1e52b4db30324a332968e7e33b62144d994727ef3e6d5 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - upsert_clause: - keyword: 'ON' - keyword: CONFLICT - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - upsert_clause: - keyword: 'ON' - keyword: CONFLICT - conflict_target: index_column_definition: expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - upsert_clause: - keyword: 'ON' - keyword: CONFLICT - conflict_target: index_column_definition: expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: DO - keyword: UPDATE - keyword: SET - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: excluded - dot: . - naked_identifier: a - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - upsert_clause: - keyword: 'ON' - keyword: CONFLICT - conflict_target: index_column_definition: expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: DO - keyword: UPDATE - keyword: SET - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: excluded - dot: . 
- naked_identifier: a - keyword: WHERE - expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < numeric_literal: '10' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - upsert_clause: - keyword: 'ON' - keyword: CONFLICT - conflict_target: index_column_definition: expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: DO - keyword: UPDATE - keyword: SET - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: excluded - dot: . - naked_identifier: a - keyword: WHERE - expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < numeric_literal: '10' - returning_clause: keyword: RETURNING wildcard_expression: wildcard_identifier: star: '*' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/000077500000000000000000000000001503426445100220015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/starrocks/.sqlfluff000066400000000000000000000000371503426445100236240ustar00rootroot00000000000000[sqlfluff] dialect = starrocks sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_external_table.sql000066400000000000000000000005141503426445100270360ustar00rootroot00000000000000CREATE EXTERNAL TABLE example_db.table_mysql ( k1 DATE, k2 INT, k3 SMALLINT, k4 VARCHAR(2048), k5 DATETIME ) ENGINE=mysql PROPERTIES ( "host" = "127.0.0.1", "port" = "8239", "user" = "mysql_user", "password" = "mysql_passwd", "database" = "mysql_db_test", "table" = "mysql_table_test" ); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_external_table.yml000066400000000000000000000051421503426445100270420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0b706367ae2434b3e97b6f9e1d908a2f443e6612bd58ff88f9c26d05df909b10 file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: example_db - dot: . 
- naked_identifier: table_mysql - bracketed: - start_bracket: ( - column_definition: naked_identifier: k1 data_type: data_type_identifier: DATE - comma: ',' - column_definition: naked_identifier: k2 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: k3 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: k4 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2048' end_bracket: ) - comma: ',' - column_definition: naked_identifier: k5 keyword: DATETIME - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: mysql - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"host"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"127.0.0.1"' - comma: ',' - quoted_literal: '"port"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"8239"' - comma: ',' - quoted_literal: '"user"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"mysql_user"' - comma: ',' - quoted_literal: '"password"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"mysql_passwd"' - comma: ',' - quoted_literal: '"database"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"mysql_db_test"' - comma: ',' - quoted_literal: '"table"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"mysql_table_test"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_primary_key_table.sql000066400000000000000000000007571503426445100275600ustar00rootroot00000000000000create table users ( user_id bigint NOT NULL, name string NOT NULL, email string NULL, address string NULL, age tinyint NULL, sex tinyint NULL, last_active datetime, property0 tinyint NOT NULL, property1 tinyint NOT NULL, property2 tinyint NOT NULL, property3 tinyint NOT NULL ) PRIMARY KEY (`user_id`) DISTRIBUTED BY HASH(`user_id`) ORDER BY(`address`,`last_active`) PROPERTIES( "replication_num" = "3", "enable_persistent_index" = "true" ); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_primary_key_table.yml000066400000000000000000000077111503426445100275570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aff533944535794084991dae851dba8db83a6ee96991f242e5ec14f51bd765a6 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: bigint column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: string column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: email data_type: data_type_identifier: string column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: address data_type: data_type_identifier: string column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: tinyint column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: sex data_type: data_type_identifier: tinyint column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: last_active keyword: datetime - comma: ',' - column_definition: naked_identifier: property0 data_type: data_type_identifier: tinyint column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: property1 data_type: data_type_identifier: tinyint column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: property2 data_type: data_type_identifier: tinyint column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: property3 data_type: data_type_identifier: tinyint column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`user_id`' end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '`user_id`' end_bracket: ) - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`address`' - comma: ',' - column_reference: quoted_identifier: '`last_active`' - end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"replication_num"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"3"' - comma: ',' - quoted_literal: '"enable_persistent_index"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"true"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_routine_load.sql000066400000000000000000000007331503426445100265340ustar00rootroot00000000000000CREATE ROUTINE LOAD example_db.example_tbl4_ordertest2 ON example_tbl4 COLUMNS(commodity_id, customer_name, country, pay_time, pay_dt=from_unixtime(pay_time, '%Y%m%d'), price) PROPERTIES ( "format" = "json", "jsonpaths" = "[\"$.commodity_id\",\"$.customer_name\",\"$.country\",\"$.pay_time\",\"$.price\"]" ) FROM KAFKA ( "kafka_broker_list" = ":,:", "kafka_topic" = "ordertest2" ); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_routine_load.yml000066400000000000000000000053611503426445100265400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6454bf6564add6daf3647b0be96b3ab1a5ebe8f4609957d06fbce2b2e9495223 file: statement: create_routine_load_statement: - keyword: CREATE - keyword: ROUTINE - keyword: LOAD - object_reference: - naked_identifier: example_db - dot: . - naked_identifier: example_tbl4_ordertest2 - keyword: 'ON' - table_reference: naked_identifier: example_tbl4 - keyword: COLUMNS - bracketed: - start_bracket: ( - naked_identifier: commodity_id - comma: ',' - naked_identifier: customer_name - comma: ',' - naked_identifier: country - comma: ',' - naked_identifier: pay_time - comma: ',' - naked_identifier: pay_dt - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: from_unixtime function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: pay_time - comma: ',' - expression: quoted_literal: "'%Y%m%d'" - end_bracket: ) - comma: ',' - naked_identifier: price - end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - routine_load_properties: - quoted_literal: '"format"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"json"' - comma: ',' - routine_load_properties: - quoted_literal: '"jsonpaths"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"[\"$.commodity_id\",\"$.customer_name\",\"$.country\",\"$.pay_time\",\"$.price\"]"' - end_bracket: ) - keyword: FROM - keyword: KAFKA - bracketed: - start_bracket: ( - routine_load_data_source_properties: - quoted_literal: '"kafka_broker_list"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '":,:"' - comma: ',' - routine_load_data_source_properties: - quoted_literal: '"kafka_topic"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"ordertest2"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_aggregate_key.sql000066400000000000000000000003741503426445100300160ustar00rootroot00000000000000CREATE TABLE example_db.table_hash ( k1 TINYINT, k2 DECIMAL(10, 2) DEFAULT "10.5", v1 CHAR(10), v2 INT ) ENGINE=olap AGGREGATE KEY(k1, k2) COMMENT "my first starrocks table" DISTRIBUTED BY HASH(k1) PROPERTIES ("storage_type"="column"); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_aggregate_key.yml000066400000000000000000000050741503426445100300220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cf5300552e43f352cc042cefdba419580f21b2d867e46f0dd977e78696f89290 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: example_db - dot: . 
- naked_identifier: table_hash - bracketed: - start_bracket: ( - column_definition: naked_identifier: k1 data_type: data_type_identifier: TINYINT - comma: ',' - column_definition: naked_identifier: k2 data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: '"10.5"' - comma: ',' - column_definition: naked_identifier: v1 data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_definition: naked_identifier: v2 data_type: data_type_identifier: INT - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: olap - keyword: AGGREGATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: k1 - comma: ',' - column_reference: naked_identifier: k2 - end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: '"my first starrocks table"' - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: k1 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"storage_type"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"column"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_duplicate_key.sql000066400000000000000000000007461503426445100300450ustar00rootroot00000000000000CREATE TABLE example_db.table_range ( k1 DATE, k2 INT, k3 SMALLINT, v1 VARCHAR(2048), v2 DATETIME DEFAULT "2014-02-04 15:36:00" ) ENGINE=olap DUPLICATE KEY(k1, k2, k3) PARTITION BY RANGE (k1) ( PARTITION p1 VALUES LESS THAN ("2014-01-01"), PARTITION p2 VALUES LESS THAN ("2014-06-01"), PARTITION p3 VALUES LESS THAN ("2014-12-01") ) DISTRIBUTED BY HASH(k2) PROPERTIES( "storage_medium" = "SSD", "storage_cooldown_time" = "2015-06-04 00:00:00" ); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_duplicate_key.yml000066400000000000000000000073671503426445100300550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7a25a7a2467599c412115830117279d02b9559cf6872da2844954b731a399e56 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: example_db - dot: . 
- naked_identifier: table_range - bracketed: - start_bracket: ( - column_definition: naked_identifier: k1 data_type: data_type_identifier: DATE - comma: ',' - column_definition: naked_identifier: k2 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: k3 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: v1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2048' end_bracket: ) - comma: ',' - column_definition: - naked_identifier: v2 - keyword: DATETIME - keyword: DEFAULT - quoted_literal: '"2014-02-04 15:36:00"' - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: olap - keyword: DUPLICATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: k1 - comma: ',' - column_reference: naked_identifier: k2 - comma: ',' - column_reference: naked_identifier: k3 - end_bracket: ) - partition_segment: - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: k1 end_bracket: ) - bracketed: - start_bracket: ( - keyword: PARTITION - object_reference: naked_identifier: p1 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-01-01"' end_bracket: ) - comma: ',' - keyword: PARTITION - object_reference: naked_identifier: p2 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-06-01"' end_bracket: ) - comma: ',' - keyword: PARTITION - object_reference: naked_identifier: p3 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-12-01"' end_bracket: ) - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: k2 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"storage_medium"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"SSD"' - comma: ',' - quoted_literal: '"storage_cooldown_time"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"2015-06-04 00:00:00"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_dynamic_partition.sql000066400000000000000000000012421503426445100307300ustar00rootroot00000000000000CREATE TABLE example_db.dynamic_partition ( k1 DATE, k2 INT, k3 SMALLINT, v1 VARCHAR(2048), v2 DATETIME DEFAULT "2014-02-04 15:36:00" ) ENGINE=olap DUPLICATE KEY(k1, k2, k3) PARTITION BY RANGE (k1) ( PARTITION p1 VALUES LESS THAN ("2014-01-01"), PARTITION p2 VALUES LESS THAN ("2014-06-01"), PARTITION p3 VALUES LESS THAN ("2014-12-01") ) DISTRIBUTED BY HASH(k2) PROPERTIES( "storage_medium" = "SSD", "dynamic_partition.enable" = "true", "dynamic_partition.time_unit" = "DAY", "dynamic_partition.start" = "-3", "dynamic_partition.end" = "3", "dynamic_partition.prefix" = "p", "dynamic_partition.buckets" = "10" ); sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_dynamic_partition.yml000066400000000000000000000111071503426445100307330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a8dc4303d7a55270c73bd745a038b20c2819ba0ad24febf91dab9ee2356ba15 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: example_db - dot: . - naked_identifier: dynamic_partition - bracketed: - start_bracket: ( - column_definition: naked_identifier: k1 data_type: data_type_identifier: DATE - comma: ',' - column_definition: naked_identifier: k2 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: k3 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: v1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2048' end_bracket: ) - comma: ',' - column_definition: - naked_identifier: v2 - keyword: DATETIME - keyword: DEFAULT - quoted_literal: '"2014-02-04 15:36:00"' - end_bracket: ) - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_type: olap - keyword: DUPLICATE - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: k1 - comma: ',' - column_reference: naked_identifier: k2 - comma: ',' - column_reference: naked_identifier: k3 - end_bracket: ) - partition_segment: - keyword: PARTITION - keyword: BY - keyword: RANGE - bracketed: start_bracket: ( column_reference: naked_identifier: k1 end_bracket: ) - bracketed: - start_bracket: ( - keyword: PARTITION - object_reference: naked_identifier: p1 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-01-01"' end_bracket: ) - comma: ',' - keyword: PARTITION - object_reference: naked_identifier: p2 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-06-01"' end_bracket: ) - comma: ',' - keyword: PARTITION - object_reference: naked_identifier: p3 - keyword: VALUES - keyword: LESS - keyword: THAN - bracketed: start_bracket: ( quoted_literal: '"2014-12-01"' end_bracket: ) - end_bracket: ) - distribution_segment: - keyword: DISTRIBUTED - keyword: BY - keyword: HASH - bracketed: start_bracket: ( column_reference: naked_identifier: k2 end_bracket: ) - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: '"storage_medium"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"SSD"' - comma: ',' - quoted_literal: '"dynamic_partition.enable"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"true"' - comma: ',' - quoted_literal: '"dynamic_partition.time_unit"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DAY"' - comma: ',' - quoted_literal: '"dynamic_partition.start"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"-3"' - comma: ',' - quoted_literal: '"dynamic_partition.end"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"3"' - comma: ',' - quoted_literal: '"dynamic_partition.prefix"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"p"' - comma: ',' - quoted_literal: '"dynamic_partition.buckets"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"10"' - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_generated_column.sql000066400000000000000000000004031503426445100305240ustar00rootroot00000000000000CREATE TABLE test_tbl1 ( id INT NOT NULL, data_array ARRAY NOT NULL, data_json 
JSON NOT NULL,
    newcol1 DOUBLE AS array_avg(data_array),
    newcol2 String AS json_string(json_query(data_json, "$.a"))
)
PRIMARY KEY (id)
DISTRIBUTED BY HASH(id);
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/create_table_generated_column.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 467c5491218b1624b9d1eda9b579fe2fdf37496dd750c5d718c5ef785e395c17
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: test_tbl1
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: id
          data_type:
            data_type_identifier: INT
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: data_array
          data_type:
            data_type_identifier: ARRAY
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: data_json
          data_type:
            data_type_identifier: JSON
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: newcol1
          data_type:
            data_type_identifier: DOUBLE
          column_constraint_segment:
            keyword: AS
            expression:
              function:
                function_name:
                  function_name_identifier: array_avg
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      column_reference:
                        naked_identifier: data_array
                    end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: newcol2
          data_type:
            data_type_identifier: String
          column_constraint_segment:
            keyword: AS
            expression:
              function:
                function_name:
                  function_name_identifier: json_string
                function_contents:
                  bracketed:
                    start_bracket: (
                    expression:
                      function:
                        function_name:
                          function_name_identifier: json_query
                        function_contents:
                          bracketed:
                          - start_bracket: (
                          - expression:
                              column_reference:
                                naked_identifier: data_json
                          - comma: ','
                          - expression:
                              quoted_literal: '"$.a"'
                          - end_bracket: )
                    end_bracket: )
      - end_bracket: )
    - keyword: PRIMARY
    - keyword: KEY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: id
        end_bracket: )
    - distribution_segment:
      - keyword: DISTRIBUTED
      - keyword: BY
      - keyword: HASH
      - bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: id
          end_bracket: )
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/drop_table.sql
DROP TABLE IF EXISTS example_db.my_table;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/drop_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 07a2f6a0dc3fadd627cec2ad9869a861868aee54d83b05b75774c3d16c2128fc
file:
  statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
      - naked_identifier: example_db
      - dot: .
      - naked_identifier: my_table
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/pause_routine_load.sql
PAUSE ROUTINE LOAD FOR example_db.example_tbl1_ordertest1;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/pause_routine_load.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 19da0f65048d7242c953700512163eb1598daa9e24c6ffc1ff3c4810e8d93490
file:
  statement:
    pause_routine_load_statement:
    - keyword: PAUSE
    - keyword: ROUTINE
    - keyword: LOAD
    - keyword: FOR
    - object_reference:
      - naked_identifier: example_db
      - dot: .
      - naked_identifier: example_tbl1_ordertest1
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/resume_routine_load.sql
RESUME ROUTINE LOAD FOR example_db.example_tbl1_ordertest1;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/resume_routine_load.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9bd782c24077a3fa9a3618955c91e6cf1c0d6019d27a3ed15a751598d2abc2c0
file:
  statement:
    resume_routine_load_statement:
    - keyword: RESUME
    - keyword: ROUTINE
    - keyword: LOAD
    - keyword: FOR
    - object_reference:
      - naked_identifier: example_db
      - dot: .
      - naked_identifier: example_tbl1_ordertest1
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/stop_routine_load.sql
STOP ROUTINE LOAD FOR example_db.example_tbl1_ordertest1;
sqlfluff-3.4.2/test/fixtures/dialects/starrocks/stop_routine_load.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 15ffcc5e72a371be995f60b7fae418c9aa642b9b6e5cf80b39c5f9a0f9c88898
file:
  statement:
    stop_routine_load_statement:
    - keyword: STOP
    - keyword: ROUTINE
    - keyword: LOAD
    - keyword: FOR
    - object_reference:
      - naked_identifier: example_db
      - dot: .
      - naked_identifier: example_tbl1_ordertest1
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/
sqlfluff-3.4.2/test/fixtures/dialects/teradata/.sqlfluff
[sqlfluff]
dialect = teradata
sqlfluff-3.4.2/test/fixtures/dialects/teradata/bteq.sql
.if errorcode > 0 then .quit 4;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/bteq.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5c9454897c6fb0804ca61cd756db50740fc13aabf09a5232ea602068cc2fa713
file:
  statement:
    bteq_statement:
    - dot: .
    - bteq_key_word_segment:
        keyword: if
    - bteq_key_word_segment:
        keyword: errorcode
    - comparison_operator:
        raw_comparison_operator: '>'
    - numeric_literal: '0'
    - bteq_key_word_segment:
        keyword: then
    - bteq_key_word_segment:
        dot: .
        keyword: quit
        numeric_literal: '4'
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/collect_stats.sql
COLLECT STATISTICS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
COLLECT STATISTICS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1;
COLLECT STATISTICS COLUMN o_orderstatus ON orders;
COLLECT STATISTICS USING SYSTEM THRESHOLD FOR CURRENT COLUMN (o_orderstatus, o_orderkey) ON orders;
COLLECT STATS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
COLLECT STAT COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
COLLECT STATS COLUMN IND_TIPO_TARJETA ON DB_1.TABLE_1;
COLLECT STATS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1;
collect statistics
    column (Org_Unit_Code, Org_Unit_Type, Entity_Code) as Org_Descendant_NUPI,
    column (Org_Unit_Type),
    column (Entity_Code),
    column (Org_Unit_Code, Entity_Code),
    column (Entity_Code, Parent_Org_Unit_Code, Parent_Org_Unit_Type),
    column (Org_Unit_Code),
    column (Parent_Org_Unit_Code, Parent_Org_Unit_Type, Parent_Entity_Code)
on sandbox_db.Org_Descendant;
COLLECT STATISTICS ON table_1 COLUMN (column_1, column_2);
COLLECT STATISTICS ON orders COLUMN (quant_ord, PARTITION, quant_shpd);
COLLECT STATISTICS ON table_1 COLUMN PARTITION;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/collect_stats.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 08c47cf906873922a007a611f8d281d26a16f4c732d317c923e868c1b9014c79
file:
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: COLUMN
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: IND_TIPO_TARJETA
        end_bracket: )
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: INDEX
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: COD_TARJETA
      - comma: ','
      - column_reference:
          naked_identifier: COD_EST
      - comma: ','
      - column_reference:
          naked_identifier: IND_TIPO_TARJETA
      - comma: ','
      - column_reference:
          naked_identifier: FEC_ANIO_MES
      - end_bracket: )
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: COLUMN
    - column_reference:
        naked_identifier: o_orderstatus
    - keyword: 'ON'
    - table_reference:
        naked_identifier: orders
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: USING
    - collect_stat_using_option_clause:
      - keyword: SYSTEM
      - keyword: THRESHOLD
      - keyword: FOR
      - keyword: CURRENT
    - keyword: COLUMN
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: o_orderstatus
      - comma: ','
      - column_reference:
          naked_identifier: o_orderkey
      - end_bracket: )
    - keyword: 'ON'
    - table_reference:
        naked_identifier: orders
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATS
    - keyword: COLUMN
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: IND_TIPO_TARJETA
        end_bracket: )
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STAT
    - keyword: COLUMN
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: IND_TIPO_TARJETA
        end_bracket: )
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATS
    - keyword: COLUMN
    - column_reference:
        naked_identifier: IND_TIPO_TARJETA
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATS
    - keyword: INDEX
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: COD_TARJETA
      - comma: ','
      - column_reference:
          naked_identifier: COD_EST
      - comma: ','
      - column_reference:
          naked_identifier: IND_TIPO_TARJETA
      - comma: ','
      - column_reference:
          naked_identifier: FEC_ANIO_MES
      - end_bracket: )
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: DB_1
      - dot: .
      - naked_identifier: TABLE_1
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: collect
    - keyword: statistics
    - keyword: column
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: Org_Unit_Code
      - comma: ','
      - column_reference:
          naked_identifier: Org_Unit_Type
      - comma: ','
      - column_reference:
          naked_identifier: Entity_Code
      - end_bracket: )
    - keyword: as
    - object_reference:
        naked_identifier: Org_Descendant_NUPI
    - comma: ','
    - keyword: column
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: Org_Unit_Type
        end_bracket: )
    - comma: ','
    - keyword: column
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: Entity_Code
        end_bracket: )
    - comma: ','
    - keyword: column
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: Org_Unit_Code
      - comma: ','
      - column_reference:
          naked_identifier: Entity_Code
      - end_bracket: )
    - comma: ','
    - keyword: column
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: Entity_Code
      - comma: ','
      - column_reference:
          naked_identifier: Parent_Org_Unit_Code
      - comma: ','
      - column_reference:
          naked_identifier: Parent_Org_Unit_Type
      - end_bracket: )
    - comma: ','
    - keyword: column
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: Org_Unit_Code
        end_bracket: )
    - comma: ','
    - keyword: column
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: Parent_Org_Unit_Code
      - comma: ','
      - column_reference:
          naked_identifier: Parent_Org_Unit_Type
      - comma: ','
      - column_reference:
          naked_identifier: Parent_Entity_Code
      - end_bracket: )
    - keyword: 'on'
    - table_reference:
      - naked_identifier: sandbox_db
      - dot: .
      - naked_identifier: Org_Descendant
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_1
    - keyword: COLUMN
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: column_1
      - comma: ','
      - column_reference:
          naked_identifier: column_2
      - end_bracket: )
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: 'ON'
    - table_reference:
        naked_identifier: orders
    - keyword: COLUMN
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: quant_ord
      - comma: ','
      - keyword: PARTITION
      - comma: ','
      - column_reference:
          naked_identifier: quant_shpd
      - end_bracket: )
- statement_terminator: ;
- statement:
    collect_statistics_statement:
    - keyword: COLLECT
    - keyword: STATISTICS
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_1
    - keyword: COLUMN
    - keyword: PARTITION
- statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comment_on_column_stmt.sql
comment on column sandbox_db.Org_Descendant.Org_Unit_Code is 'Organisational unit code';
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comment_on_column_stmt.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9439d2deee55d1aca3add7a3f9a6cb4e37fb309ef42a84885414ae83246d16a1
file:
  statement:
    comment_clause:
    - keyword: comment
    - keyword: 'on'
    - keyword: column
    - column_reference:
      - naked_identifier: sandbox_db
      - dot: .
      - naked_identifier: Org_Descendant
      - dot: .
      - naked_identifier: Org_Unit_Code
    - keyword: is
    - quoted_literal: "'Organisational unit code'"
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comment_on_table.sql
comment on table sandbox_db.Org_Descendant is 'View with all Org_Unit_Ids on all levels';
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comment_on_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8564c2af548d9acd67569876c4f609aea96f71c981f35235e2b320c17bf49019
file:
  statement:
    comment_clause:
    - keyword: comment
    - keyword: 'on'
    - keyword: table
    - table_reference:
      - naked_identifier: sandbox_db
      - dot: .
      - naked_identifier: Org_Descendant
    - keyword: is
    - quoted_literal: "'View with all Org_Unit_Ids on all levels'"
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comparison_operators.sql
SELECT * FROM MY_TABLE WHERE A >= B;
SELECT * FROM MY_TABLE WHERE A GE B;
SELECT * FROM MY_TABLE WHERE A <= B;
SELECT * FROM MY_TABLE WHERE A LE B;
SELECT * FROM MY_TABLE WHERE A = B;
SELECT * FROM MY_TABLE WHERE A EQ B;
SELECT * FROM MY_TABLE WHERE A <> B;
SELECT * FROM MY_TABLE WHERE A ^= B;
SELECT * FROM MY_TABLE WHERE A NOT= B;
SELECT * FROM MY_TABLE WHERE A NE B;
SELECT * FROM MY_TABLE WHERE A GT B;
SELECT * FROM MY_TABLE WHERE A > B;
SELECT * FROM MY_TABLE WHERE A LT B;
SELECT * FROM MY_TABLE WHERE A < B;
sqlfluff-3.4.2/test/fixtures/dialects/teradata/comparison_operators.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6fe3b44345923a0b3030d15821fb486005acb9e39850430f6d05fd6af81a7679 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: GE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: LE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: EQ - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: binary_operator: ^ raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: keyword: NOT raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: NE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: GT - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: LT - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: B - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/create_table.sql000066400000000000000000000112001503426445100247000ustar00rootroot00000000000000create table sandbox_db.Org_Descendant ( Org_Unit_Code char(6) character set unicode not null, Org_Unit_Type char(3) character set unicode not null, Entity_Code varchar(10) uppercase not null, Parent_Org_Unit_Code char(6) character set unicode not null, Parent_Org_Unit_Type char(3) character set unicode not null, Parent_Entity_Code varchar(10) uppercase not null ) 
primary index Org_Descendant_NUPI (Org_Unit_Code, Org_Unit_Type, Entity_Code)
;

collect statistics
    column (Org_Unit_Code, Org_Unit_Type, Entity_Code) as Org_Descendant_NUPI,
    column (Org_Unit_Type),
    column (Entity_Code),
    column (Org_Unit_Code, Entity_Code),
    column (Entity_Code, Parent_Org_Unit_Code, Parent_Org_Unit_Type),
    column (Org_Unit_Code),
    column (Parent_Org_Unit_Code, Parent_Org_Unit_Type, Parent_Entity_Code)
on sandbox_db.Org_Descendant;

comment on table sandbox_db.Org_Descendant is 'View with all Org_Unit_Ids on all levels';
comment on column sandbox_db.Org_Descendant.Org_Unit_Code is 'Organisational unit code';
comment on column sandbox_db.Org_Descendant.Org_Unit_Type is 'The type of organization such as branch, region, team, call center';
comment on column sandbox_db.Org_Descendant.Entity_Code is 'Owning entity code';
comment on column sandbox_db.Org_Descendant.Parent_Org_Unit_Code is 'Organisational unit code';
comment on column sandbox_db.Org_Descendant.Parent_Org_Unit_Type is 'The type of organization such as branch, region, team, call center';
comment on column sandbox_db.Org_Descendant.Parent_Entity_Code is 'Owning entity code parent';

CREATE VOLATILE MULTISET TABLE date_control
(calculation_date DATE FORMAT 'yyyy-mm-dd'
) PRIMARY INDEX (calculation_date);

CREATE MULTISET VOLATILE TABLE date_control
(calculation_date DATE FORMAT 'yyyy-mm-dd'
) PRIMARY INDEX (calculation_date);

-- Testing of the specific create table begin options
CREATE MULTISET TABLE CONSUMOS,
NO FALLBACK,
NO BEFORE JOURNAL,
NO AFTER JOURNAL,
CHECKSUM = DEFAULT,
DEFAULT MERGEBLOCKRATIO
(
    FIELD1 CHAR(9)
)
PRIMARY INDEX( FIELD1 );

-- Testing of the specific column options
CREATE MULTISET TABLE TABLE_2
(
    CHAR_FIELD CHAR(19) CHARACTER SET LATIN NOT CASESPECIFIC NOT NULL,
    DATE_FIELD DATE FORMAT 'YYYY-MM-DD' NOT NULL,
    BYTE_FIELD BYTEINT COMPRESS 0,
    DECIMAL_FIELD DECIMAL(15, 2) COMPRESS (50.00, 45.50, 40.00, 30.00, 27.80,
        27.05, 20.00, 17.87, 17.56, 17.41, 17.26, 17.11, 16.96, 16.82, 16.68),
    TIMESTAMP_FIELD TIMESTAMP(6) NOT NULL
)
PRIMARY INDEX( CHAR_FIELD, DATE_FIELD, BYTE_FIELD );

-- Testing of the specific create table end options
CREATE MULTISET TABLE NUM_LTR_DESVINCULADOS_ADH
(
    DES_EVENTO VARCHAR(255) CHARACTER SET LATIN NOT CASESPECIFIC
        COMPRESS ('Cambio de bandera', 'Cierre'),
    IND_CONTINUA BYTEINT COMPRESS
)
PRIMARY INDEX( COD_TARJETA, COD_EST, FEC_CIERRE_EST, IND_TIPO_TARJETA )
PARTITION BY RANGE_N (FEC_OPERACION BETWEEN DATE '2007-01-01' AND DATE '2022-01-01'
    EACH INTERVAL '1' MONTH, NO RANGE OR UNKNOWN)
INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA );

create table sandbox_db.Org_Descendant
(
    Org_Unit_Code char(6) character set unicode not null,
    Org_Unit_Type char(3) character set unicode not null,
    Entity_Code varchar(10) uppercase not null,
    Parent_Org_Unit_Code char(6) character set unicode not null,
    Parent_Org_Unit_Type char(3) character set unicode not null,
    Parent_Entity_Code varchar(10) uppercase not null
)
primary index Org_Descendant_NUPI (Org_Unit_Code, Org_Unit_Type, Entity_Code)
;

CREATE VOLATILE TABLE a AS (SELECT 'A' AS B) WITH DATA ON COMMIT PRESERVE ROWS;
CREATE VOLATILE TABLE b AS (SELECT 'A' AS B) WITH DATA ON COMMIT DELETE ROWS;
CREATE VOLATILE TABLE c AS (SELECT 'A' AS B) WITH NO DATA;
CREATE VOLATILE TABLE e AS (SELECT 'A' AS B) WITH NO DATA AND STATS;
CREATE VOLATILE TABLE f AS (SELECT 'A' AS B) WITH NO DATA AND NO STATS;
CREATE VOLATILE TABLE g AS (SELECT 'A' AS B) WITH NO DATA AND STATISTICS;
CREATE VOLATILE TABLE h AS (SELECT 'A' AS B) WITH NO DATA AND NO STATISTICS ON COMMIT
PRESERVE ROWS; -- Testing of the set tables with options CREATE SET TABLE TABLE_2, FALLBACK , NO BEFORE JOURNAL, NO AFTER JOURNAL, CHECKSUM = DEFAULT, DEFAULT MERGEBLOCKRATIO, MAP = TD_MAP1 ( CHAR_FIELD CHAR(19) CHARACTER SET LATIN NOT CASESPECIFIC NOT NULL, DATE_FIELD DATE FORMAT 'YYYY-MM-DD' NOT NULL, BYTE_FIELD BYTEINT COMPRESS 0, DECIMAL_FIELD DECIMAL(15, 2) COMPRESS ( 50.00, 45.50, 40.00, 30.00, 27.80, 27.05, 20.00, 17.87, 17.56, 17.41, 17.26, 17.11, 16.96, 16.82, 16.68 ), TIMESTAMP_FIELD TIMESTAMP(6) NOT NULL ) PRIMARY INDEX (CHAR_FIELD, DATE_FIELD, BYTE_FIELD); sqlfluff-3.4.2/test/fixtures/dialects/teradata/create_table.yml000066400000000000000000000753361503426445100247260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 75a610840d7bc8a1820793d9d97af0727a137c1662071040a10c064e1f58b4d2 file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - end_bracket: ) - td_table_constraint: - keyword: primary - keyword: index - object_reference: naked_identifier: Org_Descendant_NUPI - 
bracketed: - start_bracket: ( - naked_identifier: Org_Unit_Code - comma: ',' - naked_identifier: Org_Unit_Type - comma: ',' - naked_identifier: Entity_Code - end_bracket: ) - statement_terminator: ; - statement: collect_statistics_statement: - keyword: collect - keyword: statistics - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - keyword: as - object_reference: naked_identifier: Org_Descendant_NUPI - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Type end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Entity_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Entity_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Type - end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Parent_Entity_Code - end_bracket: ) - keyword: 'on' - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - keyword: is - quoted_literal: "'View with all Org_Unit_Ids on all levels'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Org_Unit_Code - keyword: is - quoted_literal: "'Organisational unit code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Org_Unit_Type - keyword: is - quoted_literal: "'The type of organization such as branch, region, team, call\ \ center'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Entity_Code - keyword: is - quoted_literal: "'Owning entity code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . 
- naked_identifier: Parent_Org_Unit_Code - keyword: is - quoted_literal: "'Organisational unit code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Parent_Org_Unit_Type - keyword: is - quoted_literal: "'The type of organization such as branch, region, team, call\ \ center'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Parent_Entity_Code - keyword: is - quoted_literal: "'Owning entity code parent'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: date_control - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: calculation_date data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'yyyy-mm-dd'" end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: start_bracket: ( naked_identifier: calculation_date end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: date_control - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: calculation_date data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'yyyy-mm-dd'" end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: start_bracket: ( naked_identifier: calculation_date end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: CONSUMOS - create_table_options_statement: - comma: ',' - keyword: 'NO' - keyword: FALLBACK - comma: ',' - keyword: 'NO' - keyword: BEFORE - keyword: JOURNAL - comma: ',' - keyword: 'NO' - keyword: AFTER - keyword: JOURNAL - comma: ',' - keyword: CHECKSUM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - keyword: MERGEBLOCKRATIO - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: FIELD1 data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '9' end_bracket: ) end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: start_bracket: ( naked_identifier: FIELD1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: TABLE_2 - bracketed: - start_bracket: ( - column_definition: - column_reference: naked_identifier: CHAR_FIELD - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '19' end_bracket: ) - td_column_attribute_constraint: - keyword: CHARACTER - keyword: SET - naked_identifier: LATIN - td_column_attribute_constraint: - keyword: NOT - keyword: CASESPECIFIC - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: DATE_FIELD data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYY-MM-DD'" column_constraint_segment: - 
keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: BYTE_FIELD data_type: data_type_identifier: BYTEINT td_column_attribute_constraint: keyword: COMPRESS numeric_literal: '0' - comma: ',' - column_definition: column_reference: naked_identifier: DECIMAL_FIELD data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '15' - comma: ',' - numeric_literal: '2' - end_bracket: ) td_column_attribute_constraint: keyword: COMPRESS bracketed: - start_bracket: ( - numeric_literal: '50.00' - comma: ',' - numeric_literal: '45.50' - comma: ',' - numeric_literal: '40.00' - comma: ',' - numeric_literal: '30.00' - comma: ',' - numeric_literal: '27.80' - comma: ',' - numeric_literal: '27.05' - comma: ',' - numeric_literal: '20.00' - comma: ',' - numeric_literal: '17.87' - comma: ',' - numeric_literal: '17.56' - comma: ',' - numeric_literal: '17.41' - comma: ',' - numeric_literal: '17.26' - comma: ',' - numeric_literal: '17.11' - comma: ',' - numeric_literal: '16.96' - comma: ',' - numeric_literal: '16.82' - comma: ',' - numeric_literal: '16.68' - end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: TIMESTAMP_FIELD data_type: data_type_identifier: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: - start_bracket: ( - naked_identifier: CHAR_FIELD - comma: ',' - naked_identifier: DATE_FIELD - comma: ',' - naked_identifier: BYTE_FIELD - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: NUM_LTR_DESVINCULADOS_ADH - bracketed: - start_bracket: ( - column_definition: - column_reference: naked_identifier: DES_EVENTO - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - td_column_attribute_constraint: - keyword: CHARACTER - keyword: SET - naked_identifier: LATIN - td_column_attribute_constraint: - keyword: NOT - keyword: CASESPECIFIC - td_column_attribute_constraint: keyword: COMPRESS bracketed: - start_bracket: ( - quoted_literal: "'Cambio de bandera'" - comma: ',' - quoted_literal: "'Cierre'" - end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: IND_CONTINUA data_type: data_type_identifier: BYTEINT td_column_attribute_constraint: keyword: COMPRESS - end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: - start_bracket: ( - naked_identifier: COD_TARJETA - comma: ',' - naked_identifier: COD_EST - comma: ',' - naked_identifier: FEC_CIERRE_EST - comma: ',' - naked_identifier: IND_TIPO_TARJETA - end_bracket: ) - keyword: PARTITION - keyword: BY - td_partitioning_level: function_name: function_name_identifier: RANGE_N bracketed: - start_bracket: ( - word: FEC_OPERACION - word: BETWEEN - word: DATE - single_quote: "'2007-01-01'" - word: AND - word: DATE - single_quote: "'2022-01-01'" - word: EACH - word: INTERVAL - single_quote: "'1'" - word: MONTH - comma: ',' - word: 'NO' - word: RANGE - word: OR - word: UNKNOWN - end_bracket: ) - keyword: INDEX - object_reference: naked_identifier: HOPR_TRN_TRAV_SIN_MP_I - bracketed: start_bracket: ( column_reference: naked_identifier: IND_TIPO_TARJETA end_bracket: ) - statement_terminator: ; - statement: 
create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - end_bracket: ) - td_table_constraint: - keyword: primary - keyword: index - object_reference: naked_identifier: Org_Descendant_NUPI - bracketed: - start_bracket: ( - naked_identifier: Org_Unit_Code - comma: ',' - naked_identifier: Org_Unit_Type - comma: ',' - naked_identifier: Entity_Code - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: a - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: DATA - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: b - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: DATA - keyword: 'ON' - keyword: 
COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: c - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: e - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: STATS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: f - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: 'NO' - keyword: STATS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: g - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: STATISTICS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: h - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: alias_operator: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: 'NO' - keyword: STATISTICS - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: SET - keyword: TABLE - table_reference: naked_identifier: TABLE_2 - create_table_options_statement: - comma: ',' - keyword: FALLBACK - comma: ',' - keyword: 'NO' - keyword: BEFORE - keyword: JOURNAL - comma: ',' - keyword: 'NO' - keyword: AFTER - keyword: JOURNAL - comma: ',' - keyword: CHECKSUM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - keyword: MERGEBLOCKRATIO - comma: ',' - keyword: MAP - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: TD_MAP1 - bracketed: - start_bracket: ( - column_definition: - column_reference: naked_identifier: CHAR_FIELD - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '19' end_bracket: ) - td_column_attribute_constraint: - keyword: CHARACTER - keyword: SET - naked_identifier: LATIN - td_column_attribute_constraint: - keyword: NOT - keyword: 
CASESPECIFIC - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: DATE_FIELD data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYY-MM-DD'" column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: BYTE_FIELD data_type: data_type_identifier: BYTEINT td_column_attribute_constraint: keyword: COMPRESS numeric_literal: '0' - comma: ',' - column_definition: column_reference: naked_identifier: DECIMAL_FIELD data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '15' - comma: ',' - numeric_literal: '2' - end_bracket: ) td_column_attribute_constraint: keyword: COMPRESS bracketed: - start_bracket: ( - numeric_literal: '50.00' - comma: ',' - numeric_literal: '45.50' - comma: ',' - numeric_literal: '40.00' - comma: ',' - numeric_literal: '30.00' - comma: ',' - numeric_literal: '27.80' - comma: ',' - numeric_literal: '27.05' - comma: ',' - numeric_literal: '20.00' - comma: ',' - numeric_literal: '17.87' - comma: ',' - numeric_literal: '17.56' - comma: ',' - numeric_literal: '17.41' - comma: ',' - numeric_literal: '17.26' - comma: ',' - numeric_literal: '17.11' - comma: ',' - numeric_literal: '16.96' - comma: ',' - numeric_literal: '16.82' - comma: ',' - numeric_literal: '16.68' - end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: TIMESTAMP_FIELD data_type: data_type_identifier: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: - start_bracket: ( - naked_identifier: CHAR_FIELD - comma: ',' - naked_identifier: DATE_FIELD - comma: ',' - naked_identifier: BYTE_FIELD - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/create_view.sql000066400000000000000000000004471503426445100245760ustar00rootroot00000000000000CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); REPLACE VIEW vw_appt_latest AS ( WITH most_current as ( SELECT da.* FROM dim_appt da WHERE da.current_appt_id IS NULL ) SELECT * from most_current ); sqlfluff-3.4.2/test/fixtures/dialects/teradata/create_view.yml000066400000000000000000000073141503426445100246000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b07ff71a9b1c95969b2134fde83b968a608b52c5a23cd0f1f101390df9ad2d3f file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: a - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vw_appt_latest - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: most_current keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: da dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dim_appt alias_expression: naked_identifier: da where_clause: keyword: WHERE expression: column_reference: - naked_identifier: da - dot: . - naked_identifier: current_appt_id keyword: IS null_literal: 'NULL' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: most_current end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/database.sql000066400000000000000000000000241503426445100240340ustar00rootroot00000000000000DATABASE database1; sqlfluff-3.4.2/test/fixtures/dialects/teradata/database.yml000066400000000000000000000010371503426445100240430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7cac8d54c918136598faf203938b44e330a57396dfae6efc0d65fa18f733c571 file: statement: database_statement: keyword: DATABASE database_reference: naked_identifier: database1 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/delete.sql000066400000000000000000000004541503426445100235410ustar00rootroot00000000000000DELETE FROM MY_TABLE WHERE 1=1 ; DELETE FROM MY_TABLE WHERE MY_COL > 10 ; DELETE FROM MY_TABLE WHERE ID IN (SELECT ID FROM ANOTHER_TABLE) AND ID <> 5 ; DEL FROM MY_TABLE WHERE 1=1 ; DEL FROM MY_TABLE WHERE MY_COL > 10 ; DEL FROM MY_TABLE WHERE ID IN (SELECT ID FROM ANOTHER_TABLE) AND ID <> 5 ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/delete.yml000066400000000000000000000113371503426445100235450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ee1c9bd2be40a1b3b1133e4b1c0e9a569b519ffe26d5ba228c80511b0bfb214a file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: column_reference: naked_identifier: MY_COL comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ID - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ANOTHER_TABLE end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ID - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '5' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: column_reference: naked_identifier: MY_COL comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ID - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ANOTHER_TABLE end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ID - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '5' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/locking.sql000066400000000000000000000004121503426445100237170ustar00rootroot00000000000000LOCKING DATABASE database_name FOR ACCESS SELECT a FROM database.mytable; LOCKING TABLE table_name FOR READ SELECT a FROM table_name; LOCK ROW FOR WRITE SELECT a FROM table_name; LOCKING VIEW v FOR EXCLUSIVE WITH cte AS ( SELECT a FROM v ) SELECT a FROM cte; sqlfluff-3.4.2/test/fixtures/dialects/teradata/locking.yml000066400000000000000000000064551503426445100237360ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 38785caf0721bbb22cdf03305388f64518102e303e2f3c7ddc2fa543db8f7a53 file: - statement: select_statement: locking_clause: - keyword: LOCKING - keyword: DATABASE - object_reference: naked_identifier: database_name - keyword: FOR - keyword: ACCESS select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: database - dot: . - naked_identifier: mytable - statement_terminator: ; - statement: select_statement: locking_clause: - keyword: LOCKING - keyword: TABLE - object_reference: naked_identifier: table_name - keyword: FOR - keyword: READ select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: locking_clause: - keyword: LOCK - keyword: ROW - keyword: FOR - keyword: WRITE select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: with_compound_statement: locking_clause: - keyword: LOCKING - keyword: VIEW - object_reference: naked_identifier: v - keyword: FOR - keyword: EXCLUSIVE keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/qualify.sql000066400000000000000000000003011503426445100237400ustar00rootroot00000000000000SELECT id FROM mytable qualify x = 1; SELECT id FROM mytable qualify x = 1 UNION ALL SELECT id FROM mytable qualify x = 1; SELECT id FROM mytable qualify count(*) over (partition by id) > 1; sqlfluff-3.4.2/test/fixtures/dialects/teradata/qualify.yml000066400000000000000000000070641503426445100237570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0484ae2cd5544435691f74d092fcecae32d0ec0d80c6e67b7d571402c3d455be file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: id end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/select.sql000066400000000000000000000025071503426445100235570ustar00rootroot00000000000000SELECT DATE; CREATE TABLE t1 (f1 DATE); SELECT DATE (FORMAT 'MMMbdd,bYYYY'); -- (CHAR(12), UC); -- https://docs.teradata.com/r/S0Fw2AVH8ff3MDA0wDOHlQ/ryoeKJsEr22NqKahaktP5g -- Disabled CHAR(12, UC) for now, see #1665 SELECT ADD_MONTHS(abandono.FEC_CIERRE_EST, -12) AS FEC_CIERRE_EST_ULT12, CAST('200010' AS DATE FORMAT 'YYYYMM') AS CAST_STATEMENT_EXAMPLE FROM EXAMPLE_TABLE; SEL * FROM CUSTOMERS; SELECT * FROM CUSTOMERS; SEL 1; SELECT 1; SELECT '9999-12-31' (DATE), '9999-12-31' (DATE FORMAT 'YYYY-MM-DD'), '100000' (SMALLINT) from test_table; select normalize on meets or overlaps id ,period(vld_fm, vld_to) as vld_prd from mydb.mytable where id = 12345; SELECT NORMALIZE ON MEETS OR OVERLAPS emp_id, duration FROM project; SELECT NORMALIZE project_name, duration FROM project; SELECT NORMALIZE project_name, dept_id, duration FROM project; SELECT NORMALIZE ON OVERLAPS project_name, dept_id, duration FROM project; SELECT NORMALIZE ON OVERLAPS OR MEETS project_name, dept_id, duration FROM project; SELECT TOP 100 * FROM MY_TABLE; SELECT * FROM MY_TABLE; SELECT TOP 100 COL_A, COL_B FROM MY_TABLE; SELECT DISTINCT * FROM MY_TABLE; SELECT TOP 10 PERCENT * FROM MY_TABLE; SELECT TOP 0.1 PERCENT COL_A FROM MY_TABLE; SELECT TOP 0.1 PERCENT WITH TIES COL_A, COL_B FROM MY_TABLE ORDER 
BY COL_B; sqlfluff-3.4.2/test/fixtures/dialects/teradata/select.yml000066400000000000000000000345551503426445100235710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ab5b8fb2577e778a0f3b3d343155b4f189e66f2893f09de6be04cba4301fb4d5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: DATE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: f1 data_type: data_type_identifier: DATE end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: data_type: data_type_identifier: FORMAT quoted_literal: "'MMMbdd,bYYYY'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ADD_MONTHS function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: abandono - dot: . - naked_identifier: FEC_CIERRE_EST - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '12' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: FEC_CIERRE_EST_ULT12 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'200010'" keyword: AS data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYYMM'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: CAST_STATEMENT_EXAMPLE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EXAMPLE_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SEL select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: CUSTOMERS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: CUSTOMERS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SEL select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: quoted_literal: "'9999-12-31'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: DATE end_bracket: ) - comma: ',' - select_clause_element: expression: quoted_literal: "'9999-12-31'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: DATE keyword: FORMAT 
quoted_literal: "'YYYY-MM-DD'" end_bracket: ) - comma: ',' - select_clause_element: expression: quoted_literal: "'100000'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: SMALLINT end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: normalize - keyword: 'on' - keyword: meets - keyword: or - keyword: overlaps - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: period function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: vld_fm - comma: ',' - expression: column_reference: naked_identifier: vld_to - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: vld_prd from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydb - dot: . - naked_identifier: mytable where_clause: keyword: where expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '12345' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: MEETS - keyword: OR - keyword: OVERLAPS - select_clause_element: column_reference: naked_identifier: emp_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: NORMALIZE - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: NORMALIZE - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: OVERLAPS - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: OVERLAPS - keyword: OR - keyword: MEETS - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - 
select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '100' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP expression: numeric_literal: '100' - select_clause_element: column_reference: naked_identifier: COL_A - comma: ',' - select_clause_element: column_reference: naked_identifier: COL_B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: TOP - expression: numeric_literal: '10' - keyword: PERCENT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: TOP - expression: numeric_literal: '0.1' - keyword: PERCENT select_clause_element: column_reference: naked_identifier: COL_A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - expression: numeric_literal: '0.1' - keyword: PERCENT - keyword: WITH - keyword: TIES - select_clause_element: column_reference: naked_identifier: COL_A - comma: ',' - select_clause_element: column_reference: naked_identifier: COL_B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: COL_B - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/set_query_band.sql000066400000000000000000000005741503426445100253060ustar00rootroot00000000000000SET QUERY_BAND = 'cat=siamese;dog=akita;' UPDATE FOR SESSION VOLATILE; SET QUERY_BAND = 'area=west;city=sandiego;tree=maple;flower=rose;' FOR SESSION; SET QUERY_BAND = 'city=san diego;' UPDATE FOR SESSION; SET QUERY_BAND='PROXYUSER=fred;' FOR TRANSACTION; SET QUERY_BAND = NONE FOR TRANSACTION; SET 
QUERY_BAND=NONE FOR TRANSACTION; SET QUERY_BAND = '' FOR TRANSACTION; sqlfluff-3.4.2/test/fixtures/dialects/teradata/set_query_band.yml000066400000000000000000000043641503426445100253110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a3d14870d12be52d65f2e1939f1ede0eb56a02f4f2899835133fa373cd836218 file: - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'cat=siamese;dog=akita;'" - keyword: UPDATE - keyword: FOR - keyword: SESSION - keyword: VOLATILE - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'area=west;city=sandiego;tree=maple;flower=rose;'" - keyword: FOR - keyword: SESSION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'city=san diego;'" - keyword: UPDATE - keyword: FOR - keyword: SESSION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PROXYUSER=fred;'" - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/set_session.sql000066400000000000000000000000671503426445100246350ustar00rootroot00000000000000SET SESSION DATABASE database1; SS DATABASE database1; sqlfluff-3.4.2/test/fixtures/dialects/teradata/set_session.yml000066400000000000000000000014661503426445100246430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 41c693de7ef7b9f05e2ff7ad60eb113c8afc7f8281ab1109d15d44949b99f446 file: - statement: set_session_statement: - keyword: SET - keyword: SESSION - database_statement: keyword: DATABASE database_reference: naked_identifier: database1 - statement_terminator: ; - statement: set_session_statement: keyword: SS database_statement: keyword: DATABASE database_reference: naked_identifier: database1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/teradata/update_from.sql000066400000000000000000000003671503426445100246070ustar00rootroot00000000000000UPDATE table_name FROM ( SELECT a, b, c, d FROM t_b INNER JOIN t_c ON t_b.d = t_c.d WHERE b = 'F' -- AND SUBSTR(c, 1, 1) = 'T' ) AS t_d SET column1 = value1, column2 = 'value2' WHERE a=1; sqlfluff-3.4.2/test/fixtures/dialects/teradata/update_from.yml000066400000000000000000000071151503426445100246070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fba6cf39f7c4ae9198a483d68763f3811f0d996dc386522336f8ad08c869b772 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name from_in_update_clause: keyword: FROM from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_b join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_c - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_b - dot: . - naked_identifier: d - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t_c - dot: . 
- naked_identifier: d where_clause: keyword: WHERE expression: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'F'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t_d set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value1 - comma: ',' - set_clause: column_reference: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value2'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/000077500000000000000000000000001503426445100211215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/trino/.sqlfluff000066400000000000000000000000331503426445100227400ustar00rootroot00000000000000[sqlfluff] dialect = trino sqlfluff-3.4.2/test/fixtures/dialects/trino/alter_table.sql000066400000000000000000000023311503426445100241170ustar00rootroot00000000000000ALTER TABLE t1 RENAME TO t2; ALTER TABLE IF EXISTS t1 RENAME TO t2; ALTER TABLE t1 ADD COLUMN col1 VARCHAR; ALTER TABLE t1 ADD COLUMN IF NOT EXISTS col1 VARCHAR; ALTER TABLE t1 ADD COLUMN col1 VARCHAR NOT NULL; ALTER TABLE t1 ADD COLUMN col1 VARCHAR COMMENT 'comment'; ALTER TABLE t1 ADD COLUMN col1 VARCHAR WITH (x = 'y'); ALTER TABLE t1 ADD COLUMN col1 VARCHAR FIRST; ALTER TABLE t1 ADD COLUMN col1 VARCHAR LAST; ALTER TABLE t1 ADD COLUMN col1 VARCHAR AFTER col2; ALTER TABLE t1 DROP COLUMN col1; ALTER TABLE t1 DROP COLUMN IF EXISTS col1; ALTER TABLE t1 RENAME COLUMN col1 TO col2; ALTER TABLE t1 RENAME COLUMN IF EXISTS col1 TO col2; ALTER TABLE t1 ALTER COLUMN col1 SET DATA TYPE INTEGER; ALTER TABLE t1 ALTER COLUMN col1 SET DATA TYPE VARCHAR(100); ALTER TABLE t1 ALTER COLUMN col1 DROP NOT NULL; ALTER TABLE t1 SET AUTHORIZATION u1; ALTER TABLE t1 SET AUTHORIZATION USER u1; ALTER TABLE t1 SET AUTHORIZATION ROLE r1; ALTER TABLE t1 SET PROPERTIES x = 'y'; ALTER TABLE t1 SET PROPERTIES x = DEFAULT; ALTER TABLE t1 SET PROPERTIES foo = 123, bar = 456; ALTER TABLE t1 EXECUTE func; ALTER TABLE t1 EXECUTE func(x => 'y'); ALTER TABLE t1 EXECUTE func(foo => 123, bar => 456); ALTER TABLE t1 EXECUTE func(x => 'y') WHERE col1 > 0; sqlfluff-3.4.2/test/fixtures/dialects/trino/alter_table.yml000066400000000000000000000237171503426445100241340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bda187ec3f0ea6b3bbdd96b0591804d07b326a24d0d9e4345e10ba56a7905f48 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: RENAME - keyword: TO - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t1 - keyword: RENAME - keyword: TO - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: - naked_identifier: col1 - data_type: keyword: VARCHAR - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR comment_clause: keyword: COMMENT quoted_literal: "'comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR keyword: WITH bracketed: start_bracket: ( expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' quoted_literal: "'y'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR - keyword: LAST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: col1 data_type: keyword: VARCHAR - keyword: AFTER - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: TO - column_reference: 
naked_identifier: col2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: RENAME - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: col1 - keyword: TO - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: SET - keyword: DATA - keyword: TYPE - data_type: keyword: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: SET - keyword: DATA - keyword: TYPE - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: col1 - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: AUTHORIZATION - role_reference: naked_identifier: u1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: AUTHORIZATION - keyword: USER - role_reference: naked_identifier: u1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: AUTHORIZATION - keyword: ROLE - role_reference: naked_identifier: r1 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: PROPERTIES - parameter: x - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'y'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: PROPERTIES - parameter: x - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: SET - keyword: PROPERTIES - parameter: foo - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '123' - comma: ',' - parameter: bar - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '456' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: EXECUTE - function_name: function_name_identifier: func - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: EXECUTE - function_name: function_name_identifier: func - bracketed: start_bracket: ( parameter: x execute_arrow: => expression: quoted_literal: "'y'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: 
naked_identifier: t1 - keyword: EXECUTE - function_name: function_name_identifier: func - bracketed: - start_bracket: ( - parameter: foo - execute_arrow: => - expression: numeric_literal: '123' - comma: ',' - parameter: bar - execute_arrow: => - expression: numeric_literal: '456' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: EXECUTE - function_name: function_name_identifier: func - bracketed: start_bracket: ( parameter: x execute_arrow: => expression: quoted_literal: "'y'" end_bracket: ) - keyword: WHERE - expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/analyze.sql000066400000000000000000000001361503426445100233050ustar00rootroot00000000000000ANALYZE web; ANALYZE hive.default.stores; ANALYZE hive.default.sales WITH (partitions = 1); sqlfluff-3.4.2/test/fixtures/dialects/trino/analyze.yml000066400000000000000000000023251503426445100233110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d13b64555b1b727a481eb1af1a2fa2a8d501d1bf078a1774ffff6d8dc22b3b4 file: - statement: analyze_statement: keyword: ANALYZE table_reference: naked_identifier: web - statement_terminator: ; - statement: analyze_statement: keyword: ANALYZE table_reference: - naked_identifier: hive - dot: . - naked_identifier: default - dot: . - naked_identifier: stores - statement_terminator: ; - statement: analyze_statement: - keyword: ANALYZE - table_reference: - naked_identifier: hive - dot: . - naked_identifier: default - dot: . - naked_identifier: sales - keyword: WITH - bracketed: start_bracket: ( parameter: partitions comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/array.sql000066400000000000000000000007171503426445100227650ustar00rootroot00000000000000SELECT ARRAY[1,2] || ARRAY[3,4]; SELECT ARRAY[ARRAY['meeting', 'lunch'], ARRAY['training', 'presentation']]; SELECT column FROM UNNEST(ARRAY[1, 2]); SELECT FILTER(ARRAY[5, -6, NULL, 7], x -> x > 0); SELECT ANY_MATCH(ARRAY[5, -6, NULL, 7], x -> x > 0); SELECT ELEMENT_AT(ARRAY['apple', 'banana', 'orange'], 2); SELECT CAST(ARRAY[1,2] AS ARRAY); SELECT CAST(ARRAY[1,2] AS ARRAY(DOUBLE)); SELECT CAST(JSON_PARSE('[1, 2, 3, 4, 5]') AS ARRAY); sqlfluff-3.4.2/test/fixtures/dialects/trino/array.yml000066400000000000000000000235331503426445100227700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3a6b352e64b02f1f980732b97192496070288befe982266f03b65313ce40f506 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - comma: ',' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'training'" - comma: ',' - quoted_literal: "'presentation'" - end_square_bracket: ']' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FILTER function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '6' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - end_square_bracket: ']' comma: ',' lambda_function: parameter: x lambda_arrow: -> expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ANY_MATCH function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '6' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - end_square_bracket: ']' comma: ',' lambda_function: parameter: x lambda_arrow: -> expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ELEMENT_AT function_contents: bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'apple'" - comma: ',' - quoted_literal: 
"'banana'" - comma: ',' - quoted_literal: "'orange'" - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' keyword: AS data_type: array_type: keyword: ARRAY array_type_schema: start_angle_bracket: < data_type: keyword: DOUBLE end_angle_bracket: '>' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' keyword: AS data_type: array_type: keyword: ARRAY array_type_schema: bracketed: start_bracket: ( data_type: keyword: DOUBLE end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_PARSE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'[1, 2, 3, 4, 5]'" end_bracket: ) keyword: AS data_type: array_type: keyword: ARRAY array_type_schema: start_angle_bracket: < data_type: keyword: DOUBLE end_angle_bracket: '>' end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/bare_functions.sql000066400000000000000000000002041503426445100246370ustar00rootroot00000000000000SELECT current_date AS col1, current_timestamp AS col2, current_time as col3, localtime as col4, localtimestamp as col5 ; sqlfluff-3.4.2/test/fixtures/dialects/trino/bare_functions.yml000066400000000000000000000027461503426445100246560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5cc20668a975c2474b6ae132995e618adb281307f3987a056b2b0a8a374f4504 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: current_date alias_expression: alias_operator: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: bare_function: current_timestamp alias_expression: alias_operator: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: bare_function: current_time alias_expression: alias_operator: keyword: as naked_identifier: col3 - comma: ',' - select_clause_element: bare_function: localtime alias_expression: alias_operator: keyword: as naked_identifier: col4 - comma: ',' - select_clause_element: bare_function: localtimestamp alias_expression: alias_operator: keyword: as naked_identifier: col5 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/commit.sql000066400000000000000000000000261503426445100231300ustar00rootroot00000000000000COMMIT; COMMIT WORK; sqlfluff-3.4.2/test/fixtures/dialects/trino/commit.yml000066400000000000000000000011161503426445100231330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95b62869617eee4a40684ab854747635c230d7fdab58141f42c9468563f355ff file: - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: transaction_statement: - keyword: COMMIT - keyword: WORK - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/common_on.sql000066400000000000000000000002771503426445100236340ustar00rootroot00000000000000COMMENT ON TABLE abc IS 'xyz'; COMMENT ON VIEW abc IS 'xyz'; COMMENT ON COLUMN table1.column1 IS 'abc'; COMMENT ON COLUMN table1.column2 IS 'abc'; COMMENT ON COLUMN table1.column3 IS 'abc.'; sqlfluff-3.4.2/test/fixtures/dialects/trino/common_on.yml000066400000000000000000000032141503426445100236300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d5a7b78e3c35ccaf58e5b2ed1a708e23ba2961143e69e5ffd8460420a3c11ae file: - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: abc - keyword: IS - quoted_literal: "'xyz'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: VIEW - table_reference: naked_identifier: abc - keyword: IS - quoted_literal: "'xyz'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column1 - keyword: IS - quoted_literal: "'abc'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column2 - keyword: IS - quoted_literal: "'abc'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: column3 - keyword: IS - quoted_literal: "'abc.'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/create_table.sql000066400000000000000000000021641503426445100242570ustar00rootroot00000000000000CREATE TABLE a ( str varchar ); create table if not exists foo.bar.baz ( date_nk date, date_ts timestamp, site varchar(30), partition_date date ) with ( format = 'parquet', partitioned_by = array ['partition_date'] ); CREATE TABLE orders ( orderkey bigint, orderstatus varchar, totalprice double, orderdate date ) WITH (format = 'ORC') ; CREATE TABLE IF NOT EXISTS orders ( orderkey bigint, orderstatus varchar, totalprice double COMMENT 'Price in cents.', shipmentstatus varchar not null, orderdate date ) COMMENT 'A table to keep track of orders.' ; CREATE TABLE bigger_orders ( another_orderkey bigint, LIKE orders, another_orderdate date ) ; CREATE TABLE orders_column_aliased (order_date, total_price) AS SELECT orderdate, totalprice FROM orders ; CREATE TABLE orders_by_date COMMENT 'Summary of orders by date' WITH (format = 'ORC') AS SELECT orderdate, sum(totalprice) AS price FROM orders GROUP BY orderdate ; CREATE TABLE IF NOT EXISTS orders_by_date AS SELECT orderdate, sum(totalprice) AS price FROM orders GROUP BY orderdate ; CREATE TABLE empty_nation AS SELECT * FROM nation WITH NO DATA ; sqlfluff-3.4.2/test/fixtures/dialects/trino/create_table.yml000066400000000000000000000231571503426445100242660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9fec1b748fee758a5b704b2a453c2f9c13dffe90b88875e0896754da12f18aad file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: str data_type: keyword: varchar end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - keyword: if - keyword: not - keyword: exists - table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - dot: . 
- naked_identifier: baz - bracketed: - start_bracket: ( - column_definition: naked_identifier: date_nk data_type: keyword: date - comma: ',' - column_definition: naked_identifier: date_ts data_type: keyword: timestamp - comma: ',' - column_definition: naked_identifier: site data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '30' end_bracket: ) - comma: ',' - column_definition: naked_identifier: partition_date data_type: keyword: date - end_bracket: ) - keyword: with - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: format comparison_operator: raw_comparison_operator: '=' quoted_literal: "'parquet'" - comma: ',' - expression: column_reference: naked_identifier: partitioned_by comparison_operator: raw_comparison_operator: '=' typed_array_literal: array_type: keyword: array array_literal: start_square_bracket: '[' quoted_literal: "'partition_date'" end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_definition: naked_identifier: orderkey data_type: keyword: bigint - comma: ',' - column_definition: naked_identifier: orderstatus data_type: keyword: varchar - comma: ',' - column_definition: naked_identifier: totalprice data_type: keyword: double - comma: ',' - column_definition: naked_identifier: orderdate data_type: keyword: date - end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( expression: column_reference: naked_identifier: format comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ORC'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_definition: naked_identifier: orderkey data_type: keyword: bigint - comma: ',' - column_definition: naked_identifier: orderstatus data_type: keyword: varchar - comma: ',' - column_definition: naked_identifier: totalprice data_type: keyword: double comment_clause: keyword: COMMENT quoted_literal: "'Price in cents.'" - comma: ',' - column_definition: - naked_identifier: shipmentstatus - data_type: keyword: varchar - keyword: not - keyword: 'null' - comma: ',' - column_definition: naked_identifier: orderdate data_type: keyword: date - end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: "'A table to keep track of orders.'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: bigger_orders - bracketed: - start_bracket: ( - column_definition: naked_identifier: another_orderkey data_type: keyword: bigint - comma: ',' - keyword: LIKE - table_reference: naked_identifier: orders - comma: ',' - column_definition: naked_identifier: another_orderdate data_type: keyword: date - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_column_aliased - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_date - comma: ',' - column_reference: naked_identifier: total_price - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderdate - comma: ',' - select_clause_element: column_reference: naked_identifier: 
totalprice from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_by_date - comment_clause: keyword: COMMENT quoted_literal: "'Summary of orders by date'" - keyword: WITH - bracketed: start_bracket: ( expression: column_reference: naked_identifier: format comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ORC'" end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderdate - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: orderdate - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: orders_by_date - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderdate - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: orderdate - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: empty_nation - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nation - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/filter_aggregate.sql000066400000000000000000000003471503426445100251410ustar00rootroot00000000000000SELECT id, COUNT(*) FILTER (WHERE o IS NOT NULL) AS count FROM (VALUES (100, 2, 'a'), (100, 1, 'b'), (200, NULL, 'c'), (200, 2, 'a'), (300, NULL, 'b'), (300, NULL, 'c') ) t(id, o, value) GROUP BY id; sqlfluff-3.4.2/test/fixtures/dialects/trino/filter_aggregate.yml000066400000000000000000000102341503426445100251370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1951167ee9047bd891870afbe239799aae2545197bd5f0ef559b9d89590a753f file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) keyword: FILTER bracketed: start_bracket: ( keyword: WHERE expression: - column_reference: naked_identifier: o - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: count from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: - start_bracket: ( - numeric_literal: '100' - comma: ',' - numeric_literal: '2' - comma: ',' - quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '100' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'b'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - null_literal: 'NULL' - comma: ',' - quoted_literal: "'c'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - numeric_literal: '2' - comma: ',' - quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '300' - comma: ',' - null_literal: 'NULL' - comma: ',' - quoted_literal: "'b'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '300' - comma: ',' - null_literal: 'NULL' - comma: ',' - quoted_literal: "'c'" - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: o - comma: ',' - naked_identifier: value end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/grouping_sets.sql000066400000000000000000000003171503426445100245330ustar00rootroot00000000000000 WITH tmp_view AS (SELECT name, price, store FROM customers, sales WHERE customers.c_id=sales.c_id) SELECT sum(price) AS volume, name, store FROM tmp_view GROUP BY GROUPING SETS (name,store,()); sqlfluff-3.4.2/test/fixtures/dialects/trino/grouping_sets.yml000066400000000000000000000071371503426445100245440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f9bceb6c6582f68314a2a832fb20711b9610e6799791898457a70f9acceba56e file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tmp_view keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sales - dot: . - naked_identifier: c_id end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: volume - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_view groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: store - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/insert.sql000066400000000000000000000002521503426445100231450ustar00rootroot00000000000000INSERT INTO t1 SELECT * FROM t2; INSERT INTO t1 VALUES (1, 'San Francisco'); INSERT INTO t1 (a,b,c) SELECT * FROM t2; INSERT INTO t1 (a,b,c) VALUES (26, 'POLAND', 3); sqlfluff-3.4.2/test/fixtures/dialects/trino/insert.yml000066400000000000000000000055501503426445100231550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1dc3f1f809d4499a91b27089b61aae1ed008179a2e91ad350b56db9d7db4359c file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: keyword: VALUES expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'San Francisco'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: keyword: VALUES expression: bracketed: - start_bracket: ( - numeric_literal: '26' - comma: ',' - quoted_literal: "'POLAND'" - comma: ',' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/integer_types.sql000066400000000000000000000003361503426445100245250ustar00rootroot00000000000000 SELECT CAST(1 AS TINYINT); SELECT CAST(2 AS SMALLINT); -- Synonyms of INTEGER -- https://trino.io/docs/422/language/types.html#integer-or-int SELECT CAST(3 AS INTEGER); SELECT CAST(4 AS INT); SELECT CAST(5 AS BIGINT); sqlfluff-3.4.2/test/fixtures/dialects/trino/integer_types.yml000066400000000000000000000055501503426445100245320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b6908cc207c828aa100f14d2e5527109c021eb95b5aa5cbbaa1ab27075c4943a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: AS data_type: keyword: TINYINT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '2' keyword: AS data_type: keyword: SMALLINT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '3' keyword: AS data_type: keyword: INTEGER end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '4' keyword: AS data_type: keyword: INT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '5' keyword: AS data_type: keyword: BIGINT end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/json_functions.sql000066400000000000000000000010661503426445100247060ustar00rootroot00000000000000select json_query(payload format json, 'lax $.unstructured.abcd[*].field?(@ > 0.5)' with array wrapper), json_query(payload format json encoding utf8, 'lax $.unstructured.abcd[*].field?(@ > 0.5)' without array wrapper), json_query(payload format json encoding utf16, 'lax $.unstructured.abcd[*].field?(@ > 0.5)' with conditional wrapper), json_query(payload format json encoding utf32, 'lax $.unstructured.abcd[*].field?(@ > 0.5)' with unconditional array wrapper), json_query(payload format json, 'lax $.unstructured.abcd[*].field?(@ > 0.5)') ; sqlfluff-3.4.2/test/fixtures/dialects/trino/json_functions.yml000066400000000000000000000076131503426445100247140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 03b392bd977fce1dbcbc460dcb6e9b5765213d1d2192edb3c16fc98f18c074d7 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: json_query function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: payload - keyword: format - keyword: json - comma: ',' - expression: quoted_literal: "'lax $.unstructured.abcd[*].field?(@ > 0.5)'" - keyword: with - keyword: array - keyword: wrapper - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_query function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: payload - keyword: format - keyword: json - keyword: encoding - keyword: utf8 - comma: ',' - expression: quoted_literal: "'lax $.unstructured.abcd[*].field?(@ > 0.5)'" - keyword: without - keyword: array - keyword: wrapper - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_query function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: payload - keyword: format - keyword: json - keyword: encoding - keyword: utf16 - comma: ',' - expression: quoted_literal: "'lax $.unstructured.abcd[*].field?(@ > 0.5)'" - keyword: with - keyword: conditional - keyword: wrapper - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_query function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: payload - keyword: format - keyword: json - keyword: encoding - keyword: utf32 - comma: ',' - expression: quoted_literal: "'lax $.unstructured.abcd[*].field?(@ > 0.5)'" - keyword: with - keyword: unconditional - keyword: array - keyword: wrapper - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_query function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: payload - keyword: format - keyword: json - comma: ',' - expression: quoted_literal: "'lax $.unstructured.abcd[*].field?(@ > 0.5)'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/range_offset.sql000066400000000000000000000012051503426445100243020ustar00rootroot00000000000000-- https://trino.io/blog/2021/03/10/introducing-new-window-features.html SELECT student_id, result, count(*) OVER ( ORDER BY result RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) AS close_better_scores_count FROM students_results; SELECT avg(totalprice) OVER ( PARTITION BY custkey ORDER BY orderdate RANGE BETWEEN interval '1' DAY PRECEDING AND interval '1' DAY FOLLOWING) FROM orders; SELECT avg(totalprice) OVER w, sum(totalprice) OVER w, max(totalprice) OVER w FROM orders WINDOW w AS ( PARTITION BY custkey ORDER BY orderdate RANGE BETWEEN interval '1' month PRECEDING AND CURRENT ROW) sqlfluff-3.4.2/test/fixtures/dialects/trino/range_offset.yml000066400000000000000000000144541503426445100243160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 89f6aa8b84d3dda114e84346eb6645fe7329fdfa76ea36a66184e220a8e0bff1 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: student_id - comma: ',' - select_clause_element: column_reference: naked_identifier: result - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: result frame_clause: - keyword: RANGE - keyword: BETWEEN - numeric_literal: '1' - keyword: FOLLOWING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: close_better_scores_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: students_results - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: custkey orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate frame_clause: - keyword: RANGE - keyword: BETWEEN - interval_expression: - keyword: interval - quoted_literal: "'1'" - keyword: DAY - keyword: PRECEDING - keyword: AND - interval_expression: - keyword: interval - quoted_literal: "'1'" - keyword: DAY - keyword: FOLLOWING end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: avg function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: custkey orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate frame_clause: - keyword: RANGE - keyword: BETWEEN - interval_expression: - keyword: interval - quoted_literal: "'1'" - keyword: month - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW 
end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/trino/regexp_replace_with_lambda.sql000066400000000000000000000005261503426445100271650ustar00rootroot00000000000000-- The variant of REGEXP_REPLACE that accepts a function -- lambda expression as an argument can be tricky to parse. -- Signature: -- regexp_replace(string, pattern, function) → varchar -- Reference: -- https://trino.io/docs/422/functions/regexp.html SELECT REGEXP_REPLACE('new york', '(\w)(\w*)', x -> UPPER(x[1]) || LOWER(x[2])); sqlfluff-3.4.2/test/fixtures/dialects/trino/regexp_replace_with_lambda.yml000066400000000000000000000046021503426445100271660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac7532cfe4e089b5c0ea6520979cfcafcef2c5b66742a95556de01b8278eac6e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: REGEXP_REPLACE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'new york'" - comma: ',' - expression: quoted_literal: "'(\\w)(\\w*)'" - comma: ',' - lambda_function: parameter: x lambda_arrow: -> expression: - function: function_name: function_name_identifier: UPPER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: LOWER function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: x array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/rollback.sql000066400000000000000000000000321503426445100234260ustar00rootroot00000000000000ROLLBACK; ROLLBACK WORK; sqlfluff-3.4.2/test/fixtures/dialects/trino/rollback.yml000066400000000000000000000011221503426445100234310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a9ad4899f06283cb3341a99a1a72249d1f5612c3c8665b4bab6ddf576376c888 file: - statement: transaction_statement: keyword: ROLLBACK - statement_terminator: ; - statement: transaction_statement: - keyword: ROLLBACK - keyword: WORK - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/row_datatype.sql000066400000000000000000000002431503426445100243430ustar00rootroot00000000000000SELECT name, CAST(ROW(price, store) AS ROW(price REAL, store VARCHAR)) AS data_row FROM customers; select CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE)).x; sqlfluff-3.4.2/test/fixtures/dialects/trino/row_datatype.yml000066400000000000000000000074601503426445100243550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 446c1e57c1da93b9914fab19ffa2a5749bb91c7b453e61296adc0d53a790adc9 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: price - comma: ',' - expression: column_reference: naked_identifier: store - end_bracket: ) keyword: AS data_type: struct_type: keyword: ROW struct_type_schema: bracketed: - start_bracket: ( - parameter: price - data_type: keyword: REAL - comma: ',' - parameter: store - data_type: keyword: VARCHAR - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: data_row from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2.0' - end_bracket: ) keyword: AS data_type: struct_type: keyword: ROW struct_type_schema: bracketed: - start_bracket: ( - parameter: x - data_type: keyword: BIGINT - comma: ',' - parameter: y - data_type: keyword: DOUBLE - end_bracket: ) end_bracket: ) semi_structured_expression: dot: . naked_identifier: x - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/select_interval.sql000066400000000000000000000004111503426445100250210ustar00rootroot00000000000000SELECT CURRENT_DATE + INTERVAL '1' YEAR; ; SELECT CURRENT_DATE - INTERVAL '14' MONTH; ; SELECT CURRENT_DATE + INTERVAL '32' DAY; ; SELECT CURRENT_TIME - INTERVAL '13' HOUR; ; SELECT CURRENT_TIME + INTERVAL '64' MINUTE; ; SELECT CURRENT_TIME - INTERVAL '61' SECOND; sqlfluff-3.4.2/test/fixtures/dialects/trino/select_interval.yml000066400000000000000000000052051503426445100250310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9ca5f4d19bdf0807ff145d1c8d8cfcc632eca6c7c2ea8fb6accb29af5f7ea8bf file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'1'" - keyword: YEAR - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'14'" - keyword: MONTH - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'32'" - keyword: DAY - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'13'" - keyword: HOUR - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'64'" - keyword: MINUTE - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'61'" - keyword: SECOND - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/set_session.sql000066400000000000000000000001031503426445100241720ustar00rootroot00000000000000SET SESSION name = 'expression'; SET SESSION catalog.value = 100; sqlfluff-3.4.2/test/fixtures/dialects/trino/set_session.yml000066400000000000000000000016371503426445100242110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 345a8a26081492c5e23d7a3bad66ad23eaf1c291b9bdf9f70ec4e9772a087c9a file: - statement: set_session_statement: - keyword: SET - keyword: SESSION - parameter: name - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'expression'" - statement_terminator: ; - statement: set_session_statement: - keyword: SET - keyword: SESSION - parameter: catalog - dot: . - parameter: value - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '100' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/start_transaction.sql000066400000000000000000000003351503426445100254050ustar00rootroot00000000000000START TRANSACTION; START TRANSACTION ISOLATION LEVEL REPEATABLE READ; START TRANSACTION READ WRITE; START TRANSACTION ISOLATION LEVEL READ COMMITTED, READ ONLY; START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE; sqlfluff-3.4.2/test/fixtures/dialects/trino/start_transaction.yml000066400000000000000000000025411503426445100254100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b1f490adc0ae53134c1c5c6ccdad64f4eeaca844b57196f898c1051aaac319e3 file: - statement: transaction_statement: - keyword: START - keyword: TRANSACTION - statement_terminator: ; - statement: transaction_statement: - keyword: START - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: REPEATABLE - keyword: READ - statement_terminator: ; - statement: transaction_statement: - keyword: START - keyword: TRANSACTION - keyword: READ - keyword: WRITE - statement_terminator: ; - statement: transaction_statement: - keyword: START - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: COMMITTED - comma: ',' - keyword: READ - keyword: ONLY - statement_terminator: ; - statement: transaction_statement: - keyword: START - keyword: TRANSACTION - keyword: READ - keyword: WRITE - comma: ',' - keyword: ISOLATION - keyword: LEVEL - keyword: SERIALIZABLE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/timestamp_resolutions.sql000066400000000000000000000024401503426445100263130ustar00rootroot00000000000000-- Trino supports timestamp datatypes at various levels of -- precision from TIMESTAMP(0) to TIMESTAMP(12). These -- correspond to precision from seconds to picoseconds. -- Bare TIMESTAMP is an alias for TIMESTAMP(3). -- https://trino.io/docs/current/language/types.html#timestamp -- https://trino.io/docs/current/language/types.html#timestamp-p -- Basic Timestamp SELECT CAST((TIMESTAMP '2012-10-31 01:00 UTC') AS TIMESTAMP); -- Timestamp with minimum precision (seconds) SELECT CAST((TIMESTAMP '2012-10-31 01:00 UTC') AS TIMESTAMP(0)); -- Timestamp with maximum precision (picoseconds) SELECT CAST((TIMESTAMP '2012-10-31 01:00 UTC') AS TIMESTAMP(12)); -- Timestamp with time zone SELECT CAST( (TIMESTAMP '2001-08-22 03:04:05.321 America/Chicago') AS TIMESTAMP WITH TIME ZONE ); -- Timestamp without time zone SELECT CAST( (TIMESTAMP '2001-08-22 03:04:05.321 America/Chicago') AS TIMESTAMP WITHOUT TIME ZONE ); -- Timestamp with precision and time zone SELECT CAST( (TIMESTAMP '2001-08-22 03:04:05.321 America/Chicago') AS TIMESTAMP(6) WITH TIME ZONE ); -- Timestamp with precision and without time zone SELECT CAST( (TIMESTAMP '2001-08-22 03:04:05.321 America/Chicago') AS TIMESTAMP(6) WITHOUT TIME ZONE ); sqlfluff-3.4.2/test/fixtures/dialects/trino/timestamp_resolutions.yml000066400000000000000000000147451503426445100263300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 351e0f55b3561e752332d82fa7478e3c75b97e39a1f457890aceb9c14db716e0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2012-10-31 01:00 UTC'" end_bracket: ) keyword: AS data_type: keyword: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2012-10-31 01:00 UTC'" end_bracket: ) keyword: AS data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '0' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2012-10-31 01:00 UTC'" end_bracket: ) keyword: AS data_type: keyword: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '12' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2001-08-22 03:04:05.321 America/Chicago'" end_bracket: ) keyword: AS data_type: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2001-08-22 03:04:05.321 America/Chicago'" end_bracket: ) keyword: AS data_type: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2001-08-22 03:04:05.321 America/Chicago'" end_bracket: ) keyword: AS data_type: - keyword: TIMESTAMP - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: WITH - keyword: TIME - keyword: ZONE end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST function_contents: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: keyword: TIMESTAMP date_constructor_literal: "'2001-08-22 03:04:05.321 America/Chicago'" end_bracket: ) keyword: AS data_type: - keyword: TIMESTAMP - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: WITHOUT - keyword: TIME - keyword: ZONE end_bracket: ) - 
statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/unnest_with_ordinality.sql000066400000000000000000000023561503426445100264550ustar00rootroot00000000000000-- Single UNNEST argument, 2-argument ordinality. WITH t AS ( SELECT ARRAY['a', 'b', 'c'] AS array_column ) SELECT u.element, u.ordinality FROM t CROSS JOIN UNNEST(t.array_column) WITH ORDINALITY AS u(element, ordinality); -- Single UNNEST argument, 2-argument ordinality, no 'AS' after ORDINALITY WITH t AS ( SELECT ARRAY['a', 'b', 'c'] AS array_column ) SELECT u.element, u.ordinality FROM t CROSS JOIN UNNEST(t.array_column) WITH ORDINALITY u(element, ordinality); -- Single UNNEST argument, 2-argument ordinality, space between "u" and ordinality spec. WITH t AS ( SELECT ARRAY['a', 'b', 'c'] AS array_column ) SELECT u.element, u.ordinality FROM t CROSS JOIN UNNEST(t.array_column) WITH ORDINALITY u (element, ordinality); -- Multiple UNNEST arguments, 3-argument ordinality. WITH t AS ( SELECT ARRAY['a', 'b', 'c'] AS array_column1, ARRAY[1, 2] AS array_column2 ) SELECT u.element1, u.element2, u.ordinality FROM t CROSS JOIN UNNEST(t.array_column1, t.array_column2) WITH ORDINALITY AS u(element1, element2, ordinality); -- A basic UNNEST, no ORDINALITY WITH t AS ( SELECT ARRAY[1, 2] AS array_column ) SELECT u.number FROM t CROSS JOIN UNNEST(t.array_column) AS u (number); sqlfluff-3.4.2/test/fixtures/dialects/trino/unnest_with_ordinality.yml000066400000000000000000000342271503426445100264610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bee0af621e962311b7c1d4e7880c5f20f3d7622a56e5fc5d1f2f26bfd1831108 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: element - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: ordinality from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: array_column end_bracket: ) withordinality_clause: - keyword: WITH - keyword: ORDINALITY alias_expression: alias_operator: keyword: AS naked_identifier: u bracketed: start_bracket: ( identifier_list: - naked_identifier: element - comma: ',' - naked_identifier: ordinality end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: element - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: ordinality from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: array_column end_bracket: ) withordinality_clause: - keyword: WITH - keyword: ORDINALITY alias_expression: naked_identifier: u bracketed: start_bracket: ( identifier_list: - naked_identifier: element - comma: ',' - naked_identifier: ordinality end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: element - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: ordinality from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: array_column end_bracket: ) withordinality_clause: - keyword: WITH - keyword: ORDINALITY alias_expression: naked_identifier: u bracketed: start_bracket: ( identifier_list: - naked_identifier: element - comma: ',' - naked_identifier: ordinality end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column1 - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column2 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: element1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: element2 - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: ordinality from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: array_column1 - comma: ',' - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: array_column2 - end_bracket: ) withordinality_clause: - keyword: WITH - keyword: ORDINALITY alias_expression: alias_operator: keyword: AS naked_identifier: u bracketed: start_bracket: ( identifier_list: - naked_identifier: element1 - comma: ',' - naked_identifier: element2 - comma: ',' - naked_identifier: ordinality end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: alias_operator: keyword: AS naked_identifier: array_column end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: array_column end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: u bracketed: start_bracket: ( identifier_list: naked_identifier: number end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/values.sql000066400000000000000000000002221503426445100231350ustar00rootroot00000000000000VALUES 42, 13; VALUES 1, 2, 3; VALUES 5, 2, 4, 1, 3; VALUES (1, 'a'), (2, 'b'), (3, 'c'); VALUES (26, 'POLAND', 3, 'no comment'); sqlfluff-3.4.2/test/fixtures/dialects/trino/values.yml000066400000000000000000000042671503426445100231540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 53a12e94e9c580f8189e086531cbc800ba2050da2765ef6534893bf3d39c5631 file: - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '42' - comma: ',' - expression: numeric_literal: '13' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'c'" end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: VALUES expression: bracketed: - start_bracket: ( - numeric_literal: '26' - comma: ',' - quoted_literal: "'POLAND'" - comma: ',' - numeric_literal: '3' - comma: ',' - quoted_literal: "'no comment'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/trino/within_group.sql000066400000000000000000000016461503426445100243670ustar00rootroot00000000000000--https://trino.io/docs/current/functions/aggregate.html#array_agg SELECT listagg(value, ',') WITHIN GROUP (ORDER BY value) csv_value FROM (VALUES 'a', 'c', 'b') t(value); SELECT listagg(value, ',' ON OVERFLOW ERROR) WITHIN GROUP (ORDER BY value) csv_value FROM (VALUES 'a', 'b', 'c') t(value); SELECT LISTAGG(value, ',' ON OVERFLOW TRUNCATE '.....' 
WITH COUNT) WITHIN GROUP (ORDER BY value) FROM (VALUES 'a', 'b', 'c') t(value); SELECT id, LISTAGG(value, ',') WITHIN GROUP (ORDER BY o) csv_value FROM (VALUES (100, 1, 'a'), (200, 3, 'c'), (200, 2, 'b') ) t(id, o, value) GROUP BY id ORDER BY id; -- Handle a WITHIN GROUP followed by a FILTER SELECT id, LISTAGG(value, ',') WITHIN GROUP (ORDER BY o) FILTER (WHERE o IS NOT NULL) AS csv_value FROM (VALUES (100, 2, 'a'), (100, 1, 'b'), (200, NULL, 'c'), (200, 2, 'a'), (300, NULL, 'b'), (300, 1, 'c') ) t(id, o, value) GROUP BY id ORDER BY id; sqlfluff-3.4.2/test/fixtures/dialects/trino/within_group.yml000066400000000000000000000324111503426445100243630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 356be70db6bb5939094d8a2f1e6b7e75c328d902cb378a71ac54d4315ecc28d9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: value end_bracket: ) alias_expression: naked_identifier: csv_value from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'c'" - comma: ',' - expression: quoted_literal: "'b'" end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: listagg function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: ERROR - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: value end_bracket: ) alias_expression: naked_identifier: csv_value from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: quoted_literal: "'c'" end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: TRUNCATE - quoted_literal: 
"'.....'" - keyword: WITH - keyword: COUNT - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: value end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: quoted_literal: "'c'" end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o end_bracket: ) alias_expression: naked_identifier: csv_value from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: - start_bracket: ( - numeric_literal: '100' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - numeric_literal: '3' - comma: ',' - quoted_literal: "'c'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - numeric_literal: '2' - comma: ',' - quoted_literal: "'b'" - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: o - comma: ',' - naked_identifier: value end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: value - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: - column_reference: naked_identifier: o - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: csv_value from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: bracketed: - start_bracket: ( - numeric_literal: '100' - comma: ',' - numeric_literal: '2' - comma: ',' - quoted_literal: "'a'" - end_bracket: ) - comma: ',' - 
expression: bracketed: - start_bracket: ( - numeric_literal: '100' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'b'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - null_literal: 'NULL' - comma: ',' - quoted_literal: "'c'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '200' - comma: ',' - numeric_literal: '2' - comma: ',' - quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '300' - comma: ',' - null_literal: 'NULL' - comma: ',' - quoted_literal: "'b'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - numeric_literal: '300' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'c'" - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: o - comma: ',' - naked_identifier: value end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/000077500000000000000000000000001503426445100207515ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/tsql/.sqlfluff000066400000000000000000000000321503426445100225670ustar00rootroot00000000000000[sqlfluff] dialect = tsql sqlfluff-3.4.2/test/fixtures/dialects/tsql/add_index.sql000066400000000000000000000040411503426445100234100ustar00rootroot00000000000000IF NOT EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_VIMR_INFECTIOUS_PEOPLE') CREATE NONCLUSTERED INDEX IX_INTER_VIMR_INFECTIOUS_PEOPLE ON dbo.VIMR_INFECTIOUS_PEOPLE(DATE_LAST_INSERTED); GO IF NOT EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT') CREATE NONCLUSTERED INDEX IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT ON dbo.FOUNDATION_NICE_IC_INTAKE_COUNT(DATE_LAST_INSERTED); GO IF EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_VIMR_REPRODUCTION_NUMBER') CREATE CLUSTERED INDEX IX_INTER_VIMR_REPRODUCTION_NUMBER ON dbo.VIMR_INFECTIOUS_PEOPLE(DATE_LAST_INSERTED); GO CREATE NONCLUSTERED INDEX [ind_1] ON [schema1].[table1]([column1] ASC) INCLUDE([column2]) WHERE ([column3] IS NULL); GO CREATE NONCLUSTERED INDEX [NI_name] ON [schema1].[table1]([column1] DESC) ON [PRIMARY]; GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1], [column2]) WITH FILLFACTOR = 80; GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (PAD_INDEX = OFF, SORT_IN_TEMPDB = ON); CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (ONLINE = ON); GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (ONLINE = ON (WAIT_AT_LOW_PRIORITY ( MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = NONE ) ) ); GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH DATA_COMPRESSION = ROW ON PARTITIONS (2, 4, 6 TO 8) GO CREATE STATISTICS Stats_Population ON [Reporting].[Population] ([ID],[Facility],[Population]) GO UPDATE STATISTICS Reporting.Population Stats_Population; GO UPDATE STATISTICS Reporting.Population (Stats_Facility, Stats_Population); GO UPDATE STATISTICS Reporting.Population (Stats_Facility, Stats_Population) WITH FULLSCAN; GO UPDATE STATISTICS Reporting.Population (Stats_Facility, Stats_Population) WITH RESAMPLE; GO DROP STATISTICS 
Reporting.Population.Stats_Population GO DROP INDEX IX_INTER_VIMR_REPRODUCTION_NUMBER ON dbo.VIMR_INFECTIOUS_PEOPLE; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/add_index.yml000066400000000000000000000405521503426445100234210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 333acdf9243a058e27457a9e769f500611d59400530ba6f5e77acc40f92644c0 file: - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_VIMR_INFECTIOUS_PEOPLE'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_INFECTIOUS_PEOPLE - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FOUNDATION_NICE_IC_INTAKE_COUNT - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . 
- naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_VIMR_REPRODUCTION_NUMBER'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_REPRODUCTION_NUMBER - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: quoted_identifier: '[ind_1]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' keyword: ASC end_bracket: ) - keyword: INCLUDE - bracketed: start_bracket: ( column_reference: quoted_identifier: '[column2]' end_bracket: ) - where_clause: keyword: WHERE bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[column3]' keyword: IS null_literal: 'NULL' end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: quoted_identifier: '[NI_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' keyword: DESC end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: quoted_identifier: '[column1]' - comma: ',' - index_column_definition: quoted_identifier: '[column2]' - end_bracket: ) - relational_index_options: - keyword: WITH - keyword: FILLFACTOR - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . 
- quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: PAD_INDEX - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: SORT_IN_TEMPDB - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - max_duration: - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - keyword: MINUTES - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: - keyword: WITH - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: ROW - on_partitions_clause: - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '6' - keyword: TO - numeric_literal: '8' - end_bracket: ) - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: STATISTICS - index_reference: naked_identifier: Stats_Population - keyword: 'ON' - table_reference: - quoted_identifier: '[Reporting]' - dot: . 
- quoted_identifier: '[Population]' - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: quoted_identifier: '[ID]' - comma: ',' - index_column_definition: quoted_identifier: '[Facility]' - comma: ',' - index_column_definition: quoted_identifier: '[Population]' - end_bracket: ) - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - naked_identifier: Stats_Population - statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - keyword: WITH - keyword: FULLSCAN statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - keyword: WITH - keyword: RESAMPLE statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_statement: - keyword: DROP - keyword: STATISTICS - index_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - dot: . - naked_identifier: Stats_Population - go_statement: keyword: GO - batch: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_REPRODUCTION_NUMBER - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_and_drop.sql000066400000000000000000000002651503426445100244520ustar00rootroot00000000000000ALTER TABLE [REPORTING].[UN_NEW] SWITCH to [REPORTING].[UN_BASE] WITH (TRUNCATE_TARGET = ON); DROP TABLE [REPORTING].[UN_NEW]; ALTER TABLE table_name DROP COLUMN column1, column2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_and_drop.yml000066400000000000000000000032051503426445100244510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8b6e1aaba8aed7dfd5d931dce3d5026c7baabe0b7d291ea7d007bacacc5bff2 file: batch: - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: - quoted_identifier: '[REPORTING]' - dot: . - quoted_identifier: '[UN_NEW]' - keyword: SWITCH - keyword: to - object_reference: - quoted_identifier: '[REPORTING]' - dot: . 
- quoted_identifier: '[UN_BASE]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRUNCATE_TARGET - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[REPORTING]' - dot: . - quoted_identifier: '[UN_NEW]' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_index.sql000066400000000000000000000054111503426445100237710ustar00rootroot00000000000000ALTER INDEX index1 ON table1 REBUILD; ALTER INDEX ALL ON table1 REBUILD; ALTER INDEX idxcci_cci_target ON cci_target REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0 WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE; ALTER INDEX cci_fact3ON ON fact3 REBUILD PARTITION = 12; ALTER INDEX cci_SimpleTable ON SimpleTable REBUILD WITH (DATA_COMPRESSION = COLUMNSTORE_ARCHIVE); ALTER INDEX cci_SimpleTable ON SimpleTable REBUILD WITH (DATA_COMPRESSION = COLUMNSTORE); ALTER INDEX PK_ProductPhoto_ProductPhotoID ON Production.ProductPhoto REORGANIZE WITH (LOB_COMPACTION = ON); ALTER INDEX IX_Employee_ManagerID ON HumanResources.Employee DISABLE; ALTER INDEX IX_INDEX1 ON T1 REBUILD WITH (XML_COMPRESSION = ON); ALTER INDEX ALL ON Production.Product REBUILD WITH (FILLFACTOR = 80, SORT_IN_TEMPDB = ON, STATISTICS_NORECOMPUTE = ON); ALTER INDEX test_idx on test_table REBUILD WITH (ONLINE = ON, MAXDOP = 1, RESUMABLE = ON); ALTER INDEX test_idx on test_table PAUSE; ALTER INDEX test_idx on test_table ABORT; ALTER INDEX test_idx on test_table REBUILD WITH (XML_COMPRESSION = ON ON PARTITIONS (2, 4, 6 TO 8)); ALTER INDEX test_idx on test_table REBUILD WITH (DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5)); ALTER INDEX test_idx on test_table REBUILD WITH (DATA_COMPRESSION = NONE ON PARTITIONS (1)); ALTER INDEX IX_TransactionHistory_TransactionDate ON Production.TransactionHistory REBUILD Partition = 5 WITH (ONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF))); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = OFF); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON, IGNORE_DUP_KEY = OFF, STATISTICS_NORECOMPUTE = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 0); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 100 minutes); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAXDOP = 100, MAX_DURATION = 500 minutes, WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF)); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAX_DURATION = 500); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH 
(WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF)); sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_index.yml000066400000000000000000000427431503426445100240040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae559c6095a99da7a6eaa94388edbca341d6980ecac15a47f886246db5ce0e8e file: batch: - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: index1 - keyword: 'ON' - table_reference: naked_identifier: table1 - keyword: REBUILD - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: 'ON' - table_reference: naked_identifier: table1 - keyword: REBUILD - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: idxcci_cci_target - keyword: 'ON' - table_reference: naked_identifier: cci_target - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_fact3ON - keyword: 'ON' - table_reference: naked_identifier: fact3 - keyword: REBUILD - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '12' - statement_terminator: ; - statement: 
alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_SimpleTable - keyword: 'ON' - table_reference: naked_identifier: SimpleTable - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNSTORE_ARCHIVE - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_SimpleTable - keyword: 'ON' - table_reference: naked_identifier: SimpleTable - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNSTORE - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: PK_ProductPhoto_ProductPhotoID - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductPhoto - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: LOB_COMPACTION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_Employee_ManagerID - keyword: 'ON' - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee - keyword: DISABLE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_INDEX1 - keyword: 'ON' - table_reference: naked_identifier: T1 - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: Product - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILLFACTOR - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' - comma: ',' - keyword: SORT_IN_TEMPDB - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: STATISTICS_NORECOMPUTE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: MAXDOP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - keyword: RESUMABLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: PAUSE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: ABORT - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '6' - keyword: TO - numeric_literal: '8' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: PAGE - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: 'ON' - keyword: PARTITIONS - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_TransactionHistory_TransactionDate - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: TransactionHistory - keyword: REBUILD - keyword: Partition - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: ALLOW_ROW_LOCKS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: ALLOW_PAGE_LOCKS - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: OPTIMIZE_FOR_SEQUENTIAL_KEY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: IGNORE_DUP_KEY - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: STATISTICS_NORECOMPUTE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: start_bracket: ( keyword: COMPRESSION_DELAY comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: COMPRESSION_DELAY - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - keyword: minutes - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: - start_bracket: ( - keyword: MAXDOP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - comma: ',' - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '500' - keyword: minutes - comma: ',' - keyword: WAIT_AT_LOW_PRIORITY - bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - 
comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: start_bracket: ( keyword: MAX_DURATION comparison_operator: raw_comparison_operator: '=' numeric_literal: '500' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_table.sql000066400000000000000000000104171503426445100237530ustar00rootroot00000000000000CREATE TABLE dbo.doc_exa (column_a INT) ; GO ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL ; GO CREATE TABLE dbo.doc_exc (column_a INT) ; GO ALTER TABLE dbo.doc_exc ADD column_b VARCHAR(20) NULL CONSTRAINT exb_unique UNIQUE, DROP COLUMN column_a, DROP COLUMN IF EXISTS column_c ; GO EXEC sp_help doc_exc ; GO DROP TABLE dbo.doc_exc ; GO CREATE TABLE dbo.doc_exz (column_a INT, column_b INT) ; GO INSERT INTO dbo.doc_exz (column_a) VALUES (7) ; GO ALTER TABLE dbo.doc_exz ADD CONSTRAINT col_b_def DEFAULT 50 FOR column_b ; GO INSERT INTO dbo.doc_exz (column_a) VALUES (10) ; GO SELECT * FROM dbo.doc_exz ; GO DROP TABLE dbo.doc_exz ; GO ALTER TABLE Production.TransactionHistoryArchive ADD CONSTRAINT PK_TransactionHistoryArchive_TransactionID PRIMARY KEY CLUSTERED (TransactionID) GO ALTER TABLE Production.TransactionHistoryArchive ALTER COLUMN rec_number VARCHAR(36) GO ALTER TABLE Production.TransactionHistoryArchive DROP CONSTRAINT PK_TransactionHistoryArchive_TransactionID ALTER TABLE Production.TransactionHistoryArchive DROP CONSTRAINT IF EXISTS PK_TransactionHistoryArchive_TransactionID ALTER TABLE [Production].[ProductCostHistory] WITH CHECK ADD CONSTRAINT [FK_ProductCostHistory_Product_ProductID] FOREIGN KEY([ProductID]) REFERENCES [Production].[Product] ([ProductID]) GO ALTER TABLE [Production].[ProductCostHistory] CHECK CONSTRAINT [FK_ProductCostHistory_Product_ProductID] GO ALTER TABLE my_table ADD my_col_1 INT , my_col_2 INT GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = ON); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = INFINITE )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = 1 YEAR )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = 7 MONTHS )); GO ALTER TABLE TestTable SET 
(FILESTREAM_ON = "NULL"); GO ALTER TABLE TestTable SET (FILESTREAM_ON = "default"); GO ALTER TABLE TestTable SET (FILESTREAM_ON = PartitionSchemeName); GO ALTER TABLE TestTable SET (DATA_DELETION = ON); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 1 YEAR)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = INFINITE)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 YEARS)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 DAYS)); GO -- computed columm -- https://learn.microsoft.com/en-us/sql/relational-databases/tables/specify-computed-columns-in-a-table?view=sql-server-ver16 ALTER TABLE dbo.Products ADD RetailValue AS [QtyAvailable] * UnitPrice * 1.5 PERSISTED; GO ALTER TABLE dbo.Products ADD RetailValue AS (QtyAvailable * [UnitPrice] * 1.5) PERSISTED NOT NULL; GO ALTER TABLE dbo.Products ADD InventoyDate AS CAST([InventoryTs] AS date); GO ALTER TABLE [HangFire].[JobParameter] ADD CONSTRAINT [FK_HangFire_JobParameter_Job] FOREIGN KEY ([JobId]) REFERENCES [HangFire].[Job] ([Id]) ON UPDATE CASCADE ON DELETE CASCADE; GO -- Drop multiple columns in one statement ALTER TABLE UserData DROP COLUMN [StrSkill], [StrItem], [StrSerial]; ALTER TABLE UserData DROP COLUMN IF EXISTS StrSkill, StrItem, StrSerial; -- Check hexadecimal defaults in constraints CREATE TABLE [dbo].[UserData] ( [strUserId] [char](21) NOT NULL, [strItem] [binary](400) NULL, [strSkill] [binary](400) NULL, CONSTRAINT PK_UserData PRIMARY KEY CLUSTERED ([strUserId] ASC) ); ALTER TABLE [dbo].[UserData] ADD CONSTRAINT [DF_UserData_strSkill] DEFAULT (0x00) FOR [strSkill]; GO ALTER TABLE [TestTable] DROP PERIOD FOR SYSTEM_TIME; ALTER TABLE [TestTable] ADD PERIOD FOR SYSTEM_TIME (StartDate, EndDate); sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_table.yml000066400000000000000000000746221503426445100237650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5589a876d8cca343f15539310fb540c839f80b2f4ad9f2525b28d8910794430c file: - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exa - bracketed: start_bracket: ( column_definition: naked_identifier: column_a data_type: data_type_identifier: INT end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exa - keyword: ADD - column_definition: naked_identifier: column_b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) column_constraint_segment: keyword: 'NULL' statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: doc_exc - bracketed: start_bracket: ( column_definition: naked_identifier: column_a data_type: data_type_identifier: INT end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exc - keyword: ADD - column_definition: - naked_identifier: column_b - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: exb_unique - keyword: UNIQUE - comma: ',' - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_a - comma: ',' - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: column_c statement_terminator: ; - go_statement: keyword: GO - batch: statement: execute_script_statement: keyword: EXEC object_reference: naked_identifier: sp_help expression: column_reference: naked_identifier: doc_exc statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exc - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: - start_bracket: ( - column_definition: naked_identifier: column_a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: column_b data_type: data_type_identifier: INT - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: start_bracket: ( column_reference: naked_identifier: column_a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - keyword: ADD - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: col_b_def - keyword: DEFAULT - numeric_literal: '50' - keyword: FOR - column_reference: naked_identifier: column_b statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: start_bracket: ( column_reference: naked_identifier: column_a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: doc_exz statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: PK_TransactionHistoryArchive_TransactionID - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: TransactionID end_bracket: ) - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: ALTER - keyword: COLUMN - column_definition: naked_identifier: rec_number data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '36' end_bracket: ) - go_statement: keyword: GO - batch: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: PK_TransactionHistoryArchive_TransactionID - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: DROP - keyword: CONSTRAINT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: PK_TransactionHistoryArchive_TransactionID - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[Production]' - dot: . - quoted_identifier: '[ProductCostHistory]' - keyword: WITH - keyword: CHECK - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[FK_ProductCostHistory_Product_ProductID]' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '[ProductID]' end_bracket: ) - references_constraint_grammar: keyword: REFERENCES table_reference: - quoted_identifier: '[Production]' - dot: . - quoted_identifier: '[Product]' bracketed: start_bracket: ( column_reference: quoted_identifier: '[ProductID]' end_bracket: ) - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[Production]' - dot: . 
- quoted_identifier: '[ProductCostHistory]' - keyword: CHECK - keyword: CONSTRAINT - object_reference: quoted_identifier: '[FK_ProductCostHistory_Product_ProductID]' - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: ADD - column_definition: naked_identifier: my_col_1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: my_col_2 data_type: data_type_identifier: INT - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: naked_identifier: TestTableHistory end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - 
comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - date_part: YEAR - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: MONTHS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: quoted_identifier: '"NULL"' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: quoted_identifier: '"default"' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: naked_identifier: PartitionSchemeName end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: start_bracket: ( keyword: FILTER_COLUMN comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: ColumnName end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - 
numeric_literal: '1' - date_part: YEAR - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: YEARS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: DAYS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Products - keyword: ADD - computed_column_definition: - naked_identifier: RetailValue - keyword: AS - expression: - column_reference: quoted_identifier: '[QtyAvailable]' - binary_operator: '*' - column_reference: naked_identifier: UnitPrice - binary_operator: '*' - numeric_literal: '1.5' - keyword: PERSISTED statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Products - keyword: ADD - computed_column_definition: - naked_identifier: RetailValue - keyword: AS - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: QtyAvailable - binary_operator: '*' - column_reference: quoted_identifier: '[UnitPrice]' - binary_operator: '*' - numeric_literal: '1.5' end_bracket: ) - keyword: PERSISTED - keyword: NOT - keyword: 'NULL' statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: Products - keyword: ADD - computed_column_definition: naked_identifier: InventoyDate keyword: AS function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[InventoryTs]' keyword: AS data_type: data_type_identifier: date end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[HangFire]' - dot: . - quoted_identifier: '[JobParameter]' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[FK_HangFire_JobParameter_Job]' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '[JobId]' end_bracket: ) - references_constraint_grammar: - keyword: REFERENCES - table_reference: - quoted_identifier: '[HangFire]' - dot: . - quoted_identifier: '[Job]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Id]' end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: CASCADE statement_terminator: ; - go_statement: keyword: GO - batch: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: UserData - keyword: DROP - keyword: COLUMN - column_reference: quoted_identifier: '[StrSkill]' - comma: ',' - column_reference: quoted_identifier: '[StrItem]' - comma: ',' - column_reference: quoted_identifier: '[StrSerial]' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: UserData - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: StrSkill - comma: ',' - column_reference: naked_identifier: StrItem - comma: ',' - column_reference: naked_identifier: StrSerial - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[UserData]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[strUserId]' data_type: data_type_identifier: '[char]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '21' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[strItem]' data_type: data_type_identifier: '[binary]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '400' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[strSkill]' data_type: data_type_identifier: '[binary]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '400' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: PK_UserData - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[strUserId]' keyword: ASC end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[UserData]' - keyword: ADD - column_constraint_segment: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[DF_UserData_strSkill]' - keyword: DEFAULT - bracketed: start_bracket: ( numeric_literal: '0x00' end_bracket: ) - keyword: FOR - column_reference: quoted_identifier: '[strSkill]' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '[TestTable]' - keyword: DROP - period_segment: - keyword: PERIOD - keyword: FOR - keyword: SYSTEM_TIME - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '[TestTable]' - keyword: ADD - period_segment: - keyword: PERIOD - keyword: FOR - keyword: SYSTEM_TIME - bracketed: - start_bracket: ( - column_reference: naked_identifier: StartDate - comma: ',' - column_reference: naked_identifier: EndDate - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_table_switch.sql000066400000000000000000000013641503426445100253350ustar00rootroot00000000000000--TRUNCATE_TARGET is Azure Synapse Analytics specific ALTER TABLE [Facility].[PL_stage] SWITCH TO [Facility].[PL_BASE] WITH (TRUNCATE_TARGET = ON); -- https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql ALTER TABLE [PartitionTable] SWITCH PARTITION 1 TO NonPartitionTable; ALTER TABLE Orders SWITCH PARTITION 2 TO [OrdersHistory] PARTITION 2; ALTER TABLE Orders SWITCH PARTITION 3 TO [OrdersHistory] PARTITION 3; ALTER TABLE Orders SWITCH PARTITION 4 TO [OrdersHistory] PARTITION 4 WITH ( WAIT_AT_LOW_PRIORITY ( MAX_DURATION = 15 MINUTES, ABORT_AFTER_WAIT = NONE ) ); ALTER TABLE Orders SWITCH PARTITION 5 TO [OrdersHistory] PARTITION 5 WITH ( WAIT_AT_LOW_PRIORITY ( MAX_DURATION = 25, ABORT_AFTER_WAIT = SELF ) ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/alter_table_switch.yml000066400000000000000000000101071503426445100253320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f5c6dde85625695134ea93ec3cf15cb09894dcf3d84d9adc6472b88f8caff2b file: batch: - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: - quoted_identifier: '[Facility]' - dot: . - quoted_identifier: '[PL_stage]' - keyword: SWITCH - keyword: TO - object_reference: - quoted_identifier: '[Facility]' - dot: . 
- quoted_identifier: '[PL_BASE]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRUNCATE_TARGET - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: quoted_identifier: '[PartitionTable]' - keyword: SWITCH - keyword: PARTITION - numeric_literal: '1' - keyword: TO - object_reference: naked_identifier: NonPartitionTable - statement_terminator: ; - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: naked_identifier: Orders - keyword: SWITCH - keyword: PARTITION - numeric_literal: '2' - keyword: TO - object_reference: quoted_identifier: '[OrdersHistory]' - keyword: PARTITION - numeric_literal: '2' - statement_terminator: ; - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: naked_identifier: Orders - keyword: SWITCH - keyword: PARTITION - numeric_literal: '3' - keyword: TO - object_reference: quoted_identifier: '[OrdersHistory]' - keyword: PARTITION - numeric_literal: '3' - statement_terminator: ; - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: naked_identifier: Orders - keyword: SWITCH - keyword: PARTITION - numeric_literal: '4' - keyword: TO - object_reference: quoted_identifier: '[OrdersHistory]' - keyword: PARTITION - numeric_literal: '4' - keyword: WITH - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '15' - keyword: MINUTES - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: naked_identifier: Orders - keyword: SWITCH - keyword: PARTITION - numeric_literal: '5' - keyword: TO - object_reference: quoted_identifier: '[OrdersHistory]' - keyword: PARTITION - numeric_literal: '5' - keyword: WITH - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '25' - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/arithmetic_operations.sql000066400000000000000000000002601503426445100260640ustar00rootroot00000000000000DECLARE @I INT = 0; SELECT @I + 1; SELECT @I += 2; SELECT @I - 3; SELECT @I -= 4; SELECT @I * 5; SELECT @I *= 6; SELECT @I / 7; SELECT @I /= 8; SELECT @I % 9; SELECT @I %= 10; sqlfluff-3.4.2/test/fixtures/dialects/tsql/arithmetic_operations.yml000066400000000000000000000074611503426445100261000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
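# A minimal sketch of the regeneration workflow described in the note above,
# assuming the script is run from the repository root. Only the bare
# invocation below is taken from this note; any extra CLI options or paths
# would be assumptions, so check the script's --help before relying on them.
#
#     python test/generate_parse_fixture_yml.py
#
# Re-running it after adding or altering a .sql fixture regenerates the
# corresponding .yml parse tree and refreshes the "_hash" field so that the
# test suite accepts the change.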
_hash: fa20731e4aa3e225ef7f46049c46e10961ea9303f05321c808a85c949d51d6c2 file: batch: - statement: declare_segment: keyword: DECLARE parameter: '@I' data_type: data_type_identifier: INT comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '0' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: + numeric_literal: '1' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: - raw_comparison_operator: + - raw_comparison_operator: '=' numeric_literal: '2' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: '-' numeric_literal: '3' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: - raw_comparison_operator: '-' - raw_comparison_operator: '=' numeric_literal: '4' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: '*' numeric_literal: '5' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: - raw_comparison_operator: '*' - raw_comparison_operator: '=' numeric_literal: '6' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: / numeric_literal: '7' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: - raw_comparison_operator: / - raw_comparison_operator: '=' numeric_literal: '8' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: '%' numeric_literal: '9' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: parameter: '@I' binary_operator: - raw_comparison_operator: '%' - raw_comparison_operator: '=' numeric_literal: '10' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end.sql000066400000000000000000000001731503426445100234050ustar00rootroot00000000000000BEGIN SELECT 'Weekend'; END BEGIN; SELECT 'Weekend'; END; BEGIN; SELECT 'Weekend'; END BEGIN SELECT 'Weekend'; END; sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end.yml000066400000000000000000000033071503426445100234110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ffbc0ecdf536b063d30d26fd5269c077bae06a66ef5d54682d67ba89b680e10f file: batch: - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement: begin_end_block: - keyword: BEGIN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end_nested.sql000066400000000000000000000000531503426445100247440ustar00rootroot00000000000000BEGIN BEGIN SELECT 'Weekend'; END END; sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end_nested.yml000066400000000000000000000015341503426445100247530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d185b989be2989cda4e3d79709a548d83029aeaba1963825f354fec67141687 file: batch: statement: begin_end_block: - keyword: BEGIN - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - keyword: END statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end_no_semicolon.sql000066400000000000000000000000251503426445100261450ustar00rootroot00000000000000BEGIN SELECT 1 END sqlfluff-3.4.2/test/fixtures/dialects/tsql/begin_end_no_semicolon.yml000066400000000000000000000012271503426445100261540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5fb241b3172412eea845ce528dfdcda03e3470d89a9411bacbacd61898db3b8f file: batch: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/bulk_insert.sql000066400000000000000000000003741503426445100240170ustar00rootroot00000000000000-- Plain BULK insert BULK INSERT my_schema.my_table FROM 'data.csv'; -- BULK insert with options BULK INSERT my_schema.my_table FROM 'data.csv' WITH ( BATCHSIZE = 1024, CHECK_CONSTRAINTS, ORDER (col1 ASC, col2 DESC), FORMAT = 'CSV' ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/bulk_insert.yml000066400000000000000000000034711503426445100240220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f59962f7301c0cb73e1f54786fce79b348cb48010870a5a23c83bb413f0c35ec file: batch: - statement: bulk_insert_statement: - keyword: BULK - keyword: INSERT - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: FROM - quoted_literal: "'data.csv'" - statement_terminator: ; - statement: bulk_insert_statement: - keyword: BULK - keyword: INSERT - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: FROM - quoted_literal: "'data.csv'" - bulk_insert_with_segment: keyword: WITH bracketed: - start_bracket: ( - keyword: BATCHSIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1024' - comma: ',' - keyword: CHECK_CONSTRAINTS - comma: ',' - keyword: ORDER - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - end_bracket: ) - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/cast_variable.sql000066400000000000000000000003721503426445100242730ustar00rootroot00000000000000DECLARE @DateNow date = ISNULL(Shared.GetESTDateTime(GETDATE()), GETDATE()) select enc.personid as personid, cast('1900-01-01' as datetime2(7)) as DataRefreshDate from encounter enc; declare @sample nvarchar(max) = cast(100 as nvarchar(max)) sqlfluff-3.4.2/test/fixtures/dialects/tsql/cast_variable.yml000066400000000000000000000104411503426445100242730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0d6ed4515b8bc43701b4f6fbd3ae50b9b9f43470178a3f4f0f741439f7d0e4e file: batch: - statement: declare_segment: keyword: DECLARE parameter: '@DateNow' data_type: data_type_identifier: date comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ISNULL function_contents: bracketed: - start_bracket: ( - expression: function: function_name: naked_identifier: Shared dot: . function_name_identifier: GetESTDateTime function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: enc - dot: . 
- naked_identifier: personid alias_expression: alias_operator: keyword: as naked_identifier: personid - comma: ',' - select_clause_element: function: function_name: keyword: cast function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'1900-01-01'" keyword: as data_type: data_type_identifier: datetime2 bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '7' end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: DataRefreshDate from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: encounter alias_expression: naked_identifier: enc statement_terminator: ; - statement: declare_segment: keyword: declare parameter: '@sample' data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: function: function_name: keyword: cast function_contents: bracketed: start_bracket: ( expression: numeric_literal: '100' keyword: as data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/collate.sql000066400000000000000000000007631503426445100231230ustar00rootroot00000000000000-- `COLLATE` in JOIN condition SELECT table1.col FROM table1 INNER JOIN table2 ON table1.col = table2.col COLLATE Latin1_GENERAL_CS_AS; SELECT table1.col FROM table1 INNER JOIN table2 ON table1.col COLLATE Latin1_GENERAL_CS_AS = table2.col; -- `COLLATE` in ORDER BY clause SELECT col FROM my_table ORDER BY col COLLATE Latin1_General_CS_AS_KS_WS DESC; -- `COLLATE` in SELECT SELECT col COLLATE Latin1_General_CS_AS_KS_WS FROM my_table; SELECT col COLLATE database_default FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/tsql/collate.yml000066400000000000000000000116271503426445100231260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dca7d5478d7b46625258401d664c36b95e94cffc6a4e046d2f72e1dc54ce2b34 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col - keyword: COLLATE - collation_reference: naked_identifier: Latin1_GENERAL_CS_AS statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - keyword: COLLATE - collation_reference: naked_identifier: Latin1_GENERAL_CS_AS - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table orderby_clause: - keyword: ORDER - keyword: BY - expression: column_reference: naked_identifier: col keyword: COLLATE collation_reference: naked_identifier: Latin1_General_CS_AS_KS_WS - keyword: DESC statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: col keyword: COLLATE collation_reference: naked_identifier: Latin1_General_CS_AS_KS_WS from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: col keyword: COLLATE collation_reference: naked_identifier: database_default from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/comment_blocks.sql000066400000000000000000000007421503426445100244740ustar00rootroot00000000000000/* birds /* live /* in /* nests */ */ */fdsfdlsjf */ /* although /* so /**/ do */ wasps */ SELECT foo /*nest/*/*nest/*/*/* nest nest /* */*/*/*/nest*/*/*/ FROM bar -- A recursive block comment (fun patternwise - no actual recursion going on ofc) /* A block comment looks like /* A block comment looks like /* A block comment looks like /* A block comment looks like /* A block comment looks like /* ... */ */ */ */ */ */ -- Test cases from #2086. /** **/ /** ( **/ /** ' **/ sqlfluff-3.4.2/test/fixtures/dialects/tsql/comment_blocks.yml000066400000000000000000000014441503426445100244760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fcb82f7553b7e5c50a26a1af66d146eed4778441a2332740b9f8388837cc56d9 file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar sqlfluff-3.4.2/test/fixtures/dialects/tsql/convert.sql000066400000000000000000000001661503426445100231550ustar00rootroot00000000000000SELECT CONVERT(nvarchar(100), first_column) as first, TRY_CONVERT(float, second_column) as second FROM some_table sqlfluff-3.4.2/test/fixtures/dialects/tsql/convert.yml000066400000000000000000000042011503426445100231510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a66102525efeec69422f37346be8e4f9742506e60d7c1cb30bd79d8b565e7614 file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: keyword: CONVERT function_contents: bracketed: start_bracket: ( data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) comma: ',' expression: column_reference: naked_identifier: first_column end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: first - comma: ',' - select_clause_element: function: function_name: keyword: TRY_CONVERT function_contents: bracketed: start_bracket: ( data_type: data_type_identifier: float comma: ',' expression: column_reference: naked_identifier: second_column end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: second from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-3.4.2/test/fixtures/dialects/tsql/copy.sql000066400000000000000000000035071503426445100224510ustar00rootroot00000000000000COPY INTO dbo.[lineitem] FROM 'https://unsecureaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.csv'; COPY INTO test_1 FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/' WITH ( FILE_TYPE = 'CSV', CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET=''), --CREDENTIAL should look something like this: --CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET='?sv=2018-03-28&ss=bfqt&srt=sco&sp=rl&st=2016-10-17T20%3A14%3A55Z&se=2021-10-18T20%3A19%3A00Z&sig=IEoOdmeYnE9%2FKiJDSHFSYsz4AkNa%2F%2BTx61FuQ%2FfKHefqoBE%3D'), FIELDQUOTE = '"', FIELDTERMINATOR=';', ROWTERMINATOR='0X0A', ENCODING = 'UTF8', DATEFORMAT = 'ymd', MAXERRORS = 10, ERRORFILE = '/errorsfolder',--path starting from the storage container IDENTITY_INSERT = 'ON' ); COPY INTO test_parquet FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/*.parquet' WITH ( FILE_FORMAT = myFileFormat, CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET='') ); COPY INTO t1 FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt', 'https://myaccount.blob.core.windows.net/myblobcontainer/folder1' WITH ( FILE_TYPE = 'CSV', CREDENTIAL=(IDENTITY= '@',SECRET=''), FIELDTERMINATOR = '|' ); COPY INTO dbo.myCOPYDemoTable FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt' WITH ( FILE_TYPE = 'CSV', CREDENTIAL = 
(IDENTITY = 'Managed Identity'), FIELDQUOTE = '"', FIELDTERMINATOR=',' ); COPY INTO [myCOPYDemoTable] FROM 'https://myaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.parquet' WITH ( FILE_TYPE = 'Parquet', CREDENTIAL = ( IDENTITY = 'Shared Access Signature', SECRET=''), AUTO_CREATE_TABLE = 'ON' ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/copy.yml000066400000000000000000000217701503426445100224550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e8e2bfb3a926d3f3d928cea74eec4915018efae08706b81043eecd3ea8f148bd file: batch: - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: dbo dot: . quoted_identifier: '[lineitem]' - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://unsecureaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.csv'" statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: test_1 - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILE_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Shared Access Signature'" - comma: ',' - keyword: SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - end_bracket: ) - comma: ',' - keyword: FIELDQUOTE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\"'" - comma: ',' - keyword: FIELDTERMINATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - comma: ',' - keyword: ROWTERMINATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0X0A'" - comma: ',' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - file_encoding: "'UTF8'" - comma: ',' - keyword: DATEFORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ymd'" - comma: ',' - keyword: MAXERRORS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ERRORFILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/errorsfolder'" - comma: ',' - keyword: IDENTITY_INSERT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ON'" - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: test_parquet - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/*.parquet'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: myFileFormat - comma: ',' - keyword: CREDENTIAL - 
comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Shared Access Signature'" - comma: ',' - keyword: SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: t1 - from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt'" - comma: ',' - from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILE_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'@'" - comma: ',' - keyword: SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - end_bracket: ) - comma: ',' - keyword: FIELDTERMINATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'|'" - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: myCOPYDemoTable - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILE_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: IDENTITY comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Managed Identity'" end_bracket: ) - comma: ',' - keyword: FIELDQUOTE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\"'" - comma: ',' - keyword: FIELDTERMINATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "','" - end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: quoted_identifier: '[myCOPYDemoTable]' - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: storage_location: external_location: "'https://myaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.parquet'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILE_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Parquet'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Shared Access Signature'" - comma: ',' - keyword: SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - end_bracket: ) - comma: ',' - keyword: AUTO_CREATE_TABLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ON'" - end_bracket: ) - statement_terminator: ; 
sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_columnstore_index.sql000066400000000000000000000013721503426445100265610ustar00rootroot00000000000000CREATE CLUSTERED COLUMNSTORE INDEX cci ON dbo.Orders WITH (MAXDOP = 2); CREATE CLUSTERED COLUMNSTORE INDEX cci_comp ON dbo.Compression WITH ( COMPRESSION_DELAY = 10 MINUTES ); CREATE CLUSTERED COLUMNSTORE INDEX cci_data ON dbo.DataColumns WITH ( DATA_COMPRESSION = COLUMNSTORE_ARCHIVE ); CREATE CLUSTERED COLUMNSTORE INDEX cci_online ON dbo.OnlineTable WITH ( ONLINE = ON ); CREATE NONCLUSTERED COLUMNSTORE INDEX ncci ON dbo.Orders (StockItemID, Quantity, UnitPrice, TaxRate) WITH ( ONLINE = ON ); CREATE COLUMNSTORE INDEX ncci2 ON dbo.Orders2 (StockItemID, Quantity, UnitPrice, TaxRate) WITH ( ONLINE = ON ); CREATE NONCLUSTERED COLUMNSTORE INDEX FIBillOfMaterialsWithEndDate ON dbo.Materials (ComponentID, StartDate) WHERE EndDate IS NOT NULL; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_columnstore_index.yml000066400000000000000000000135401503426445100265630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4e0508f9f5cf5774f0e80700de25b87366f6c2cddc77a309e1a0f29b429fdfb8 file: batch: - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: cci - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Orders - keyword: WITH - bracketed: start_bracket: ( keyword: MAXDOP comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: cci_comp - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Compression - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESSION_DELAY - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - keyword: MINUTES - end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: cci_data - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DataColumns - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNSTORE_ARCHIVE - end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: cci_online - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: OnlineTable - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: ncci - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: Orders - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: naked_identifier: StockItemID - comma: ',' - index_column_definition: naked_identifier: Quantity - comma: ',' - index_column_definition: naked_identifier: UnitPrice - comma: ',' - index_column_definition: naked_identifier: TaxRate - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: ncci2 - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Orders2 - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: naked_identifier: StockItemID - comma: ',' - index_column_definition: naked_identifier: Quantity - comma: ',' - index_column_definition: naked_identifier: UnitPrice - comma: ',' - index_column_definition: naked_identifier: TaxRate - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_columnstore_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - index_reference: naked_identifier: FIBillOfMaterialsWithEndDate - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Materials - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: naked_identifier: ComponentID - comma: ',' - index_column_definition: naked_identifier: StartDate - end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: EndDate - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_database_scoped_credential.sql000066400000000000000000000002611503426445100303070ustar00rootroot00000000000000CREATE DATABASE SCOPED CREDENTIAL AppCred WITH IDENTITY = 'Mary5'; CREATE DATABASE SCOPED CREDENTIAL AppCred WITH IDENTITY = 'Mary5', SECRET = ''; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_database_scoped_credential.yml000066400000000000000000000025501503426445100303140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 406c9a33536dc1357f7ee5d759e330bee8265e502bc14e3e438251a445d91276 file: batch: - statement: create_database_scoped_credential_statement: - keyword: CREATE - keyword: DATABASE - keyword: SCOPED - keyword: CREDENTIAL - object_reference: naked_identifier: AppCred - keyword: WITH - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Mary5'" - statement_terminator: ; - statement: create_database_scoped_credential_statement: - keyword: CREATE - keyword: DATABASE - keyword: SCOPED - keyword: CREDENTIAL - object_reference: naked_identifier: AppCred - keyword: WITH - keyword: IDENTITY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Mary5'" - comma: ',' - keyword: SECRET - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_data_source.sql000066400000000000000000000010551503426445100272110ustar00rootroot00000000000000CREATE EXTERNAL DATA SOURCE MyOracleServer WITH ( LOCATION = 'oracle://145.145.145.145:1521', CREDENTIAL = OracleProxyAccount, PUSHDOWN = ON ); CREATE EXTERNAL DATA SOURCE [OracleSalesSrvr] WITH ( LOCATION = 'oracle://145.145.145.145:1521', CONNECTION_OPTIONS = 'ImpersonateUser=%CURRENT_USER', CREDENTIAL = [OracleProxyCredential] ); CREATE EXTERNAL DATA SOURCE [external_data_source_name] WITH ( LOCATION = N'oracle://XE', CREDENTIAL = [OracleCredentialTest], CONNECTION_OPTIONS = N'TNSNamesFile=C:\Temp\tnsnames.ora;ServerName=XE' ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_data_source.yml000066400000000000000000000062141503426445100272150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1130bd8d6d273d4f49c52a4eb01877f3cb15d60c58084d81ef83729d23d9de3c file: batch: - statement: create_external_data_source_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: DATA - keyword: SOURCE - object_reference: naked_identifier: MyOracleServer - keyword: WITH - bracketed: - start_bracket: ( - table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "'oracle://145.145.145.145:1521'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: OracleProxyAccount - comma: ',' - keyword: PUSHDOWN - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_external_data_source_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: DATA - keyword: SOURCE - object_reference: quoted_identifier: '[OracleSalesSrvr]' - keyword: WITH - bracketed: - start_bracket: ( - table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "'oracle://145.145.145.145:1521'" - comma: ',' - keyword: CONNECTION_OPTIONS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ImpersonateUser=%CURRENT_USER'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - object_reference: quoted_identifier: '[OracleProxyCredential]' - end_bracket: ) - statement_terminator: ; - statement: create_external_data_source_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: DATA - keyword: SOURCE - object_reference: quoted_identifier: '[external_data_source_name]' - keyword: WITH - bracketed: - start_bracket: ( - table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'oracle://XE'" - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - object_reference: quoted_identifier: '[OracleCredentialTest]' - comma: ',' - keyword: CONNECTION_OPTIONS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'TNSNamesFile=C:\\Temp\\tnsnames.ora;ServerName=XE'" - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_file_format.sql000066400000000000000000000025341503426445100272120ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delimited#examples */ CREATE EXTERNAL FILE FORMAT textdelimited1 WITH ( FORMAT_TYPE = DELIMITEDTEXT, FORMAT_OPTIONS ( FIELD_TERMINATOR = '|', DATE_FORMAT = 'MM/dd/yyyy' ), DATA_COMPRESSION = 'org.apache.hadoop.io.compress.GzipCodec' ); CREATE EXTERNAL FILE FORMAT skipHeader_CSV WITH ( FORMAT_TYPE = DELIMITEDTEXT, FORMAT_OPTIONS ( FIELD_TERMINATOR = ',', STRING_DELIMITER = '"', FIRST_ROW = 2, USE_TYPE_DEFAULT = True ) ); CREATE EXTERNAL FILE FORMAT [rcfile1] WITH ( FORMAT_TYPE = RCFILE, SERDE_METHOD = 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe', DATA_COMPRESSION = 'org.apache.hadoop.io.compress.DefaultCodec' ); CREATE EXTERNAL FILE FORMAT orcfile1 WITH ( FORMAT_TYPE = ORC, DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec' ); CREATE EXTERNAL FILE FORMAT parquetfile1 WITH ( FORMAT_TYPE = PARQUET, DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec' ); CREATE EXTERNAL FILE FORMAT jsonFileFormat WITH ( FORMAT_TYPE = JSON, DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec' ); CREATE EXTERNAL FILE FORMAT DeltaFileFormat 
WITH ( FORMAT_TYPE = DELTA ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_file_format.yml000066400000000000000000000156071503426445100272210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c72f9b067f1e19be5b1a471617fccccd19cfb32df352e2ee477ef63d3c48572b file: batch: - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: textdelimited1 - keyword: WITH - bracketed: start_bracket: ( external_file_delimited_text_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: DELIMITEDTEXT - comma: ',' - keyword: FORMAT_OPTIONS - bracketed: - start_bracket: ( - external_file_delimited_text_format_options_clause: keyword: FIELD_TERMINATOR comparison_operator: raw_comparison_operator: '=' quoted_literal: "'|'" - comma: ',' - external_file_delimited_text_format_options_clause: keyword: DATE_FORMAT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'MM/dd/yyyy'" - end_bracket: ) - comma: ',' - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - file_compression: "'org.apache.hadoop.io.compress.GzipCodec'" end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: skipHeader_CSV - keyword: WITH - bracketed: start_bracket: ( external_file_delimited_text_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: DELIMITEDTEXT - comma: ',' - keyword: FORMAT_OPTIONS - bracketed: - start_bracket: ( - external_file_delimited_text_format_options_clause: keyword: FIELD_TERMINATOR comparison_operator: raw_comparison_operator: '=' quoted_literal: "','" - comma: ',' - external_file_delimited_text_format_options_clause: keyword: STRING_DELIMITER comparison_operator: raw_comparison_operator: '=' quoted_literal: "'\"'" - comma: ',' - external_file_delimited_text_format_options_clause: keyword: FIRST_ROW comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - external_file_delimited_text_format_options_clause: keyword: USE_TYPE_DEFAULT comparison_operator: raw_comparison_operator: '=' boolean_literal: 'True' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: quoted_identifier: '[rcfile1]' - keyword: WITH - bracketed: start_bracket: ( external_file_rc_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: RCFILE - comma: ',' - keyword: SERDE_METHOD - comparison_operator: raw_comparison_operator: '=' - serde_method: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'" - comma: ',' - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - file_compression: "'org.apache.hadoop.io.compress.DefaultCodec'" end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: orcfile1 - keyword: WITH - bracketed: 
start_bracket: ( external_file_orc_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: ORC - comma: ',' - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'" end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: parquetfile1 - keyword: WITH - bracketed: start_bracket: ( external_file_parquet_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: PARQUET - comma: ',' - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'" end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: jsonFileFormat - keyword: WITH - bracketed: start_bracket: ( external_file_json_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: JSON - comma: ',' - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'" end_bracket: ) - statement_terminator: ; - statement: create_external_file_format: - keyword: CREATE - keyword: EXTERNAL - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: DeltaFileFormat - keyword: WITH - bracketed: start_bracket: ( external_file_delta_clause: - keyword: FORMAT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: DELTA end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_table.sql000066400000000000000000000025071503426445100260120ustar00rootroot00000000000000CREATE EXTERNAL TABLE schema_name.table_name ( column_name_1 VARCHAR(50), column_name_2 VARCHAR(50) NULL, column_name_3 VARCHAR(50) NOT NULL ) WITH ( LOCATION = N'/path/to/folder/', DATA_SOURCE = external_data_source, FILE_FORMAT = parquetfileformat, REJECT_TYPE = VALUE, REJECT_VALUE = 0, REJECTED_ROW_LOCATION = '/REJECT_Directory' ) CREATE EXTERNAL TABLE schema_name.table_name ( column_name_1 VARCHAR(50), column_name_2 VARCHAR(50) NULL, column_name_3 VARCHAR(50) NOT NULL ) WITH ( LOCATION = N'/path/to/folder/', DATA_SOURCE = external_data_source, FILE_FORMAT = parquetfileformat, REJECT_TYPE = PERCENTAGE, REJECT_VALUE = 0, REJECT_SAMPLE_VALUE = 0, REJECTED_ROW_LOCATION = '/REJECT_DIRECTORY' ) CREATE EXTERNAL TABLE customers ( o_orderkey DECIMAL(38) NOT NULL, o_custkey DECIMAL(38) NOT NULL, o_orderstatus CHAR COLLATE latin1_general_bin NOT NULL, o_totalprice DECIMAL(15, 2) NOT NULL, o_orderdate DATETIME2(0) NOT NULL, o_orderpriority CHAR(15) COLLATE latin1_general_bin NOT NULL, o_clerk CHAR(15) COLLATE latin1_general_bin NOT NULL, o_shippriority DECIMAL(38) NOT NULL, o_comment VARCHAR(79) COLLATE latin1_general_bin NOT NULL ) WITH ( LOCATION = 'DB1.mySchema.customer', DATA_SOURCE = external_data_source_name ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_external_table.yml000066400000000000000000000262341503426445100260170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0011b4bff405995cd8650f894336a6ccc8fe336b1989056543054c7b8a7628d2 file: batch: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - object_reference: - naked_identifier: schema_name - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: column_name_1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - column_definition: naked_identifier: column_name_2 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: column_name_3 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'/path/to/folder/'" - comma: ',' - keyword: DATA_SOURCE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: external_data_source - comma: ',' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: parquetfileformat - comma: ',' - keyword: REJECT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: VALUE - comma: ',' - keyword: REJECT_VALUE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - comma: ',' - keyword: REJECTED_ROW_LOCATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/REJECT_Directory'" - end_bracket: ) - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - object_reference: - naked_identifier: schema_name - dot: . 
- naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: column_name_1 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - column_definition: naked_identifier: column_name_2 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: column_name_3 data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - bracketed: - start_bracket: ( - table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'/path/to/folder/'" - comma: ',' - keyword: DATA_SOURCE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: external_data_source - comma: ',' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: parquetfileformat - comma: ',' - keyword: REJECT_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: PERCENTAGE - comma: ',' - keyword: REJECT_VALUE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - comma: ',' - keyword: REJECT_SAMPLE_VALUE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - comma: ',' - keyword: REJECTED_ROW_LOCATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/REJECT_DIRECTORY'" - end_bracket: ) - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - object_reference: naked_identifier: customers - bracketed: - start_bracket: ( - column_definition: naked_identifier: o_orderkey data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '38' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: o_custkey data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '38' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: o_orderstatus - data_type: data_type_identifier: CHAR - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: latin1_general_bin - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: o_totalprice data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '15' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: o_orderdate data_type: data_type_identifier: DATETIME2 bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: o_orderpriority - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '15' end_bracket: ) - column_constraint_segment: keyword: COLLATE 
collation_reference: naked_identifier: latin1_general_bin - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: o_clerk - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '15' end_bracket: ) - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: latin1_general_bin - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: o_shippriority data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '38' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: o_comment - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '79' end_bracket: ) - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: latin1_general_bin - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( table_location_clause: keyword: LOCATION comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DB1.mySchema.customer'" comma: ',' keyword: DATA_SOURCE comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: external_data_source_name end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_fulltext_index.sql000066400000000000000000000070641503426445100260620ustar00rootroot00000000000000CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062, [test] STATISTICAL_SEMANTICS ) KEY INDEX [KEY_INDEX]; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 'french' ) KEY INDEX [KEY_INDEX]; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] STATISTICAL_SEMANTICS ) KEY INDEX [KEY_INDEX]; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE ) KEY INDEX [KEY_INDEX]; -- catalog_filegroup_options CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name]; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name], FILEGROUP [filegroup_name]; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON FILEGROUP [filegroup_name], [ft_catalog_name],; CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON FILEGROUP [filegroup_name]; -- change_tracking (MANUAL | AUTO | OFF | OFF, NO POPULATION) CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING MANUAL ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING = MANUAL ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING AUTO ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING = AUTO ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING OFF ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] 
LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING = OFF ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING OFF, NO POPULATION ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING = OFF, NO POPULATION ); -- stoplist (OFF | SYSTEM | stoplist_name) CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST OFF ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = OFF ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST SYSTEM ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = SYSTEM ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST [custom_stoplist_name] ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = [custom_stoplist_name] ); -- search property list CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( SEARCH PROPERTY LIST [property_list_name] ); CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] ( [id] LANGUAGE 1062 ) KEY INDEX [PK_IDENTIFIER] WITH ( SEARCH PROPERTY LIST = [property_list_name] ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_fulltext_index.yml000066400000000000000000000470121503426445100260610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3646996d80b4d049d753038f31a94a15947ef7b41600bc21f6ec6e5b41697b2 file: batch: - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[id]' - keyword: LANGUAGE - numeric_literal: '1062' - comma: ',' - column_reference: quoted_identifier: '[test]' - keyword: STATISTICAL_SEMANTICS - end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[KEY_INDEX]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE quoted_literal: "'french'" end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[KEY_INDEX]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: STATISTICAL_SEMANTICS end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[KEY_INDEX]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[KEY_INDEX]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - comma: ',' - keyword: FILEGROUP - object_reference: quoted_identifier: '[filegroup_name]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - keyword: FILEGROUP - object_reference: quoted_identifier: '[filegroup_name]' - comma: ',' - object_reference: quoted_identifier: '[ft_catalog_name]' - comma: ',' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - keyword: FILEGROUP - object_reference: quoted_identifier: '[filegroup_name]' - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - keyword: MANUAL - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - keyword: MANUAL - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - keyword: AUTO - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: 'ON' - object_reference: quoted_identifier: '[ft_catalog_name]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - keyword: 'OFF' - comma: ',' - keyword: 'NO' - keyword: POPULATION - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: 'NO' - keyword: POPULATION - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: STOPLIST - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: STOPLIST - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: STOPLIST - keyword: SYSTEM - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: STOPLIST - comparison_operator: raw_comparison_operator: '=' - keyword: SYSTEM - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: start_bracket: ( keyword: STOPLIST object_reference: quoted_identifier: '[custom_stoplist_name]' end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: start_bracket: ( keyword: STOPLIST comparison_operator: raw_comparison_operator: '=' object_reference: quoted_identifier: '[custom_stoplist_name]' end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: SEARCH - keyword: PROPERTY - keyword: LIST - object_reference: quoted_identifier: '[property_list_name]' - end_bracket: ) - statement_terminator: ; - statement: create_fulltext_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - keyword: 'ON' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_FULLTEXT_INDEX]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[id]' keyword: LANGUAGE numeric_literal: '1062' end_bracket: ) - keyword: KEY - keyword: INDEX - object_reference: quoted_identifier: '[PK_IDENTIFIER]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: SEARCH - keyword: PROPERTY - keyword: LIST - comparison_operator: raw_comparison_operator: '=' - object_reference: quoted_identifier: '[property_list_name]' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_function.sql000066400000000000000000000024721503426445100246470ustar00rootroot00000000000000CREATE FUNCTION dbo.ISOweek (@DATE datetime) RETURNS int WITH EXECUTE AS CALLER AS BEGIN DECLARE @ISOweek int; SET @ISOweek= DATEPART(wk,@DATE)+1 -DATEPART(wk,CAST(DATEPART(yy,@DATE) as CHAR(4))+'0104'); --Special cases: Jan 1-3 may belong to the previous year IF (@ISOweek=0) SET @ISOweek=dbo.ISOweek(CAST(DATEPART(yy,@DATE)-1 AS CHAR(4))+'12'+ CAST(24+DATEPART(DAY,@DATE) AS CHAR(2)))+1; --Special case: Dec 29-31 may belong to the next year IF ((DATEPART(mm,@DATE)=12) AND ((DATEPART(dd,@DATE)-DATEPART(dw,@DATE))>= 28)) SET @ISOweek=1; RETURN(@ISOweek); END; GO CREATE FUNCTION f () RETURNS @t TABLE (i int) AS BEGIN INSERT INTO @t SELECT 1; RETURN; END; GO CREATE OR ALTER FUNCTION F (@DATE as datetime) RETURNS INT AS BEGIN RETURN 1 END; GO ALTER FUNCTION F (@DATE as datetime) RETURNS INT AS BEGIN RETURN 0 END; GO CREATE FUNCTION [UTIL].[getItemList] ( @list ItemList READONLY ) RETURNS nvarchar(max) AS BEGIN DECLARE @str nvarchar(max) = '' SELECT @str = @str + [item] FROM ( SELECT TOP (9999) [item] FROM @list ORDER BY [order] ) i RETURN @str END; GO create function my_function(@my_parameter int) returns int with schemabinding, returns null on null input begin return @my_parameter end go sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_function.yml000066400000000000000000000511111503426445100246430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7a1735a4a357946077c4f0899da92e7c31142c471811f180afe87c51e1d3dc10 file: - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ISOweek - function_parameter_list: bracketed: start_bracket: ( parameter: '@DATE' data_type: data_type_identifier: datetime end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: int - function_option_segment: keyword: WITH execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: CALLER - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE parameter: '@ISOweek' data_type: data_type_identifier: int statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@ISOweek' assignment_operator: raw_comparison_operator: '=' expression: - function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: wk comma: ',' expression: parameter: '@DATE' end_bracket: ) - binary_operator: + - numeric_literal: '1' - binary_operator: '-' - function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: wk comma: ',' expression: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: yy comma: ',' expression: parameter: '@DATE' end_bracket: ) keyword: as data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_bracket: ) binary_operator: + quoted_literal: "'0104'" end_bracket: ) statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: parameter: '@ISOweek' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) statement: set_segment: keyword: SET parameter: '@ISOweek' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: naked_identifier: dbo dot: . 
function_name_identifier: ISOweek function_contents: bracketed: start_bracket: ( expression: - function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: yy comma: ',' expression: parameter: '@DATE' end_bracket: ) binary_operator: '-' numeric_literal: '1' keyword: AS data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_bracket: ) - binary_operator: + - quoted_literal: "'12'" - binary_operator: + - function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: numeric_literal: '24' binary_operator: + function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: DAY comma: ',' expression: parameter: '@DATE' end_bracket: ) keyword: AS data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) end_bracket: ) end_bracket: ) binary_operator: + numeric_literal: '1' statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: mm comma: ',' expression: parameter: '@DATE' end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '12' end_bracket: ) - binary_operator: AND - bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: dd comma: ',' expression: parameter: '@DATE' end_bracket: ) - binary_operator: '-' - function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: dw comma: ',' expression: parameter: '@DATE' end_bracket: ) end_bracket: ) comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '28' end_bracket: ) end_bracket: ) statement: set_segment: keyword: SET parameter: '@ISOweek' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' statement_terminator: ; - statement: return_segment: keyword: RETURN expression: bracketed: start_bracket: ( expression: parameter: '@ISOweek' end_bracket: ) statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: naked_identifier: f - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - parameter: '@t' - keyword: TABLE - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: int end_bracket: ) - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: parameter: '@t' - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - statement: return_segment: keyword: RETURN statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - 
keyword: FUNCTION - object_reference: naked_identifier: F - function_parameter_list: bracketed: start_bracket: ( parameter: '@DATE' keyword: as data_type: data_type_identifier: datetime end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: numeric_literal: '1' - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: ALTER - keyword: FUNCTION - object_reference: naked_identifier: F - function_parameter_list: bracketed: start_bracket: ( parameter: '@DATE' keyword: as data_type: data_type_identifier: datetime end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: numeric_literal: '0' - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - quoted_identifier: '[UTIL]' - dot: . - quoted_identifier: '[getItemList]' - function_parameter_list: bracketed: start_bracket: ( parameter: '@list' data_type: data_type_identifier: ItemList keyword: READONLY end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE parameter: '@str' data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "''" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - parameter: '@str' - comparison_operator: raw_comparison_operator: '=' - parameter: '@str' - binary_operator: + - column_reference: quoted_identifier: '[item]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '9999' end_bracket: ) select_clause_element: column_reference: quoted_identifier: '[item]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: parameter: '@list' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[order]' end_bracket: ) alias_expression: naked_identifier: i - statement: return_segment: keyword: RETURN expression: parameter: '@str' - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: create - keyword: function - object_reference: naked_identifier: my_function - function_parameter_list: bracketed: start_bracket: ( parameter: '@my_parameter' data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: int - function_option_segment: - keyword: with - keyword: schemabinding - comma: ',' - keyword: returns - keyword: 'null' - keyword: 'on' - keyword: 'null' - keyword: input - procedure_statement: statement: begin_end_block: - keyword: begin - statement: return_segment: keyword: return expression: parameter: '@my_parameter' - keyword: end - 
go_statement: keyword: go sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_login.sql000066400000000000000000000016661503426445100241360ustar00rootroot00000000000000CREATE LOGIN [MyUserName] WITH PASSWORD = ''; CREATE LOGIN [MyUserName] WITH PASSWORD = '' MUST_CHANGE; CREATE LOGIN [MyUserName] WITH PASSWORD = '', CHECK_EXPIRATION = ON; CREATE LOGIN [MyUserName] WITH PASSWORD = '' MUST_CHANGE, CHECK_EXPIRATION = ON; CREATE LOGIN [MyUserName] WITH PASSWORD = '', DEFAULT_DATABASE = 'TestDb'; CREATE LOGIN [MyUserName] WITH PASSWORD = '', SID = 0x241C11948AEEB749B0D22646DB1A19F2; CREATE LOGIN [MyUserName] WITH PASSWORD = '' FROM WINDOWS; CREATE LOGIN [MyUserName] WITH PASSWORD = '', CHECK_EXPIRATION = ON, CHECK_POLICY = ON, CREDENTIAL = [MyCredential]; CREATE LOGIN [myapp] FROM EXTERNAL PROVIDER; CREATE LOGIN [myapp] FROM CERTIFICATE [MyCert]; CREATE LOGIN [myapp] FROM ASYMMETRIC KEY [MyAsymKey]; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_login.yml000066400000000000000000000123531503426445100241330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a37f693c347dfe13b88ba1f444497a58bebd55e30bcc0d92604bb749d2adc2e4 file: batch: - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: MUST_CHANGE - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - comma: ',' - keyword: CHECK_EXPIRATION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: MUST_CHANGE - comma: ',' - keyword: CHECK_EXPIRATION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - comma: ',' - keyword: DEFAULT_DATABASE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestDb'" - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - comma: ',' - keyword: SID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0x241C11948AEEB749B0D22646DB1A19F2' - statement_terminator: ; - 
statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: FROM - keyword: WINDOWS - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[MyUserName]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - comma: ',' - keyword: CHECK_EXPIRATION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: CHECK_POLICY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: CREDENTIAL - comparison_operator: raw_comparison_operator: '=' - object_reference: quoted_identifier: '[MyCredential]' - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[myapp]' - keyword: FROM - keyword: EXTERNAL - keyword: PROVIDER - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[myapp]' - keyword: FROM - keyword: CERTIFICATE - object_reference: quoted_identifier: '[MyCert]' - statement_terminator: ; - statement: create_login_statement: - keyword: CREATE - keyword: LOGIN - object_reference: quoted_identifier: '[myapp]' - keyword: FROM - keyword: ASYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[MyAsymKey]' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_master_key.sql000066400000000000000000000013621503426445100251620ustar00rootroot00000000000000-- https://learn.microsoft.com/en-us/sql/t-sql/statements/create-master-key-transact-sql -- https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-master-key-transact-sql -- https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-master-key-transact-sql CREATE MASTER KEY ENCRYPTION BY PASSWORD = ''; CREATE MASTER KEY; ALTER MASTER KEY REGENERATE WITH ENCRYPTION BY PASSWORD = ''; ALTER MASTER KEY FORCE REGENERATE WITH ENCRYPTION BY PASSWORD = ''; ALTER MASTER KEY ADD ENCRYPTION BY PASSWORD = ''; ALTER MASTER KEY ADD ENCRYPTION BY SERVICE MASTER KEY; ALTER MASTER KEY DROP ENCRYPTION BY PASSWORD = ''; DROP MASTER KEY; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_master_key.yml000066400000000000000000000057721503426445100251750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: df081acfc90fc98a3ad8bbcac2acf6d07207a20ac79be86fed319f0e9fc35f67 file: batch: - statement: create_master_key_statement: - keyword: CREATE - keyword: MASTER - keyword: KEY - keyword: ENCRYPTION - keyword: BY - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - statement_terminator: ; - statement: create_master_key_statement: - keyword: CREATE - keyword: MASTER - keyword: KEY - statement_terminator: ; - statement: alter_master_key_statement: - keyword: ALTER - keyword: MASTER - keyword: KEY - keyword: REGENERATE - keyword: WITH - keyword: ENCRYPTION - keyword: BY - master_key_encryption_option: keyword: PASSWORD comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: alter_master_key_statement: - keyword: ALTER - keyword: MASTER - keyword: KEY - keyword: FORCE - keyword: REGENERATE - keyword: WITH - keyword: ENCRYPTION - keyword: BY - master_key_encryption_option: keyword: PASSWORD comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: alter_master_key_statement: - keyword: ALTER - keyword: MASTER - keyword: KEY - keyword: ADD - keyword: ENCRYPTION - keyword: BY - master_key_encryption_option: keyword: PASSWORD comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: alter_master_key_statement: - keyword: ALTER - keyword: MASTER - keyword: KEY - keyword: ADD - keyword: ENCRYPTION - keyword: BY - master_key_encryption_option: - keyword: SERVICE - keyword: MASTER - keyword: KEY - statement_terminator: ; - statement: alter_master_key_statement: - keyword: ALTER - keyword: MASTER - keyword: KEY - keyword: DROP - keyword: ENCRYPTION - keyword: BY - master_key_encryption_option: keyword: PASSWORD comparison_operator: raw_comparison_operator: '=' quoted_literal: "''" - statement_terminator: ; - statement: drop_master_key_statement: - keyword: DROP - keyword: MASTER - keyword: KEY - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_partition_function.sql000066400000000000000000000011731503426445100267350ustar00rootroot00000000000000-- https://learn.microsoft.com/en-us/sql/t-sql/statements/create-partition-scheme-transact-sql#examples CREATE PARTITION FUNCTION myIntRangePF1 (INT) AS RANGE LEFT FOR VALUES (1, 100, 1000); CREATE PARTITION FUNCTION myCharRangePF2 (CHAR(1)) AS RANGE RIGHT FOR VALUES ('A', 'B', 'C'); CREATE PARTITION FUNCTION [myDateRangePF1] (date) AS RANGE RIGHT FOR VALUES ( '20030201', '20030301', '20030401', '20030501', '20030601', '20030701', '20030801', '20030901', '20031001', '20031101', '20031201' ); ALTER PARTITION FUNCTION myIntRangePF1() SPLIT RANGE (500); ALTER PARTITION FUNCTION myCharRangePF2() MERGE RANGE ('D'); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_partition_function.yml000066400000000000000000000077161503426445100267500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d0f2bf71cbbadcbcccc4ffd6b5aea72c6b73113033f8150b639bb27aff680a6e file: batch: - statement: create_partition_function_statement: - keyword: CREATE - keyword: PARTITION - keyword: FUNCTION - object_reference: naked_identifier: myIntRangePF1 - bracketed: start_bracket: ( data_type: data_type_identifier: INT end_bracket: ) - keyword: AS - keyword: RANGE - keyword: LEFT - keyword: FOR - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '100' - comma: ',' - numeric_literal: '1000' - end_bracket: ) - statement_terminator: ; - statement: create_partition_function_statement: - keyword: CREATE - keyword: PARTITION - keyword: FUNCTION - object_reference: naked_identifier: myCharRangePF2 - bracketed: start_bracket: ( data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) - keyword: AS - keyword: RANGE - keyword: RIGHT - keyword: FOR - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'A'" - comma: ',' - quoted_literal: "'B'" - comma: ',' - quoted_literal: "'C'" - end_bracket: ) - statement_terminator: ; - statement: create_partition_function_statement: - keyword: CREATE - keyword: PARTITION - keyword: FUNCTION - object_reference: quoted_identifier: '[myDateRangePF1]' - bracketed: start_bracket: ( data_type: data_type_identifier: date end_bracket: ) - keyword: AS - keyword: RANGE - keyword: RIGHT - keyword: FOR - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'20030201'" - comma: ',' - quoted_literal: "'20030301'" - comma: ',' - quoted_literal: "'20030401'" - comma: ',' - quoted_literal: "'20030501'" - comma: ',' - quoted_literal: "'20030601'" - comma: ',' - quoted_literal: "'20030701'" - comma: ',' - quoted_literal: "'20030801'" - comma: ',' - quoted_literal: "'20030901'" - comma: ',' - quoted_literal: "'20031001'" - comma: ',' - quoted_literal: "'20031101'" - comma: ',' - quoted_literal: "'20031201'" - end_bracket: ) - statement_terminator: ; - statement: alter_partition_function_statement: - keyword: ALTER - keyword: PARTITION - keyword: FUNCTION - object_reference: naked_identifier: myIntRangePF1 - bracketed: start_bracket: ( end_bracket: ) - keyword: SPLIT - keyword: RANGE - bracketed: start_bracket: ( numeric_literal: '500' end_bracket: ) - statement_terminator: ; - statement: alter_partition_function_statement: - keyword: ALTER - keyword: PARTITION - keyword: FUNCTION - object_reference: naked_identifier: myCharRangePF2 - bracketed: start_bracket: ( end_bracket: ) - keyword: MERGE - keyword: RANGE - bracketed: start_bracket: ( quoted_literal: "'D'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_partition_scheme.sql000066400000000000000000000006611503426445100263550ustar00rootroot00000000000000-- https://learn.microsoft.com/en-us/sql/t-sql/statements/create-partition-function-transact-sql#BKMK_examples CREATE PARTITION SCHEME myRangePS1 AS PARTITION myRangePF1 TO (test1fg, [test2fg], test3fg, test4fg); CREATE PARTITION SCHEME myRangePS3 AS PARTITION myRangePF3 ALL TO ( test1fg ); CREATE PARTITION SCHEME [myRangePS1] AS PARTITION myRangePF1 ALL TO ( [PRIMARY] ); ALTER PARTITION SCHEME MyRangePS1 NEXT USED test5fg; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_partition_scheme.yml000066400000000000000000000047401503426445100263610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47c08845b29014f2f961466e6eae00ad8b76269b473e425980669e16af02a36a file: batch: - statement: create_partition_scheme_statement: - keyword: CREATE - keyword: PARTITION - keyword: SCHEME - object_reference: naked_identifier: myRangePS1 - keyword: AS - keyword: PARTITION - object_reference: naked_identifier: myRangePF1 - keyword: TO - bracketed: - start_bracket: ( - object_reference: naked_identifier: test1fg - comma: ',' - object_reference: quoted_identifier: '[test2fg]' - comma: ',' - object_reference: naked_identifier: test3fg - comma: ',' - object_reference: naked_identifier: test4fg - end_bracket: ) - statement_terminator: ; - statement: create_partition_scheme_statement: - keyword: CREATE - keyword: PARTITION - keyword: SCHEME - object_reference: naked_identifier: myRangePS3 - keyword: AS - keyword: PARTITION - object_reference: naked_identifier: myRangePF3 - keyword: ALL - keyword: TO - bracketed: start_bracket: ( object_reference: naked_identifier: test1fg end_bracket: ) - statement_terminator: ; - statement: create_partition_scheme_statement: - keyword: CREATE - keyword: PARTITION - keyword: SCHEME - object_reference: quoted_identifier: '[myRangePS1]' - keyword: AS - keyword: PARTITION - object_reference: naked_identifier: myRangePF1 - keyword: ALL - keyword: TO - bracketed: start_bracket: ( object_reference: quoted_identifier: '[PRIMARY]' end_bracket: ) - statement_terminator: ; - statement: alter_partition_scheme_statement: - keyword: ALTER - keyword: PARTITION - keyword: SCHEME - object_reference: naked_identifier: MyRangePS1 - keyword: NEXT - keyword: USED - object_reference: naked_identifier: test5fg - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_procedure.sql000066400000000000000000000051261503426445100250110ustar00rootroot00000000000000-- Minimal stored procedure CREATE PROC [PROCEDURE_NAME] AS BEGIN SELECT 1; END; GO CREATE PROCEDURE [dbo].[TEST] AS BEGIN SELECT 1; END; GO ALTER PROC [PROCEDURE_NAME] AS BEGIN SELECT 1; END; GO ALTER PROCEDURE [PROCEDURE_NAME] AS BEGIN SELECT 1; END; GO CREATE OR ALTER PROC [PROCEDURE_NAME] AS BEGIN SELECT 1; END; GO CREATE OR ALTER PROCEDURE [PROCEDURE_NAME] AS BEGIN SELECT 1; END; GO -- Stored procedure with parameters CREATE PROCEDURE [dbo].[TEST] (@id UNIQUEIDENTIFIER) AS SELECT 1; GO CREATE PROCEDURE [dbo].[TEST] ( @id UNIQUEIDENTIFIER NULL = NULL, @fooReadonly NVARCHAR(42) = N'foo' READONLY, @bar BIT VARYING NULL = NULL OUTPUT, @output TINYINT OUT ) AS BEGIN SET @output = ( SELECT tinyint_value FROM dbo.TEST ); IF @id IS NULL BEGIN SELECT @bar, @fooReadonly; END; END; GO CREATE PROCEDURE [dbo].[TEST] ( @id UNIQUEIDENTIFIER NULL = NULL, @bar NVARCHAR(32) NULL = NULL ) WITH ENCRYPTION, RECOMPILE, EXECUTE AS 'sa' AS BEGIN SELECT 1; END; GO CREATE PROCEDURE [dbo].[TEST] ( @id UNIQUEIDENTIFIER NULL = NULL, @bar NVARCHAR(32) NULL = NULL ) WITH ENCRYPTION, RECOMPILE, EXECUTE AS 'sa' FOR REPLICATION AS BEGIN SELECT @id, @bar; END; GO -- Natively compiled stored procedure CREATE OR ALTER PROCEDURE [dbo].[TEST] (@id INT NOT NULL) WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER AS BEGIN ATOMIC WITH ( LANGUAGE = N'us_english', TRANSACTION ISOLATION LEVEL = SERIALIZABLE, DATEFIRST = 10, DATEFORMAT = dym, DELAYED_DURABILITY = ON ) SELECT 1; END; GO CREATE OR ALTER PROCEDURE 
[dbo].[TEST] (@id INT NOT NULL) WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER AS BEGIN ATOMIC WITH ( TRANSACTION ISOLATION LEVEL = SNAPSHOT, LANGUAGE = 'us_english' ) SELECT 1; END; GO CREATE OR ALTER PROCEDURE [dbo].[TEST] (@id INT NOT NULL) WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER AS BEGIN ATOMIC WITH ( TRANSACTION ISOLATION LEVEL = REPEATABLE READ, LANGUAGE = N'us_english', DELAYED_DURABILITY = OFF, DATEFORMAT = myd ) SELECT 1; END; GO -- CLR stored procedure CREATE PROCEDURE [dbo].[TEST] AS EXTERNAL NAME [dbo].[class_name].[static_method]; GO CREATE PROCEDURE [dbo].[TEST]; 1064 AS EXTERNAL NAME [dbo].[class_name].[static_method]; GO CREATE OR ALTER PROCEDURE [dbo].[TEST] ( @id UNIQUEIDENTIFIER = NEWID(), @output NVARCHAR(32) OUTPUT, @activated BIT OUT READONLY ) WITH EXECUTE AS 'sa' AS EXTERNAL NAME [dbo].[class_name].[static_method]; GO CREATE OR ALTER PROCEDURE dbo.DoSomething AS BEGIN SET NOCOUNT ON; SET XACT_ABORT ON; END GO CREATE OR ALTER PROCEDURE dbo.DoSomething AS BEGIN SET NOCOUNT, XACT_ABORT ON; END GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_procedure.yml000066400000000000000000000531741503426445100250210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b198e5208243b2a9a21749c951b837f04f4f7059012a4402190ea080e07ffb9b file: - batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROC - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROC - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: start_bracket: ( parameter: '@id' data_type: data_type_identifier: UNIQUEIDENTIFIER end_bracket: ) - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@fooReadonly' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'foo'" - keyword: READONLY - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: BIT keyword: VARYING - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - keyword: OUTPUT - comma: ',' - parameter: '@output' - data_type: data_type_identifier: TINYINT - keyword: OUT - end_bracket: ) - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: set_segment: keyword: SET parameter: '@output' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: tinyint_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: TEST end_bracket: ) statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: parameter: '@id' keyword: IS null_literal: 'NULL' statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@bar' - comma: ',' - select_clause_element: parameter: '@fooReadonly' statement_terminator: ; - keyword: END statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - end_bracket: ) - keyword: WITH - keyword: ENCRYPTION - comma: ',' - keyword: RECOMPILE - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - end_bracket: ) - keyword: WITH - keyword: ENCRYPTION - comma: ',' - keyword: RECOMPILE - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: FOR - keyword: REPLICATION - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@id' - comma: ',' - select_clause_element: parameter: '@bar' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'us_english'" - comma: ',' - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: SERIALIZABLE - comma: ',' - keyword: DATEFIRST - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: DATEFORMAT - comparison_operator: raw_comparison_operator: '=' - date_format: dym - comma: ',' - keyword: DELAYED_DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: SNAPSHOT - comma: ',' - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'us_english'" - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: REPEATABLE - keyword: READ - comma: ',' - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'us_english'" - comma: ',' - keyword: DELAYED_DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: DATEFORMAT - comparison_operator: raw_comparison_operator: '=' - date_format: myd - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - statement_terminator: ; - numeric_literal: '1064' - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NEWID function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: '@output' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: OUTPUT - comma: ',' - parameter: '@activated' - data_type: data_type_identifier: BIT - keyword: OUT - keyword: READONLY - end_bracket: ) - keyword: WITH - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: DoSomething - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: set_segment: - keyword: SET - keyword: XACT_ABORT - keyword: 'ON' - statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: DoSomething - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: set_segment: - keyword: SET - keyword: NOCOUNT - comma: ',' - keyword: XACT_ABORT - keyword: 'ON' - statement_terminator: ; - keyword: END - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_role.sql000066400000000000000000000000761503426445100237610ustar00rootroot00000000000000CREATE ROLE testuser; CREATE ROLE testuser AUTHORIZATION dbo; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_role.yml000066400000000000000000000015201503426445100237560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1830cd153260fb341d79a750dc0c6191650a02e59b7e76acdc5081597c844c89 file: batch: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: testuser - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: testuser - keyword: AUTHORIZATION - role_reference: naked_identifier: dbo - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_schema.sql000066400000000000000000000001201503426445100242460ustar00rootroot00000000000000CREATE SCHEMA [Reporting] GO CREATE SCHEMA [Extracts] AUTHORIZATION [dbo]; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_schema.yml000066400000000000000000000016351503426445100242640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c308748e09e74b7c6c4e3a3cdbf45e63a4a756b19637c7fc16cf053dabb11c88 file: - batch: statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: quoted_identifier: '[Reporting]' - go_statement: keyword: GO - batch: statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: quoted_identifier: '[Extracts]' - keyword: AUTHORIZATION - role_reference: quoted_identifier: '[dbo]' - statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_security_policy.sql000066400000000000000000000031471503426445100262500ustar00rootroot00000000000000-- https://learn.microsoft.com/en-us/sql/t-sql/statements/create-security-policy-transact-sql CREATE SECURITY POLICY [FederatedSecurityPolicy] ADD FILTER PREDICATE [rls].[fn_securitypredicate]([CustomerId]) ON [dbo].[Customer]; CREATE SECURITY POLICY [FederatedSecurityPolicy] ADD FILTER PREDICATE [rls].[fn_securitypredicate1]([CustomerId]) ON [dbo].[Customer], ADD FILTER PREDICATE [rls].[fn_securitypredicate1]([VendorId]) ON [dbo].[Vendor], ADD FILTER PREDICATE [rls].[fn_securitypredicate2]([WingId]) ON [dbo].[Patient] WITH (STATE = ON); CREATE SECURITY POLICY rls.SecPol ADD FILTER PREDICATE rls.tenantAccessPredicate(TenantId) ON dbo.Sales, ADD BLOCK PREDICATE rls.tenantAccessPredicate(TenantId) ON dbo.Sales AFTER INSERT ; -- https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-security-policy-transact-sql ALTER SECURITY POLICY pol1 ADD FILTER PREDICATE schema_preds.SecPredicate(column1) ON myschema.mytable; ALTER SECURITY POLICY pol1 WITH ( STATE = ON ); ALTER SECURITY POLICY pol1 ADD FILTER PREDICATE schema_preds.SecPredicate1(column1) ON myschema.mytable1, DROP FILTER PREDICATE ON myschema.mytable2, ADD FILTER PREDICATE schema_preds.SecPredicate2(column2, 1) ON myschema.mytable3; ALTER SECURITY POLICY pol1 ALTER FILTER PREDICATE schema_preds.SecPredicate2(column1) ON myschema.mytable; ALTER SECURITY POLICY rls.SecPol ALTER BLOCK PREDICATE rls.tenantAccessPredicate_v2(TenantId) ON dbo.Sales AFTER INSERT; -- https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-security-policy-transact-sql DROP SECURITY POLICY secPolicy; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_security_policy.yml000066400000000000000000000207151503426445100262520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f9f30712fc6f77236180561f9d6bc365f45cb03949696416abebf223228a0da1 file: batch: - statement: create_security_policy_statement: - keyword: CREATE - keyword: SECURITY - keyword: POLICY - object_reference: quoted_identifier: '[FederatedSecurityPolicy]' - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - quoted_identifier: '[rls]' - dot: . - quoted_identifier: '[fn_securitypredicate]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[CustomerId]' end_bracket: ) - keyword: 'ON' - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[Customer]' - statement_terminator: ; - statement: create_security_policy_statement: - keyword: CREATE - keyword: SECURITY - keyword: POLICY - object_reference: quoted_identifier: '[FederatedSecurityPolicy]' - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - quoted_identifier: '[rls]' - dot: . - quoted_identifier: '[fn_securitypredicate1]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[CustomerId]' end_bracket: ) - keyword: 'ON' - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[Customer]' - comma: ',' - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - quoted_identifier: '[rls]' - dot: . - quoted_identifier: '[fn_securitypredicate1]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[VendorId]' end_bracket: ) - keyword: 'ON' - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[Vendor]' - comma: ',' - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - quoted_identifier: '[rls]' - dot: . - quoted_identifier: '[fn_securitypredicate2]' - bracketed: start_bracket: ( column_reference: quoted_identifier: '[WingId]' end_bracket: ) - keyword: 'ON' - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[Patient]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: STATE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_security_policy_statement: - keyword: CREATE - keyword: SECURITY - keyword: POLICY - object_reference: - naked_identifier: rls - dot: . - naked_identifier: SecPol - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - naked_identifier: rls - dot: . - naked_identifier: tenantAccessPredicate - bracketed: start_bracket: ( column_reference: naked_identifier: TenantId end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: Sales - comma: ',' - keyword: ADD - keyword: BLOCK - keyword: PREDICATE - object_reference: - naked_identifier: rls - dot: . - naked_identifier: tenantAccessPredicate - bracketed: start_bracket: ( column_reference: naked_identifier: TenantId end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: Sales - keyword: AFTER - keyword: INSERT - statement_terminator: ; - statement: alter_security_policy_statement: - keyword: ALTER - keyword: SECURITY - keyword: POLICY - object_reference: naked_identifier: pol1 - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - naked_identifier: schema_preds - dot: . - naked_identifier: SecPredicate - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable - statement_terminator: ; - statement: alter_security_policy_statement: - keyword: ALTER - keyword: SECURITY - keyword: POLICY - object_reference: naked_identifier: pol1 - keyword: WITH - bracketed: - start_bracket: ( - keyword: STATE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_security_policy_statement: - keyword: ALTER - keyword: SECURITY - keyword: POLICY - object_reference: naked_identifier: pol1 - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - naked_identifier: schema_preds - dot: . 
- naked_identifier: SecPredicate1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable1 - comma: ',' - keyword: DROP - keyword: FILTER - keyword: PREDICATE - keyword: 'ON' - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable2 - comma: ',' - keyword: ADD - keyword: FILTER - keyword: PREDICATE - object_reference: - naked_identifier: schema_preds - dot: . - naked_identifier: SecPredicate2 - bracketed: start_bracket: ( column_reference: naked_identifier: column2 comma: ',' expression: numeric_literal: '1' end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable3 - statement_terminator: ; - statement: alter_security_policy_statement: - keyword: ALTER - keyword: SECURITY - keyword: POLICY - object_reference: naked_identifier: pol1 - keyword: ALTER - keyword: FILTER - keyword: PREDICATE - object_reference: - naked_identifier: schema_preds - dot: . - naked_identifier: SecPredicate2 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable - statement_terminator: ; - statement: alter_security_policy_statement: - keyword: ALTER - keyword: SECURITY - keyword: POLICY - object_reference: - naked_identifier: rls - dot: . - naked_identifier: SecPol - keyword: ALTER - keyword: BLOCK - keyword: PREDICATE - object_reference: - naked_identifier: rls - dot: . - naked_identifier: tenantAccessPredicate_v2 - bracketed: start_bracket: ( column_reference: naked_identifier: TenantId end_bracket: ) - keyword: 'ON' - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: Sales - keyword: AFTER - keyword: INSERT - statement_terminator: ; - statement: drop_security_policy: - keyword: DROP - keyword: SECURITY - keyword: POLICY - object_reference: naked_identifier: secPolicy - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table.sql000066400000000000000000000026041503426445100241060ustar00rootroot00000000000000CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) -- Test various forms of quoted data types CREATE TABLE foo ( pk int PRIMARY KEY, quoted_name [custom udt], qualified_name sch.qualified, quoted_qualified "my schema".qualified, more_quoted "my schema"."custom udt", quoted_udt sch.[custom udt] ); -- computed column -- https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver16#column_name-as-computed_column_expression -- https://learn.microsoft.com/en-us/sql/relational-databases/tables/specify-computed-columns-in-a-table?view=sql-server-ver16 CREATE TABLE dbo.Products ( ProductID int IDENTITY (1,1) NOT NULL , InventoryTs datetime2(0) , QtyAvailable smallint , QtySold smallint , UnitPrice money , InventoryValue1 AS QtyAvailable * UnitPrice PERSISTED , InventoryValue2 AS QtyAvailable * UnitPrice PERSISTED NOT NULL , InventoryValue3 AS QtyAvailable * UnitPrice , InventoryValue4 AS QtyAvailable * UnitPrice PRIMARY KEY , [SoldValue] AS (QtySold * UnitPrice) , InventoyDate AS CAST(InventoryTs AS date) ); -- issue #6340 CREATE TABLE [dbo].[Foo]( [ID] [int] IDENTITY(1,1) NOT NULL CONSTRAINT [PK_Foo_ID] PRIMARY KEY CLUSTERED ([ID] ASC), [other_ID] [int] FOREIGN KEY REFERENCES [dbo].[Bar] (id) UNIQUE ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table.yml000066400000000000000000000231621503426445100241120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5da583c0e5899600af6cd8bd9abd5e14a54d6374cf5f53cdf7b03802e8c554ca file: batch: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: pk data_type: data_type_identifier: int column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: quoted_name data_type: quoted_identifier: '[custom udt]' - comma: ',' - column_definition: naked_identifier: qualified_name data_type: naked_identifier: sch dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: quoted_qualified data_type: quoted_identifier: '"my schema"' dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: more_quoted data_type: - quoted_identifier: '"my schema"' - dot: . - quoted_identifier: '"custom udt"' - comma: ',' - column_definition: naked_identifier: quoted_udt data_type: naked_identifier: sch dot: . quoted_identifier: '[custom udt]' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: Products - bracketed: - start_bracket: ( - column_definition: - naked_identifier: ProductID - data_type: data_type_identifier: int - column_constraint_segment: identity_grammar: keyword: IDENTITY bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: InventoryTs data_type: data_type_identifier: datetime2 bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) - comma: ',' - column_definition: naked_identifier: QtyAvailable data_type: data_type_identifier: smallint - comma: ',' - column_definition: naked_identifier: QtySold data_type: data_type_identifier: smallint - comma: ',' - column_definition: naked_identifier: UnitPrice data_type: data_type_identifier: money - comma: ',' - computed_column_definition: - naked_identifier: InventoryValue1 - keyword: AS - expression: - column_reference: naked_identifier: QtyAvailable - binary_operator: '*' - column_reference: naked_identifier: UnitPrice - keyword: PERSISTED - comma: ',' - computed_column_definition: - naked_identifier: InventoryValue2 - keyword: AS - expression: - column_reference: naked_identifier: QtyAvailable - binary_operator: '*' - column_reference: naked_identifier: UnitPrice - keyword: PERSISTED - keyword: NOT - keyword: 'NULL' - comma: ',' - computed_column_definition: naked_identifier: InventoryValue3 keyword: AS expression: - column_reference: naked_identifier: QtyAvailable - binary_operator: '*' - column_reference: naked_identifier: UnitPrice - comma: ',' - computed_column_definition: naked_identifier: InventoryValue4 keyword: AS expression: - column_reference: naked_identifier: QtyAvailable - binary_operator: '*' - column_reference: naked_identifier: UnitPrice column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - computed_column_definition: quoted_identifier: '[SoldValue]' keyword: AS bracketed: start_bracket: ( expression: - column_reference: naked_identifier: QtySold - binary_operator: '*' - column_reference: naked_identifier: UnitPrice end_bracket: ) - comma: ',' - computed_column_definition: naked_identifier: InventoyDate keyword: AS function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: InventoryTs keyword: AS data_type: data_type_identifier: date end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[Foo]' - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: '[int]' - column_constraint_segment: identity_grammar: keyword: IDENTITY bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[PK_Foo_ID]' - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[ID]' keyword: ASC end_bracket: ) - comma: ',' - column_definition: - quoted_identifier: '[other_ID]' - data_type: data_type_identifier: '[int]' - column_constraint_segment: - keyword: FOREIGN - keyword: KEY - column_constraint_segment: references_constraint_grammar: keyword: REFERENCES table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[Bar]' bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - column_constraint_segment: keyword: UNIQUE - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_as_select.sql000066400000000000000000000017431503426445100261330ustar00rootroot00000000000000--Azure Synapse Analytics specific CREATE TABLE [dbo].[PL_stage] WITH (DISTRIBUTION = HASH([ID]), HEAP) AS WITH CommentsTracking AS ( SELECT 'Program' AS Program ) SELECT e.[ID] ,e.[ArriveDate] ,e.[Contribution] ,e.[DischargeDate] ,e.[Encounter] ,e.[Facility] ,e.[Region] ,e.[LOS] FROM dbo.Encounter e JOIN dbo.Finance f ON e.[ID] = f.[ID] DROP TABLE [dbo].[PL_stage] CREATE TABLE [dbo].[PL_stage] WITH (DISTRIBUTION = HASH([ID]), HEAP) AS SELECT e.[ID] ,e.[ArriveDate] ,e.[Contribution] ,e.[DischargeDate] ,e.[Encounter] ,e.[Facility] ,e.[Region] ,e.[LOS] FROM dbo.Encounter e JOIN dbo.Finance f ON e.[ID] = f.[ID]; DROP TABLE [dbo].[PL_stage]; CREATE TABLE [dbo].[PL_stage] WITH (DISTRIBUTION = HASH([ID]), HEAP) AS ( SELECT e.[ID] ,e.[ArriveDate] ,e.[Contribution] ,e.[DischargeDate] ,e.[Encounter] ,e.[Facility] ,e.[Region] ,e.[LOS] FROM dbo.Encounter e JOIN dbo.Finance f ON e.[ID] = f.[ID] ) OPTION (LABEL = 'Test_Label') sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_as_select.yml000066400000000000000000000317451503426445100261420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d0d10ccb882786095bf5c20d3e192d9ea0ededc4490a9b2baf5f1f076ab947b1 file: batch: - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[PL_stage]' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[ID]' end_bracket: ) comma: ',' table_index_clause: keyword: HEAP end_bracket: ) - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: CommentsTracking keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Program'" alias_expression: alias_operator: keyword: AS naked_identifier: Program end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ArriveDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Contribution]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[DischargeDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Encounter]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Facility]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Region]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[LOS]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Encounter alias_expression: naked_identifier: e join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Finance alias_expression: naked_identifier: f join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: f dot: . quoted_identifier: '[ID]' - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[PL_stage]' - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[PL_stage]' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[ID]' end_bracket: ) comma: ',' table_index_clause: keyword: HEAP end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ArriveDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Contribution]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . 
quoted_identifier: '[DischargeDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Encounter]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Facility]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Region]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[LOS]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Encounter alias_expression: naked_identifier: e join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Finance alias_expression: naked_identifier: f join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: f dot: . quoted_identifier: '[ID]' statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[PL_stage]' - statement_terminator: ; - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[PL_stage]' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[ID]' end_bracket: ) comma: ',' table_index_clause: keyword: HEAP end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[ArriveDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Contribution]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[DischargeDate]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Encounter]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Facility]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[Region]' - comma: ',' - select_clause_element: column_reference: naked_identifier: e dot: . quoted_identifier: '[LOS]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Encounter alias_expression: naked_identifier: e join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Finance alias_expression: naked_identifier: f join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: e dot: . quoted_identifier: '[ID]' - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: f dot: . 
quoted_identifier: '[ID]' end_bracket: ) - option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: LABEL comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Test_Label'" end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_constraints.sql000066400000000000000000000024671503426445100265440ustar00rootroot00000000000000CREATE TABLE [dbo].[example]( [Column A] [int] IDENTITY, [Column B] [int] IDENTITY(1, 1) NOT NULL, [ColumnC] varchar(100) DEFAULT 'mydefault', [ColumnDecimal] DATE DEFAULT GETDATE(), [ColumnUser] char(30) DEFAULT CURRENT_USER, [col1] int default ((-1)) not null, [col1] int default (-1) not null, [col1] int default -1 not null, [col1] INT DEFAULT (NULL) NULL ) GO create table [schema1].[table1] ( [col1] INT , PRIMARY KEY CLUSTERED ([col1] ASC) ) GO create table [schema1].[table1] ( [col1] INT , CONSTRAINT [Pk_Id] PRIMARY KEY NONCLUSTERED ([col1] DESC) ) GO CREATE TABLE [dbo].[table1] ( [ColumnB] [varchar](100) FILESTREAM MASKED WITH (FUNCTION = 'my_func'), [ColumnC] varchar(100) NULL NOT FOR REPLICATION, [ColumnDecimal] decimal(10,3) GENERATED ALWAYS AS ROW START HIDDEN, [columnE] varchar(100) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = key_name, ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256' ), [column1] varchar (100) collate Latin1_General_BIN ) GO CREATE TABLE table_name ( id UNIQUEIDENTIFIER NOT NULL CONSTRAINT constraint_name REFERENCES referenced_table_name ON DELETE NO ACTION ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_constraints.yml000066400000000000000000000267431503426445100265510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 698fc3e052539d86352bbfca199a76a7809282b7a4834e12b7671cbfc94b00e7 file: - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[example]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column A]' data_type: data_type_identifier: '[int]' column_constraint_segment: identity_grammar: keyword: IDENTITY - comma: ',' - column_definition: - quoted_identifier: '[Column B]' - data_type: data_type_identifier: '[int]' - column_constraint_segment: identity_grammar: keyword: IDENTITY bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'mydefault'" - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: DATE column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnUser]' data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '30' end_bracket: ) column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_USER - comma: ',' - column_definition: - quoted_identifier: '[col1]' - data_type: data_type_identifier: int - column_constraint_segment: keyword: default bracketed: start_bracket: ( bracketed: start_bracket: ( numeric_literal: sign_indicator: '-' numeric_literal: '1' end_bracket: ) end_bracket: ) - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: - quoted_identifier: '[col1]' - data_type: data_type_identifier: int - column_constraint_segment: keyword: default bracketed: start_bracket: ( numeric_literal: sign_indicator: '-' numeric_literal: '1' end_bracket: ) - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: - quoted_identifier: '[col1]' - data_type: data_type_identifier: int - column_constraint_segment: keyword: default numeric_literal: sign_indicator: '-' numeric_literal: '1' - column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: - quoted_identifier: '[col1]' - data_type: data_type_identifier: INT - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( null_literal: 'NULL' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - end_bracket: ) - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: create - keyword: table - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[col1]' data_type: data_type_identifier: INT comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[col1]' keyword: ASC end_bracket: ) end_bracket: ) - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: create - keyword: table - table_reference: - quoted_identifier: '[schema1]' - dot: . 
- quoted_identifier: '[table1]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[col1]' data_type: data_type_identifier: INT comma: ',' table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[Pk_Id]' - keyword: PRIMARY - keyword: KEY - keyword: NONCLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[col1]' keyword: DESC end_bracket: ) end_bracket: ) - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[table1]' - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ColumnB]' - data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - column_constraint_segment: keyword: FILESTREAM - column_constraint_segment: - keyword: MASKED - keyword: WITH - bracketed: start_bracket: ( keyword: FUNCTION comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my_func'" end_bracket: ) - comma: ',' - column_definition: - quoted_identifier: '[ColumnC]' - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: - keyword: NOT - keyword: FOR - keyword: REPLICATION - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: ROW - keyword: START - keyword: HIDDEN - comma: ',' - column_definition: quoted_identifier: '[columnE]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: encrypted_with_grammar: - keyword: ENCRYPTED - keyword: WITH - bracketed: - start_bracket: ( - keyword: COLUMN_ENCRYPTION_KEY - comparison_operator: raw_comparison_operator: '=' - naked_identifier: key_name - comma: ',' - keyword: ENCRYPTION_TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: RANDOMIZED - comma: ',' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AEAD_AES_256_CBC_HMAC_SHA_256'" - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[column1]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: collate collation_reference: naked_identifier: Latin1_General_BIN - end_bracket: ) - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_name - bracketed: start_bracket: ( column_definition: - naked_identifier: id - data_type: data_type_identifier: UNIQUEIDENTIFIER - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: CONSTRAINT object_reference: naked_identifier: constraint_name references_constraint_grammar: - keyword: REFERENCES - table_reference: naked_identifier: referenced_table_name - keyword: 'ON' - keyword: DELETE - keyword: 'NO' - keyword: ACTION end_bracket: ) - 
statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_graph.sql000066400000000000000000000014521503426445100252670ustar00rootroot00000000000000-- Simple node table with a user defined attributes CREATE TABLE [dbo].[Person] ( ID INTEGER PRIMARY KEY, [name] VARCHAR(100) ) AS NODE; -- A simple edge table with a user defined attribute CREATE TABLE friends ( id INTEGER PRIMARY KEY, start_date DATE ) AS EDGE; -- Create a likes edge table, this table does not have any user defined attributes CREATE TABLE likes AS EDGE; -- Create friend edge table with CONSTRAINT, restricts for nodes and its direction CREATE TABLE dbo.FriendOf( CONSTRAINT cnt_Person_FriendOf_Person CONNECTION (dbo.Person TO dbo.Person) ) AS EDGE; -- Create friend edge table with CONSTRAINT, -- with ON DELETE CASCADE option CREATE TABLE dbo.FriendOf( CONSTRAINT cnt_Person_FriendOf_Person CONNECTION (dbo.Person TO dbo.Person) ON DELETE CASCADE ) AS EDGE; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_graph.yml000066400000000000000000000103461503426445100252730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eec36e50396bb61abe0713b0338d60f4c17688597f014d5ea215db882d52c97d file: batch: - statement: create_table_graph_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[Person]' - bracketed: - start_bracket: ( - column_definition: naked_identifier: ID data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: quoted_identifier: '[name]' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - end_bracket: ) - keyword: AS - keyword: NODE - statement_terminator: ; - statement: create_table_graph_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: friends - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: start_date data_type: data_type_identifier: DATE - end_bracket: ) - keyword: AS - keyword: EDGE - statement_terminator: ; - statement: create_table_graph_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: likes - keyword: AS - keyword: EDGE - statement_terminator: ; - statement: create_table_graph_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FriendOf - bracketed: start_bracket: ( graph_table_constraint: keyword: CONSTRAINT object_reference: naked_identifier: cnt_Person_FriendOf_Person connection_constraint_grammar: keyword: CONNECTION bracketed: - start_bracket: ( - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Person - keyword: TO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Person - end_bracket: ) end_bracket: ) - keyword: AS - keyword: EDGE - statement_terminator: ; - statement: create_table_graph_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: FriendOf - bracketed: start_bracket: ( graph_table_constraint: keyword: CONSTRAINT object_reference: naked_identifier: cnt_Person_FriendOf_Person connection_constraint_grammar: - keyword: CONNECTION - bracketed: - start_bracket: ( - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Person - keyword: TO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Person - end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE end_bracket: ) - keyword: AS - keyword: EDGE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_on_filegroup.sql000066400000000000000000000002131503426445100266500ustar00rootroot00000000000000CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) ON MyFileGroup sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_on_filegroup.yml000066400000000000000000000037611503426445100266650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e98064d324c4b081392469410dbfab5dddbc12b258a107db6cd00d212abb8f2e file: batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: naked_identifier: MyFileGroup sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_distribution.sql000066400000000000000000000030241503426445100277350ustar00rootroot00000000000000--Azure Synapse Analytics specific CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (CLUSTERED COLUMNSTORE INDEX, DISTRIBUTION = ROUND_ROBIN); GO DROP TABLE [dbo].[EC DC] GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (HEAP, DISTRIBUTION = REPLICATE); GO DROP TABLE [dbo].[EC DC] GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (LOCATION = USER_DB, DISTRIBUTION = HASH([Column B])); GO DROP TABLE [dbo].[EC DC] GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (CLUSTERED COLUMNSTORE INDEX, LOCATION = USER_DB, DISTRIBUTION = HASH([Column B])); GO DROP TABLE [dbo].[EC DC] GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (CLUSTERED INDEX ([Column B]), 
DISTRIBUTION = HASH([Column B])); GO DROP TABLE [dbo].[EC DC]; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (CLUSTERED COLUMNSTORE INDEX ORDER ([Column B]), DISTRIBUTION = HASH([Column B])); GO DROP TABLE [dbo].[EC DC]; GO CREATE TABLE [dbo].[table] ( [name] [varchar](100) NOT NULL, [month_num] [int] NULL ) WITH ( DISTRIBUTION = REPLICATE, CLUSTERED INDEX ( [name] ASC, [month_num] ASC ) ) GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_distribution.yml000066400000000000000000000411451503426445100277450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b5704dab85931563d9a29f1d454c0ee1fbb3a2278481306089d4be3505ddc96 file: - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_index_clause: - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX comma: ',' table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: ROUND_ROBIN end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_index_clause: keyword: HEAP comma: ',' table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: REPLICATE end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_location_clause: - keyword: LOCATION - comparison_operator: raw_comparison_operator: '=' - keyword: USER_DB comma: ',' table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: - start_bracket: ( - table_index_clause: - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - comma: ',' - table_location_clause: - keyword: LOCATION - comparison_operator: raw_comparison_operator: '=' - keyword: USER_DB - comma: ',' - table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_index_clause: - keyword: CLUSTERED - keyword: INDEX - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) comma: ',' table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_index_clause: - keyword: CLUSTERED - keyword: COLUMNSTORE - keyword: INDEX - keyword: ORDER - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) comma: ',' table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: HASH - bracketed: start_bracket: ( column_reference: quoted_identifier: '[Column B]' end_bracket: ) end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[table]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[name]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[month_num]' data_type: data_type_identifier: '[int]' column_constraint_segment: keyword: 'NULL' - end_bracket: ) - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: REPLICATE comma: ',' table_index_clause: - keyword: CLUSTERED - keyword: INDEX - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[name]' - keyword: ASC - comma: ',' - column_reference: quoted_identifier: '[month_num]' - keyword: ASC - end_bracket: ) end_bracket: ) - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_sequence.sql000066400000000000000000000004101503426445100270220ustar00rootroot00000000000000CREATE TABLE DEST.MELDER( [ID] INT PRIMARY KEY NOT NULL DEFAULT NEXT VALUE FOR [dbo].[SEQ_MELDER] ,[DOWNLOADED_TOTAL] INT NULL ,[WARNED_DAILY] INT NULL ,[DATE_OF_REPORT] DATETIME NULL ,DATE_LAST_INSERTED DATETIME DEFAULT GETDATE() ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_sequence.yml000066400000000000000000000050031503426445100270270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
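#
# A minimal example of the regeneration step described above, run from the
# repository root of a development checkout (passing a dialect name to limit
# the run is an assumption here -- check the script's --help for its actual
# options):
#
#   python test/generate_parse_fixture_yml.py tsql
#
# Re-running the script re-parses the companion .sql fixtures and rewrites
# these .yml files, including the "_hash" field below.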
_hash: 76d320dce1e41205e4befa162fe983feed2b22bc38f36a5a389097e7a12cb1d7 file: batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: DEST - dot: . - naked_identifier: MELDER - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[SEQ_MELDER]' - comma: ',' - column_definition: quoted_identifier: '[DOWNLOADED_TOTAL]' data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[WARNED_DAILY]' data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[DATE_OF_REPORT]' data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.sql000066400000000000000000000045471503426445100310450ustar00rootroot00000000000000IF NOT EXISTS(SELECT * FROM sys.sequences WHERE object_id = OBJECT_ID(N'[dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]') AND type = 'SO') CREATE SEQUENCE SEQ_SCHEMA_NAME_TABLE_NAME START WITH 1 INCREMENT BY 1; GO CREATE TABLE SCHEMA_NAME.TABLE_NAME( [ID] INT PRIMARY KEY NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]), [WEEK_UNIX] BIGINT, GMCODE VARCHAR(100), AVERAGE_RNA_FLOW_PER_100000 DECIMAL(16,2) NULL, NUMBER_OF_MEASUREMENTS INT NULL, NUMBER_OF_LOCATIONS INT NULL, TOTAL_LOCATIONS INT NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE() ); IF NOT EXISTS(SELECT * FROM sys.sequences WHERE object_id = OBJECT_ID(N'[dbo].[SEQ_STAGE_CBS_POPULATION_BASE]') AND type = 'SO') CREATE SEQUENCE SEQ_STAGE_CBS_POPULATION_BASE START WITH 1 INCREMENT BY 1; GO CREATE TABLE STAGE.CBS_POPULATION_BASE( [ID] INT PRIMARY KEY NONCLUSTERED NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_STAGE_CBS_POPULATION_BASE]), GEMEENTE_CODE VARCHAR(100) NULL, GEMEENTE VARCHAR(100) NULL, LEEFTIJD VARCHAR(100) NULL, GESLACHT VARCHAR(100) NULL, DATUM_PEILING VARCHAR(100) NULL, POPULATIE VARCHAR(100) NULL, VEILIGHEIDSREGIO_CODE VARCHAR(100) NULL, VEILIGHEIDSREGIO_NAAM VARCHAR(100) NULL, PROVINCIE_CODE VARCHAR(100) NULL, PROVINCIE_NAAM VARCHAR(100) NULL, GGD_CODE VARCHAR(100) NULL, GGD_NAAM VARCHAR(100) NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE() ); GO CREATE CLUSTERED INDEX CI_DLI_STAGE_CBS_POPULATION_BASE ON STAGE.CBS_POPULATION_BASE (DATE_LAST_INSERTED) GO CREATE NONCLUSTERED INDEX NCI_DLI_STAGE_CIMS_VACCINATED_AGE_GROUP ON STAGE.CBS_POPULATION_BASE (DATE_LAST_INSERTED, GEMEENTE_CODE, GEMEENTE, LEEFTIJD, GESLACHT, DATUM_PEILING, POPULATIE, VEILIGHEIDSREGIO_CODE, VEILIGHEIDSREGIO_NAAM, PROVINCIE_CODE, PROVINCIE_NAAM, GGD_CODE, GGD_NAAM); CREATE TABLE DEST.POSITIVE_TESTED_PEOPLE( [ID] INT PRIMARY KEY NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_DEST_POSITIVE_TESTED_PEOPLE]), DATE_OF_REPORT DATETIME NULL, 
DATE_OF_REPORT_UNIX BIGINT NULL, INFECTED_DAILY_INCREASE DECIMAL(16, 1) NULL, INFECTED_DAILY_TOTAL INT NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE(), [DATE_RANGE_START] datetime, [DATE_OF_REPORTS_LAG] datetime, [DATE_RANGE_START_LAG] datetime, [7D_AVERAGE_INFECTED_DAILY_INCREASE_TOTAL] decimal (16,2), [7D_AVERAGE_INFECTED_DAILY_INCREASE_LAG] decimal (16,2), [7D_AVERAGE_INFECTED_DAILY_INCREASE_ABSOLUTE] decimal (16,2) ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml000066400000000000000000000541401503426445100310410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d2ba1222dc34facb043345ddade735eaea43fc02bd63840d53eb3c4cbb4571ae file: - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: sequences where_clause: keyword: WHERE expression: - column_reference: naked_identifier: object_id - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: OBJECT_ID function_contents: bracketed: start_bracket: ( expression: quoted_literal: "N'[dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]'" end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SO'" end_bracket: ) statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_SCHEMA_NAME_TABLE_NAME - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEMA_NAME - dot: . - naked_identifier: TABLE_NAME - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_SCHEMA_NAME_TABLE_NAME]' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[WEEK_UNIX]' data_type: data_type_identifier: BIGINT - comma: ',' - column_definition: naked_identifier: GMCODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: naked_identifier: AVERAGE_RNA_FLOW_PER_100000 data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: NUMBER_OF_MEASUREMENTS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: NUMBER_OF_LOCATIONS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: TOTAL_LOCATIONS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: sequences where_clause: keyword: WHERE expression: - column_reference: naked_identifier: object_id - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: OBJECT_ID function_contents: bracketed: start_bracket: ( expression: quoted_literal: "N'[dbo].[SEQ_STAGE_CBS_POPULATION_BASE]'" end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SO'" end_bracket: ) statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_STAGE_CBS_POPULATION_BASE - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: NONCLUSTERED - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_STAGE_CBS_POPULATION_BASE]' end_bracket: ) - comma: ',' - column_definition: naked_identifier: GEMEENTE_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GEMEENTE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: LEEFTIJD data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GESLACHT data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATUM_PEILING data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: POPULATIE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: VEILIGHEIDSREGIO_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: VEILIGHEIDSREGIO_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PROVINCIE_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PROVINCIE_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GGD_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GGD_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: INDEX - index_reference: naked_identifier: 
CI_DLI_STAGE_CBS_POPULATION_BASE - keyword: 'ON' - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - go_statement: keyword: GO - batch: - statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: NCI_DLI_STAGE_CIMS_VACCINATED_AGE_GROUP - keyword: 'ON' - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: naked_identifier: DATE_LAST_INSERTED - comma: ',' - index_column_definition: naked_identifier: GEMEENTE_CODE - comma: ',' - index_column_definition: naked_identifier: GEMEENTE - comma: ',' - index_column_definition: naked_identifier: LEEFTIJD - comma: ',' - index_column_definition: naked_identifier: GESLACHT - comma: ',' - index_column_definition: naked_identifier: DATUM_PEILING - comma: ',' - index_column_definition: naked_identifier: POPULATIE - comma: ',' - index_column_definition: naked_identifier: VEILIGHEIDSREGIO_CODE - comma: ',' - index_column_definition: naked_identifier: VEILIGHEIDSREGIO_NAAM - comma: ',' - index_column_definition: naked_identifier: PROVINCIE_CODE - comma: ',' - index_column_definition: naked_identifier: PROVINCIE_NAAM - comma: ',' - index_column_definition: naked_identifier: GGD_CODE - comma: ',' - index_column_definition: naked_identifier: GGD_NAAM - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: DEST - dot: . - naked_identifier: POSITIVE_TESTED_PEOPLE - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_DEST_POSITIVE_TESTED_PEOPLE]' end_bracket: ) - comma: ',' - column_definition: naked_identifier: DATE_OF_REPORT data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_OF_REPORT_UNIX data_type: data_type_identifier: BIGINT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: INFECTED_DAILY_INCREASE data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: INFECTED_DAILY_TOTAL data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[DATE_RANGE_START]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[DATE_OF_REPORTS_LAG]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[DATE_RANGE_START_LAG]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_TOTAL]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_LAG]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_ABSOLUTE]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_table_option_segment.sql000066400000000000000000000020541503426445100314210ustar00rootroot00000000000000CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = INFINITE ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 1 DAYS, DATA_CONSISTENCY_CHECK = ON ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 10 WEEKS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 4 MONTHS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 1 YEARS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = 
[dbo].[TEST_HISTORY] ) ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_table_option_segment.yml000066400000000000000000000226421503426445100314300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a78aab88572e4946b8b3c63814c0754aa063e66d94dc1dffc3ea9dcdf7659559 file: batch: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - keyword: INFINITE - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: DAYS - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - keyword: WEEKS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - keyword: MONTHS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: YEARS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_HISTORY]' end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_trailing_comma.sql000066400000000000000000000000751503426445100302060ustar00rootroot00000000000000CREATE TABLE [dbo].[test] ( [Column B] [varchar](100), ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml000066400000000000000000000020331503426445100302040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1741f41c3ad5d4387b08448fc575cf531f48835b024826f368d22069ecf693d9 file: batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[test]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) comma: ',' end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_type.sql000066400000000000000000000002001503426445100237660ustar00rootroot00000000000000CREATE TYPE person AS TABLE ( name nvarchar(10), height int, favorite_color int ); CREATE TYPE weird_int FROM int; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_type.yml000066400000000000000000000031001503426445100237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1873d9ddf12a690dee729b24d8f6678ce9686f646e8321db57a10d32d349aadb file: batch: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: person - keyword: AS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: name data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comma: ',' - column_definition: naked_identifier: height data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: favorite_color data_type: data_type_identifier: int - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: weird_int - keyword: FROM - object_reference: naked_identifier: int - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_user.sql000066400000000000000000000014621503426445100237760ustar00rootroot00000000000000CREATE USER user_name FOR LOGIN login_name; CREATE USER user_name FROM LOGIN login_name; CREATE USER [TestUser]; CREATE USER Barry WITH PASSWORD = 'sdjklalie8rew8337!$d'; CREATE USER [TestUser] FROM EXTERNAL PROVIDER; CREATE USER RIGHTSHOLDER WITHOUT LOGIN; CREATE USER RIGHTSHOLDER WITHOUT LOGIN WITH DEFAULT_SCHEMA = Other; CREATE USER [Barry] WITH ALLOW_ENCRYPTED_VALUE_MODIFICATIONS = ON; CREATE USER Wanida FOR LOGIN WanidaBenshoof WITH DEFAULT_SCHEMA = Marketing; CREATE USER Wanida FOR LOGIN WanidaSmith WITH DEFAULT_SCHEMA = Tech, ALLOW_ENCRYPTED_VALUE_MODIFICATIONS = ON; CREATE USER JinghaoLiu FOR CERTIFICATE CarnationProduction50; CREATE USER JinghaoLiu FOR ASYMMETRIC KEY CarnationProduction; CREATE USER Barry WITH PASSWORD = 'sdjklalie8rew8337!$d', SID = 0x241C11948AEEB749B0D22646DB1A19F2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_user.yml000066400000000000000000000116141503426445100240000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dec9cf9f72ec3b863a3155b45a150641d1f0dd342a7aece9c66dd6824d3d9c6b file: batch: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_name - keyword: FOR - keyword: LOGIN - object_reference: naked_identifier: login_name - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_name - keyword: FROM - keyword: LOGIN - object_reference: naked_identifier: login_name - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: '[TestUser]' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: Barry - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sdjklalie8rew8337!$d'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: '[TestUser]' - keyword: FROM - keyword: EXTERNAL - keyword: PROVIDER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: RIGHTSHOLDER - keyword: WITHOUT - keyword: LOGIN - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: RIGHTSHOLDER - keyword: WITHOUT - keyword: LOGIN - keyword: WITH - keyword: DEFAULT_SCHEMA - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Other - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: '[Barry]' - keyword: WITH - keyword: ALLOW_ENCRYPTED_VALUE_MODIFICATIONS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: Wanida - keyword: FOR - keyword: LOGIN - object_reference: naked_identifier: WanidaBenshoof - keyword: WITH - keyword: DEFAULT_SCHEMA - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Marketing - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: Wanida - keyword: FOR - keyword: LOGIN - object_reference: naked_identifier: WanidaSmith - keyword: WITH - keyword: DEFAULT_SCHEMA - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Tech - comma: ',' - keyword: ALLOW_ENCRYPTED_VALUE_MODIFICATIONS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: JinghaoLiu - keyword: FOR - keyword: CERTIFICATE - object_reference: naked_identifier: CarnationProduction50 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: JinghaoLiu - keyword: FOR - keyword: ASYMMETRIC - keyword: KEY - object_reference: naked_identifier: CarnationProduction - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: Barry - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sdjklalie8rew8337!$d'" - comma: ',' - keyword: SID - 
comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0x241C11948AEEB749B0D22646DB1A19F2' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view.sql000066400000000000000000000030401503426445100237640ustar00rootroot00000000000000CREATE OR ALTER VIEW Sales.SalesPersonPerform AS SELECT TOP (100) SalesPersonID, SUM(TotalDue) AS TotalSales FROM Sales.SalesOrderHeader WHERE OrderDate > CONVERT(DATETIME, '20001231', 101) GROUP BY SalesPersonID; CREATE OR ALTER VIEW Sales.SalesPersonPerform AS SELECT TOP (100) SalesPersonID, SUM(TotalDue) AS TotalSales FROM Sales.SalesOrderHeader WHERE OrderDate > CONVERT(DATETIME, '20001231', 101) GROUP BY SalesPersonID; CREATE VIEW Purchasing.PurchaseOrderReject WITH SCHEMABINDING AS SELECT PurchaseOrderID, ReceivedQty, RejectedQty, RejectedQty / ReceivedQty AS RejectRatio, DueDate FROM Purchasing.PurchaseOrderDetail WHERE RejectedQty / ReceivedQty > 0 AND DueDate > CONVERT(DATETIME,'20010630',101) ; CREATE VIEW dbo.SeattleOnly AS SELECT p.LastName, p.FirstName, e.JobTitle, a.City, sp.StateProvinceCode FROM HumanResources.Employee e INNER JOIN Person.Person p ON p.BusinessEntityID = e.BusinessEntityID INNER JOIN Person.BusinessEntityAddress bea ON bea.BusinessEntityID = e.BusinessEntityID INNER JOIN Person.Address a ON a.AddressID = bea.AddressID INNER JOIN Person.StateProvince sp ON sp.StateProvinceID = a.StateProvinceID WHERE a.City = 'Seattle' WITH CHECK OPTION ; CREATE VIEW dbo.all_supplier_view WITH SCHEMABINDING AS SELECT supplyID, supplier FROM dbo.SUPPLY1 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY2 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY3 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY4; create view vw_view with schemabinding, view_metadata as select A.ID from A sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view.yml000066400000000000000000000426361503426445100240040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 48977d26c7882b0c980d91d20a2bb803892e87ddf2bcd0b2e78dd5d73c69fee3 file: batch: - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonPerform - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: TotalDue end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: TotalSales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: column_reference: naked_identifier: OrderDate comparison_operator: raw_comparison_operator: '>' function: function_name: keyword: CONVERT function_contents: bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20001231'" - comma: ',' - numeric_literal: '101' - end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonPerform - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: TotalDue end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: TotalSales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: column_reference: naked_identifier: OrderDate comparison_operator: raw_comparison_operator: '>' function: function_name: keyword: CONVERT function_contents: bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20001231'" - comma: ',' - numeric_literal: '101' - end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderReject - keyword: WITH - keyword: SCHEMABINDING - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: PurchaseOrderID - comma: ',' - select_clause_element: column_reference: naked_identifier: ReceivedQty - comma: ',' - select_clause_element: column_reference: naked_identifier: RejectedQty - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: RejectedQty - binary_operator: / - column_reference: naked_identifier: ReceivedQty alias_expression: alias_operator: keyword: AS naked_identifier: RejectRatio - comma: ',' - select_clause_element: column_reference: naked_identifier: DueDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . 
- naked_identifier: PurchaseOrderDetail where_clause: keyword: WHERE expression: - column_reference: naked_identifier: RejectedQty - binary_operator: / - column_reference: naked_identifier: ReceivedQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - binary_operator: AND - column_reference: naked_identifier: DueDate - comparison_operator: raw_comparison_operator: '>' - function: function_name: keyword: CONVERT function_contents: bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20010630'" - comma: ',' - numeric_literal: '101' - end_bracket: ) statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SeattleOnly - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: City - comma: ',' - select_clause_element: column_reference: - naked_identifier: sp - dot: . - naked_identifier: StateProvinceCode from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: naked_identifier: e - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Person alias_expression: naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: p - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: BusinessEntityAddress alias_expression: naked_identifier: bea - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: bea - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address alias_expression: naked_identifier: a - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: AddressID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bea - dot: . - naked_identifier: AddressID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: StateProvince alias_expression: naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sp - dot: . 
- naked_identifier: StateProvinceID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: StateProvinceID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: City comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Seattle'" - keyword: WITH - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: all_supplier_view - keyword: WITH - keyword: SCHEMABINDING - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY3 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY4 statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - object_reference: naked_identifier: vw_view - keyword: with - keyword: schemabinding - comma: ',' - keyword: view_metadata - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: A - dot: . - naked_identifier: ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_columns.sql000066400000000000000000000001331503426445100265570ustar00rootroot00000000000000CREATE VIEW my_view ( col1, col2 ) AS SELECT col1, col2 FROM source_table; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_columns.yml000066400000000000000000000025621503426445100265710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
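#
# For a quick, human-readable view of the parse tree that one of these
# fixtures encodes, the SQLFluff CLI can parse the companion .sql file
# directly (shown here for the adjacent create_view_with_columns fixture):
#
#   sqlfluff parse --dialect tsql test/fixtures/dialects/tsql/create_view_with_columns.sql
#
# The CLI prints the same segments as an indented text tree rather than YAML.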
_hash: 2db271fceda6ccd8c8d621d9b6d8774a19de4d4c85bd20d81494d816ed3249f4 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: my_view - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: col1 - comma: ',' - index_column_definition: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_cte.sql000066400000000000000000000007011503426445100256530ustar00rootroot00000000000000CREATE VIEW vwCTE AS --Creates an infinite loop WITH cte (EmployeeID, ManagerID, Title) AS ( SELECT EmployeeID, ManagerID, Title FROM HumanResources.Employee WHERE ManagerID IS NOT NULL UNION ALL SELECT cte.EmployeeID, cte.ManagerID, cte.Title FROM cte JOIN HumanResources.Employee AS e ON cte.ManagerID = e.EmployeeID ) -- Notice the MAXRECURSION option is removed SELECT EmployeeID, ManagerID, Title FROM cte GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_cte.yml000066400000000000000000000126301503426445100256610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 51599c477f3a608ab3b9d7d7bc3ba93834953c0a11ae4d82dbbc141f422c3793 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: vwCTE - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: EmployeeID - comma: ',' - naked_identifier: ManagerID - comma: ',' - naked_identifier: Title end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ManagerID - keyword: IS - keyword: NOT - null_literal: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . 
- naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: alias_operator: keyword: AS naked_identifier: e join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cte - dot: . - naked_identifier: ManagerID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: EmployeeID end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_pivot.sql000066400000000000000000000006071503426445100262460ustar00rootroot00000000000000CREATE OR ALTER VIEW DEST.V_HOSPITAL_ADMISSIONS_OVERTIME_BYAGEGROUP AS -- Pivot table with one row and five columns SELECT 'AverageCost' AS Cost_Sorted_By_Production_Days, [0], [1], [2], [3], [4] FROM ( SELECT DaysToManufacture, StandardCost FROM Production.Product ) AS SourceTable PIVOT ( AVG(StandardCost) FOR DaysToManufacture IN ([0], [1], [2], [3], [4]) ) AS PivotTable; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_pivot.yml000066400000000000000000000106151503426445100262500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f26c258d9b159c8497649ba7c4917b7290beacdd3bc036c48779b3faa80d992c file: batch: statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: DEST - dot: . - naked_identifier: V_HOSPITAL_ADMISSIONS_OVERTIME_BYAGEGROUP - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'AverageCost'" alias_expression: alias_operator: keyword: AS naked_identifier: Cost_Sorted_By_Production_Days - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[0]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[4]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DaysToManufacture - comma: ',' - select_clause_element: column_reference: naked_identifier: StandardCost from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: Product end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: SourceTable from_pivot_expression: - keyword: PIVOT - bracketed: - start_bracket: ( - function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: StandardCost end_bracket: ) - keyword: FOR - column_reference: naked_identifier: DaysToManufacture - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[0]' - comma: ',' - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - comma: ',' - pivot_column_reference: quoted_identifier: '[4]' - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: PivotTable statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_set_statements.sql000066400000000000000000000015211503426445100301430ustar00rootroot00000000000000SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT TOP 1 DATE_OF_REPORT FROM BASE_CTE; GO CREATE OR ALTER VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT TOP 1 DATE_OF_REPORT FROM BASE_CTE ORDER BY DATE_OF_REPORT; GO ALTER VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT DATE_OF_REPORT ,NEW_DATE_OF_REPORT_UNIX AS NEW_DATE_UNIX ,OLD_DATE_OF_REPORT_UNIX AS OLD_DATE_UNIX ,T1.VRCODE ,CASE WHEN OLD_VALUE IS NULL THEN 0 ELSE OLD_VALUE END AS OLD_VALUE ,CASE WHEN [DIFFERENCE] IS NULL THEN 0 ELSE [DIFFERENCE] END AS [DIFFERENCE] FROM BASE_CTE T1 LEFT JOIN LAST_DATE_OF_REPORT T2 ON T1.[VRCODE] = T2.[VRCODE] WHERE DATE_OF_REPORT = LAST_DATE_OF_REPORT; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_set_statements.yml000066400000000000000000000162201503426445100301470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b6b6764cc3073636b3fd1cc45be9aae79045f732e7cf74c2d0590c663ece8748 file: - batch: statement: set_segment: - keyword: SET - keyword: ANSI_NULLS - keyword: 'ON' - go_statement: keyword: GO - batch: statement: set_segment: - keyword: SET - keyword: QUOTED_IDENTIFIER - keyword: 'ON' - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . - quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . 
- quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DATE_OF_REPORT statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: ALTER - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . - quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT - comma: ',' - select_clause_element: column_reference: naked_identifier: NEW_DATE_OF_REPORT_UNIX alias_expression: alias_operator: keyword: AS naked_identifier: NEW_DATE_UNIX - comma: ',' - select_clause_element: column_reference: naked_identifier: OLD_DATE_OF_REPORT_UNIX alias_expression: alias_operator: keyword: AS naked_identifier: OLD_DATE_UNIX - comma: ',' - select_clause_element: column_reference: - naked_identifier: T1 - dot: . - naked_identifier: VRCODE - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: OLD_VALUE keyword: IS null_literal: 'NULL' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: naked_identifier: OLD_VALUE - keyword: END alias_expression: alias_operator: keyword: AS naked_identifier: OLD_VALUE - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: quoted_identifier: '[DIFFERENCE]' keyword: IS null_literal: 'NULL' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: quoted_identifier: '[DIFFERENCE]' - keyword: END alias_expression: alias_operator: keyword: AS quoted_identifier: '[DIFFERENCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE alias_expression: naked_identifier: T1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: LAST_DATE_OF_REPORT alias_expression: naked_identifier: T2 - join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: T1 dot: . quoted_identifier: '[VRCODE]' - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: T2 dot: . quoted_identifier: '[VRCODE]' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: DATE_OF_REPORT - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: LAST_DATE_OF_REPORT statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_unpivot.sql000066400000000000000000000003631503426445100266100ustar00rootroot00000000000000CREATE VIEW UnpivotView AS -- Unpivot the table. 
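-- Illustrative sketch with hypothetical data (comments only, so the generated
-- parse tree in the companion .yml file is unaffected): given a pvt row such as
-- (VendorID = 1, Emp1 = 4, Emp2 = 3, Emp3 = 5, Emp4 = 4, Emp5 = 4), the UNPIVOT
-- below rotates the Emp1..Emp5 columns into up to five
-- (VendorID, Employee, Orders) rows, e.g. (1, 'Emp1', 4) and (1, 'Emp2', 3);
-- rows whose unpivoted value is NULL are dropped.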
SELECT VendorID, Employee, Orders FROM (SELECT VendorID, Emp1, Emp2, Emp3, Emp4, Emp5 FROM pvt) p UNPIVOT (Orders FOR Employee IN (Emp1, Emp2, Emp3, Emp4, Emp5) ) AS unpvt; sqlfluff-3.4.2/test/fixtures/dialects/tsql/create_view_with_unpivot.yml000066400000000000000000000076551503426445100266250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ff9accf3326e5c3ff779931121cfa95f74c8d8e9ad0ffd8ecc256fc44f14798 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: UnpivotView - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: VendorID - comma: ',' - select_clause_element: column_reference: naked_identifier: Employee - comma: ',' - select_clause_element: column_reference: naked_identifier: Orders from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: VendorID - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp1 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp2 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp3 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp4 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp5 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pvt end_bracket: ) alias_expression: naked_identifier: p from_pivot_expression: - keyword: UNPIVOT - bracketed: - start_bracket: ( - column_reference: naked_identifier: Orders - keyword: FOR - column_reference: naked_identifier: Employee - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: naked_identifier: Emp1 - comma: ',' - pivot_column_reference: naked_identifier: Emp2 - comma: ',' - pivot_column_reference: naked_identifier: Emp3 - comma: ',' - pivot_column_reference: naked_identifier: Emp4 - comma: ',' - pivot_column_reference: naked_identifier: Emp5 - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: unpvt statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/cte_s.sql000066400000000000000000000004011503426445100225620ustar00rootroot00000000000000WITH Sales_CTE (SalesPersonID, NumberOfOrders) AS ( SELECT SalesPersonID, COUNT(*) FROM Sales.SalesOrderHeader WHERE SalesPersonID IS NOT NULL GROUP BY SalesPersonID ) SELECT AVG(NumberOfOrders) AS "Average Sales Per Person" FROM Sales_CTE; sqlfluff-3.4.2/test/fixtures/dialects/tsql/cte_s.yml000066400000000000000000000061771503426445100226040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
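# A typical edit-and-regenerate loop for a fixture like this one might look as
# follows (illustrative; the pytest selector is an assumption, not a documented
# invocation):
#   python test/generate_parse_fixture_yml.py
#   python -m pytest test/dialects -k tsql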
_hash: 4967c7c34e0466738ec70c0e908a88adea4bd6c8eec1bb942bc30d239e095115 file: batch: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Sales_CTE cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: SalesPersonID - comma: ',' - naked_identifier: NumberOfOrders end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: - column_reference: naked_identifier: SalesPersonID - keyword: IS - keyword: NOT - null_literal: 'NULL' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: NumberOfOrders end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '"Average Sales Per Person"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Sales_CTE statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/cursor.sql000066400000000000000000000004151503426445100230070ustar00rootroot00000000000000DECLARE pointy CURSOR LOCAL FORWARD_ONLY READ_ONLY FOR SELECT column_a, column_b FROM some_table WHERE column_a IS NOT NULL ORDER BY column_b OPEN pointy; FETCH FIRST FROM @pointy into @result; FETCH NEXT FROM GLOBAL pointy; CLOSE GLOBAL pointy; DEALLOCATE pointy; sqlfluff-3.4.2/test/fixtures/dialects/tsql/cursor.yml000066400000000000000000000045441503426445100230200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
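# Worth noting in the tree below: FETCH ... FROM @pointy produces a `parameter`
# node for the cursor variable, while FETCH NEXT FROM GLOBAL pointy produces a
# plain `naked_identifier`, so this single fixture covers both
# cursor-addressing forms.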
_hash: fa41197cd43f3e5dafdf93790658245f09fe3af641711997f525e23cf60b8b00 file: batch: - statement: declare_segment: - keyword: DECLARE - naked_identifier: pointy - keyword: CURSOR - keyword: LOCAL - keyword: FORWARD_ONLY - keyword: READ_ONLY - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column_a - comma: ',' - select_clause_element: column_reference: naked_identifier: column_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: column_a - keyword: IS - keyword: NOT - null_literal: 'NULL' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: column_b - statement: open_cursor_statement: keyword: OPEN naked_identifier: pointy - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: FIRST - keyword: FROM - parameter: '@pointy' - keyword: into - parameter: '@result' - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: NEXT - keyword: FROM - keyword: GLOBAL - naked_identifier: pointy - statement_terminator: ; - statement: close_cursor_statement: - keyword: CLOSE - keyword: GLOBAL - naked_identifier: pointy - statement_terminator: ; - statement: deallocate_cursor_statement: keyword: DEALLOCATE naked_identifier: pointy - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/date_functions.sql000066400000000000000000000006041503426445100244770ustar00rootroot00000000000000select convert( date, dateadd( month, datediff( month, 0, t.valid_from_date ), 0 ) ) as valid_from_date from t as t where t.activity_month >= convert( date, dateadd( yy, datediff(yy, 0, getdate() ) - 1, 0) ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/date_functions.yml000066400000000000000000000116741503426445100245120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ccdd4fc819582a748c5513e359b50dc00c38f2478fb31ebf838e4b491e1c8d1 file: batch: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: data_type_identifier: date comma: ',' expression: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: valid_from_date - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: valid_from_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t alias_expression: alias_operator: keyword: as naked_identifier: t where_clause: keyword: where expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: activity_month comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' function: function_name: keyword: convert function_contents: bracketed: start_bracket: ( data_type: data_type_identifier: date comma: ',' expression: function: function_name: function_name_identifier: dateadd function_contents: bracketed: - start_bracket: ( - date_part: yy - comma: ',' - expression: function: function_name: function_name_identifier: datediff function_contents: bracketed: - start_bracket: ( - date_part: yy - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: function: function_name: function_name_identifier: getdate function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) binary_operator: '-' numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/datepart.sql000066400000000000000000000000771503426445100233020ustar00rootroot00000000000000SELECT DATEPART(DW, my_table.date) AS dayofweek FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/tsql/datepart.yml000066400000000000000000000025551503426445100233070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2bccc1cabb2a21e0201760dbd80ab5246b79711c1109beda2cf372ad31d7fd6d file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEPART function_contents: bracketed: start_bracket: ( date_part: DW comma: ',' expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: date end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: dayofweek from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/datetrunc.sql000066400000000000000000000002511503426445100234610ustar00rootroot00000000000000SELECT DATETRUNC(YEAR, my_table.date) AS [beginningOfYear] , DATETRUNC(MONTH, my_table.date) AS [FirstOfMonth] , DATETRUNC(DAY, my_table.date) AS [Today] FROM my_table; sqlfluff-3.4.2/test/fixtures/dialects/tsql/datetrunc.yml000066400000000000000000000052761503426445100234770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8fb3da5a31d076c927319bd6c956671f96f982e41ea90e18e98312eb6eeaa63c file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATETRUNC function_contents: bracketed: start_bracket: ( date_part: YEAR comma: ',' expression: column_reference: - naked_identifier: my_table - dot: . 
- naked_identifier: date end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[beginningOfYear]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATETRUNC function_contents: bracketed: start_bracket: ( date_part: MONTH comma: ',' expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: date end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[FirstOfMonth]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATETRUNC function_contents: bracketed: start_bracket: ( date_part: DAY comma: ',' expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: date end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[Today]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/declare_table_type.sql000066400000000000000000000002351503426445100253010ustar00rootroot00000000000000DECLARE @MyTableVar TABLE( EmpID INT NOT NULL, OldVacationHours INT, NewVacationHours INT, ModifiedDate DATETIME, PRIMARY KEY (EmpID) ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/declare_table_type.yml000066400000000000000000000031661503426445100253110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 82f903b527bab32939d1faabb97886364d5cc3ad173215ad572f30e922c54460 file: batch: statement: declare_segment: - keyword: DECLARE - parameter: '@MyTableVar' - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: EmpID data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: OldVacationHours data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: NewVacationHours data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: ModifiedDate data_type: data_type_identifier: DATETIME - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: EmpID end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/declare_with_following_statements.sql000066400000000000000000000006761503426445100304640ustar00rootroot00000000000000CREATE PROC Reporting.DeclareProblem AS BEGIN DECLARE @startdate AS DATE; DECLARE @DateNow DATE = GETDATE(); DECLARE @DateStart DATETIME2 = GETDATE() ,@DateEnd DATETIME2 = GETDATE() DECLARE @EOMONTH DATE = ('1900-01-01') DECLARE @USER DATE = SYSTEM_USER; DECLARE @CURRENTTIME DATE = CURRENT_TIMESTAMP; SET @EOMONTH = ('2000-01-01') SET @EOMONTH = ('2001-01-01'); IF OBJECT_ID('tempdb..#UP') IS NOT NULL DROP TABLE #UP; END sqlfluff-3.4.2/test/fixtures/dialects/tsql/declare_with_following_statements.yml000066400000000000000000000132371503426445100304630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 60f66371567e448c02d8ea3039d4cf4fd9ab619c766fc64c6d8369bf750cb94d file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: DeclareProblem - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: - keyword: DECLARE - parameter: '@startdate' - keyword: AS - data_type: data_type_identifier: DATE - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@DateNow' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@DateStart' - data_type: data_type_identifier: DATETIME2 - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: '@DateEnd' - data_type: data_type_identifier: DATETIME2 - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - statement: declare_segment: keyword: DECLARE parameter: '@EOMONTH' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'1900-01-01'" end_bracket: ) - statement: declare_segment: keyword: DECLARE parameter: '@USER' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bare_function: SYSTEM_USER statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@CURRENTTIME' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_TIMESTAMP statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@EOMONTH' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'2000-01-01'" end_bracket: ) - statement: set_segment: keyword: SET parameter: '@EOMONTH' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'2001-01-01'" end_bracket: ) statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: - function: function_name: function_name_identifier: OBJECT_ID function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'tempdb..#UP'" end_bracket: ) - keyword: IS - keyword: NOT - null_literal: 'NULL' statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#UP' - statement_terminator: ; - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/delete.sql000066400000000000000000000054471503426445100227460ustar00rootroot00000000000000DELETE FROM Sales.SalesPersonQuotaHistory; GO DELETE FROM Production.ProductCostHistory WHERE StandardCost > 1000.00; GO DELETE Production.ProductCostHistory WHERE StandardCost BETWEEN 12.00 AND 14.00 AND EndDate IS NULL; PRINT 'Number of rows 
deleted is ' + CAST(@@ROWCOUNT as char(3)); GO DECLARE complex_cursor CURSOR FOR SELECT a.BusinessEntityID FROM HumanResources.EmployeePayHistory AS a WHERE RateChangeDate <> (SELECT MAX(RateChangeDate) FROM HumanResources.EmployeePayHistory AS b WHERE a.BusinessEntityID = b.BusinessEntityID) ; OPEN complex_cursor; FETCH FROM complex_cursor; DELETE FROM HumanResources.EmployeePayHistory WHERE CURRENT OF complex_cursor; CLOSE complex_cursor; DEALLOCATE complex_cursor; GO -- SQL-2003 Standard subquery DELETE FROM Sales.SalesPersonQuotaHistory WHERE BusinessEntityID IN (SELECT BusinessEntityID FROM Sales.SalesPerson WHERE SalesYTD > 2500000.00); GO -- Transact-SQL extension DELETE FROM Sales.SalesPersonQuotaHistory FROM Sales.SalesPersonQuotaHistory AS spqh INNER JOIN Sales.SalesPerson AS sp ON spqh.BusinessEntityID = sp.BusinessEntityID WHERE sp.SalesYTD > 2500000.00; GO -- No need to mention target table more than once. DELETE spqh FROM Sales.SalesPersonQuotaHistory AS spqh INNER JOIN Sales.SalesPerson AS sp ON spqh.BusinessEntityID = sp.BusinessEntityID WHERE sp.SalesYTD > 2500000.00; DELETE TOP (20) FROM Purchasing.PurchaseOrderDetail WHERE DueDate < '20020701'; GO DELETE FROM Purchasing.PurchaseOrderDetail WHERE PurchaseOrderDetailID IN (SELECT TOP 10 PurchaseOrderDetailID FROM Purchasing.PurchaseOrderDetail ORDER BY DueDate ASC); GO -- Specify the remote data source using a four-part name -- in the form linked_server.catalog.schema.object. DELETE MyLinkServer.AdventureWorks2012.HumanResources.Department WHERE DepartmentID > 16; GO DELETE OPENQUERY (MyLinkServer, 'SELECT Name, GroupName FROM AdventureWorks2012.HumanResources.Department WHERE DepartmentID = 18'); GO DELETE OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', Department) GO DELETE FROM OPENDATASOURCE('SQLNCLI', 'Data Source= ; Integrated Security=SSPI') .AdventureWorks2012.HumanResources.Department WHERE DepartmentID = 17; DELETE Sales.ShoppingCartItem OUTPUT DELETED.* WHERE ShoppingCartID = 20621; DECLARE @MyTableVar table ( ProductID int NOT NULL, ProductName nvarchar(50)NOT NULL, ProductModelID int NOT NULL, PhotoID int NOT NULL); DELETE Production.ProductProductPhoto OUTPUT DELETED.ProductID, p.Name, p.ProductModelID, DELETED.ProductPhotoID INTO @MyTableVar FROM Production.ProductProductPhoto AS ph JOIN Production.Product as p ON ph.ProductID = p.ProductID WHERE p.ProductModelID BETWEEN 120 and 130; sqlfluff-3.4.2/test/fixtures/dialects/tsql/delete.yml000066400000000000000000000514641503426445100227500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2432afab98214e095edba7cfb916f170a6681733d13b00b8c75639bf3228c300 file: - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductCostHistory - where_clause: keyword: WHERE expression: column_reference: naked_identifier: StandardCost comparison_operator: raw_comparison_operator: '>' numeric_literal: '1000.00' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductCostHistory where_clause: keyword: WHERE expression: - column_reference: naked_identifier: StandardCost - keyword: BETWEEN - numeric_literal: '12.00' - keyword: AND - numeric_literal: '14.00' - binary_operator: AND - column_reference: naked_identifier: EndDate - keyword: IS - null_literal: 'NULL' statement_terminator: ; - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Number of rows deleted is '" binary_operator: + function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: system_variable: '@@ROWCOUNT' keyword: as data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: - statement: declare_segment: - keyword: DECLARE - naked_identifier: complex_cursor - keyword: CURSOR - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: BusinessEntityID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: alias_operator: keyword: AS naked_identifier: a where_clause: keyword: WHERE expression: column_reference: naked_identifier: RateChangeDate comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: RateChangeDate end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: alias_operator: keyword: AS naked_identifier: b where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: BusinessEntityID end_bracket: ) statement_terminator: ; - statement: open_cursor_statement: keyword: OPEN naked_identifier: complex_cursor - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: FROM - naked_identifier: complex_cursor - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: EmployeePayHistory - keyword: WHERE - keyword: CURRENT - keyword: OF - naked_identifier: complex_cursor - statement_terminator: ; - statement: close_cursor_statement: keyword: CLOSE naked_identifier: complex_cursor - statement_terminator: ; - statement: deallocate_cursor_statement: keyword: DEALLOCATE naked_identifier: complex_cursor - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory - where_clause: keyword: WHERE expression: column_reference: naked_identifier: BusinessEntityID keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: BusinessEntityID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson where_clause: keyword: WHERE expression: column_reference: naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory alias_expression: alias_operator: keyword: AS naked_identifier: spqh join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson alias_expression: alias_operator: keyword: AS naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: spqh - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sp - dot: . - naked_identifier: BusinessEntityID - where_clause: keyword: WHERE expression: column_reference: - naked_identifier: sp - dot: . - naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: delete_statement: keyword: DELETE table_reference: naked_identifier: spqh from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory alias_expression: alias_operator: keyword: AS naked_identifier: spqh join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson alias_expression: alias_operator: keyword: AS naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: spqh - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sp - dot: . - naked_identifier: BusinessEntityID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: sp - dot: . 
- naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) - keyword: FROM - table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail - where_clause: keyword: WHERE expression: column_reference: naked_identifier: DueDate comparison_operator: raw_comparison_operator: < quoted_literal: "'20020701'" - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail - where_clause: keyword: WHERE expression: column_reference: naked_identifier: PurchaseOrderDetailID keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '10' select_clause_element: column_reference: naked_identifier: PurchaseOrderDetailID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DueDate - keyword: ASC end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: MyLinkServer - dot: . - naked_identifier: AdventureWorks2012 - dot: . - naked_identifier: HumanResources - dot: . - naked_identifier: Department where_clause: keyword: WHERE expression: column_reference: naked_identifier: DepartmentID comparison_operator: raw_comparison_operator: '>' numeric_literal: '16' statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: OPENQUERY - bracketed: start_bracket: ( naked_identifier: MyLinkServer comma: ',' quoted_literal: "'SELECT Name, GroupName\nFROM AdventureWorks2012.HumanResources.Department\n\ WHERE DepartmentID = 18'" end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: keyword: DELETE openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: naked_identifier: Department - end_bracket: ) - go_statement: keyword: GO - batch: - statement: delete_statement: - keyword: DELETE - keyword: FROM - keyword: OPENDATASOURCE - bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Data Source= ; Integrated Security=SSPI'" - end_bracket: ) - dot: . - table_reference: - naked_identifier: AdventureWorks2012 - dot: . - naked_identifier: HumanResources - dot: . - naked_identifier: Department - where_clause: keyword: WHERE expression: column_reference: naked_identifier: DepartmentID comparison_operator: raw_comparison_operator: '=' numeric_literal: '17' - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Sales - dot: . - naked_identifier: ShoppingCartItem output_clause: keyword: OUTPUT wildcard_expression: wildcard_identifier: naked_identifier: DELETED dot: . 
star: '*' where_clause: keyword: WHERE expression: column_reference: naked_identifier: ShoppingCartID comparison_operator: raw_comparison_operator: '=' numeric_literal: '20621' statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@MyTableVar' - keyword: table - bracketed: - start_bracket: ( - column_definition: naked_identifier: ProductID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ProductName data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ProductModelID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PhotoID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductProductPhoto output_clause: - keyword: OUTPUT - column_reference: - naked_identifier: DELETED - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: p - dot: . - naked_identifier: Name - comma: ',' - column_reference: - naked_identifier: p - dot: . - naked_identifier: ProductModelID - comma: ',' - column_reference: - naked_identifier: DELETED - dot: . - naked_identifier: ProductPhotoID - keyword: INTO - table_reference: parameter: '@MyTableVar' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductProductPhoto alias_expression: alias_operator: keyword: AS naked_identifier: ph join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: Product alias_expression: alias_operator: keyword: as naked_identifier: p join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: ph - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: ProductID where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: p - dot: . 
- naked_identifier: ProductModelID - keyword: BETWEEN - numeric_literal: '120' - keyword: and - numeric_literal: '130' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/delete_azure_synapse_analytics.sql000066400000000000000000000015071503426445100277560ustar00rootroot00000000000000DELETE dbo.Table2 FROM dbo.Table2 INNER JOIN dbo.Table1 ON (dbo.Table2.ColA = dbo.Table1.ColA) WHERE dboTable2.ColA = 1; DELETE FROM dodos WITH(NOLOCK) OUTPUT age INTO ages DELETE FROM Table1; DELETE FROM Table1 WHERE StandardCost > 1000.00; DELETE FROM Table1 OPTION ( LABEL = N'label1' ); DELETE FROM dbo.FactInternetSales WHERE ProductKey IN ( SELECT T1.ProductKey FROM dbo.DimProduct T1 JOIN dbo.DimProductSubcategory T2 ON T1.ProductSubcategoryKey = T2.ProductSubcategoryKey WHERE T2.EnglishProductSubcategoryName = 'Road Bikes' ) OPTION ( LABEL = N'CustomJoin', HASH JOIN ) ; DELETE tableA WHERE EXISTS ( SELECT TOP 1 1 FROM tableB tb WHERE tb.col1 = tableA.col1 ) DELETE dbo.Table2 FROM dbo.Table2 INNER JOIN dbo.Table1 ON (dbo.Table2.ColA = dbo.Table1.ColA) WHERE dboTable2.ColA = 1; sqlfluff-3.4.2/test/fixtures/dialects/tsql/delete_azure_synapse_analytics.yml000066400000000000000000000241341503426445100277610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c0a9ed1e7a6d9e29a2859dda74b848edb7c0aef68dbfea288ec41f07be04aeb7 file: batch: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 - dot: . - naked_identifier: ColA - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - dot: . - naked_identifier: ColA end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: dboTable2 - dot: . 
- naked_identifier: ColA comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: dodos - post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: NOLOCK end_bracket: ) - output_clause: - keyword: OUTPUT - column_reference: naked_identifier: age - keyword: INTO - table_reference: naked_identifier: ages - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: StandardCost comparison_operator: raw_comparison_operator: '>' numeric_literal: '1000.00' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: LABEL comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'label1'" end_bracket: ) - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FactInternetSales - where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: T1 - dot: . - naked_identifier: ProductKey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DimProduct alias_expression: naked_identifier: T1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DimProductSubcategory alias_expression: naked_identifier: T2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T1 - dot: . - naked_identifier: ProductSubcategoryKey - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: T2 - dot: . - naked_identifier: ProductSubcategoryKey where_clause: keyword: WHERE expression: column_reference: - naked_identifier: T2 - dot: . - naked_identifier: EnglishProductSubcategoryName comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Road Bikes'" end_bracket: ) - option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: LABEL comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'CustomJoin'" - comma: ',' - query_hint_segment: - keyword: HASH - keyword: JOIN - end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: naked_identifier: tableA where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tableB alias_expression: naked_identifier: tb where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: tb - dot: . 
- naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tableA - dot: . - naked_identifier: col1 end_bracket: ) - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 - dot: . - naked_identifier: ColA - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - dot: . - naked_identifier: ColA end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: dboTable2 - dot: . - naked_identifier: ColA comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_external_table.sql000066400000000000000000000003711503426445100255100ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-external-table-transact-sql?view=sql-server-ver16#examples */ DROP EXTERNAL TABLE SalesPerson; DROP EXTERNAL TABLE dbo.SalesPerson; DROP EXTERNAL TABLE EasternDivision.dbo.SalesPerson; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_external_table.yml000066400000000000000000000022721503426445100255140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d546bbd3f07452542aa466df7d6a782e74f556f200487bafe3bf1a8c3af2dc3 file: batch: - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: SalesPerson - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SalesPerson - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: EasternDivision - dot: . - naked_identifier: dbo - dot: . - naked_identifier: SalesPerson - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_function.sql000066400000000000000000000001141503426445100243370ustar00rootroot00000000000000DROP FUNCTION Sales.fn_SalesByStore; DROP FUNCTION IF EXISTS sales, sales2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_function.yml000066400000000000000000000017171503426445100243530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 53f098315280e3585eb8fd5446073bad29baa39a2b9c800df558b87c597cc397 file: batch: - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: naked_identifier: Sales dot: . function_name_identifier: fn_SalesByStore - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: sales - comma: ',' - function_name: function_name_identifier: sales2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_procedure.sql000066400000000000000000000002261503426445100245060ustar00rootroot00000000000000DROP PROCEDURE procedure_name; DROP PROC procedure_name; DROP PROCEDURE IF EXISTS procedure_name; DROP PROCEDURE procedure_name1, procedure_name2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_procedure.yml000066400000000000000000000024501503426445100245110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 59e007e67128ce2a44db60701bcfb4572bcd6224e924c6eca1c0c5910e1c3092 file: batch: - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROC - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - object_reference: naked_identifier: procedure_name1 - comma: ',' - object_reference: naked_identifier: procedure_name2 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_table.sql000066400000000000000000000003171503426445100236060ustar00rootroot00000000000000drop table some_table; drop table if exists some_table; drop table some_table restrict; drop table if exists some_table restrict; drop table some_table cascade; drop table if exists some_table cascade; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_table.yml000066400000000000000000000033151503426445100236110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ff08804605c6d9e21042415241ff0949254b85c9f87268efc3d28094f880ad3 file: batch: - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - keyword: restrict - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - keyword: restrict - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - keyword: cascade - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - keyword: cascade - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_user.sql000066400000000000000000000000651503426445100234750ustar00rootroot00000000000000drop user some_user; drop user if exists some_user; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_user.yml000066400000000000000000000014471503426445100235040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5595c4db5e8e74996759f138fd01eb37611cddb31ababccad98cda3094b05676 file: batch: - statement: drop_user_statement: - keyword: drop - keyword: user - role_reference: naked_identifier: some_user - statement_terminator: ; - statement: drop_user_statement: - keyword: drop - keyword: user - keyword: if - keyword: exists - role_reference: naked_identifier: some_user - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_view.sql000066400000000000000000000003031503426445100234640ustar00rootroot00000000000000drop view some_view; drop view if exists some_view; drop view some_view restrict; drop view if exists some_view restrict; drop view some_view cascade; drop view if exists some_view cascade; sqlfluff-3.4.2/test/fixtures/dialects/tsql/drop_view.yml000066400000000000000000000032731503426445100234770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f976367f46436f7644ecd237eb486bc3e16deb73403f9ff1886f6e683f190589 file: batch: - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - keyword: restrict - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - keyword: restrict - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - keyword: cascade - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - keyword: cascade - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/execute.sql000066400000000000000000000107611503426445100231410ustar00rootroot00000000000000EXEC [Reporting].[Load_CLL] -- Specifying a value only for one parameter (@p2). EXECUTE dbo.ProcTestDefaults @p2 = 'A'; -- Specifying a value for the first two parameters. EXECUTE dbo.ProcTestDefaults 68, 'B'; -- Specifying a value for all three parameters. EXECUTE dbo.ProcTestDefaults 68, 'C', 'House'; -- Using the DEFAULT keyword for the first parameter. EXECUTE dbo.ProcTestDefaults @p1 = DEFAULT, @p2 = 'D'; -- Specifying the parameters in an order different from the order defined in the procedure. EXECUTE dbo.ProcTestDefaults DEFAULT, @p3 = 'Local', @p2 = 'E'; -- Using the DEFAULT keyword for the first and third parameters. EXECUTE dbo.ProcTestDefaults DEFAULT, 'H', DEFAULT; EXECUTE dbo.ProcTestDefaults DEFAULT, 'I', @p3 = DEFAULT; EXECUTE sp_addextendedproperty @name = N'MS_Description', @value = 'my text description', @level0type = N'SCHEMA', @level0name = N'my_schema_name', @level1type = N'my_object_type', @level1name = N'my_object_name' -- Executing a stored procedure and capturing the RETURN value in a variable EXEC @pRes = dbo.ProcTestDefaults; EXEC @pRes = dbo.ProcTestDefaults @p1 = DEFAULT; EXECUTE @pRes = dbo.ProcTestDefaults; EXECUTE @pRes = dbo.ProcTestDefaults @p1 = DEFAULT; -- Executing statement from a variable DECLARE @statement nvarchar(max) = 'SELECT 1' EXEC (@statement); EXEC ('DROP TABLE BoardInventory.BoardInventoryFact_Stage;'); DECLARE @s1 AS varchar(10) = NULL; DECLARE @s2 varchar(10) = NULL; SET @s1 = 'select '; SET @s2 = '123'; EXECUTE (@s1 + @s2); EXEC ('select ' + '123'); -- Use EXECUTE with a character string EXECUTE ('USE AdventureWorks2022; SELECT BusinessEntityID, JobTitle FROM HumanResources.Employee;'); -- Use EXECUTE to pass a single parameter EXECUTE dbo.uspGetEmployeeManagers 6; -- The variable can be explicitly named in the execution: EXECUTE dbo.uspGetEmployeeManagers @EmployeeID = 6; -- Use EXECUTE to pass a parameter and capture the output EXECUTE dbo.uspGetEmployeeManagers @EmployeeID, @ManagerID OUTPUT; GO -- first statement in a batch or a sqlcmd script, EXECUTE isn't required. 
dbo.uspGetEmployeeManagers @EmployeeID = 6; GO -- Use multiple parameters DECLARE @CheckDate AS DATETIME = GETDATE(); EXECUTE dbo.uspGetWhereUsedProductID 819, @CheckDate; -- Use EXECUTE 'tsql_string' with a variable DECLARE @schemaname AS sysname; DECLARE @tablename AS sysname; EXECUTE ('ALTER INDEX ALL ON ' + @schemaname + '.' + @tablename + ' REBUILD;'); -- Use EXECUTE with a remote stored procedure DECLARE @retstat AS INT; EXECUTE @retstat = SQLSERVER1.AdventureWorks2022.dbo.uspGetEmployeeManagers @BusinessEntityID = 6; -- Use EXECUTE with a stored procedure variable DECLARE @proc_name AS VARCHAR (30) = 'sys.sp_who'; EXECUTE @proc_name; -- Using the DEFAULT keyword for the first parameter. EXECUTE dbo.ProcTestDefaults @p1 = DEFAULT, @p2 = 'D'; -- Using the DEFAULT keyword for the first and third parameters. EXECUTE dbo.ProcTestDefaults DEFAULT, 'H', DEFAULT; EXECUTE dbo.ProcTestDefaults DEFAULT, 'I', @p3 = DEFAULT; -- Use EXECUTE with AT linked_server_name EXECUTE ('CREATE TABLE AdventureWorks2022.dbo.SalesTbl (SalesID INT, SalesName VARCHAR(10)); ') AT SeattleSales; EXECUTE ('SELECT * FROM scott.emp WHERE MGR = ?', 7902) AT ORACLE; -- Use EXECUTE WITH RECOMPILE EXECUTE dbo.Proc_Test_Defaults @p2 = 'A' WITH RECOMPILE; -- Use EXECUTE with a user-defined function DECLARE @returnstatus AS NVARCHAR (15); EXECUTE @returnstatus = dbo.ufnGetSalesOrderStatusText @Status = 2; -- Use EXECUTE AS USER to switch context to another user EXECUTE ('CREATE TABLE Sales.SalesTable (SalesID INT, SalesName VARCHAR(10));') AS USER = 'User1'; -- Use EXECUTE to redefine a single result set EXECUTE uspGetEmployeeManagers 16 WITH RESULT SETS (( [Reporting Level] INT NOT NULL, [ID of Employee] INT NOT NULL, [Employee First Name] NVARCHAR (50) NOT NULL, [Employee Last Name] NVARCHAR (50) NOT NULL, [Employee ID of Manager] NVARCHAR (MAX) NOT NULL, [Manager First Name] NVARCHAR (50) NOT NULL, [Manager Last Name] NVARCHAR (50) NOT NULL )); -- Use EXECUTE to redefine a two result sets EXECUTE Production.ProductList '%tire%' WITH RESULT SETS ( -- first result set definition starts here (ProductID INT, [Name] NAME, ListPrice MONEY) -- comma separates result set definitions , -- second result set definition starts here ([Name] NAME, NumberOfOrders INT) ); -- Use EXECUTE with AT DATA_SOURCE data_source_name to query a remote SQL Server EXECUTE ( 'SELECT @@SERVERNAME' ) AT DATA_SOURCE my_sql_server; sqlfluff-3.4.2/test/fixtures/dialects/tsql/execute.yml000066400000000000000000000552471503426445100231530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e92edc5714c409b37dc9cb648090c17447331c402a710bdba3dccdfeb01cd52 file: - batch: - statement: execute_script_statement: keyword: EXEC object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[Load_CLL]' - statement: execute_script_statement: keyword: EXECUTE object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults parameter: '@p2' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'A'" statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ProcTestDefaults - expression: numeric_literal: '68' - comma: ',' - expression: quoted_literal: "'B'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - expression: numeric_literal: '68' - comma: ',' - expression: quoted_literal: "'C'" - comma: ',' - expression: quoted_literal: "'House'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - comma: ',' - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'D'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - expression: keyword: DEFAULT - comma: ',' - parameter: '@p3' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Local'" - comma: ',' - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'E'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - expression: keyword: DEFAULT - comma: ',' - expression: quoted_literal: "'H'" - comma: ',' - expression: keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - expression: keyword: DEFAULT - comma: ',' - expression: quoted_literal: "'I'" - comma: ',' - parameter: '@p3' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: naked_identifier: sp_addextendedproperty - parameter: '@name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'MS_Description'" - comma: ',' - parameter: '@value' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'my text description'" - comma: ',' - parameter: '@level0type' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'SCHEMA'" - comma: ',' - parameter: '@level0name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'my_schema_name'" - comma: ',' - parameter: '@level1type' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'my_object_type'" - comma: ',' - parameter: '@level1name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'my_object_name'" - statement: execute_script_statement: keyword: EXEC parameter: '@pRes' raw_comparison_operator: '=' object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults statement_terminator: ; - statement: execute_script_statement: - keyword: EXEC - parameter: '@pRes' - raw_comparison_operator: '=' - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE parameter: '@pRes' raw_comparison_operator: '=' object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - parameter: '@pRes' - raw_comparison_operator: '=' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@statement' data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'SELECT 1'" - statement: execute_script_statement: keyword: EXEC bracketed: start_bracket: ( parameter: '@statement' end_bracket: ) statement_terminator: ; - statement: execute_script_statement: keyword: EXEC bracketed: start_bracket: ( quoted_literal: "'DROP TABLE BoardInventory.BoardInventoryFact_Stage;'" end_bracket: ) statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@s1' - keyword: AS - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@s2' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: null_literal: 'NULL' statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@s1' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "'select '" statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@s2' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "'123'" statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE bracketed: - start_bracket: ( - parameter: '@s1' - binary_operator: + - parameter: '@s2' - end_bracket: ) statement_terminator: ; - statement: execute_script_statement: keyword: EXEC bracketed: - start_bracket: ( - quoted_literal: "'select '" - binary_operator: + - quoted_literal: "'123'" - end_bracket: ) statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE bracketed: start_bracket: ( quoted_literal: "'USE AdventureWorks2022; SELECT BusinessEntityID, JobTitle\ \ FROM HumanResources.Employee;'" end_bracket: ) statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetEmployeeManagers expression: numeric_literal: '6' statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetEmployeeManagers parameter: '@EmployeeID' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '6' statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: uspGetEmployeeManagers - expression: parameter: '@EmployeeID' - comma: ',' - parameter: '@ManagerID' - keyword: OUTPUT - statement_terminator: ; - go_statement: keyword: GO - batch: statement: execute_script_statement: object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetEmployeeManagers parameter: '@EmployeeID' comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '6' statement_terminator: ; - go_statement: keyword: GO - batch: - statement: declare_segment: - keyword: DECLARE - parameter: '@CheckDate' - keyword: AS - data_type: data_type_identifier: DATETIME - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetWhereUsedProductID - expression: numeric_literal: '819' - comma: ',' - expression: parameter: '@CheckDate' - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@schemaname' - keyword: AS - data_type: data_type_identifier: sysname - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@tablename' - keyword: AS - data_type: data_type_identifier: sysname - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE bracketed: - start_bracket: ( - quoted_literal: "'ALTER INDEX ALL ON '" - binary_operator: + - parameter: '@schemaname' - binary_operator: + - quoted_literal: "'.'" - binary_operator: + - parameter: '@tablename' - binary_operator: + - quoted_literal: "' REBUILD;'" - end_bracket: ) statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@retstat' - keyword: AS - data_type: data_type_identifier: INT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - parameter: '@retstat' - raw_comparison_operator: '=' - object_reference: - naked_identifier: SQLSERVER1 - dot: . - naked_identifier: AdventureWorks2022 - dot: . - naked_identifier: dbo - dot: . - naked_identifier: uspGetEmployeeManagers - parameter: '@BusinessEntityID' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '6' - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@proc_name' - keyword: AS - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '30' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'sys.sp_who'" - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: parameter: '@proc_name' statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - comma: ',' - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'D'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ProcTestDefaults - expression: keyword: DEFAULT - comma: ',' - expression: quoted_literal: "'H'" - comma: ',' - expression: keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - expression: keyword: DEFAULT - comma: ',' - expression: quoted_literal: "'I'" - comma: ',' - parameter: '@p3' - comparison_operator: raw_comparison_operator: '=' - expression: keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - bracketed: start_bracket: ( quoted_literal: "'CREATE TABLE AdventureWorks2022.dbo.SalesTbl\n(SalesID\ \ INT, SalesName VARCHAR(10)); '" end_bracket: ) - keyword: AT - object_reference: naked_identifier: SeattleSales - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - bracketed: start_bracket: ( quoted_literal: "'SELECT * FROM scott.emp WHERE MGR = ?'" comma: ',' expression: numeric_literal: '7902' end_bracket: ) - keyword: AT - object_reference: naked_identifier: ORACLE - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: Proc_Test_Defaults - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'A'" - keyword: WITH - execute_option: keyword: RECOMPILE - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@returnstatus' - keyword: AS - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '15' end_bracket: ) - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - parameter: '@returnstatus' - raw_comparison_operator: '=' - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ufnGetSalesOrderStatusText - parameter: '@Status' - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE bracketed: start_bracket: ( quoted_literal: "'CREATE TABLE Sales.SalesTable (SalesID INT, SalesName\ \ VARCHAR(10));'" end_bracket: ) login_user_segment: - keyword: AS - keyword: USER - raw_comparison_operator: '=' - quoted_literal: "'User1'" statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: naked_identifier: uspGetEmployeeManagers - expression: numeric_literal: '16' - keyword: WITH - execute_option: - keyword: RESULT - keyword: SETS - bracketed: start_bracket: ( bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[Reporting Level]' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[ID of Employee]' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[Employee First Name]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[Employee Last Name]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[Employee ID of Manager]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( keyword: MAX end_bracket: ) - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[Manager First Name]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '[Manager Last Name]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - keyword: NOT - keyword: 'NULL' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductList - expression: quoted_literal: "'%tire%'" - keyword: WITH - execute_option: - keyword: RESULT - keyword: SETS - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: ProductID - data_type: data_type_identifier: INT - comma: ',' - column_reference: quoted_identifier: '[Name]' - data_type: data_type_identifier: NAME - comma: ',' - column_reference: naked_identifier: ListPrice - data_type: data_type_identifier: MONEY - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[Name]' - data_type: data_type_identifier: NAME - comma: ',' - column_reference: naked_identifier: NumberOfOrders - data_type: data_type_identifier: INT - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - bracketed: start_bracket: ( quoted_literal: "'SELECT @@SERVERNAME'" end_bracket: ) - keyword: AT - keyword: DATA_SOURCE - object_reference: naked_identifier: my_sql_server - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_default_params.sql000066400000000000000000000001541503426445100263660ustar00rootroot00000000000000create or alter procedure name @param1 nvarchar(10) = 'test', @param2 int = 21 as begin return 1; end sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_default_params.yml000066400000000000000000000030241503426445100263670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 73f2f93f13c48a72885f185a7c4a70c7e61e414b62902d00d852e59bb96d93b5 file: batch: create_procedure_statement: - keyword: create - keyword: or - keyword: alter - keyword: procedure - object_reference: naked_identifier: name - procedure_parameter_list: - parameter: '@param1' - data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'test'" - comma: ',' - parameter: '@param2' - data_type: data_type_identifier: int - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '21' - keyword: as - procedure_statement: statement: begin_end_block: - keyword: begin - statement: return_segment: keyword: return expression: numeric_literal: '1' statement_terminator: ; - keyword: end sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_no_return.sql000066400000000000000000000004661503426445100254200ustar00rootroot00000000000000CREATE PROCEDURE findjobs @nm sysname = NULL AS IF @nm IS NULL BEGIN PRINT 'You must give a user name' RETURN END ELSE BEGIN SELECT o.name, o.id, o.uid FROM sysobjects o INNER JOIN master..syslogins l ON o.uid = l.sid WHERE l.name = @nm END; sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_no_return.yml000066400000000000000000000105551503426445100254220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bf7784bb5eda5e1f45d053f31abb531e108c9048c5b0a9fc1a870986bdb7fee7 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: naked_identifier: findjobs - procedure_parameter_list: parameter: '@nm' data_type: data_type_identifier: sysname comparison_operator: raw_comparison_operator: '=' expression: null_literal: 'NULL' - keyword: AS - procedure_statement: statement: if_then_statement: - if_clause: keyword: IF expression: parameter: '@nm' keyword: IS null_literal: 'NULL' - statement: begin_end_block: - keyword: BEGIN - statement: print_statement: keyword: PRINT expression: quoted_literal: "'You must give a user name'" - statement: return_segment: keyword: RETURN - keyword: END - keyword: ELSE - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: uid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sysobjects alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: master - dot: . - dot: . - naked_identifier: syslogins alias_expression: naked_identifier: l - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: uid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: l - dot: . - naked_identifier: sid where_clause: keyword: WHERE expression: column_reference: - naked_identifier: l - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' parameter: '@nm' - keyword: END - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_with_variable.sql000066400000000000000000000003211503426445100262130ustar00rootroot00000000000000CREATE OR ALTER FUNCTION [dbo].[CONVERT_ISO_WEEK_TO_UNIX] (@year INT, @week INT) RETURNS BIGINT AS BEGIN DECLARE @result BIGINT SET @result=4 RETURN @result + @year + @week END sqlfluff-3.4.2/test/fixtures/dialects/tsql/function_with_variable.yml000066400000000000000000000040041503426445100262170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4f778729ae8ba6dcd284c585c2f3167abdc3a32fadc7682c3c620a00161f1db file: batch: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[CONVERT_ISO_WEEK_TO_UNIX]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@year' - data_type: data_type_identifier: INT - comma: ',' - parameter: '@week' - data_type: data_type_identifier: INT - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: BIGINT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE parameter: '@result' data_type: data_type_identifier: BIGINT - statement: set_segment: keyword: SET parameter: '@result' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '4' - statement: return_segment: keyword: RETURN expression: - parameter: '@result' - binary_operator: + - parameter: '@year' - binary_operator: + - parameter: '@week' - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/functions_a.sql000066400000000000000000000011661503426445100240060ustar00rootroot00000000000000SELECT DATE(t) AS t_date, ROUND(b, 2) AS b_round, LEFT(RIGHT(s, 5), LEN(s + 6)) AS compound, DATEADD(month, -1, column1) AS column1_lastmonth, convert(varchar, tbl_b.column1, 23) AS column1_varchar FROM tbl_b GO CREATE FUNCTION dbo.RandDate ( @admit DATE ) RETURNS TABLE AS RETURN ( SELECT @admit FROM dbo.[RandomDate] ); GO CREATE FUNCTION dbo.no_paramters() RETURNS INT AS BEGIN RETURN 2; END GO /* SQLFluff should parse this FROM as a keyword and not as a function name */ SELECT a.* FROM ( SELECT FIN FROM enc ) AS a LEFT JOIN b ON a.FIN = b.FIN WHERE b.FIN IS NULL ; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/functions_a.yml000066400000000000000000000222351503426445100240100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 610e3a8885bb24b75c3624c2a831e5417a251003063d62117010328282521049 file: - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: t end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: t_date - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROUND function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: b_round - comma: ',' - select_clause_element: function: function_name: keyword: LEFT function_contents: bracketed: - start_bracket: ( - expression: function: function_name: keyword: RIGHT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: s - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: LEN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: s binary_operator: + numeric_literal: '6' end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: compound - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: column1 - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: column1_lastmonth - comma: ',' - select_clause_element: function: function_name: keyword: convert function_contents: bracketed: - start_bracket: ( - data_type: data_type_identifier: varchar - comma: ',' - expression: column_reference: - naked_identifier: tbl_b - dot: . - naked_identifier: column1 - comma: ',' - numeric_literal: '23' - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: column1_varchar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: RandDate - function_parameter_list: bracketed: start_bracket: ( parameter: '@admit' data_type: data_type_identifier: DATE end_bracket: ) - keyword: RETURNS - keyword: TABLE - keyword: AS - procedure_statement: statement: return_segment: keyword: RETURN expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: parameter: '@admit' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dbo dot: . quoted_identifier: '[RandomDate]' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: no_paramters - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: numeric_literal: '2' statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: FIN from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: enc end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: FIN - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: FIN where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: FIN keyword: IS null_literal: 'NULL' statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/functions_agg.sql000066400000000000000000000006431503426445100243230ustar00rootroot00000000000000SELECT string_agg(t.v, '; ') within group (order by v) as column_name1 ,PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY t.Rate) OVER (PARTITION BY Name) AS MedianCont ,PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY t.Rate) OVER (PARTITION BY Name) AS MedianDisc from table1 t group by employee_id HAVING MIN([ArrivalDt]) <= MAX([DischargeDt]) DROP TABLE #Mercury; sqlfluff-3.4.2/test/fixtures/dialects/tsql/functions_agg.yml000066400000000000000000000126061503426445100243270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 616dc7a275c62c2218e0f643f17738931af69181a8e57c7db10c9f09403cca21 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: keyword: string_agg function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: v - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) within_group_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: v end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: column_name1 - comma: ',' - select_clause_element: function: function_name: keyword: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: Rate end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: Name end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: MedianCont - comma: ',' - select_clause_element: function: function_name: keyword: PERCENTILE_DISC function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: Rate end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: Name end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: MedianDisc from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: t groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: employee_id having_clause: keyword: HAVING expression: - function: function_name: function_name_identifier: MIN function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[ArrivalDt]' end_bracket: ) - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[DischargeDt]' end_bracket: ) - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#Mercury' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/go_delimiters.sql000066400000000000000000000002701503426445100243170ustar00rootroot00000000000000-- It's possible to have a file starting with GO GO -- It's possible to have multiple GO between batches. SELECT foo FROM bar GO GO SELECT foo FROM bar GO GO GO SELECT foo FROM bar GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/go_delimiters.yml000066400000000000000000000034701503426445100243260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b5da83f2dfc435ece584287628e0fcc92d30a506e7f8ca3b44ac5f8deed4f9ff file: - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO - go_statement: keyword: GO - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/goto_statement.sql000066400000000000000000000000541503426445100245250ustar00rootroot00000000000000GOTO Branch_Three; Branch_Three: RETURN sqlfluff-3.4.2/test/fixtures/dialects/tsql/goto_statement.yml000066400000000000000000000012541503426445100245320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 96f38c44bd2bbf296264d6719fca15641b7a0951aa17203ad06b7e98f3db981b file: batch: - statement: goto_statement: keyword: GOTO naked_identifier: Branch_Three - statement_terminator: ; - statement: label_segment: naked_identifier: Branch_Three colon: ':' - statement: return_segment: keyword: RETURN sqlfluff-3.4.2/test/fixtures/dialects/tsql/grant_deny_revoke.sql000066400000000000000000000014561503426445100252050ustar00rootroot00000000000000GRANT SELECT ON OBJECT::Person.Address TO RosaQdM; GO USE AdventureWorks2012; GRANT EXECUTE ON OBJECT::HumanResources.uspUpdateEmployeeHireInfo TO Recruiting11; GO GRANT REFERENCES (BusinessEntityID) ON OBJECT::HumanResources.vEmployee TO Wanida WITH GRANT OPTION; GO GRANT SELECT ON Person.Address TO RosaQdM; GO GRANT SELECT ON Person.Address TO [AdventureWorks2012\RosaQdM]; GO CREATE ROLE newrole ; GRANT EXECUTE ON dbo.uspGetBillOfMaterials TO newrole ; GO GRANT SELECT ON SCHEMA :: Sales TO Vendors; GO REVOKE SELECT ON SCHEMA :: Sales TO Vendors; GO DENY SELECT ON OBJECT::Person.Address TO RosaQdM; GO DENY EXECUTE ON OBJECT::HumanResources.uspUpdateEmployeeHireInfo TO Recruiting11; GO DENY REFERENCES (BusinessEntityID) ON OBJECT::HumanResources.vEmployee TO Wanida CASCADE; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/grant_deny_revoke.yml000066400000000000000000000130271503426445100252040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f49b9b7af9353f7d07866b8964a00f9de08c3000e479e3d4a6349a5badd15bf9 file: - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: - statement: use_statement: keyword: USE database_reference: naked_identifier: AdventureWorks2012 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: uspUpdateEmployeeHireInfo - keyword: TO - role_reference: naked_identifier: Recruiting11 - statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: REFERENCES - bracketed: start_bracket: ( column_reference: naked_identifier: BusinessEntityID end_bracket: ) - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: vEmployee - keyword: TO - role_reference: naked_identifier: Wanida - keyword: WITH - keyword: GRANT - keyword: OPTION statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: quoted_identifier: '[AdventureWorks2012\RosaQdM]' statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: newrole - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetBillOfMaterials - keyword: TO - role_reference: naked_identifier: newrole - statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: SCHEMA - casting_operator: '::' - object_reference: naked_identifier: Sales - keyword: TO - role_reference: naked_identifier: Vendors statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: SCHEMA - casting_operator: '::' - object_reference: naked_identifier: Sales - keyword: TO - role_reference: naked_identifier: Vendors statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: SELECT - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: EXECUTE - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: uspUpdateEmployeeHireInfo - keyword: TO - role_reference: naked_identifier: Recruiting11 statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: REFERENCES - bracketed: start_bracket: ( column_reference: naked_identifier: BusinessEntityID end_bracket: ) - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: vEmployee - keyword: TO - role_reference: naked_identifier: Wanida - keyword: CASCADE statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/group_by.sql000066400000000000000000000004011503426445100233130ustar00rootroot00000000000000CREATE TABLE #n WITH (DISTRIBUTION = ROUND_ROBIN) AS ( Select acto.ActionDTS FROM Orders_Action acto ) SELECT t.actiondts FROM #n t GROUP BY t.ActionDTS; DROP TABLE #n; SELECT st, count(*), count(DISTINCT id) FROM #3 GROUP BY st WITH ROLLUP; sqlfluff-3.4.2/test/fixtures/dialects/tsql/group_by.yml000066400000000000000000000074721503426445100233340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 185300b83b1779585e6206cc559146785c004722e829ea7e464a52efed7de5ac file: batch: - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: hash_identifier: '#n' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: ROUND_ROBIN end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: column_reference: - naked_identifier: acto - dot: . - naked_identifier: ActionDTS from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Orders_Action alias_expression: naked_identifier: acto end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: actiondts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#n' alias_expression: naked_identifier: t groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: ActionDTS statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#n' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: st - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#3' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: st - with_rollup_clause: - keyword: WITH - keyword: ROLLUP statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/hints.sql000066400000000000000000000053031503426445100226200ustar00rootroot00000000000000SELECT * FROM Sales.Customer AS c INNER JOIN Sales.CustomerAddress AS ca ON c.CustomerID = ca.CustomerID WHERE TerritoryID = 5 OPTION (MERGE JOIN); GO CREATE PROCEDURE dbo.RetrievePersonAddress @city_name NVARCHAR(30), @postal_code NVARCHAR(15) AS SELECT * FROM Person.Address WHERE City = @city_name AND PostalCode = @postal_code OPTION ( OPTIMIZE FOR (@city_name = 'Seattle', @postal_code UNKNOWN) ); GO --Creates an infinite loop WITH cte (CustomerID, PersonID, StoreID) AS ( SELECT CustomerID, PersonID, StoreID FROM Sales.Customer WHERE PersonID IS NOT NULL UNION ALL SELECT cte.CustomerID, cte.PersonID, cte.StoreID FROM cte JOIN Sales.Customer AS e ON cte.PersonID = e.CustomerID ) --Uses MAXRECURSION to limit the recursive levels to 2 SELECT CustomerID, PersonID, StoreID FROM cte OPTION (MAXRECURSION 2); GO SELECT * FROM HumanResources.Employee AS e1 UNION SELECT * FROM HumanResources.Employee AS e2 OPTION (MERGE UNION); GO SELECT ProductID, OrderQty, SUM(LineTotal) AS Total FROM Sales.SalesOrderDetail WHERE UnitPrice < 5 GROUP BY ProductID, OrderQty ORDER BY ProductID, OrderQty OPTION (HASH GROUP, FAST 10); GO SELECT ProductID, OrderQty, SUM(LineTotal) AS Total FROM Sales.SalesOrderDetail WHERE UnitPrice < 5 GROUP BY ProductID, OrderQty ORDER BY ProductID, OrderQty OPTION (MAXDOP 2); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (RECOMPILE, USE HINT ('ASSUME_MIN_SELECTIVITY_FOR_FILTER_ESTIMATES', 'DISABLE_PARAMETER_SNIFFING')); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (QUERYTRACEON 4199); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (QUERYTRACEON 4199, QUERYTRACEON 4137); GO UPDATE Production.Product WITH (TABLOCK) SET ListPrice = ListPrice * 1.10 WHERE ProductNumber LIKE 'BK-%'; GO SELECT * FROM Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESEEK) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT h.SalesOrderID, h.TotalDue, d.OrderQty FROM Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESEEK (PK_SalesOrderDetail_SalesOrderID_SalesOrderDetailID (SalesOrderID))) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT h.SalesOrderID, h.TotalDue, d.OrderQty FROM 
Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESCAN) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT ID FROM dbo.tableX WITH(NOLOCK) GO SELECT ID FROM dbo.tableX (NOLOCK) GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/hints.yml000066400000000000000000001004051503426445100226210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3a1902cc6ad1478fa1a51f3170e8b348c241f15e527959e4ea3f7300d3bc6907 file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer alias_expression: alias_operator: keyword: AS naked_identifier: c join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: CustomerAddress alias_expression: alias_operator: keyword: AS naked_identifier: ca - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: CustomerID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ca - dot: . - naked_identifier: CustomerID where_clause: keyword: WHERE expression: column_reference: naked_identifier: TerritoryID comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: MERGE - keyword: JOIN end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: RetrievePersonAddress - procedure_parameter_list: - parameter: '@city_name' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '30' end_bracket: ) - comma: ',' - parameter: '@postal_code' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '15' end_bracket: ) - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . 
- naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - parameter: '@city_name' - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - parameter: '@postal_code' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: OPTIMIZE - keyword: FOR - bracketed: - start_bracket: ( - parameter: '@city_name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Seattle'" - comma: ',' - parameter: '@postal_code' - keyword: UNKNOWN - end_bracket: ) end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: CustomerID - comma: ',' - naked_identifier: PersonID - comma: ',' - naked_identifier: StoreID end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer where_clause: keyword: WHERE expression: - column_reference: naked_identifier: PersonID - keyword: IS - keyword: NOT - null_literal: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer alias_expression: alias_operator: keyword: AS naked_identifier: e join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cte - dot: . - naked_identifier: PersonID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . 
- naked_identifier: CustomerID end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: MAXRECURSION numeric_literal: '2' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: alias_operator: keyword: AS naked_identifier: e1 - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: alias_operator: keyword: AS naked_identifier: e2 - option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: MERGE - keyword: UNION end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: LineTotal end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Total from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: column_reference: naked_identifier: UnitPrice comparison_operator: raw_comparison_operator: < numeric_literal: '5' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: - keyword: HASH - keyword: GROUP - comma: ',' - query_hint_segment: keyword: FAST numeric_literal: '10' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: LineTotal end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Total from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: column_reference: naked_identifier: UnitPrice comparison_operator: raw_comparison_operator: < numeric_literal: '5' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: MAXDOP numeric_literal: '2' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: RECOMPILE - comma: ',' - query_hint_segment: - keyword: USE - keyword: HINT - bracketed: - start_bracket: ( - quoted_literal: "'ASSUME_MIN_SELECTIVITY_FOR_FILTER_ESTIMATES'" - comma: ',' - quoted_literal: "'DISABLE_PARAMETER_SNIFFING'" - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . 
- naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4199' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4199' - comma: ',' - query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4137' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: Production - dot: . - naked_identifier: Product post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: TABLOCK end_bracket: ) set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: ListPrice assignment_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: ListPrice binary_operator: '*' numeric_literal: '1.10' where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductNumber keyword: LIKE quoted_literal: "'BK-%'" statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESEEK end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . 
- naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue - comma: ',' - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESEEK bracketed: start_bracket: ( index_reference: naked_identifier: PK_SalesOrderDetail_SalesOrderID_SalesOrderDetailID bracketed: start_bracket: ( naked_identifier: SalesOrderID end_bracket: ) end_bracket: ) end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue - comma: ',' - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESCAN end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: tableX post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: NOLOCK end_bracket: ) - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: tableX post_table_expression: bracketed: start_bracket: ( query_hint_segment: keyword: NOLOCK end_bracket: ) - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/if_else.sql000066400000000000000000000007621503426445100231050ustar00rootroot00000000000000IF 1 <= (SELECT Weight from DimProduct WHERE ProductKey = 1) SELECT ProductKey, EnglishDescription, Weight, 'This product is too heavy to ship and is only available for pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 ELSE SELECT ProductKey, EnglishDescription, Weight, 'This product is available for shipping or pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 if exists (select * from #a union all select * from #b) set @var = 1; sqlfluff-3.4.2/test/fixtures/dialects/tsql/if_else.yml000066400000000000000000000137031503426445100231060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8607a11ed091582eb2c75b9a1b7a7d170112ffee6b2390f2c9688af774a36c44 file: batch: - statement: if_then_statement: - if_clause: keyword: IF expression: numeric_literal: '1' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Weight from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is too heavy to ship and is only available\ \ for pickup.'" alias_expression: alias_operator: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: ELSE - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is available for shipping or pickup.'" alias_expression: alias_operator: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement: if_then_statement: if_clause: keyword: if expression: keyword: exists bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#a' - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#b' end_bracket: ) statement: set_segment: keyword: set parameter: '@var' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/if_else_begin_end.sql000066400000000000000000000007441503426445100250770ustar00rootroot00000000000000IF 1 <= (SELECT Weight from DimProduct WHERE ProductKey = 1) BEGIN SELECT ProductKey, EnglishDescription, Weight, 'This product is too heavy to ship and is only available for pickup.' 
AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 END ELSE BEGIN SELECT ProductKey, EnglishDescription, Weight, 'This product is available for shipping or pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 END sqlfluff-3.4.2/test/fixtures/dialects/tsql/if_else_begin_end.yml000066400000000000000000000114011503426445100250710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ede8bc950734e4f460c930ab13abafa579e442cb7f319f5e19748aa28d30447 file: batch: statement: if_then_statement: - if_clause: keyword: IF expression: numeric_literal: '1' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Weight from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is too heavy to ship and is only\ \ available for pickup.'" alias_expression: alias_operator: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: END - keyword: ELSE - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is available for shipping or pickup.'" alias_expression: alias_operator: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/ignore_nulls.sql000066400000000000000000000002751503426445100241760ustar00rootroot00000000000000SELECT FIRST_VALUE( [entrypunt]) IGNORE NULLS OVER ( PARTITION BY ([reisnummer]) ORDER BY [reismutatie starttijdstip] ) AS [entrypunt] FROM [reizen] 
sqlfluff-3.4.2/test/fixtures/dialects/tsql/ignore_nulls.yml000066400000000000000000000037721503426445100242050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4707e00fe08873a6633dfe082e20b582274ce0690293bcf930777e658ab46c60 file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[entrypunt]' end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( column_reference: quoted_identifier: '[reisnummer]' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[reismutatie starttijdstip]' end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[entrypunt]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '[reizen]' sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_default.sql000066400000000000000000000001231503426445100244760ustar00rootroot00000000000000-- Simple statement for setting default values INSERT INTO mytable DEFAULT VALUES; sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_default.yml000066400000000000000000000011621503426445100245040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 28093a4e7856024d44d9aab2f51c43db14664b9b0e98cb86ec6d744581cc9c52 file: batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - keyword: DEFAULT - keyword: VALUES statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_statement.sql000066400000000000000000000025111503426445100250610ustar00rootroot00000000000000INSERT INTO INTER.ECDC_CASES ( [COUNTRY], [COUNTRY_CODE], [CONTINENT], [POPULATION], [INDICATOR], [WEEKLY_COUNT], [YEAR_WEEK], [WEEK_START], [WEEK_END], [RATE_14_DAY], [CUMULATIVE_COUNT], [SOURCE] ) SELECT [COUNTRY], [COUNTRY_CODE], [CONTINENT], CAST([POPULATION] AS BIGINT) AS [POPULATION], [INDICATOR], CAST([WEEKLY_COUNT] AS BIGINT) AS [WEEKLY_COUNT], [YEAR_WEEK], CAST([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2)) AS DATE) AS [WEEK_START], CAST([dbo].[WEEK_END]([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2))) AS DATE ) AS [WEEK_END], CAST([RATE_14_DAY] AS FLOAT) AS [RATE_14_DAY], CAST([CUMULATIVE_COUNT] AS BIGINT) AS [CUMULATIVE_COUNT], [SOURCE] FROM STAGE.ECDC_CASES GO BEGIN INSERT INTO HumanResources.NewEmployee SELECT EmpID, LastName, FirstName, Phone, Address, City, StateProvince, PostalCode, CurrentFlag FROM EmployeeTemp; END GO INSERT INTO HumanResources.NewEmployee SELECT EmpID, LastName, FirstName, Phone, Address, City, StateProvince, PostalCode, CurrentFlag FROM EmployeeTemp; GO INSERT INTO HumanResources.NewEmployee WITH(TABLOCK) OUTPUT * INTO Results EXEC FindEmployeesFunc @lastName = 'Picard' GO INSERT HumanResources.NewEmployee (LastName, FirstName) values ('Kirk', 'James') GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_statement.yml000066400000000000000000000406251503426445100250730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 16df891b928c5d8c9d95f87eadca7ebd741602622fb956784bdde800ab4bcea1 file: - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: INTER - dot: . 
- naked_identifier: ECDC_CASES - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - column_reference: quoted_identifier: '[POPULATION]' - comma: ',' - column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - column_reference: quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_START]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_END]' - comma: ',' - column_reference: quoted_identifier: '[RATE_14_DAY]' - comma: ',' - column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[SOURCE]' - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[POPULATION]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[POPULATION]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[WEEKLY_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' function_contents: bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEK_START]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[WEEK_END]' function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . 
function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' function_contents: bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEK_END]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[RATE_14_DAY]' keyword: AS data_type: data_type_identifier: FLOAT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[RATE_14_DAY]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[SOURCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: ECDC_CASES - go_statement: keyword: GO - batch: statement: begin_end_block: - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: naked_identifier: Phone - comma: ',' - select_clause_element: column_reference: naked_identifier: Address - comma: ',' - select_clause_element: column_reference: naked_identifier: City - comma: ',' - select_clause_element: column_reference: naked_identifier: StateProvince - comma: ',' - select_clause_element: column_reference: naked_identifier: PostalCode - comma: ',' - select_clause_element: column_reference: naked_identifier: CurrentFlag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EmployeeTemp statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: NewEmployee - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: naked_identifier: Phone - comma: ',' - select_clause_element: column_reference: naked_identifier: Address - comma: ',' - select_clause_element: column_reference: naked_identifier: City - comma: ',' - select_clause_element: column_reference: naked_identifier: StateProvince - comma: ',' - select_clause_element: column_reference: naked_identifier: PostalCode - comma: ',' - select_clause_element: column_reference: naked_identifier: CurrentFlag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EmployeeTemp statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee - post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: TABLOCK end_bracket: ) - output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: star: '*' - keyword: INTO - table_reference: naked_identifier: Results - execute_script_statement: keyword: EXEC object_reference: naked_identifier: FindEmployeesFunc parameter: '@lastName' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Picard'" - go_statement: keyword: GO - batch: statement: insert_statement: keyword: INSERT table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee bracketed: - start_bracket: ( - column_reference: naked_identifier: LastName - comma: ',' - column_reference: naked_identifier: FirstName - end_bracket: ) values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'Kirk'" - comma: ',' - quoted_literal: "'James'" - end_bracket: ) - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_with_identity_insert.sql000066400000000000000000000001741503426445100273300ustar00rootroot00000000000000SET IDENTITY_INSERT someTable ON; INSERT INTO someTable (ID, Value) VALUES (1, 2); SET IDENTITY_INSERT someTable OFF; sqlfluff-3.4.2/test/fixtures/dialects/tsql/insert_with_identity_insert.yml000066400000000000000000000026161503426445100273350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 252552cfe5864de52f3e01b4db756fa4e21bdf58b0f2d0468bdfffa4c65ea343 file: batch: - statement: set_segment: - keyword: SET - keyword: IDENTITY_INSERT - table_reference: naked_identifier: someTable - keyword: 'ON' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: someTable - bracketed: - start_bracket: ( - column_reference: naked_identifier: ID - comma: ',' - column_reference: naked_identifier: Value - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: set_segment: - keyword: SET - keyword: IDENTITY_INSERT - table_reference: naked_identifier: someTable - keyword: 'OFF' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/join_hints.sql000066400000000000000000000004711503426445100236400ustar00rootroot00000000000000-- HASH JOIN SELECT table1.col FROM table1 INNER HASH JOIN table2 ON table1.col = table2.col; -- OUTER MERGE JOIN SELECT table1.col FROM table1 FULL OUTER MERGE JOIN table2 ON table1.col = table2.col; -- LEFT LOOP JOIN SELECT table1.col FROM table1 LEFT LOOP JOIN table2 ON table1.col = table2.col; sqlfluff-3.4.2/test/fixtures/dialects/tsql/join_hints.yml000066400000000000000000000100671503426445100236440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42b1d9e81e443eb8a7372ec44596ca40406cfc582fb11a9e1bb04dafac2a41fd file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: HASH - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: FULL - keyword: OUTER - keyword: MERGE - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: LEFT - keyword: LOOP - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/json_functions.sql000066400000000000000000000021541503426445100245350ustar00rootroot00000000000000SELECT JSON_OBJECT(ABSENT ON NULL); SELECT JSON_ARRAY('a', 1, 'b', 2); SELECT JSON_ARRAY('a', 1, NULL, 2, NULL ON NULL); SELECT JSON_OBJECT('name': 'value', 'new': 1); SELECT JSON_OBJECT('name': 'value', 'type': NULL ABSENT ON NULL) SELECT JSON_OBJECT('name': 'value', 'type': JSON_ARRAY(1, 2)) SELECT JSON_OBJECT('name': 'value', 'type': JSON_OBJECT('type_id': 1, 'name': 'a')) DECLARE @id_key nvarchar(10) = N'id', @id_value nvarchar(64) = NEWID(); SELECT JSON_OBJECT('user_name': USER_NAME(), @id_key: @id_value, 'sid': (SELECT @@SPID)); SELECT s.session_id, JSON_OBJECT('security_id': s.security_id, 'login': s.login_name, 'status': s.status) AS info FROM sys.dm_exec_sessions AS s WHERE s.is_user_process = 1; SELECT JSON_ARRAY('a', JSON_OBJECT('name': 'value', 'type': 1)); SELECT JSON_ARRAY('a', JSON_OBJECT('name': 'value', 'type': 1), JSON_ARRAY(1, NULL, 2 NULL ON NULL)); DECLARE @id_value nvarchar(64) = NEWID(); SELECT JSON_ARRAY(1, @id_value, (SELECT @@SPID)); SELECT s.session_id, JSON_ARRAY(s.host_name, s.program_name, s.client_interface_name) FROM sys.dm_exec_sessions AS s WHERE s.is_user_process = 1; sqlfluff-3.4.2/test/fixtures/dialects/tsql/json_functions.yml000066400000000000000000000406051503426445100245420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ae13d24b15b3833d9645ad5f737db733e900dbd565bca98af772df22af0cb4b file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - keyword: ABSENT - keyword: 'ON' - keyword: 'NULL' - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'b'" - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - quoted_literal: "'a'" - comma: ',' - numeric_literal: '1' - comma: ',' - keyword: 'NULL' - comma: ',' - numeric_literal: '2' - comma: ',' - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'new'" - colon: ':' - numeric_literal: '1' - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'type'" - colon: ':' - null_literal: 'NULL' - keyword: ABSENT - keyword: 'ON' - keyword: 'NULL' - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'type'" - colon: ':' - function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'type'" - colon: ':' - function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'type_id'" - colon: ':' - numeric_literal: '1' - comma: ',' - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'a'" - end_bracket: ) - end_bracket: ) - statement: declare_segment: - keyword: DECLARE - parameter: '@id_key' - data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'id'" - comma: ',' - parameter: '@id_value' - data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '64' end_bracket: ) - 
comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NEWID function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'user_name'" - colon: ':' - function: function_name: function_name_identifier: USER_NAME function_contents: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: '@id_key' - colon: ':' - parameter: '@id_value' - comma: ',' - quoted_literal: "'sid'" - colon: ':' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@SPID' end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: session_id - comma: ',' - select_clause_element: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'security_id'" - colon: ':' - column_reference: - naked_identifier: s - dot: . - naked_identifier: security_id - comma: ',' - quoted_literal: "'login'" - colon: ':' - column_reference: - naked_identifier: s - dot: . - naked_identifier: login_name - comma: ',' - quoted_literal: "'status'" - colon: ':' - column_reference: - naked_identifier: s - dot: . - naked_identifier: status - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: info from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: dm_exec_sessions alias_expression: alias_operator: keyword: AS naked_identifier: s where_clause: keyword: WHERE expression: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: is_user_process comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'type'" - colon: ':' - numeric_literal: '1' - end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: function: function_name: keyword: JSON_OBJECT function_contents: bracketed: - start_bracket: ( - quoted_literal: "'name'" - colon: ':' - quoted_literal: "'value'" - comma: ',' - quoted_literal: "'type'" - colon: ':' - numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - keyword: 'NULL' - comma: ',' - numeric_literal: '2' - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@id_value' data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '64' end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: NEWID function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: parameter: '@id_value' - comma: ',' - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@SPID' end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: session_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: JSON_ARRAY function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: host_name - comma: ',' - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: program_name - comma: ',' - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: client_interface_name - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: dm_exec_sessions alias_expression: alias_operator: keyword: AS naked_identifier: s where_clause: keyword: WHERE expression: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: is_user_process comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/merge.sql000066400000000000000000000120301503426445100225650ustar00rootroot00000000000000merge schema1.table1 dst using schema1.table1 src on src.rn = 1 and dst.e_date_to is null and dst.cc_id = src.cc_id when matched then update set dst.l_id = src.l_id, dst.e_date_to = src.e_date_from go with source_data as ( select cc_id , cc_name , cc_description from DW.sch1.tbl1 where e_date_to is null and l_id >= dd and l_id <= dd ) merge DM.sch1.tbl2 dst using source_data src on src.cc_id = dst.cc_id when matched then update set dst.cc_name = src.cc_name , dst.cc_description = src.cc_description when not matched then insert ( cc_id , cc_name , cc_description ) values ( cc_id , cc_name , cc_description ); go merge DW.tt.dd dst using LA.tt.dd src on dst.s_id = src.s_id and dst.c_id = src.c_id when matched then update set dst.c_name = src.c_name , dst.col1 = src.col1 , dst.col2 = src.col2 when not matched by target and src.c_id is not null then insert ( s_id , c_id , c_name , col1 , col2 ) values ( src.s_id , src.c_id , src.c_name , src.col1 , src.col2 ) when not matched by source and s_id =1 in ( select s_id from LA.g.tbl3) then update set dst.col1 = 'N' , dst.col2 = col2 ; go MERGE Production.UnitMeasure AS tgt USING (SELECT @UnitMeasureCode, @Name) as src (UnitMeasureCode, Name) ON (tgt.UnitMeasureCode = src.UnitMeasureCode) WHEN MATCHED THEN UPDATE SET Name = src.Name WHEN NOT MATCHED THEN INSERT (UnitMeasureCode, Name) VALUES (src.UnitMeasureCode, src.Name) OUTPUT deleted.*, $action, inserted.* INTO #MyTempTable; GO MERGE Production.ProductInventory AS tgt USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate = @OrderDate GROUP BY ProductID) as src (ProductID, OrderQty) ON (tgt.ProductID = src.ProductID) WHEN MATCHED AND tgt.Quantity - src.OrderQty <= 0 THEN DELETE WHEN MATCHED THEN UPDATE SET tgt.Quantity = tgt.Quantity - src.OrderQty, tgt.ModifiedDate = GETDATE() OUTPUT $action, Inserted.ProductID, Inserted.Quantity, Inserted.ModifiedDate, Deleted.ProductID, Deleted.Quantity, Deleted.ModifiedDate; GO MERGE Production.ProductInventory AS pi USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate BETWEEN '20030701' AND '20030731' GROUP BY ProductID) AS src (ProductID, OrderQty) ON pi.ProductID = src.ProductID WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0 THEN DELETE OUTPUT $action, Inserted.ProductID, Inserted.LocationID, Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty; GO insert into sch1.table1 ( columnC ) select upd.columnC from ( merge sch1.table1 trg using ( select gr.columnC from sch2.table2 as gr ) src on trg.columnC = src.columnC when matched then update set columnC = src.columnC output inserted.columnC ) as upd ; GO MERGE Production.UnitMeasure WITH (PAGLOCK) AS tgt USING (SELECT @UnitMeasureCode, @Name) as src (UnitMeasureCode, Name) ON (tgt.UnitMeasureCode = src.UnitMeasureCode) WHEN MATCHED THEN UPDATE SET Name = src.Name WHEN NOT MATCHED THEN INSERT (UnitMeasureCode, Name) VALUES (src.UnitMeasureCode, src.Name) OUTPUT deleted.*, $action, inserted.* INTO #MyTempTable; GO MERGE 
INTO Production.ProductInventory WITH (ROWLOCK, INDEX(myindex, myindex2)) AS pi USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate BETWEEN '20030701' AND '20030731' GROUP BY ProductID) AS src (ProductID, OrderQty) ON pi.ProductID = src.ProductID WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0 THEN DELETE OUTPUT $action, Inserted.ProductID, Inserted.LocationID, Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty; GO MERGE INTO dbo.target USING ( SELECT 1 AS i ) AS source ON source.i = target.i WHEN MATCHED THEN UPDATE SET target.i = source.i; sqlfluff-3.4.2/test/fixtures/dialects/tsql/merge.yml000066400000000000000000001443711503426445100226050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0d3e1a239cfc078f09e11cf7d587ef50fa821012b42cb3e13079f6e3d457e410 file: - batch: statement: merge_statement: - keyword: merge - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: dst - keyword: using - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: rn - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: e_date_to - keyword: is - null_literal: 'null' - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: l_id assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: l_id - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: e_date_to assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: e_date_from - go_statement: keyword: go - batch: statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: source_data keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: cc_id - comma: ',' - select_clause_element: column_reference: naked_identifier: cc_name - comma: ',' - select_clause_element: column_reference: naked_identifier: cc_description from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: DW - dot: . - naked_identifier: sch1 - dot: . 
- naked_identifier: tbl1 where_clause: keyword: where expression: - column_reference: naked_identifier: e_date_to - keyword: is - null_literal: 'null' - binary_operator: and - column_reference: naked_identifier: l_id - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: dd - binary_operator: and - column_reference: naked_identifier: l_id - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: dd end_bracket: ) merge_statement: - keyword: merge - table_reference: - naked_identifier: DM - dot: . - naked_identifier: sch1 - dot: . - naked_identifier: tbl2 - alias_expression: naked_identifier: dst - keyword: using - table_reference: naked_identifier: source_data - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_name - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_description assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_description merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: then - merge_insert_clause: - keyword: insert - bracketed: - start_bracket: ( - column_reference: naked_identifier: cc_id - comma: ',' - column_reference: naked_identifier: cc_name - comma: ',' - column_reference: naked_identifier: cc_description - end_bracket: ) - keyword: values - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: cc_id - comma: ',' - expression: column_reference: naked_identifier: cc_name - comma: ',' - expression: column_reference: naked_identifier: cc_description - end_bracket: ) statement_terminator: ; - go_statement: keyword: go - batch: statement: merge_statement: - keyword: merge - table_reference: - naked_identifier: DW - dot: . - naked_identifier: tt - dot: . - naked_identifier: dd - alias_expression: naked_identifier: dst - keyword: using - table_reference: - naked_identifier: LA - dot: . - naked_identifier: tt - dot: . - naked_identifier: dd - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: dst - dot: . - naked_identifier: s_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: s_id - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - merge_match: - merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . 
- naked_identifier: c_name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_name - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col1 assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col1 - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col2 assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col2 - merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: by - keyword: target - keyword: and - expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - keyword: is - keyword: not - null_literal: 'null' - keyword: then - merge_insert_clause: - keyword: insert - bracketed: - start_bracket: ( - column_reference: naked_identifier: s_id - comma: ',' - column_reference: naked_identifier: c_id - comma: ',' - column_reference: naked_identifier: c_name - comma: ',' - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: values - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: s_id - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_name - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col1 - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col2 - end_bracket: ) - merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: by - keyword: source - keyword: and - expression: column_reference: naked_identifier: s_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' keyword: in bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: s_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: LA - dot: . - naked_identifier: g - dot: . - naked_identifier: tbl3 end_bracket: ) - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col1 assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "'N'" - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col2 assignment_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col2 statement_terminator: ; - go_statement: keyword: go - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: UnitMeasure - alias_expression: alias_operator: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@UnitMeasureCode' - comma: ',' - select_clause_element: parameter: '@Name' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: UnitMeasureCode - comma: ',' - naked_identifier: Name end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: UnitMeasureCode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: Name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - bracketed: - start_bracket: ( - column_reference: naked_identifier: UnitMeasureCode - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: VALUES - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name - end_bracket: ) output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: naked_identifier: deleted dot: . star: '*' - comma: ',' - column_reference: variable_identifier: $action - comma: ',' - wildcard_expression: wildcard_identifier: naked_identifier: inserted dot: . star: '*' - keyword: INTO - table_reference: hash_identifier: '#MyTempTable' statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductInventory - alias_expression: alias_operator: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . 
- naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . - naked_identifier: OrderDate - comparison_operator: raw_comparison_operator: '=' - parameter: '@OrderDate' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comma: ',' - set_clause: column_reference: - naked_identifier: tgt - dot: . - naked_identifier: ModifiedDate assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: Quantity - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ModifiedDate - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: ModifiedDate statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductInventory - alias_expression: alias_operator: keyword: AS naked_identifier: pi - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . - naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . - naked_identifier: OrderDate - keyword: BETWEEN - quoted_literal: "'20030701'" - keyword: AND - quoted_literal: "'20030731'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . 
- naked_identifier: LocationID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: Quantity - alias_expression: alias_operator: keyword: AS naked_identifier: NewQty - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - alias_expression: alias_operator: keyword: AS naked_identifier: PreviousQty statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: insert - keyword: into - table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: table1 - bracketed: start_bracket: ( column_reference: naked_identifier: columnC end_bracket: ) - select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: upd - dot: . - naked_identifier: columnC from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( merge_statement: - keyword: merge - table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: trg - keyword: using - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: gr - dot: . - naked_identifier: columnC from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch2 - dot: . - naked_identifier: table2 alias_expression: alias_operator: keyword: as naked_identifier: gr end_bracket: ) - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: trg - dot: . - naked_identifier: columnC - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: columnC - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: keyword: set set_clause: column_reference: naked_identifier: columnC assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: columnC output_clause: keyword: output column_reference: - naked_identifier: inserted - dot: . - naked_identifier: columnC end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: upd statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: UnitMeasure - keyword: WITH - bracketed: start_bracket: ( query_hint_segment: keyword: PAGLOCK end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@UnitMeasureCode' - comma: ',' - select_clause_element: parameter: '@Name' end_bracket: ) - alias_expression: alias_operator: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: UnitMeasureCode - comma: ',' - naked_identifier: Name end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: UnitMeasureCode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . 
- naked_identifier: UnitMeasureCode end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: Name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - bracketed: - start_bracket: ( - column_reference: naked_identifier: UnitMeasureCode - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: VALUES - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name - end_bracket: ) output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: naked_identifier: deleted dot: . star: '*' - comma: ',' - column_reference: variable_identifier: $action - comma: ',' - wildcard_expression: wildcard_identifier: naked_identifier: inserted dot: . star: '*' - keyword: INTO - table_reference: hash_identifier: '#MyTempTable' statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductInventory - keyword: WITH - bracketed: - start_bracket: ( - query_hint_segment: keyword: ROWLOCK - comma: ',' - query_hint_segment: keyword: INDEX bracketed: - start_bracket: ( - index_reference: naked_identifier: myindex - comma: ',' - index_reference: naked_identifier: myindex2 - end_bracket: ) - end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: pi - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: alias_operator: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: alias_operator: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . - naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . 
- naked_identifier: OrderDate - keyword: BETWEEN - quoted_literal: "'20030701'" - keyword: AND - quoted_literal: "'20030731'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: LocationID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: Quantity - alias_expression: alias_operator: keyword: AS naked_identifier: NewQty - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - alias_expression: alias_operator: keyword: AS naked_identifier: PreviousQty statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: target - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: i end_bracket: ) - alias_expression: alias_operator: keyword: AS naked_identifier: source - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: source - dot: . - naked_identifier: i - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: target - dot: . 
- naked_identifier: i - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: target - dot: . - naked_identifier: i assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: source - dot: . - naked_identifier: i statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/minimal_function.sql000066400000000000000000000001631503426445100250250ustar00rootroot00000000000000CREATE OR ALTER FUNCTION [dbo].[add] (@add_1 int, @add_2 int) RETURNS integer AS BEGIN RETURN @add_1 + @add_2 END sqlfluff-3.4.2/test/fixtures/dialects/tsql/minimal_function.yml000066400000000000000000000026671503426445100250420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c1ee4c4a5132279380bf93678e9edc3063d5bd6e6c5b1e84652c8fd36d2301c9 file: batch: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[add]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@add_1' - data_type: data_type_identifier: int - comma: ',' - parameter: '@add_2' - data_type: data_type_identifier: int - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: - parameter: '@add_1' - binary_operator: + - parameter: '@add_2' - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/minimal_function_no_alter.sql000066400000000000000000000004511503426445100267100ustar00rootroot00000000000000-- including just in case; Azure Synapse Analytics does not support OR ALTER -- https://docs.microsoft.com/en-us/sql/t-sql/statements/create-function-sql-data-warehouse?view=aps-pdw-2016-au7 CREATE FUNCTION [dbo].[add] (@add_1 int, @add_2 int) RETURNS integer AS BEGIN RETURN @add_1 + @add_2 END sqlfluff-3.4.2/test/fixtures/dialects/tsql/minimal_function_no_alter.yml000066400000000000000000000026141503426445100267150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7ef5fdb06c6bf32c08a8b240339f63760eab274afdfd3921f0881cb69c18ff13 file: batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[add]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@add_1' - data_type: data_type_identifier: int - comma: ',' - parameter: '@add_2' - data_type: data_type_identifier: int - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: - parameter: '@add_1' - binary_operator: + - parameter: '@add_2' - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/multi_statement_without_semicolon.sql000066400000000000000000000000541503426445100305420ustar00rootroot00000000000000select a from tbl1 GO select b from tbl2 GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/multi_statement_without_semicolon.yml000066400000000000000000000023741503426445100305530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00434c5df2aba3c7d90dd38aa4ffc873f52105e27a1d2df9496025becb226785 file: - batch: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/nested_joins.sql000066400000000000000000000015351503426445100241620ustar00rootroot00000000000000SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN I LEFT OUTER JOIN P ON I.Pcd = P.Iid ON BA.Iid = I.Bcd; GO SELECT 1 FROM BA RIGHT OUTER JOIN I LEFT OUTER JOIN P AS P_1 LEFT OUTER JOIN IP AS IP_1 ON P_1.NID = IP_1.NID ON I.PID = CAST(P_1.IDEID AS varchar) LEFT OUTER JOIN P AS P_2 LEFT OUTER JOIN IP AS IP_2 ON P_2.NID = IP_2.NID ON I.SecondaryPID = CAST(P_2.IDEID AS varchar) ON CAST(BA.IDEID AS varchar) = I.BAID SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN ( I JOIN P ON I.Pcd = P.Iid ) ON BA.Iid = I.Bcd; GO SELECT tst1.Name, tst2.OtherName FROM dbo.Test1 AS tst1 LEFT OUTER JOIN (dbo.Test2 AS tst2 INNER JOIN dbo.FilterTable AS fltr1 ON tst2.Id = fltr1.Id) ON tst1.id = tst2.id; sqlfluff-3.4.2/test/fixtures/dialects/tsql/nested_joins.yml000066400000000000000000000333301503426445100241620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e09eedf30ed29ce19cc60d5792c9a59fe7a64e75171aa1240e3d3196a3dd7828 file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: RegionCode from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: I - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: I - dot: . - naked_identifier: Pcd - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: P - dot: . - naked_identifier: Iid - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: BA - dot: . - naked_identifier: Iid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: I - dot: . - naked_identifier: Bcd statement_terminator: ; - go_statement: keyword: GO - batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: I - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P alias_expression: alias_operator: keyword: AS naked_identifier: P_1 - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: IP alias_expression: alias_operator: keyword: AS naked_identifier: IP_1 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: P_1 - dot: . - naked_identifier: NID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: IP_1 - dot: . - naked_identifier: NID - join_on_condition: keyword: 'ON' expression: column_reference: - naked_identifier: I - dot: . - naked_identifier: PID comparison_operator: raw_comparison_operator: '=' function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: P_1 - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P alias_expression: alias_operator: keyword: AS naked_identifier: P_2 - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: IP alias_expression: alias_operator: keyword: AS naked_identifier: IP_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: P_2 - dot: . - naked_identifier: NID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: IP_2 - dot: . - naked_identifier: NID - join_on_condition: keyword: 'ON' expression: column_reference: - naked_identifier: I - dot: . 
- naked_identifier: SecondaryPID comparison_operator: raw_comparison_operator: '=' function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: P_2 - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) - join_on_condition: keyword: 'ON' expression: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: BA - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: I - dot: . - naked_identifier: BAID - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: RegionCode from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: I join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: P join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: I - dot: . - naked_identifier: Pcd - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: P - dot: . - naked_identifier: Iid end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: BA - dot: . - naked_identifier: Iid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: I - dot: . - naked_identifier: Bcd statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: tst1 - dot: . - naked_identifier: Name - comma: ',' - select_clause_element: column_reference: - naked_identifier: tst2 - dot: . - naked_identifier: OtherName from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Test1 alias_expression: alias_operator: keyword: AS naked_identifier: tst1 join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Test2 alias_expression: alias_operator: keyword: AS naked_identifier: tst2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FilterTable alias_expression: alias_operator: keyword: AS naked_identifier: fltr1 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: tst2 - dot: . - naked_identifier: Id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: fltr1 - dot: . - naked_identifier: Id end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: tst1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tst2 - dot: . 
- naked_identifier: id statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/offset.sql000066400000000000000000000004551503426445100227640ustar00rootroot00000000000000SELECT client.reference, client.name FROM client GROUP BY client.reference, client.name ORDER BY client.reference OFFSET 10 ROWS; SELECT client.reference, client.name FROM client GROUP BY client.reference, client.name ORDER BY client.reference OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY; sqlfluff-3.4.2/test/fixtures/dialects/tsql/offset.yml000066400000000000000000000062221503426445100227640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6da1a4e79d3ada0f861b69d8662ef4add5b78441b71e303eb1af697be3a3c414 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: client - dot: . - naked_identifier: reference - comma: ',' - select_clause_element: column_reference: - naked_identifier: client - dot: . - naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: client groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: client - dot: . - naked_identifier: reference - comma: ',' - column_reference: - naked_identifier: client - dot: . - naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: client - dot: . - naked_identifier: reference - offset_clause: - keyword: OFFSET - numeric_literal: '10' - keyword: ROWS statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: client - dot: . - naked_identifier: reference - comma: ',' - select_clause_element: column_reference: - naked_identifier: client - dot: . - naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: client groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: client - dot: . - naked_identifier: reference - comma: ',' - column_reference: - naked_identifier: client - dot: . - naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: client - dot: . 
- naked_identifier: reference - offset_clause: - keyword: OFFSET - numeric_literal: '10' - keyword: ROWS - fetch_clause: - keyword: FETCH - keyword: NEXT - numeric_literal: '10' - keyword: ROWS - keyword: ONLY statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/open_symmetric_key.sql000066400000000000000000000011751503426445100254030ustar00rootroot00000000000000CREATE PROCEDURE dbo.procedure_name AS BEGIN SET NOCOUNT ON; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY CERTIFICATE [Cert]; SELECT column1 FROM sys.tables; END; GO OPEN SYMMETRIC KEY [Key01] DECRYPTION BY CERTIFICATE [Cert]; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY CERTIFICATE [Cert] WITH PASSWORD = 'testPass123'; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY ASYMMETRIC KEY [asym_key]; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY ASYMMETRIC KEY [asym_key] WITH PASSWORD = 'testPass123'; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY SYMMETRIC KEY [sym_key]; OPEN SYMMETRIC KEY [Key01] DECRYPTION BY PASSWORD = 'decryptPass123'; sqlfluff-3.4.2/test/fixtures/dialects/tsql/open_symmetric_key.yml000066400000000000000000000110501503426445100253760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54f1c4f8d7b6c4dc1b306ef6e84b8c64f72b5df036861dea9dfe049f86ad370a file: - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: procedure_name - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: CERTIFICATE - object_reference: quoted_identifier: '[Cert]' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . 
- naked_identifier: tables statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: CERTIFICATE - object_reference: quoted_identifier: '[Cert]' - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: CERTIFICATE - object_reference: quoted_identifier: '[Cert]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'testPass123'" - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: ASYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[asym_key]' - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: ASYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[asym_key]' - keyword: WITH - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'testPass123'" - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[sym_key]' - statement_terminator: ; - statement: open_symmetric_key_statement: - keyword: OPEN - keyword: SYMMETRIC - keyword: KEY - object_reference: quoted_identifier: '[Key01]' - keyword: DECRYPTION - keyword: BY - keyword: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'decryptPass123'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/openjson.sql000066400000000000000000000027101503426445100233250ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#examples */ SELECT * FROM products INNER JOIN OPENJSON(N'[1,2,3,4]') AS productTypes ON product.productTypeID = productTypes.value ; SELECT * FROM OPENJSON(@json) WITH ( month VARCHAR(3), temp int, month_id tinyint '$.sql:identity()') as months ; SELECT * FROM OPENJSON ( @json ) WITH ( Number VARCHAR(200) '$.Order.Number', Date DATETIME '$.Order.Date', Customer VARCHAR(200) '$.AccountNumber', Quantity INT '$.Item.Quantity', [Order] NVARCHAR(MAX) AS JSON ); SELECT SalesOrderID, OrderDate, value AS Reason FROM Sales.SalesOrderHeader CROSS APPLY OPENJSON (SalesReasons) WITH (value NVARCHAR(100) '$') ; SELECT store.title, location.street, location.lat, location.long FROM store CROSS APPLY OPENJSON(store.jsonCol, 'lax $.location') WITH (street VARCHAR(500) , postcode VARCHAR(500) '$.postcode' , lon int '$.geo.longitude', lat int '$.geo.latitude') AS location ; INSERT INTO Person SELECT * FROM OPENJSON(@json) WITH (id INT, firstName NVARCHAR(50), lastName NVARCHAR(50), isAlive BIT, age INT, dateOfBirth DATETIME, spouse NVARCHAR(50)) ; SELECT root.[key] AS [Order],TheValues.[key], TheValues.[value] FROM OPENJSON ( @JSON ) AS root CROSS APPLY OPENJSON ( root.value) AS TheValues ; 
sqlfluff-3.4.2/test/fixtures/dialects/tsql/openjson.yml000066400000000000000000000426661503426445100233450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ac05f7d3868d577713bf0c9fd2bb14064734dfeda97fe0636bd3747732a926c file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON function_contents: bracketed: start_bracket: ( expression: quoted_literal: "N'[1,2,3,4]'" end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: productTypes - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: product - dot: . - naked_identifier: productTypeID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: productTypes - dot: . - naked_identifier: value statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: month - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - comma: ',' - column_reference: naked_identifier: temp - data_type: data_type_identifier: int - comma: ',' - column_reference: naked_identifier: month_id - data_type: data_type_identifier: tinyint - quoted_literal: "'$.sql:identity()'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: months statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: Number - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '200' end_bracket: ) - quoted_literal: "'$.Order.Number'" - comma: ',' - column_reference: naked_identifier: Date - data_type: data_type_identifier: DATETIME - quoted_literal: "'$.Order.Date'" - comma: ',' - column_reference: naked_identifier: Customer - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '200' end_bracket: ) - quoted_literal: "'$.AccountNumber'" - comma: ',' - column_reference: naked_identifier: Quantity - data_type: data_type_identifier: INT - quoted_literal: "'$.Item.Quantity'" - comma: ',' - column_reference: 
quoted_identifier: '[Order]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( keyword: MAX end_bracket: ) - keyword: AS - keyword: JSON - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderDate - comma: ',' - select_clause_element: column_reference: naked_identifier: value alias_expression: alias_operator: keyword: AS naked_identifier: Reason from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: naked_identifier: SalesReasons end_bracket: ) openjson_with_clause: keyword: WITH bracketed: start_bracket: ( column_reference: naked_identifier: value data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) quoted_literal: "'$'" end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: store - dot: . - naked_identifier: title - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: street - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: lat - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: long from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: - naked_identifier: store - dot: . 
- naked_identifier: jsonCol comma: ',' quoted_literal: "'lax $.location'" end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: street - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '500' end_bracket: ) - comma: ',' - column_reference: naked_identifier: postcode - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '500' end_bracket: ) - quoted_literal: "'$.postcode'" - comma: ',' - column_reference: naked_identifier: lon - data_type: data_type_identifier: int - quoted_literal: "'$.geo.longitude'" - comma: ',' - column_reference: naked_identifier: lat - data_type: data_type_identifier: int - quoted_literal: "'$.geo.latitude'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: location statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: Person - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: data_type_identifier: INT - comma: ',' - column_reference: naked_identifier: firstName - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: lastName - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: isAlive - data_type: data_type_identifier: BIT - comma: ',' - column_reference: naked_identifier: age - data_type: data_type_identifier: INT - comma: ',' - column_reference: naked_identifier: dateOfBirth - data_type: data_type_identifier: DATETIME - comma: ',' - column_reference: naked_identifier: spouse - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: root dot: . quoted_identifier: '[key]' alias_expression: alias_operator: keyword: AS quoted_identifier: '[Order]' - comma: ',' - select_clause_element: column_reference: naked_identifier: TheValues dot: . quoted_identifier: '[key]' - comma: ',' - select_clause_element: column_reference: naked_identifier: TheValues dot: . quoted_identifier: '[value]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON function_contents: bracketed: start_bracket: ( expression: parameter: '@JSON' end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: root join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: root - dot: . 
- naked_identifier: value end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: TheValues statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/openquery.sql000066400000000000000000000005541503426445100235250ustar00rootroot00000000000000SELECT column1, column2 FROM OPENQUERY ([testlinkedserver], 'select * from table_name'); UPDATE OPENQUERY (OracleSvr, 'SELECT name FROM joe.titles WHERE id = 101') SET name = 'ADifferentName'; INSERT OPENQUERY (OracleSvr, 'SELECT name FROM joe.titles') VALUES ('NewTitle'); DELETE OPENQUERY (OracleSvr, 'SELECT name FROM joe.titles WHERE name = ''NewTitle'''); sqlfluff-3.4.2/test/fixtures/dialects/tsql/openquery.yml000066400000000000000000000054141503426445100235270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2ae210c5d4f27e2e5da725888bea3e6471e2d20da967196553459fadeedfbbb file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openquery_segment: keyword: OPENQUERY bracketed: start_bracket: ( object_reference: quoted_identifier: '[testlinkedserver]' comma: ',' quoted_literal: "'select * from table_name'" end_bracket: ) statement_terminator: ; - statement: update_statement: keyword: UPDATE openquery_segment: keyword: OPENQUERY bracketed: start_bracket: ( object_reference: naked_identifier: OracleSvr comma: ',' quoted_literal: "'SELECT name FROM joe.titles WHERE id = 101'" end_bracket: ) set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: name assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "'ADifferentName'" statement_terminator: ; - statement: insert_statement: keyword: INSERT openquery_segment: keyword: OPENQUERY bracketed: start_bracket: ( object_reference: naked_identifier: OracleSvr comma: ',' quoted_literal: "'SELECT name FROM joe.titles'" end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( quoted_literal: "'NewTitle'" end_bracket: ) - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: OPENQUERY - bracketed: start_bracket: ( naked_identifier: OracleSvr comma: ',' quoted_literal: "'SELECT name FROM joe.titles WHERE name = ''NewTitle'''" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/openrowset.sql000066400000000000000000000037131503426445100237030ustar00rootroot00000000000000SELECT a.* FROM OPENROWSET('Microsoft.Jet.OLEDB.4.0', 'C:\SAMPLES\Northwind.mdb'; 'admin'; 'password', Customers) AS a; GO SELECT d.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', Department) AS d; GO SELECT d.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', AdventureWorks2012.HumanResources.Department) AS d; GO SELECT a.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', 'SELECT TOP 10 GroupName, Name FROM AdventureWorks2012.HumanResources.Department') AS a; GO SELECT * FROM OPENROWSET( BULK 'C:\DATA\inv-2017-01-19.csv', SINGLE_CLOB) AS DATA; GO SELECT * FROM OPENROWSET(BULK 
N'C:\Text1.txt', SINGLE_NCLOB) AS Document; GO SELECT * FROM OPENROWSET(BULK N'D:\XChange\test-csv.csv', FORMATFILE = N'D:\XChange\test-csv.fmt', FIRSTROW=2, FORMAT='CSV') AS cars; GO SELECT TOP 10 * from OPENROWSET(BULK 'https://pandemicdatalake.blob.core.windows.net/public/curated/covid-19/ecdc_cases/latest/ecdc_cases.parquet', FORMAT = 'PARQUET') as rows GO SELECT TOP 10 * FROM OPENROWSET( BULK 'https://pandemicdatalake.blob.core.windows.net/public/curated/covid-19/ecdc_cases/latest/ecdc_cases.parquet', FORMAT = 'PARQUET' ) WITH ( [country_code] VARCHAR(5) COLLATE Latin1_General_BIN2, [country_name] VARCHAR(100) COLLATE Latin1_General_BIN2 2, [year] smallint, [population] bigint ) as rows GO SELECT TOP 1 * FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/censusdatacontainer/release/us_population_county/year=20*/*.parquet', FORMAT='PARQUET' ) WITH ( [stateName] VARCHAR(50), [stateName_explicit_path] VARCHAR(50) '$.stateName', [COUNTYNAME] VARCHAR(50), [countyName_explicit_path] VARCHAR(50) '$.COUNTYNAME', [population] bigint 'strict $.population' ) AS [r] GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/openrowset.yml000066400000000000000000000372071503426445100237120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ce469a0d8d36ac5394266038687470feed3f42c351aa2f2256f63407ab35663b file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'Microsoft.Jet.OLEDB.4.0'" - comma: ',' - quoted_literal: "'C:\\SAMPLES\\Northwind.mdb'" - statement_terminator: ; - quoted_literal: "'admin'" - statement_terminator: ; - quoted_literal: "'password'" - comma: ',' - table_reference: naked_identifier: Customers - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: naked_identifier: Department - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: - naked_identifier: AdventureWorks2012 - dot: . 
- naked_identifier: HumanResources - dot: . - naked_identifier: Department - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: d statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - quoted_literal: "'SELECT TOP 10 GroupName, Name\n FROM AdventureWorks2012.HumanResources.Department'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "'C:\\DATA\\inv-2017-01-19.csv'" - comma: ',' - keyword: SINGLE_CLOB - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: DATA statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "N'C:\\Text1.txt'" - comma: ',' - keyword: SINGLE_NCLOB - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Document statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "N'D:\\XChange\\test-csv.csv'" - comma: ',' - keyword: FORMATFILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'D:\\XChange\\test-csv.fmt'" - comma: ',' - keyword: FIRSTROW - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: cars statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '10' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "'https://pandemicdatalake.blob.core.windows.net/public/curated/covid-19/ecdc_cases/latest/ecdc_cases.parquet'" - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: rows - 
go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '10' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "'https://pandemicdatalake.blob.core.windows.net/public/curated/covid-19/ecdc_cases/latest/ecdc_cases.parquet'" - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - end_bracket: ) openrowset_with_clause: keyword: WITH bracketed: - start_bracket: ( - quoted_identifier: '[country_code]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - keyword: COLLATE - collation_reference: naked_identifier: Latin1_General_BIN2 - comma: ',' - quoted_identifier: '[country_name]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - keyword: COLLATE - collation_reference: naked_identifier: Latin1_General_BIN2 - numeric_literal: '2' - comma: ',' - quoted_identifier: '[year]' - data_type: data_type_identifier: smallint - comma: ',' - quoted_identifier: '[population]' - data_type: data_type_identifier: bigint - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: rows - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "'https://azureopendatastorage.blob.core.windows.net/censusdatacontainer/release/us_population_county/year=20*/*.parquet'" - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - end_bracket: ) openrowset_with_clause: keyword: WITH bracketed: - start_bracket: ( - quoted_identifier: '[stateName]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - quoted_identifier: '[stateName_explicit_path]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - quoted_literal: "'$.stateName'" - comma: ',' - quoted_identifier: '[COUNTYNAME]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - quoted_identifier: '[countyName_explicit_path]' - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - quoted_literal: "'$.COUNTYNAME'" - comma: ',' - quoted_identifier: '[population]' - data_type: data_type_identifier: bigint - quoted_literal: "'strict $.population'" - end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[r]' - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/outer_apply.sql000066400000000000000000000002641503426445100240370ustar00rootroot00000000000000-- JOIN 
should not be parsed as nested in OUTER APPLY SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col WHERE table1.Column1 ='blah'; sqlfluff-3.4.2/test/fixtures/dialects/tsql/outer_apply.yml000066400000000000000000000042211503426445100240360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 62fbfe0de61ba9d0ee242fb960407a74ee5ae5004b0cb194c080a2e6b39b68a0 file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: table1 dot: . star: '*' from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table1 - join_clause: - keyword: OUTER - keyword: APPLY - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table3 - dot: . - naked_identifier: col where_clause: keyword: WHERE expression: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: Column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'blah'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/print.sql000066400000000000000000000001641503426445100226270ustar00rootroot00000000000000DECLARE @TestVal VARCHAR(20) = 'Test Print' PRINT '#Dates' PRINT CAST(GETDATE() AS VARCHAR(50)); PRINT @TestVal sqlfluff-3.4.2/test/fixtures/dialects/tsql/print.yml000066400000000000000000000037551503426445100226420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0b4d19ca1ba6503fc63c2d7023df597e2aa76bd3fff30b9918ca52c6b6d7054b file: batch: - statement: declare_segment: keyword: DECLARE parameter: '@TestVal' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Test Print'" - statement: print_statement: keyword: PRINT expression: quoted_literal: "'#Dates'" - statement: print_statement: keyword: PRINT expression: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) keyword: AS data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) end_bracket: ) statement_terminator: ; - statement: print_statement: keyword: PRINT expression: parameter: '@TestVal' sqlfluff-3.4.2/test/fixtures/dialects/tsql/raiserror.sql000066400000000000000000000014341503426445100235040ustar00rootroot00000000000000RAISERROR(15600, -1, -1, 'mysp_CreateCustomer'); RAISERROR('This is message %s %d.', 10, 1, 'number'); RAISERROR('Error raised in TRY block.', 16, 1); RAISERROR (N'Unicode error', 16, 1); RAISERROR ('WITH option', 16, 1) WITH LOG; RAISERROR ('Error with lots of arguments %a %b %c %d %e %f %g %h %i %j %k %l %m %n %o %p %q %r %s %t', 16, 1, 'a', N'b', @c, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); RAISERROR (@ErrorMessage, -- Message text. @ErrorSeverity, -- Severity. @ErrorState -- State. ); RAISERROR ( 'The specified table does not exist. Please enter @tableName in the following format: Schemaname.Tablename OR [Schemaname].[Tablename]' ,11 ,- 1 ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/raiserror.yml000066400000000000000000000107461503426445100235140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 029fcbb024458cf4a54b3cbdb1c5b0d447b5d6426ce1e4d4ccf8c5265a7fee33 file: batch: - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - numeric_literal: '15600' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - quoted_literal: "'mysp_CreateCustomer'" - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'This is message %s %d.'" - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'number'" - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Error raised in TRY block.'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "N'Unicode error'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: - keyword: RAISERROR - bracketed: - start_bracket: ( - quoted_literal: "'WITH option'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - keyword: WITH - keyword: LOG - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Error with lots of arguments %a %b %c %d %e %f %g %h %i\ \ %j %k %l %m %n %o %p %q %r %s %t'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "N'b'" - comma: ',' - parameter: '@c' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - comma: ',' - numeric_literal: '7' - comma: ',' - numeric_literal: '8' - comma: ',' - numeric_literal: '9' - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '11' - comma: ',' - numeric_literal: '12' - comma: ',' - numeric_literal: '13' - comma: ',' - numeric_literal: '14' - comma: ',' - numeric_literal: '15' - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '17' - comma: ',' - numeric_literal: '18' - comma: ',' - numeric_literal: '19' - comma: ',' - numeric_literal: '20' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - parameter: '@ErrorMessage' - comma: ',' - parameter: '@ErrorSeverity' - comma: ',' - parameter: '@ErrorState' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'The specified table does not exist. Please enter @tableName\ \ in the following format: Schemaname.Tablename OR [Schemaname].[Tablename]'" - comma: ',' - numeric_literal: '11' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/reconfigure.sql000066400000000000000000000000511503426445100237760ustar00rootroot00000000000000RECONFIGURE; RECONFIGURE WITH OVERRIDE; sqlfluff-3.4.2/test/fixtures/dialects/tsql/reconfigure.yml000066400000000000000000000012151503426445100240030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ffd6a57755f1dc6f64fdfd0b7bf33808727b96708a0cda4c918c75a5d24c49c file: batch: - statement: reconfigure_statement: keyword: RECONFIGURE - statement_terminator: ; - statement: reconfigure_statement: - keyword: RECONFIGURE - keyword: WITH - keyword: OVERRIDE - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/rename_table.sql000066400000000000000000000001321503426445100241040ustar00rootroot00000000000000--Azure Synapse Analytics specific RENAME OBJECT [Reporting].[TABLE_NEW] to [TABLE_BASE]; sqlfluff-3.4.2/test/fixtures/dialects/tsql/rename_table.yml000066400000000000000000000013111503426445100241060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 96147ebb74fd822dd58899940d78bf266f6eb6c299a286b677b88408937c0e06 file: batch: statement: rename_statement: - keyword: RENAME - keyword: OBJECT - object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[TABLE_NEW]' - keyword: to - quoted_identifier: '[TABLE_BASE]' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/replicate.sql000066400000000000000000000003531503426445100234430ustar00rootroot00000000000000SELECT REPLICATE('0', 3 - DATALENGTH(c1)) + c1 AS 'Varchar Column', REPLICATE('0', 3 - DATALENGTH(c2)) + c2 AS 'Char Column' FROM t1; DECLARE @BinVar varbinary(128); SET @BinVar = CAST(REPLICATE(0x20, 128) AS varbinary(128) ); sqlfluff-3.4.2/test/fixtures/dialects/tsql/replicate.yml000066400000000000000000000111251503426445100234440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f9e4be1cf7752f39ca82f62cc70f98250c8695d27f87372723b9e0b4497aecd5 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: function: function_name: keyword: REPLICATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'0'" - comma: ',' - expression: numeric_literal: '3' binary_operator: '-' function: function_name: function_name_identifier: DATALENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) - end_bracket: ) binary_operator: + column_reference: naked_identifier: c1 alias_expression: alias_operator: keyword: AS quoted_identifier: "'Varchar Column'" - comma: ',' - select_clause_element: expression: function: function_name: keyword: REPLICATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'0'" - comma: ',' - expression: numeric_literal: '3' binary_operator: '-' function: function_name: function_name_identifier: DATALENGTH function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c2 end_bracket: ) - end_bracket: ) binary_operator: + column_reference: naked_identifier: c2 alias_expression: alias_operator: keyword: AS quoted_identifier: "'Char Column'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@BinVar' data_type: data_type_identifier: varbinary bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '128' end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@BinVar' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: REPLICATE function_contents: bracketed: start_bracket: ( numeric_literal: '0x20' comma: ',' expression: numeric_literal: '128' end_bracket: ) keyword: AS data_type: data_type_identifier: varbinary bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '128' end_bracket: ) end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select.sql000066400000000000000000000061161503426445100227550ustar00rootroot00000000000000--For testing valid select clause elements SELECT CASE WHEN 1 = 1 THEN 'True' WHEN 1 > 1 THEN 'False' WHEN 1 < 1 THEN 'False' WHEN 1 >= 1 THEN 'True' WHEN 1 > = 1 THEN 'True' WHEN 1 <= 1 THEN 'True' WHEN 1 < = 1 THEN 'True' WHEN 1 <> 1 THEN 'False' WHEN 1 < > 1 THEN 'False' WHEN 1 !< 1 THEN 'Why is this a thing?' WHEN 1 ! < 1 THEN 'Or this sort of thing?' WHEN 1 != 1 THEN 'False' WHEN 1 ! = 1 THEN 'False' WHEN 1 !> 1 THEN 'NULL Handling, Probably' WHEN 1 ! > 1 THEN 'NULL Handling, Probably' ELSE 'Silly Tests' END, all_pop. 
[Arrival Date], all_pop.Row#, all_pop.b@nanas, [# POAs], 'TSQLs escaping quotes test', 'TSQL''s escaping quotes test', 'TSQL' 's escaping quotes test', 'TSQL' AS 's escaping quotes test', '', '''', --unreserved words all_pop.Language, ANSI_DEFAULTS , ANSI_NULL_DFLT_OFF , ANSI_NULL_DFLT_ON , ANSI_NULLS , ANSI_PADDING , ANSI_WARNINGS , ARITHABORT , ARITHIGNORE , CONCAT_NULL_YIELDS_NULL , CURSOR_CLOSE_ON_COMMIT , DATEFIRST , DATEFORMAT , DEADLOCK_PRIORITY , DISK , DUMP , FIPS_FLAGGER , FMTONLY , FORCEPLAN , IMPLICIT_TRANSACTIONS , LOAD , LOCK_TIMEOUT , NOCOUNT , NOEXEC , NUMERIC_ROUNDABORT , PARSEONLY , PRECISION , PROPERTY , QUERY_GOVERNOR_COST_LIMIT , QUOTED_IDENTIFIER , REMOTE_PROC_TRANSACTIONS , SECURITYAUDIT , SHOWPLAN_ALL , SHOWPLAN_TEXT , SHOWPLAN_XML , XACT_ABORT, --TSQL non-keywords Rows, NaN, Rlike, Ilike, Separator, Auto_Increment, Unsigned, Describe, Comment, Ml, Modify, Minus, ROW_NUMBER()OVER(PARTITION BY [EventNM], [PersonID] ORDER BY [DateofEvent] desc) AS [RN], RANK()OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [R], DENSE_RANK()OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [DR], NTILE(5)OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [NT], sum(t.col1) over (partition by t.col2, t.col3), ROW_NUMBER() OVER (PARTITION BY (SELECT mediaty FROM dbo.MediaTypes ms WHERE ms.MediaTypeID = f.mediatypeid) ORDER BY AdjustedPriorityScore DESC) AS Subselect_Partition, ROW_NUMBER() OVER (PARTITION BY COALESCE(NPI1, NPI2) ORDER BY COALESCE(SystemEffectiveDTS1, SystemEffectiveDTS2) DESC) AS Coalesce_Partition, ROW_NUMBER() OVER (PARTITION BY (DayInMonth), (DaySuffix) ORDER BY Month ASC), COUNT(*) OVER (PARTITION BY NULL), [preceding] = count(*) over(order by object_id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ), [central] = count(*) over(order by object_id ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING ), [following] = count(*) over(order by object_id ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING), EqualsAlias = ColumnName, OtherColumnName AS AsAlias, cast(1 as character varying(1)), cast([central] as int), --unbracketed functions CURRENT_TIMESTAMP, CURRENT_USER, SESSION_USER, SYSTEM_USER, USER, test(default, 2) FROM dbo . all_pop; SELECT DISTINCT TOP 5 some_value FROM some_table; select 'Tabellen' as Objekt, Count(*) as Anzahl from dbo.sql_modules; -- naked identifier with extended Unicode characters select field1 AS 日期差多少天; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select.yml000066400000000000000000001052201503426445100227530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8272450206c2b86996fda2b977fa3b1b22dd363b3c6c39a5986e6e4193c02ad0 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'Why is this a thing?'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'Or this sort of thing?'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'NULL Handling, Probably'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'NULL Handling, Probably'" - else_clause: keyword: ELSE expression: quoted_literal: "'Silly Tests'" - keyword: END - comma: ',' - select_clause_element: column_reference: naked_identifier: all_pop dot: . quoted_identifier: '[Arrival Date]' - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: Row# - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: b@nanas - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[# POAs]' - comma: ',' - select_clause_element: quoted_literal: "'TSQLs escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL''s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL'" alias_expression: quoted_identifier: "'s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL'" alias_expression: alias_operator: keyword: AS quoted_identifier: "'s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "''" - comma: ',' - select_clause_element: quoted_literal: "''''" - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: Language - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_DEFAULTS - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULL_DFLT_OFF - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULL_DFLT_ON - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULLS - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_PADDING - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_WARNINGS - comma: ',' - select_clause_element: column_reference: naked_identifier: ARITHABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: ARITHIGNORE - comma: ',' - select_clause_element: column_reference: naked_identifier: CONCAT_NULL_YIELDS_NULL - comma: ',' - select_clause_element: column_reference: naked_identifier: CURSOR_CLOSE_ON_COMMIT - comma: ',' - select_clause_element: column_reference: naked_identifier: DATEFIRST - comma: ',' - select_clause_element: column_reference: naked_identifier: DATEFORMAT - comma: ',' - select_clause_element: column_reference: naked_identifier: DEADLOCK_PRIORITY - comma: ',' - select_clause_element: column_reference: naked_identifier: DISK - comma: ',' - select_clause_element: column_reference: naked_identifier: DUMP - comma: ',' - select_clause_element: column_reference: naked_identifier: FIPS_FLAGGER - comma: ',' - select_clause_element: column_reference: naked_identifier: FMTONLY - comma: ',' - select_clause_element: column_reference: naked_identifier: FORCEPLAN - comma: ',' - select_clause_element: column_reference: naked_identifier: IMPLICIT_TRANSACTIONS - comma: ',' - select_clause_element: column_reference: naked_identifier: LOAD - comma: ',' - select_clause_element: column_reference: naked_identifier: LOCK_TIMEOUT - comma: ',' - select_clause_element: column_reference: naked_identifier: NOCOUNT - comma: ',' - select_clause_element: column_reference: naked_identifier: NOEXEC - comma: ',' - select_clause_element: column_reference: naked_identifier: NUMERIC_ROUNDABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: PARSEONLY - comma: ',' - 
select_clause_element: column_reference: naked_identifier: PRECISION - comma: ',' - select_clause_element: column_reference: naked_identifier: PROPERTY - comma: ',' - select_clause_element: column_reference: naked_identifier: QUERY_GOVERNOR_COST_LIMIT - comma: ',' - select_clause_element: column_reference: naked_identifier: QUOTED_IDENTIFIER - comma: ',' - select_clause_element: column_reference: naked_identifier: REMOTE_PROC_TRANSACTIONS - comma: ',' - select_clause_element: column_reference: naked_identifier: SECURITYAUDIT - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_ALL - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_TEXT - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_XML - comma: ',' - select_clause_element: column_reference: naked_identifier: XACT_ABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: Rows - comma: ',' - select_clause_element: column_reference: naked_identifier: NaN - comma: ',' - select_clause_element: column_reference: naked_identifier: Rlike - comma: ',' - select_clause_element: column_reference: naked_identifier: Ilike - comma: ',' - select_clause_element: column_reference: naked_identifier: Separator - comma: ',' - select_clause_element: column_reference: naked_identifier: Auto_Increment - comma: ',' - select_clause_element: column_reference: naked_identifier: Unsigned - comma: ',' - select_clause_element: column_reference: naked_identifier: Describe - comma: ',' - select_clause_element: column_reference: naked_identifier: Comment - comma: ',' - select_clause_element: column_reference: naked_identifier: Ml - comma: ',' - select_clause_element: column_reference: naked_identifier: Modify - comma: ',' - select_clause_element: column_reference: naked_identifier: Minus - comma: ',' - select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' - comma: ',' - column_reference: quoted_identifier: '[PersonID]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[RN]' - comma: ',' - select_clause_element: function: function_name: keyword: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[R]' - comma: ',' - select_clause_element: function: function_name: keyword: DENSE_RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[DR]' - comma: ',' - select_clause_element: 
function: function_name: keyword: NTILE function_contents: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[NT]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 - comma: ',' - column_reference: - naked_identifier: t - dot: . - naked_identifier: col3 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: mediaty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: MediaTypes alias_expression: naked_identifier: ms where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: ms - dot: . - naked_identifier: MediaTypeID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: f - dot: . 
- naked_identifier: mediatypeid end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: AdjustedPriorityScore - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Subselect_Partition - comma: ',' - select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: function: function_name: keyword: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: NPI1 - comma: ',' - expression: column_reference: naked_identifier: NPI2 - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - expression: function: function_name: keyword: COALESCE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: SystemEffectiveDTS1 - comma: ',' - expression: column_reference: naked_identifier: SystemEffectiveDTS2 - end_bracket: ) - keyword: DESC end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Coalesce_Partition - comma: ',' - select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: DayInMonth end_bracket: ) - comma: ',' - bracketed: start_bracket: ( column_reference: naked_identifier: DaySuffix end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Month - keyword: ASC end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: null_literal: 'NULL' end_bracket: ) - comma: ',' - select_clause_element: alias_expression: quoted_identifier: '[preceding]' alias_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) - comma: ',' - select_clause_element: alias_expression: quoted_identifier: '[central]' alias_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - select_clause_element: alias_expression: quoted_identifier: '[following]' alias_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: count 
function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: CURRENT - keyword: ROW - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) - comma: ',' - select_clause_element: alias_expression: naked_identifier: EqualsAlias alias_operator: raw_comparison_operator: '=' column_reference: naked_identifier: ColumnName - comma: ',' - select_clause_element: column_reference: naked_identifier: OtherColumnName alias_expression: alias_operator: keyword: AS naked_identifier: AsAlias - comma: ',' - select_clause_element: function: function_name: keyword: cast function_contents: bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: as data_type: data_type_identifier: character keyword: varying bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: keyword: cast function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[central]' keyword: as data_type: data_type_identifier: int end_bracket: ) - comma: ',' - select_clause_element: bare_function: CURRENT_TIMESTAMP - comma: ',' - select_clause_element: bare_function: CURRENT_USER - comma: ',' - select_clause_element: bare_function: SESSION_USER - comma: ',' - select_clause_element: bare_function: SYSTEM_USER - comma: ',' - select_clause_element: bare_function: USER - comma: ',' - select_clause_element: function: function_name: function_name_identifier: test function_contents: bracketed: - start_bracket: ( - expression: keyword: default - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: all_pop statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: TOP - expression: numeric_literal: '5' select_clause_element: column_reference: naked_identifier: some_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'Tabellen'" alias_expression: alias_operator: keyword: as naked_identifier: Objekt - comma: ',' - select_clause_element: function: function_name: function_name_identifier: Count function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: Anzahl from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: sql_modules statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: field1 alias_expression: alias_operator: keyword: AS naked_identifier: 日期差多少天 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_assign_parameter.sql000066400000000000000000000002541503426445100263560ustar00rootroot00000000000000select userid = c.id from mydb.myschema.customer c where c.name = 'drjwelch'; select @userid_parameter = c.id from mydb.myschema.customer c where c.name = 'drjwelch'; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_assign_parameter.yml000066400000000000000000000052301503426445100263570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6be0a9325607e51c6aa15fba0677b99c6d51e618dadc4623d74ce1bc4406c2c2 file: batch: - statement: select_statement: select_clause: keyword: select select_clause_element: alias_expression: naked_identifier: userid alias_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: c - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - naked_identifier: customer alias_expression: naked_identifier: c where_clause: keyword: where expression: column_reference: - naked_identifier: c - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'drjwelch'" statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: parameter: '@userid_parameter' comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: c - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - naked_identifier: customer alias_expression: naked_identifier: c where_clause: keyword: where expression: column_reference: - naked_identifier: c - dot: . 
- naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'drjwelch'" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_cross_apply.sql000066400000000000000000000012131503426445100253640ustar00rootroot00000000000000SELECT DeptID, DeptName, DeptMgrID, EmpID, EmpLastName, EmpSalary FROM Departments d CROSS APPLY dbo.GetReports(d.DeptMgrID) ; SELECT d.DeptID, d.DeptName, DeptMgrID, reps.EmpID, reps.EmpLastName, reps.EmpSalary FROM Departments AS d CROSS APPLY dbo.GetReports(d.DeptMgrID) AS reps WHERE d.DeptMgrID = 10; SELECT * FROM Department D OUTER APPLY dbo.fn_GetAllEmployeeOfADepartment(D.DepartmentID); SELECT * FROM Department D OUTER APPLY dbo.fn_GetAllEmployeeOfADepartment(D.DepartmentID) AS AllEmp WHERE D.DepartmentId = 10; select s.column_id , sp.value from table1 as s cross apply string_split(replace(s.some_path, '->', '{'), '{') as sp; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_cross_apply.yml000066400000000000000000000241461503426445100254000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 70a659b03e18eb966422c6ae7e3ff75ac3840dd748eacbb124064e0baa82e65f file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DeptID - comma: ',' - select_clause_element: column_reference: naked_identifier: DeptName - comma: ',' - select_clause_element: column_reference: naked_identifier: DeptMgrID - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpLastName - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpSalary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Departments alias_expression: naked_identifier: d join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: GetReports function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptMgrID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptID - comma: ',' - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptName - comma: ',' - select_clause_element: column_reference: naked_identifier: DeptMgrID - comma: ',' - select_clause_element: column_reference: - naked_identifier: reps - dot: . - naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: - naked_identifier: reps - dot: . - naked_identifier: EmpLastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: reps - dot: . 
- naked_identifier: EmpSalary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Departments alias_expression: alias_operator: keyword: AS naked_identifier: d join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: GetReports function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptMgrID end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: reps where_clause: keyword: WHERE expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptMgrID comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Department alias_expression: naked_identifier: D join_clause: - keyword: OUTER - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: fn_GetAllEmployeeOfADepartment function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: D - dot: . - naked_identifier: DepartmentID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Department alias_expression: naked_identifier: D join_clause: - keyword: OUTER - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: fn_GetAllEmployeeOfADepartment function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: D - dot: . - naked_identifier: DepartmentID end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: AllEmp where_clause: keyword: WHERE expression: column_reference: - naked_identifier: D - dot: . - naked_identifier: DepartmentId comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: column_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: sp - dot: . - naked_identifier: value from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: as naked_identifier: s join_clause: - keyword: cross - keyword: apply - from_expression_element: table_expression: function: function_name: function_name_identifier: string_split function_contents: bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: replace function_contents: bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: some_path - comma: ',' - expression: quoted_literal: "'->'" - comma: ',' - expression: quoted_literal: "'{'" - end_bracket: ) - comma: ',' - expression: quoted_literal: "'{'" - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: sp statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_date_functions.sql000066400000000000000000000027501503426445100260420ustar00rootroot00000000000000SELECT [hello], DATEDIFF(day, [mydate], GETDATE()) AS [test], DATEPART(day, [mydate], GETDATE()) AS [test2], DATEDIFF(year, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(quarter, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(month, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(dayofyear, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(day, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(week, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(hour, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(minute, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(second, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(millisecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(microsecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF_BIG(microsecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEADD(year,2147483647, '20060731'), DATEADD(year,-2147483647, '20060731'), DATENAME(year, '12:10:30.123'), DATENAME(month, '12:10:30.123'), DATENAME(day, '12:10:30.123'), DATENAME(dayofyear, '12:10:30.123'), DATENAME(weekday, '12:10:30.123'), DAY(GetDate()) as today FROM mytable; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_date_functions.yml000066400000000000000000000325321503426445100260450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bbe4d3700aee81fbadf5534299ef5f204e99c5e8c105976dcc2da4f8aab8d4a3 file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[hello]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: column_reference: quoted_identifier: '[mydate]' - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[test]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEPART function_contents: bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: column_reference: quoted_identifier: '[mydate]' - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[test2]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: quarter - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: dayofyear - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: week - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: hour - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: 
function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: minute - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: second - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: millisecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF function_contents: bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF_BIG function_contents: bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: numeric_literal: '2147483647' - comma: ',' - expression: quoted_literal: "'20060731'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD function_contents: bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2147483647' - comma: ',' - expression: quoted_literal: "'20060731'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME function_contents: bracketed: start_bracket: ( date_part: year comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME function_contents: bracketed: start_bracket: ( date_part: month comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME function_contents: bracketed: start_bracket: ( date_part: day comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME function_contents: bracketed: start_bracket: ( date_part: dayofyear comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME function_contents: bracketed: start_bracket: ( date_part: weekday comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DAY 
function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GetDate function_contents: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: today from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_for.sql000066400000000000000000000016161503426445100236230ustar00rootroot00000000000000-- FOR JSON SELECT name, surname FROM emp FOR JSON AUTO; GO SELECT 1 AS a FOR JSON PATH; GO SELECT 1 AS a FOR JSON PATH, WITHOUT_ARRAY_WRAPPER GO SELECT c.ClassName, s.StudentName FROM #tabClass AS c RIGHT JOIN #tabStudent AS s ON s.ClassGuid = c.ClassGuid ORDER BY c.ClassName, s.StudentName FOR JSON AUTO; GO SELECT 1 AS a FOR JSON PATH, ROOT ('RootName'), WITHOUT_ARRAY_WRAPPER, INCLUDE_NULL_VALUES; GO -- FOR XML SELECT ProductModelID, Name FROM Production.ProductModel WHERE ProductModelID=122 or ProductModelID=119 FOR XML RAW; SELECT ProductPhotoID, ThumbNailPhoto FROM Production.ProductPhoto WHERE ProductPhotoID=70 FOR XML AUTO; SELECT 1 as Tag FROM HumanResources.Employee AS E FOR XML EXPLICIT; SELECT ProductModelID, Name FROM Production.ProductModel WHERE ProductModelID=122 OR ProductModelID=119 FOR XML PATH ('root'); -- FOR BROWSE SELECT 1 AS a FOR BROWSE GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_for.yml000066400000000000000000000225551503426445100236320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eeca5acfbc969b788e1b66970470ed58514391d9232c51a024d64d3fdd6f6550 file: - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: surname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp for_clause: - keyword: FOR - keyword: JSON - keyword: AUTO statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH - comma: ',' - keyword: WITHOUT_ARRAY_WRAPPER - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: c - dot: . - naked_identifier: ClassName - comma: ',' - select_clause_element: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: StudentName from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#tabClass' alias_expression: alias_operator: keyword: AS naked_identifier: c join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: hash_identifier: '#tabStudent' alias_expression: alias_operator: keyword: AS naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: ClassGuid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c - dot: . - naked_identifier: ClassGuid orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: c - dot: . - naked_identifier: ClassName - comma: ',' - column_reference: - naked_identifier: s - dot: . - naked_identifier: StudentName for_clause: - keyword: FOR - keyword: JSON - keyword: AUTO statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH - comma: ',' - keyword: ROOT - bracketed: start_bracket: ( quoted_literal: "'RootName'" end_bracket: ) - comma: ',' - keyword: WITHOUT_ARRAY_WRAPPER - comma: ',' - keyword: INCLUDE_NULL_VALUES statement_terminator: ; - go_statement: keyword: GO - batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductModelID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductModel where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '122' - binary_operator: or - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '119' for_clause: - keyword: FOR - keyword: XML - keyword: RAW - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductPhotoID - comma: ',' - select_clause_element: column_reference: naked_identifier: ThumbNailPhoto from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductPhoto where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductPhotoID comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' for_clause: - keyword: FOR - keyword: XML - keyword: AUTO - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: as naked_identifier: Tag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: Employee alias_expression: alias_operator: keyword: AS naked_identifier: E for_clause: - keyword: FOR - keyword: XML - keyword: EXPLICIT - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductModelID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductModel where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '122' - binary_operator: OR - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '119' for_clause: - keyword: FOR - keyword: XML - keyword: PATH - bracketed: start_bracket: ( quoted_literal: "'root'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: alias_operator: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: BROWSE - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_into.sql000066400000000000000000000002001503426445100237720ustar00rootroot00000000000000SELECT [ID] ,[FIN] ,[Unit] ,[EventNM] ,[Date] ,[CHGFlag] INTO #CHG FROM Final GROUP BY [FIN] ,[EventNM] ,[Unit] ,[Date] sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_into.yml000066400000000000000000000036601503426445100240110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 571e816a81663534057907cc5669e5ee844180a98182353e94f101aca0e2c5c4 file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[ID]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[FIN]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[Unit]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[EventNM]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[Date]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CHGFlag]' into_table_clause: keyword: INTO object_reference: hash_identifier: '#CHG' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Final groupby_clause: - keyword: GROUP - keyword: BY - column_reference: quoted_identifier: '[FIN]' - comma: ',' - column_reference: quoted_identifier: '[EventNM]' - comma: ',' - column_reference: quoted_identifier: '[Unit]' - comma: ',' - column_reference: quoted_identifier: '[Date]' sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_named_window.sql000066400000000000000000000032231503426445100255040ustar00rootroot00000000000000SELECT ROW_NUMBER() OVER win AS [Row Number], p.LastName, s.SalesYTD, a.PostalCode FROM Sales.SalesPerson AS s INNER JOIN Person.Person AS p ON s.BusinessEntityID = p.BusinessEntityID INNER JOIN Person.Address AS a ON a.AddressID = p.BusinessEntityID WHERE TerritoryID IS NOT NULL AND SalesYTD <> 0 WINDOW win AS ( PARTITION BY PostalCode ORDER BY SalesYTD DESC ) ORDER BY PostalCode; SELECT SalesOrderID, ProductID, OrderQty, SUM(OrderQty) OVER win AS [Total], AVG(OrderQty) OVER win AS [Avg], COUNT(OrderQty) OVER win AS [Count], MIN(OrderQty) OVER win AS [Min], MAX(OrderQty) OVER win AS [Max] FROM Sales.SalesOrderDetail WHERE SalesOrderID IN (43659, 43664) WINDOW win AS (PARTITION BY SalesOrderID); SELECT SalesOrderID AS OrderNumber, ProductID, OrderQty AS Qty, SUM(OrderQty) OVER win AS Total, AVG(OrderQty) OVER (win PARTITION BY SalesOrderID) AS Avg, COUNT(OrderQty) OVER ( win ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING ) AS Count FROM Sales.SalesOrderDetail WHERE SalesOrderID IN (43659, 43664) AND ProductID LIKE '71%' WINDOW win AS ( ORDER BY SalesOrderID, ProductID ); SELECT SalesOrderID AS OrderNumber, ProductID, OrderQty AS Qty, SUM(OrderQty) OVER win2 AS Total, AVG(OrderQty) OVER win1 AS Avg FROM Sales.SalesOrderDetail WHERE SalesOrderID IN(43659,43664) AND ProductID LIKE '71%' WINDOW win1 AS (win3), win2 AS (ORDER BY SalesOrderID, ProductID), win3 AS (win2 PARTITION BY SalesOrderID); select row_number() over win as x from information_schema.tables window win as (order by table_name) ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_named_window.yml000066400000000000000000000467551503426445100255270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 24dceed7a3e011ab5c6f65e97a705d9a988ca9bd6a99da65557acf056d3653b5 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Row Number]' - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: SalesYTD - comma: ',' - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: PostalCode from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson alias_expression: alias_operator: keyword: AS naked_identifier: s - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Person alias_expression: alias_operator: keyword: AS naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address alias_expression: alias_operator: keyword: AS naked_identifier: a - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: AddressID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . 
- naked_identifier: BusinessEntityID where_clause: keyword: WHERE expression: - column_reference: naked_identifier: TerritoryID - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: SalesYTD - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '0' named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: PostalCode orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: SalesYTD - keyword: DESC end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: PostalCode statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Total]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Avg]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Count]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MIN function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Min]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS quoted_identifier: '[Max]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: column_reference: naked_identifier: SalesOrderID keyword: IN bracketed: - start_bracket: ( - numeric_literal: '43659' - comma: ',' - numeric_literal: '43664' - end_bracket: ) named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: SalesOrderID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesOrderID alias_expression: alias_operator: keyword: AS naked_identifier: OrderNumber - comma: ',' - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty alias_expression: alias_operator: keyword: AS naked_identifier: Qty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win alias_expression: alias_operator: keyword: AS naked_identifier: Total - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: win partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: SalesOrderID end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Avg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: win frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - numeric_literal: '1' - keyword: FOLLOWING end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: Count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: - column_reference: naked_identifier: SalesOrderID - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '43659' - comma: ',' - numeric_literal: '43664' - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ProductID - keyword: LIKE - quoted_literal: "'71%'" named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: SalesOrderID - comma: ',' - column_reference: naked_identifier: ProductID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesOrderID alias_expression: alias_operator: keyword: AS naked_identifier: OrderNumber - comma: ',' - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty alias_expression: alias_operator: keyword: AS naked_identifier: Qty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win2 alias_expression: alias_operator: keyword: AS naked_identifier: Total - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) over_clause: keyword: OVER naked_identifier: win1 alias_expression: alias_operator: keyword: AS naked_identifier: Avg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: - column_reference: naked_identifier: SalesOrderID - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '43659' - comma: ',' - numeric_literal: '43664' - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ProductID - keyword: LIKE - quoted_literal: "'71%'" named_window: - keyword: WINDOW - named_window_expression: naked_identifier: win1 keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: win3 end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: win2 keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: SalesOrderID - comma: ',' - column_reference: naked_identifier: ProductID end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: win3 keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: win2 partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: SalesOrderID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: row_number function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over naked_identifier: win alias_expression: alias_operator: keyword: as naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: information_schema - dot: . - naked_identifier: tables named_window: keyword: window named_window_expression: naked_identifier: win keyword: as bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: table_name end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_natural_join.sql000066400000000000000000000003451503426445100255200ustar00rootroot00000000000000SELECT * FROM table1 natural -- this should parse as an alias as TSQL does not have NATURAL joins JOIN table2; SELECT * FROM table1 natural -- this should parse as an alias as TSQL does not have NATURAL joins INNER JOIN table2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_natural_join.yml000066400000000000000000000036041503426445100255230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1fe1d14060793fedd68e83b08daf90de7e961caccf510607300b656d76e0ee75 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: table2 statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_pivot.sql000066400000000000000000000006711503426445100241760ustar00rootroot00000000000000select [1], [2], [3] from table1 as t1 pivot (max(value) for rn in ([1], [2], [3]) ) as pvt; select [1], [2], [3] from table1 as t1 pivot (max(value) for rn in ([1], [2], [3]) ) pvt; GO SELECT unpvt.Program , dd.[Month Number] AS Month FROM p UNPIVOT ( MonthValue FOR MonthColumn IN (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec) ) AS unpvt INNER JOIN d ON [Month Name] = unpvt.MonthColumn; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_pivot.yml000066400000000000000000000171561503426445100242060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 42bc55b8cb475283121f1b764e6cbc0c8ec49b0889a46566400f1073b355b7f4 file: - batch: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: as naked_identifier: t1 from_pivot_expression: - keyword: pivot - bracketed: - start_bracket: ( - function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) - keyword: for - column_reference: naked_identifier: rn - keyword: in - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - end_bracket: ) - end_bracket: ) - keyword: as - table_reference: naked_identifier: pvt statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: as naked_identifier: t1 from_pivot_expression: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: max function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) - keyword: for - column_reference: naked_identifier: rn - keyword: in - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - end_bracket: ) - end_bracket: ) table_reference: naked_identifier: pvt statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: unpvt - dot: . - naked_identifier: Program - comma: ',' - select_clause_element: column_reference: naked_identifier: dd dot: . 
quoted_identifier: '[Month Number]' alias_expression: alias_operator: keyword: AS naked_identifier: Month from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: p from_pivot_expression: - keyword: UNPIVOT - bracketed: - start_bracket: ( - column_reference: naked_identifier: MonthValue - keyword: FOR - column_reference: naked_identifier: MonthColumn - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: naked_identifier: Jan - comma: ',' - pivot_column_reference: naked_identifier: Feb - comma: ',' - pivot_column_reference: naked_identifier: Mar - comma: ',' - pivot_column_reference: naked_identifier: Apr - comma: ',' - pivot_column_reference: naked_identifier: May - comma: ',' - pivot_column_reference: naked_identifier: Jun - comma: ',' - pivot_column_reference: naked_identifier: Jul - comma: ',' - pivot_column_reference: naked_identifier: Aug - comma: ',' - pivot_column_reference: naked_identifier: Sep - comma: ',' - pivot_column_reference: naked_identifier: Oct - comma: ',' - pivot_column_reference: naked_identifier: Nov - comma: ',' - pivot_column_reference: naked_identifier: Dec - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: unpvt join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: d - join_on_condition: keyword: 'ON' expression: - column_reference: quoted_identifier: '[Month Name]' - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: unpvt - dot: . - naked_identifier: MonthColumn statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_top.sql000066400000000000000000000012511503426445100236320ustar00rootroot00000000000000select top 1 t.date_column1 as last_date_column1 from t1.t2.table_name t order by t.column1 desc; SELECT TOP(10)JobTitle, HireDate FROM HumanResources.Employee; SELECT TOP(10)JobTitle, HireDate FROM HumanResources.Employee ORDER BY HireDate DESC; SELECT TOP(5)PERCENT JobTitle, HireDate FROM HumanResources.Employee ORDER BY HireDate DESC; SELECT TOP(10) PERCENT WITH TIES pp.FirstName, pp.LastName, e.JobTitle, e.Gender, r.Rate FROM Person.Person AS pp INNER JOIN HumanResources.Employee AS e ON pp.BusinessEntityID = e.BusinessEntityID INNER JOIN HumanResources.EmployeePayHistory AS r ON r.BusinessEntityID = e.BusinessEntityID ORDER BY Rate DESC; sqlfluff-3.4.2/test/fixtures/dialects/tsql/select_top.yml000066400000000000000000000200631503426445100236360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 79179b70d53285f84676528487a6a71ca11f416ec858ecdfa0c5909f1f467d30 file: batch: - statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: top expression: numeric_literal: '1' select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: date_column1 alias_expression: alias_operator: keyword: as naked_identifier: last_date_column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: t1 - dot: . - naked_identifier: t2 - dot: . 
- naked_identifier: table_name alias_expression: naked_identifier: t orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: t - dot: . - naked_identifier: column1 - keyword: desc statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: HireDate - keyword: DESC statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - keyword: PERCENT - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: HireDate - keyword: DESC statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - keyword: PERCENT - keyword: WITH - keyword: TIES - select_clause_element: column_reference: - naked_identifier: pp - dot: . - naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: - naked_identifier: pp - dot: . - naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: Gender - comma: ',' - select_clause_element: column_reference: - naked_identifier: r - dot: . - naked_identifier: Rate from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Person alias_expression: alias_operator: keyword: AS naked_identifier: pp - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: alias_operator: keyword: AS naked_identifier: e - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pp - dot: . 
- naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: alias_operator: keyword: AS naked_identifier: r - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: r - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Rate - keyword: DESC statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/sequence.sql000066400000000000000000000003141503426445100233000ustar00rootroot00000000000000CREATE SEQUENCE SEQ_MELDER START WITH 1 INCREMENT BY 1 GO CREATE SEQUENCE Test.DecSeq AS decimal(3,0) START WITH 125 INCREMENT BY 25 MINVALUE 100 MAXVALUE 200 CYCLE CACHE 3 ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/sequence.yml000066400000000000000000000041031503426445100233020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e0e8fd69019db0ed39c2baf5e6e04d8015b32483129f0e256d19de7838d1a0f7 file: - batch: statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_MELDER - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - go_statement: keyword: GO - batch: statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: - naked_identifier: Test - dot: . - naked_identifier: DecSeq - create_sequence_options_segment: keyword: AS data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '125' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '25' - create_sequence_options_segment: keyword: MINVALUE numeric_literal: '100' - create_sequence_options_segment: keyword: MAXVALUE numeric_literal: '200' - create_sequence_options_segment: keyword: CYCLE - create_sequence_options_segment: keyword: CACHE numeric_literal: '3' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/set_context_info.sql000066400000000000000000000002761503426445100250510ustar00rootroot00000000000000SET CONTEXT_INFO 0x01010101; DECLARE @BinVar varbinary(128); SET @BinVar = CAST(REPLICATE(0x20, 128) AS varbinary(128) ); SET CONTEXT_INFO @BinVar; SELECT CONTEXT_INFO() AS MyContextInfo; sqlfluff-3.4.2/test/fixtures/dialects/tsql/set_context_info.yml000066400000000000000000000053521503426445100250530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac90b1cbf372890a0cdd68884d7cb6035083f8dfde4433230a8afbae43d79000 file: batch: - statement: set_context_info_statement: - keyword: SET - keyword: CONTEXT_INFO - numeric_literal: '0x01010101' - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@BinVar' data_type: data_type_identifier: varbinary bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '128' end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@BinVar' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: keyword: REPLICATE function_contents: bracketed: start_bracket: ( numeric_literal: '0x20' comma: ',' expression: numeric_literal: '128' end_bracket: ) keyword: AS data_type: data_type_identifier: varbinary bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '128' end_bracket: ) end_bracket: ) statement_terminator: ; - statement: set_context_info_statement: - keyword: SET - keyword: CONTEXT_INFO - parameter: '@BinVar' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONTEXT_INFO function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: MyContextInfo statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/set_statements.sql000066400000000000000000000011021503426445100245260ustar00rootroot00000000000000SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -- Single params SET @param1 = 1 ; -- Multiple params SET @param1 = 1, @param2 = 2 ; -- Comma separated params with comment with comma SET @param1 = "test, test", @param2 = 2 ; -- Params with expression SET @param1 = ("test", "test"), @param2 = 2 ; -- Assignment operators SET @param1 += 1, @param2 -= 2, @param3 *= 3, @param4 /= 4, @param5 %= 5, @param5 ^= 6, @param5 &= 7, @param5 |= 8 ; -- Param with sequence in expression SET @param1 = (NEXT VALUE FOR [dbo].[SEQUENCE_NAME]) ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/set_statements.yml000066400000000000000000000107611503426445100245430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ad529eb73bf8da610ceddcbb5d0a7f5cbfa86bc470a92596bf7c4c82704278c1 file: batch: - statement: set_segment: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: UNCOMMITTED - statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@param1' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: column_reference: quoted_identifier: '"test, test"' - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"test"' - comma: ',' - column_reference: quoted_identifier: '"test"' - end_bracket: ) - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: binary_operator: + raw_comparison_operator: '=' - expression: numeric_literal: '1' - comma: ',' - parameter: '@param2' - assignment_operator: binary_operator: '-' raw_comparison_operator: '=' - expression: numeric_literal: '2' - comma: ',' - parameter: '@param3' - assignment_operator: binary_operator: '*' raw_comparison_operator: '=' - expression: numeric_literal: '3' - comma: ',' - parameter: '@param4' - assignment_operator: binary_operator: / raw_comparison_operator: '=' - expression: numeric_literal: '4' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: '%' raw_comparison_operator: '=' - expression: numeric_literal: '5' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: ^ raw_comparison_operator: '=' - expression: numeric_literal: '6' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: ampersand: '&' raw_comparison_operator: '=' - expression: numeric_literal: '7' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: pipe: '|' raw_comparison_operator: '=' - expression: numeric_literal: '8' - statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@param1' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQUENCE_NAME]' end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/sqlcmd_command.sql000066400000000000000000000005711503426445100244560ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility?view=sql-server-ver16#sqlcmd-commands */ -- reference / execute other SQL files :r script.sql :r script#01_a-b.sql :r ...\folder\script.SQL :r .\folder_1\folder_2\folder_3\folder_4\script.sql -- define *sqlcmd* scripting variable :setvar variable_name variable_value :setvar variable_name "variable_value" sqlfluff-3.4.2/test/fixtures/dialects/tsql/sqlcmd_command.yml000066400000000000000000000026271503426445100244640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 140f2fcb33d59f14b4ca6e0615627f232ebce91d797029e84fe854515cb4213f file: batch: - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: script.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: script#01_a-b.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: '...\folder\script.SQL' - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: .\folder_1\folder_2\folder_3\folder_4\script.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: setvar object_reference: naked_identifier: variable_name word: variable_value - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: setvar object_reference: naked_identifier: variable_name double_quote: '"variable_value"' sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_begin_end.sql000066400000000000000000000021641503426445100270370ustar00rootroot00000000000000CREATE PROCEDURE dbo.Test_Begin_End AS BEGIN SELECT 'Weekend'; select a from tbl1; select b from tbl2; END; GO CREATE PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUTPUT , @in_debug INT = 1 AS --******************************************************************************************* SET NOCOUNT ON; BEGIN SELECT '8' END; GO CREATE OR ALTER PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUT , @in_debug INT = 1 READONLY AS --******************************************************************************************* SET NOCOUNT ON; BEGIN SELECT '8' END; GO ALTER PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUTPUT , @in_debug INT = 1 AS SET NOCOUNT ON; BEGIN BEGIN TRY SELECT '8'; END TRY BEGIN CATCH SET @v_nSysErrorNum = ERROR_NUMBER(); SET @v_vchCode = ERROR_LINE(); SET @v_vchMsg = N'Missing control type.'; SET @v_vchMsg = @v_vchMsg + N' SQL Error = ' + ERROR_MESSAGE(); GOTO ERROR_HANDLER; END CATCH END; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_begin_end.yml000066400000000000000000000215401503426445100270400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 21356df263327efa381bdb326b08c823c86242dab0bb18cc5b26b1a2aba9f7b4 file: - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: Test_Begin_End - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUTPUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: READONLY - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUTPUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: try_catch: - keyword: BEGIN - keyword: TRY - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" statement_terminator: ; - keyword: END - keyword: TRY - keyword: BEGIN - keyword: CATCH - statement: set_segment: keyword: SET parameter: '@v_nSysErrorNum' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ERROR_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchCode' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ERROR_LINE function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchMsg' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'Missing control type.'" statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchMsg' assignment_operator: raw_comparison_operator: '=' expression: - parameter: '@v_vchMsg' - binary_operator: + - quoted_literal: "N' SQL Error = '" - binary_operator: + - function: function_name: function_name_identifier: ERROR_MESSAGE function_contents: bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: goto_statement: keyword: GOTO naked_identifier: ERROR_HANDLER - statement_terminator: ; - keyword: END - keyword: CATCH - keyword: END - statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_simple.sql000066400000000000000000000000561503426445100264140ustar00rootroot00000000000000CREATE PROC ProcedureName AS SELECT DB_NAME() sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_simple.yml000066400000000000000000000017301503426445100264160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3910763517513e7ea6dbda1a8a1635760254af12f4a764e1db41d3b306fdf832 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: naked_identifier: ProcedureName - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DB_NAME function_contents: bracketed: start_bracket: ( end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_single_statement.sql000066400000000000000000000020231503426445100304640ustar00rootroot00000000000000CREATE OR ALTER PROCEDURE DBO.SP_ECDC_CASES_INTER (@Apple [int], @Orange varchar(100)) AS INSERT INTO INTER.ECDC_CASES ( [COUNTRY], [COUNTRY_CODE], [CONTINENT], [POPULATION], [INDICATOR], [WEEKLY_COUNT], [YEAR_WEEK], [WEEK_START], [WEEK_END], [RATE_14_DAY], [CUMULATIVE_COUNT], [SOURCE] ) SELECT [COUNTRY], [COUNTRY_CODE], [CONTINENT], CAST([POPULATION] AS BIGINT) AS [POPULATION], [INDICATOR], CAST([WEEKLY_COUNT] AS BIGINT) AS [WEEKLY_COUNT], [YEAR_WEEK], CAST([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2)) AS DATE) AS [WEEK_START], CAST([dbo].[WEEK_END]([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2))) AS DATE ) AS [WEEK_END], CAST([RATE_14_DAY] AS FLOAT) AS [RATE_14_DAY], CAST([CUMULATIVE_COUNT] AS BIGINT) AS [CUMULATIVE_COUNT], [SOURCE] FROM STAGE.ECDC_CASES sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml000066400000000000000000000315251503426445100304770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4cec33d7a30307ccab775ec70548a5db14c8f2998805f50e01a54d2fb05b5903 file: batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - naked_identifier: DBO - dot: . - naked_identifier: SP_ECDC_CASES_INTER - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@Apple' - data_type: data_type_identifier: '[int]' - comma: ',' - parameter: '@Orange' - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - end_bracket: ) - keyword: AS - procedure_statement: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: INTER - dot: . 
- naked_identifier: ECDC_CASES - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - column_reference: quoted_identifier: '[POPULATION]' - comma: ',' - column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - column_reference: quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_START]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_END]' - comma: ',' - column_reference: quoted_identifier: '[RATE_14_DAY]' - comma: ',' - column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[SOURCE]' - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[POPULATION]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[POPULATION]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[WEEKLY_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' function_contents: bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEK_START]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[WEEK_END]' function_contents: bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . 
function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' function_contents: bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[WEEK_END]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[RATE_14_DAY]' keyword: AS data_type: data_type_identifier: FLOAT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[RATE_14_DAY]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST function_contents: bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[SOURCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: ECDC_CASES sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedured_mixed_statements.sql000066400000000000000000000010001503426445100306320ustar00rootroot00000000000000CREATE PROC [Reporting].[PowerPlan] AS DECLARE @DATEFUNCTION DATE = GETDATE() DROP TABLE [Reporting].[PowerPlan_BASE]; DECLARE @deadlock_var NCHAR(3); SET @deadlock_var = N'LOW'; BEGIN SET NOCOUNT ON SET DEADLOCK_PRIORITY LOW SET DEADLOCK_PRIORITY NORMAL SET DEADLOCK_PRIORITY HIGH SET DEADLOCK_PRIORITY @deadlock_var SET DEADLOCK_PRIORITY 10 SET DEADLOCK_PRIORITY -5 SELECT 1 CREATE TABLE #TempTest WITH (DISTRIBUTION = ROUND_ROBIN, HEAP) AS (SELECT 2 AS Two, 3 AS Three, 4 AS Four ) END sqlfluff-3.4.2/test/fixtures/dialects/tsql/stored_procedured_mixed_statements.yml000066400000000000000000000124621503426445100306520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 37c411f58e73aa807252c6eff57edf164fe38d89e906945a85a598540c22d54d file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[PowerPlan]' - keyword: AS - procedure_statement: - statement: declare_segment: keyword: DECLARE parameter: '@DATEFUNCTION' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE function_contents: bracketed: start_bracket: ( end_bracket: ) - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[Reporting]' - dot: . 
- quoted_identifier: '[PowerPlan_BASE]' - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@deadlock_var' data_type: data_type_identifier: NCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@deadlock_var' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'LOW'" statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: LOW - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: NORMAL - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: HIGH - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - parameter: '@deadlock_var' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - numeric_literal: '10' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: hash_identifier: '#TempTest' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: ROUND_ROBIN comma: ',' table_index_clause: keyword: HEAP end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: AS naked_identifier: Two - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: alias_operator: keyword: AS naked_identifier: Three - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: alias_operator: keyword: AS naked_identifier: Four end_bracket: ) - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/synonym.sql000066400000000000000000000005331503426445100232070ustar00rootroot00000000000000-- Create a synonym CREATE SYNONYM my_synonym FOR mytable; -- Create a synonym for a multi-part schema CREATE SYNONYM my_synonym FOR otherdb.dbo.mytable; -- Drop a synonym DROP SYNONYM my_synonym; -- Conditionally drop synonym DROP SYNONYM IF EXISTS my_synonym; -- Conditionally drop synonym with schema DROP SYNONYM IF EXISTS dbo.my_synonym; sqlfluff-3.4.2/test/fixtures/dialects/tsql/synonym.yml000066400000000000000000000033221503426445100232100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5c9c7a9f4248765659055b697db0cc3d3399394303e85f20554b55bf3d83aa6d file: batch: - statement: create_synonym_statement: - keyword: CREATE - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - keyword: FOR - object_reference: naked_identifier: mytable - statement_terminator: ; - statement: create_synonym_statement: - keyword: CREATE - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - keyword: FOR - object_reference: - naked_identifier: otherdb - dot: . - naked_identifier: dbo - dot: . - naked_identifier: mytable - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - keyword: IF - keyword: EXISTS - synonym_reference: naked_identifier: my_synonym - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - keyword: IF - keyword: EXISTS - synonym_reference: - naked_identifier: dbo - dot: . - naked_identifier: my_synonym - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/system-variables.sql000066400000000000000000000005371503426445100247710ustar00rootroot00000000000000UPDATE HumanResources.Employee SET JobTitle = N'Executive' WHERE NationalIDNumber = 123456789 IF @@ROWCOUNT = 0 PRINT 'Warning: No rows were updated'; IF @@ERROR = 547 BEGIN PRINT N'A check constraint violation occurred.'; END GO SELECT @@IDENTITY AS 'Identity'; GO PRINT @@TRANCOUNT GO SELECT @@PACK_RECEIVED AS 'Packets Received'; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/system-variables.yml000066400000000000000000000060601503426445100247700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2157e88d8d683ea28e7a4362d18a121c05a29bc66ca4b8dc02657d787f45d099 file: - batch: - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: Employee set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: JobTitle assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'Executive'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: NationalIDNumber comparison_operator: raw_comparison_operator: '=' numeric_literal: '123456789' - statement: if_then_statement: if_clause: keyword: IF expression: system_variable: '@@ROWCOUNT' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' statement: print_statement: keyword: PRINT expression: quoted_literal: "'Warning: No rows were updated'" statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: system_variable: '@@ERROR' comparison_operator: raw_comparison_operator: '=' numeric_literal: '547' statement: begin_end_block: - keyword: BEGIN - statement: print_statement: keyword: PRINT expression: quoted_literal: "N'A check constraint violation occurred.'" statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@IDENTITY' alias_expression: alias_operator: keyword: AS quoted_identifier: "'Identity'" statement_terminator: ; - go_statement: keyword: GO - batch: statement: print_statement: keyword: PRINT expression: system_variable: '@@TRANCOUNT' - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@PACK_RECEIVED' alias_expression: alias_operator: keyword: AS quoted_identifier: "'Packets Received'" statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/table_object_references.sql000066400000000000000000000002211503426445100263030ustar00rootroot00000000000000select column_1 from ."#my_table" select column_1 from .[#my_table]; select column_1 from ..[#my_table]; select column_1 from ...[#my_table]; sqlfluff-3.4.2/test/fixtures/dialects/tsql/table_object_references.yml000066400000000000000000000045141503426445100263160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2279ed87a2eaac228ed9fca50f2829ef6989c0a3170357ee97c640507a3356a5 file: batch: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: leading_dot: . quoted_identifier: '"#my_table"' - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: leading_dot: . quoted_identifier: '[#my_table]' statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: leading_dot: . dot: . 
quoted_identifier: '[#my_table]' statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - leading_dot: . - dot: . - dot: . - quoted_identifier: '[#my_table]' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/table_variables.sql000066400000000000000000000000671503426445100246140ustar00rootroot00000000000000declare @queue table ( id int, url nvarchar(100) ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/table_variables.yml000066400000000000000000000020541503426445100246140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 862b2714c5dba3bc89677f4191a6c8ab1c6135281db59a5ba2e5b9183b80c83c file: batch: statement: declare_segment: - keyword: declare - parameter: '@queue' - keyword: table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: url data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/tsql/tablesample.sql000066400000000000000000000003601503426445100237620ustar00rootroot00000000000000SELECT * FROM Sales.Customer TABLESAMPLE SYSTEM (10 PERCENT); SELECT * FROM Sales.Customer TABLESAMPLE (10 ROWS); SELECT * FROM Sales.Customer TABLESAMPLE (10); SELECT * FROM Sales.Customer TABLESAMPLE SYSTEM (10 ROWS) REPEATABLE (100); sqlfluff-3.4.2/test/fixtures/dialects/tsql/tablesample.yml000066400000000000000000000071621503426445100237730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d871c98295ef570f2ce25f7f7b737c474e7cee35204d5e3e0143b5cbfb93f541 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' keyword: PERCENT end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: Customer sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '10' keyword: ROWS end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' keyword: ROWS end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/temp_tables.sql000066400000000000000000000001051503426445100237650ustar00rootroot00000000000000SELECT a ,b ,c FROM #UnionA; Select d ,e ,f FROM ##UnionB; sqlfluff-3.4.2/test/fixtures/dialects/tsql/temp_tables.yml000066400000000000000000000033301503426445100237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 29d56dbe3d227788a944c46dc2fd1e4277b8b54abf05e66bd1caf23e8df20212 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#UnionA' statement_terminator: ; - statement: select_statement: select_clause: - keyword: Select - select_clause_element: column_reference: naked_identifier: d - comma: ',' - select_clause_element: column_reference: naked_identifier: e - comma: ',' - select_clause_element: column_reference: naked_identifier: f from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '##UnionB' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/temporal_tables.sql000066400000000000000000000073521503426445100246560ustar00rootroot00000000000000-- Select Query Temporal Tables SELECT * FROM Employee FOR SYSTEM_TIME BETWEEN '2021-01-01 00:00:00.0000000' AND '2022-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME ALL; SELECT * FROM Employee FOR SYSTEM_TIME FROM '2021-01-01 00:00:00.0000000' TO '2022-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME AS OF '2021-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME CONTAINED IN ('2021-01-01 00:00:00.0000000', '2022-01-01 00:00:00.0000000'); DECLARE @StartTime DATETIME2 = '2021-01-01 00:00:00'; DECLARE @EndTime DATETIME2 = '2022-01-01 00:00:00'; SELECT * FROM Employee FOR SYSTEM_TIME FROM @StartTime TO @EndTime; DECLARE @PointInTime DATETIME2 = '2021-01-01 00:00:00'; SELECT * FROM Employee FOR SYSTEM_TIME AS OF @PointInTime; -- Create Temporal Tables CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.EmployeeHistory), DURABILITY = SCHEMA_ONLY ); ; GO -- https://learn.microsoft.com/en-us/sql/relational-databases/tables/creating-a-system-versioned-temporal-table?view=sql-server-ver16#creating-a-temporal-table-with-a-default-history-table CREATE TABLE Department ( DeptID INT NOT NULL PRIMARY KEY CLUSTERED , DeptName VARCHAR(50) NOT NULL , ManagerID INT NULL , ParentDeptID INT NULL , ValidFrom DATETIME2 GENERATED ALWAYS AS ROW START NOT NULL , ValidTo DATETIME2 GENERATED ALWAYS AS ROW END NOT NULL , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo) ) WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.DepartmentHistory)) ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = COLUMNC ); ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (DATA_DELETION = ON (FILTER_COLUMN = ColumnC, RETENTION_PERIOD = INFINITE)); ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_AND_DATA, SYSTEM_VERSIONING = ON (HISTORY_TABLE = History.DepartmentHistory) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( REMOTE_DATA_ARCHIVE = OFF ( MIGRATION_STATE = PAUSED ), LEDGER = ON (LEDGER_VIEW = dbo.ABC (TRANSACTION_ID_COLUMN_NAME = 
[ColumnC], SEQUENCE_NUMBER_COLUMN_NAME = [ColumnDecimal])) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( DATA_COMPRESSION = ROW XML_COMPRESSION = ON ON PARTITIONS (2) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5) XML_COMPRESSION = OFF ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( XML_COMPRESSION = ON ON PARTITIONS (3 TO 5), FILETABLE_DIRECTORY = '/path1/path2', FILETABLE_COLLATE_FILENAME = constraint1, FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = constraint2, FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME = constraint3, FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME = constraint4 ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( REMOTE_DATA_ARCHIVE = ON ( FILTER_PREDICATE = NULL, MIGRATION_STATE = OUTBOUND), LEDGER = ON (LEDGER_VIEW = dbo.ABC, APPEND_ONLY = ON) ); GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/temporal_tables.yml000066400000000000000000000767631503426445100246740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c6b1721989e1c1a27945606b69a0849014796d39f869e76969243459b0a0d603 file: - batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: BETWEEN - quoted_literal: "'2021-01-01 00:00:00.0000000'" - keyword: AND - quoted_literal: "'2022-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: ALL statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: FROM - quoted_literal: "'2021-01-01 00:00:00.0000000'" - keyword: TO - quoted_literal: "'2022-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - quoted_literal: "'2021-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: CONTAINED - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'2021-01-01 00:00:00.0000000'" - comma: ',' - quoted_literal: "'2022-01-01 00:00:00.0000000'" - end_bracket: ) statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@StartTime' data_type: data_type_identifier: DATETIME2 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'2021-01-01 00:00:00'" statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@EndTime' data_type: data_type_identifier: DATETIME2 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'2022-01-01 00:00:00'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: FROM - parameter: '@StartTime' - keyword: TO - parameter: '@EndTime' statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@PointInTime' data_type: data_type_identifier: DATETIME2 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'2021-01-01 00:00:00'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - parameter: '@PointInTime' statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: EmployeeHistory end_bracket: ) - comma: ',' - keyword: DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: SCHEMA_ONLY - end_bracket: ) - statement_terminator: ; - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Department - bracketed: - start_bracket: ( - column_definition: - naked_identifier: DeptID - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - comma: ',' - column_definition: naked_identifier: DeptName data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ManagerID data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ParentDeptID data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ValidFrom - data_type: data_type_identifier: DATETIME2 - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: ROW - keyword: START - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ValidTo - data_type: data_type_identifier: DATETIME2 - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: ROW - keyword: END - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - period_segment: - keyword: PERIOD - keyword: FOR - keyword: SYSTEM_TIME - bracketed: - start_bracket: ( - column_reference: naked_identifier: ValidFrom - comma: ',' - column_reference: naked_identifier: ValidTo - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DepartmentHistory end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: start_bracket: ( keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: COLUMNC end_bracket: ) - statement_terminator: ; statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnC - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) - statement_terminator: ; statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: MEMORY_OPTIMIZED - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: SCHEMA_AND_DATA - comma: ',' - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: History - dot: . - naked_identifier: DepartmentHistory end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: REMOTE_DATA_ARCHIVE - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: MIGRATION_STATE - comparison_operator: raw_comparison_operator: '=' - keyword: PAUSED - end_bracket: ) - comma: ',' - keyword: LEDGER - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: LEDGER_VIEW comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . - naked_identifier: ABC bracketed: - start_bracket: ( - keyword: TRANSACTION_ID_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '[ColumnC]' - comma: ',' - keyword: SEQUENCE_NUMBER_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '[ColumnDecimal]' - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: ROW - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: PAGE - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5' - end_bracket: ) - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - keyword: TO - numeric_literal: '5' - end_bracket: ) - comma: ',' - keyword: FILETABLE_DIRECTORY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/path1/path2'" - comma: ',' - keyword: FILETABLE_COLLATE_FILENAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint1 - comma: ',' - keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint2 - comma: ',' - keyword: FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint3 - comma: ',' - keyword: FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint4 - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: REMOTE_DATA_ARCHIVE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: FILTER_PREDICATE - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - comma: ',' - keyword: MIGRATION_STATE - comparison_operator: raw_comparison_operator: '=' - keyword: OUTBOUND - end_bracket: ) - comma: ',' - keyword: LEDGER - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: LEDGER_VIEW - comparison_operator: raw_comparison_operator: '=' - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ABC - comma: ',' - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/transaction.sql000066400000000000000000000004541503426445100240220ustar00rootroot00000000000000BEGIN TRANSACTION; DELETE FROM HumanResources.JobCandidate WHERE JobCandidateID = 13; COMMIT; BEGIN TRAN; DELETE FROM HumanResources.JobCandidate WHERE JobCandidateID = 13; ROLLBACK TRAN; BEGIN TRAN; SAVE TRANSACTION; BEGIN TRAN namey; ROLLBACK namey; SAVE TRAN @variable; COMMIT @variable; sqlfluff-3.4.2/test/fixtures/dialects/tsql/transaction.yml000066400000000000000000000052031503426445100240210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fd3b8479ff5e72d7996d7e9a7a1b284d2130dd66ea72112371626a50ab94b12c file: batch: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: JobCandidate - where_clause: keyword: WHERE expression: column_reference: naked_identifier: JobCandidateID comparison_operator: raw_comparison_operator: '=' numeric_literal: '13' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: JobCandidate - where_clause: keyword: WHERE expression: column_reference: naked_identifier: JobCandidateID comparison_operator: raw_comparison_operator: '=' numeric_literal: '13' - statement_terminator: ; - statement: transaction_statement: - keyword: ROLLBACK - keyword: TRAN - statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - statement_terminator: ; - statement: transaction_statement: - keyword: SAVE - keyword: TRANSACTION - statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - naked_identifier: namey - statement_terminator: ; - statement: transaction_statement: keyword: ROLLBACK naked_identifier: namey statement_terminator: ; - statement: transaction_statement: - keyword: SAVE - keyword: TRAN - parameter: '@variable' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT parameter: '@variable' statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/triggers.sql000066400000000000000000000053371503426445100233300ustar00rootroot00000000000000CREATE TRIGGER reminder1 ON Sales.Customer AFTER INSERT, UPDATE AS RAISERROR ('Notify Customer Relations', 16, 10); GO CREATE TRIGGER reminder2 ON Sales.Customer AFTER INSERT, UPDATE, DELETE AS EXEC msdb.dbo.sp_send_dbmail @profile_name = 'AdventureWorks2012 Administrator', @recipients = 'danw@Adventure-Works.com', @body = 'Don''t forget to print a report for the sales force.', @subject = 'Reminder'; GO CREATE TRIGGER Purchasing.LowCredit ON Purchasing.PurchaseOrderHeader AFTER INSERT AS IF (ROWCOUNT_BIG() = 0) RETURN; IF EXISTS (SELECT 1 FROM inserted AS i JOIN Purchasing.Vendor AS v ON v.BusinessEntityID = i.VendorID WHERE v.CreditRating = 5 ) BEGIN RAISERROR ('A vendor''s credit rating is too low to accept new purchase orders.', 16, 1); ROLLBACK TRANSACTION; RETURN END; GO CREATE TRIGGER safety ON DATABASE FOR DROP_SYNONYM AS IF (@@ROWCOUNT = 0) RETURN; RAISERROR ('You must disable Trigger "safety" to remove synonyms!', 10, 1) ROLLBACK GO DROP TRIGGER safety ON DATABASE; GO CREATE TRIGGER ddl_trig_database ON ALL SERVER FOR CREATE_DATABASE AS PRINT 'Database Created.' 
SELECT 1 GO CREATE TRIGGER ddl_trig_database ON ALL SERVER FOR CREATE_DATABASE AS PRINT 'Database Created.'; SELECT 1 GO DROP TRIGGER ddl_trig_database ON ALL SERVER; GO CREATE TRIGGER connection_limit_trigger ON ALL SERVER WITH EXECUTE AS 'login_test' FOR LOGON AS BEGIN IF ORIGINAL_LOGIN()= 'login_test' AND (SELECT COUNT(*) FROM sys.dm_exec_sessions WHERE is_user_process = 1 AND original_login_name = 'login_test') > 3 ROLLBACK; END; GO Create TRIGGER dbo.tr_SP_BALS_L2_ATTRIBUTES ON dbo.SP_BALS_L2_ATTRIBUTES AFTER UPDATE AS UPDATE dbo.SP_BALS_L2_ATTRIBUTES SET PDW_LAST_UPDATED = Getdate() FROM dbo.SP_BALS_L2_ATTRIBUTES o INNER JOIN Inserted i ON o.PK_L2_BALS = i.PK_L2_BALS go disable trigger dbo.tr_SP_BALS_L2_ATTRIBUTES on dbo.SP_BALS_L2_ATTRIBUTES go Create TRIGGER dbo.tr_u_SP_BALS_L2_ATTRIBUTES ON dbo.SP_BALS_L2_ATTRIBUTES AFTER UPDATE AS UPDATE dbo.SP_BALS_L2_ATTRIBUTES SET PDW_LAST_UPDATED = sysdatetime() FROM dbo.SP_BALS_L2_ATTRIBUTES o INNER JOIN Inserted i ON o.PK_L2_BALS = i.PK_L2_BALS GO DROP TRIGGER employee_insupd; GO DROP TRIGGER safety ON DATABASE; GO disable trigger dbo.tr_u_SP_BALS_L2_ATTRIBUTES on dbo.SP_BALS_L2_ATTRIBUTES GO DISABLE TRIGGER safety ON DATABASE; GO CREATE OR ALTER TRIGGER reminder1 ON Sales.Customer AFTER INSERT, UPDATE AS RAISERROR ('Notify Customer Relations', 16, 10); GO CREATE TRIGGER reminder ON person.address AFTER UPDATE AS IF (UPDATE(stateprovinceid) OR UPDATE(postalcode)) BEGIN RAISERROR (50009, 16, 10) END; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/triggers.yml000066400000000000000000000566261503426445100233410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 96633a795ee98a2b3cd19c5e41d5414cc4903d923a633eb661385fe896b4bd91 file: - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: reminder1 - keyword: 'ON' - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer - keyword: AFTER - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: AS - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Notify Customer Relations'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: reminder2 - keyword: 'ON' - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer - keyword: AFTER - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: AS - statement: execute_script_statement: - keyword: EXEC - object_reference: - naked_identifier: msdb - dot: . - naked_identifier: dbo - dot: . 
- naked_identifier: sp_send_dbmail - parameter: '@profile_name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'AdventureWorks2012 Administrator'" - comma: ',' - parameter: '@recipients' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'danw@Adventure-Works.com'" - comma: ',' - parameter: '@body' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Don''t forget to print a report for the sales force.'" - comma: ',' - parameter: '@subject' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'Reminder'" - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: LowCredit - keyword: 'ON' - table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderHeader - keyword: AFTER - keyword: INSERT - keyword: AS - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROWCOUNT_BIG function_contents: bracketed: start_bracket: ( end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) statement: return_segment: keyword: RETURN statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: inserted alias_expression: alias_operator: keyword: AS naked_identifier: i join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: Vendor alias_expression: alias_operator: keyword: AS naked_identifier: v join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: v - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: VendorID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: v - dot: . 
- naked_identifier: CreditRating comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'A vendor''s credit rating is too low to accept\ \ new\npurchase orders.'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: transaction_statement: - keyword: ROLLBACK - keyword: TRANSACTION - statement_terminator: ; - statement: return_segment: keyword: RETURN - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE - keyword: FOR - naked_identifier: DROP_SYNONYM - keyword: AS - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: system_variable: '@@ROWCOUNT' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) statement: return_segment: keyword: RETURN statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'You must disable Trigger \"safety\" to remove synonyms!'" - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement: transaction_statement: keyword: ROLLBACK - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: FOR - naked_identifier: CREATE_DATABASE - keyword: AS - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Database Created.'" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: FOR - naked_identifier: CREATE_DATABASE - keyword: AS - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Database Created.'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: connection_limit_trigger - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: WITH - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_identifier: "'login_test'" - keyword: FOR - naked_identifier: LOGON - keyword: AS - statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: if_clause: keyword: IF expression: - function: function_name: function_name_identifier: ORIGINAL_LOGIN function_contents: bracketed: start_bracket: ( end_bracket: ) - comparison_operator: raw_comparison_operator: 
'=' - quoted_literal: "'login_test'" - binary_operator: AND - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT function_contents: bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: dm_exec_sessions where_clause: keyword: WHERE expression: - column_reference: naked_identifier: is_user_process - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: original_login_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'login_test'" end_bracket: ) - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '3' statement: transaction_statement: keyword: ROLLBACK statement_terminator: ; - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: Create - keyword: TRIGGER - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_SP_BALS_L2_ATTRIBUTES - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - keyword: AFTER - keyword: UPDATE - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: PDW_LAST_UPDATED assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: Getdate function_contents: bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: Inserted alias_expression: naked_identifier: i - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: PK_L2_BALS - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: PK_L2_BALS - go_statement: keyword: go - batch: statement: disable_trigger: - keyword: disable - keyword: trigger - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_SP_BALS_L2_ATTRIBUTES - keyword: 'on' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - go_statement: keyword: go - batch: statement: create_trigger: - keyword: Create - keyword: TRIGGER - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_u_SP_BALS_L2_ATTRIBUTES - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - keyword: AFTER - keyword: UPDATE - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: SP_BALS_L2_ATTRIBUTES set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: PDW_LAST_UPDATED assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: sysdatetime function_contents: bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: Inserted alias_expression: naked_identifier: i - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: PK_L2_BALS - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: PK_L2_BALS - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: employee_insupd statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO - batch: statement: disable_trigger: - keyword: disable - keyword: trigger - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_u_SP_BALS_L2_ATTRIBUTES - keyword: 'on' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - go_statement: keyword: GO - batch: statement: disable_trigger: - keyword: DISABLE - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: TRIGGER - trigger_reference: naked_identifier: reminder1 - keyword: 'ON' - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer - keyword: AFTER - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: AS - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Notify Customer Relations'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: reminder - keyword: 'ON' - table_reference: - naked_identifier: person - dot: . 
- naked_identifier: address - keyword: AFTER - keyword: UPDATE - keyword: AS - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: - function: function_name: function_name_identifier: UPDATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: stateprovinceid end_bracket: ) - binary_operator: OR - function: function_name: function_name_identifier: UPDATE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: postalcode end_bracket: ) end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - numeric_literal: '50009' - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '10' - end_bracket: ) - keyword: END statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/try_catch.sql000066400000000000000000000005461503426445100234570ustar00rootroot00000000000000BEGIN TRY -- Table does not exist; object name resolution -- error not caught. SELECT * FROM NonexistentTable; END TRY BEGIN CATCH SELECT ERROR_NUMBER() AS ErrorNumber ,ERROR_MESSAGE() AS ErrorMessage; THROW END CATCH GO THROW 50005, N'an error occurred', 1; BEGIN TRY EXEC spSomeProc END TRY BEGIN CATCH END CATCH; sqlfluff-3.4.2/test/fixtures/dialects/tsql/try_catch.yml000066400000000000000000000056101503426445100234560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 092dacfb7da71f95d1ac27c2db9f0318b02db23cb37afaf0fdb9e4119661175e file: - batch: statement: try_catch: - keyword: BEGIN - keyword: TRY - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: NonexistentTable statement_terminator: ; - keyword: END - keyword: TRY - keyword: BEGIN - keyword: CATCH - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ERROR_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ErrorNumber - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ERROR_MESSAGE function_contents: bracketed: start_bracket: ( end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ErrorMessage statement_terminator: ; - statement: throw_statement: keyword: THROW - keyword: END - keyword: CATCH - go_statement: keyword: GO - batch: - statement: throw_statement: - keyword: THROW - numeric_literal: '50005' - comma: ',' - quoted_literal: "N'an error occurred'" - comma: ',' - numeric_literal: '1' - statement_terminator: ; - statement: try_catch: - keyword: BEGIN - keyword: TRY - statement: execute_script_statement: keyword: EXEC object_reference: naked_identifier: spSomeProc - keyword: END - keyword: TRY - keyword: BEGIN - keyword: CATCH - keyword: END - keyword: CATCH - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/tsql/update.sql000066400000000000000000000003761503426445100227620ustar00rootroot00000000000000update dbo.Cases set [Flg] = 1 where ID in (select distinct [ID] from dbo.CX) OPTION (Label = 'Cases') ; update tt set tt.rn += 1 from table1 as tt join src on tt._id = src._id; UPDATE stuff SET deleted = 1 OUTPUT * INTO trash WHERE useless = 1 sqlfluff-3.4.2/test/fixtures/dialects/tsql/update.yml000066400000000000000000000106101503426445100227540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1d84202124669863cfc26296b19136a7fb21c2ffb3c1e9bea329f7607e2a170 file: batch: - statement: update_statement: keyword: update table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Cases set_clause_list: keyword: set set_clause: column_reference: quoted_identifier: '[Flg]' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' where_clause: keyword: where expression: column_reference: naked_identifier: ID keyword: in bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: quoted_identifier: '[ID]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: CX end_bracket: ) option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: Label comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Cases'" end_bracket: ) statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: tt set_clause_list: keyword: set set_clause: column_reference: - naked_identifier: tt - dot: . - naked_identifier: rn assignment_operator: binary_operator: + raw_comparison_operator: '=' expression: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: alias_operator: keyword: as naked_identifier: tt join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: src join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tt - dot: . - naked_identifier: _id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: _id statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: stuff set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: deleted assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: star: '*' - keyword: INTO - table_reference: naked_identifier: trash where_clause: keyword: WHERE expression: column_reference: naked_identifier: useless comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-3.4.2/test/fixtures/dialects/tsql/waitfor.sql000066400000000000000000000003471503426445100231510ustar00rootroot00000000000000EXECUTE sp_add_job @job_name = 'TestJob'; BEGIN WAITFOR TIME '22:20'; EXECUTE sp_update_job @job_name = 'TestJob', @new_name = 'UpdatedJob'; END; GO BEGIN WAITFOR DELAY '02:00'; EXECUTE sp_helpdb; END; GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/waitfor.yml000066400000000000000000000042151503426445100231510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 02172d4fd48d357bd7583d06bcbbfb00ef906891d5ae5df93dd5c293175dad73 file: - batch: - statement: execute_script_statement: keyword: EXECUTE object_reference: naked_identifier: sp_add_job parameter: '@job_name' comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'TestJob'" statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: waitfor_statement: - keyword: WAITFOR - keyword: TIME - expression: quoted_literal: "'22:20'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: naked_identifier: sp_update_job - parameter: '@job_name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'TestJob'" - comma: ',' - parameter: '@new_name' - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'UpdatedJob'" - statement_terminator: ; - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: statement: begin_end_block: - keyword: BEGIN - statement: waitfor_statement: - keyword: WAITFOR - keyword: DELAY - expression: quoted_literal: "'02:00'" - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: naked_identifier: sp_helpdb statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO sqlfluff-3.4.2/test/fixtures/dialects/tsql/while_statement.sql000066400000000000000000000004441503426445100246700ustar00rootroot00000000000000WHILE (1=1) BEGIN IF EXISTS (SELECT * FROM ##MyTempTable WHERE EventCode = 'Done') BEGIN BREAK; -- 'Done' row has finally been inserted and detected, so end this loop. END PRINT N'The other process is not yet done.'; -- Re-confirm the non-done status to the console. END sqlfluff-3.4.2/test/fixtures/dialects/tsql/while_statement.yml000066400000000000000000000051441503426445100246740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8abf6cf853a6b3e42fcef7ddf3e8c207f72aa1e176555baf9a04efadc50063c6 file: batch: statement: while_statement: keyword: WHILE expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '##MyTempTable' where_clause: keyword: WHERE expression: column_reference: naked_identifier: EventCode comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Done'" end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: break_statement: keyword: BREAK - statement_terminator: ; - keyword: END - statement: print_statement: keyword: PRINT expression: quoted_literal: "N'The other process is not yet done.'" statement_terminator: ; - keyword: END sqlfluff-3.4.2/test/fixtures/dialects/tsql/window_functions.sql000066400000000000000000000014251503426445100250730ustar00rootroot00000000000000-- Classical partition/order by SELECT ROW_NUMBER() OVER(PARTITION BY t.col1 ORDER BY t.col2) rn FROM mytable t; -- Partition by constant SELECT ROW_NUMBER() OVER(PARTITION BY 1 ORDER BY t.col2) rn FROM mytable t; -- Partition by expression SELECT ROW_NUMBER() OVER(PARTITION BY CASE WHEN t.col1 = 'value' THEN 1 END ORDER BY t.col2) rn FROM mytable t; -- Partition by expression and column SELECT ROW_NUMBER() OVER(PARTITION BY t.col3, CASE WHEN t.col1 = 'value' THEN 1 END, t.col4 ORDER BY t.col2) rn FROM mytable t; -- Partition by select statement SELECT ROW_NUMBER() OVER(PARTITION BY (SELECT col1 FROM othertable) ORDER BY t.col2) rn FROM mytable t; -- Partition by aggregate SELECT ROW_NUMBER() OVER(PARTITION BY SUM(t.col1) ORDER BY t.col2) rn FROM mytable t GROUP BY t.col2; sqlfluff-3.4.2/test/fixtures/dialects/tsql/window_functions.yml000066400000000000000000000272311503426445100251000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bbb3c4c89f2e59ba5265dee316396e1ec222dd96735873bb59313661160a9936 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - keyword: THEN - expression: numeric_literal: '1' - keyword: END orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col3 - comma: ',' - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - keyword: THEN - expression: numeric_literal: '1' - keyword: END - comma: ',' - column_reference: - naked_identifier: t - dot: . - naked_identifier: col4 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: othertable end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: keyword: ROW_NUMBER function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/000077500000000000000000000000001503426445100214235ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/dialects/vertica/.sqlfluff000066400000000000000000000000351503426445100232440ustar00rootroot00000000000000[sqlfluff] dialect = vertica sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_schema.sql000066400000000000000000000004351503426445100245750ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-schema/#examples ALTER SCHEMA ms OWNER TO dbadmin CASCADE; ALTER SCHEMA s1, s2 RENAME TO s3, s4; ALTER SCHEMA s1 DEFAULT INCLUDE SCHEMA PRIVILEGES; ALTER SCHEMA s1 DEFAULT EXCLUDE SCHEMA PRIVILEGES; sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_schema.yml000066400000000000000000000031771503426445100246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e5bbe0352bad903acf977be524ef15ccd8464a03a67b54d46dad5610a49a5012 file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: ms - keyword: OWNER - keyword: TO - role_reference: naked_identifier: dbadmin - keyword: CASCADE - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - comma: ',' - schema_reference: naked_identifier: s2 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: s3 - comma: ',' - schema_reference: naked_identifier: s4 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: DEFAULT - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: DEFAULT - schema_privileges_segment: - keyword: EXCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_table.sql000066400000000000000000000012221503426445100244170ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/admin/working-with-native-tables/altering-table-definitions/ ALTER TABLE public.store_orders ADD COLUMN expected_ship_date date; ALTER TABLE public.store_orders ADD COLUMN delivery_date date PROJECTIONS (store_orders_p); ALTER TABLE x ALTER COLUMN b DROP DEFAULT; ALTER TABLE t DROP COLUMN y; ALTER TABLE x DROP COLUMN a CASCADE; ALTER TABLE t DROP COLUMN x RESTRICT; ALTER TABLE t DROP x CASCADE; ALTER TABLE public.new_sales ALTER CONSTRAINT C_PRIMARY ENABLED; ALTER TABLE s1.t1, s1.t2 RENAME TO u1, u2; ALTER TABLE t1, t2, temp RENAME TO temp, t1, t2; ALTER TABLE s1.t1 SET SCHEMA s2; ALTER TABLE t33 OWNER TO Alice; sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_table.yml000066400000000000000000000107251503426445100244310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f6e20a1413d392fdc48e3cc3cad8a49e61b86fa419e3a37282e19a8ff395f417 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: store_orders - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: expected_ship_date - data_type: keyword: date - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: store_orders - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: delivery_date - data_type: keyword: date - keyword: PROJECTIONS - bracketed: start_bracket: ( table_reference: naked_identifier: store_orders_p end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: b - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: y - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: x - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: a - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: x - keyword: RESTRICT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - keyword: DROP - column_reference: naked_identifier: x - keyword: CASCADE - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: new_sales - alter_table_action_segment: - keyword: ALTER - keyword: CONSTRAINT - parameter: C_PRIMARY - keyword: ENABLED - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - comma: ',' - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t2 - keyword: RENAME - keyword: TO - table_reference: naked_identifier: u1 - comma: ',' - table_reference: naked_identifier: u2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - comma: ',' - table_reference: naked_identifier: temp - keyword: RENAME - keyword: TO - table_reference: naked_identifier: temp - comma: ',' - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: s1 - dot: . 
- naked_identifier: t1 - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: s2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t33 - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: Alice - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_view.sql000066400000000000000000000004021503426445100243010ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/alter-statements/alter-view/#examples ALTER VIEW view1, view2 RENAME TO view3, view4; ALTER VIEW view1 OWNER TO Alice; ALTER VIEW view1 SET SCHEMA s1; ALTER VIEW view1 INCLUDE SCHEMA PRIVILEGES; sqlfluff-3.4.2/test/fixtures/dialects/vertica/alter_view.yml000066400000000000000000000027071503426445100243150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1b20745fb40bbeb31afe80876f75688757de19ea92de688b186211d4b1a48287 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view1 - comma: ',' - table_reference: naked_identifier: view2 - keyword: RENAME - keyword: TO - parameter: view3 - comma: ',' - parameter: view4 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view1 - keyword: OWNER - keyword: TO - parameter: Alice - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view1 - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view1 - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/array.sql000066400000000000000000000012631503426445100232640ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/data-types/complex-types/array/ SELECT ARRAY[1, 2, 3]; SELECT ARRAY[ARRAY[1], ARRAY[2]]; CREATE TABLE sal_emp ( name varchar, pay_by_quarter ARRAY[int], schedule ARRAY[varchar(50)] ); SELECT ARRAY[[1, 2], [3, 4]]; -- Need to add support for complex datatypes -- SELECT ARRAY[row(1, 2), row(1, 3)]; -- SELECT -- name, -- num, -- gpa -- FROM students -- WHERE major = ARRAY[row('Science', 'Physics')]; SELECT (ARRAY['a', 'b', 'c', 'd', 'e'])[1]; SELECT (ARRAY['a', 'b', 'c', 'd', 'e', 'f', 'g'])[1:4]; SELECT (ARRAY[ARRAY[1, 2], ARRAY[3, 4]])[0][0]; SELECT ARRAY[1, 3] IS NULL; SELECT ARRAY[1, 3] <=> NULL; sqlfluff-3.4.2/test/fixtures/dialects/vertica/array.yml000066400000000000000000000202371503426445100232700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb6533cd54350021938d2ac3266b8503cabd6d23560dad5eb5d90ef5b18f3eda file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - comma: ',' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' - end_square_bracket: ']' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sal_emp - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: pay_by_quarter - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: int end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: schedule - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - comma: ',' - quoted_literal: "'d'" - comma: ',' - quoted_literal: "'e'" - end_square_bracket: ']' end_bracket: ) array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - comma: ',' - quoted_literal: "'d'" - comma: ',' - quoted_literal: "'e'" - comma: ',' - quoted_literal: "'f'" - comma: ',' - quoted_literal: "'g'" - end_square_bracket: ']' end_bracket: ) array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '4' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - 
start_square_bracket: '[' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - comma: ',' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - end_square_bracket: ']' end_bracket: ) - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' keyword: IS null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' comparison_operator: null_equals_operator: <=> null_literal: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/bare_functions.sql000066400000000000000000000002421503426445100251430ustar00rootroot00000000000000SELECT current_timestamp AS col1, current_time AS col2, current_date AS col3, localtime AS col4, localtimestamp AS col5, sysdate AS col6; sqlfluff-3.4.2/test/fixtures/dialects/vertica/bare_functions.yml000066400000000000000000000032561503426445100251550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ead151d8c2865c8b3d0272cce417e58b557450ff7ef14634a7df12b15d5e7595 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: current_timestamp alias_expression: alias_operator: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: bare_function: current_time alias_expression: alias_operator: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: bare_function: current_date alias_expression: alias_operator: keyword: AS naked_identifier: col3 - comma: ',' - select_clause_element: bare_function: localtime alias_expression: alias_operator: keyword: AS naked_identifier: col4 - comma: ',' - select_clause_element: bare_function: localtimestamp alias_expression: alias_operator: keyword: AS naked_identifier: col5 - comma: ',' - select_clause_element: bare_function: sysdate alias_expression: alias_operator: keyword: AS naked_identifier: col6 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/cast_with_whitespaces.sql000066400000000000000000000020011503426445100265210ustar00rootroot00000000000000/* Several valid queries where there is whitespace surrounding the Vertica cast operator (::). 
Copy of Postgres cast_with_whitespaces.sql */ SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-3.4.2/test/fixtures/dialects/vertica/cast_with_whitespaces.yml000066400000000000000000000166171503426445100265450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d9497ccebaa52506363624f81e7bf1eb3a3a5dc0b6111bbbd3fbc4c44c861cd2 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: 
start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/comment_on.sql000066400000000000000000000037111503426445100243040ustar00rootroot00000000000000-- This test file includes all examples from the Vertica docs, -- but not all are implemented so some are commented out for now. 
-- See https://docs.vertica.com/latest/en/sql-reference/statements/comment-on-statements/ COMMENT ON AGGREGATE FUNCTION APPROXIMATE_MEDIAN(x FLOAT) IS 'alias of APPROXIMATE_PERCENTILE with 0.5 as its parameter'; COMMENT ON AGGREGATE FUNCTION APPROXIMATE_MEDIAN(x FLOAT) IS NULL; COMMENT ON ANALYTIC FUNCTION an_rank() IS 'built from the AnalyticFunctions library'; COMMENT ON ANALYTIC FUNCTION an_rank() IS NULL; COMMENT ON CONSTRAINT constraint_x ON promotion_dimension IS 'Primary key'; COMMENT ON CONSTRAINT constraint_x ON promotion_dimension IS NULL; COMMENT ON FUNCTION macros.zerowhennull(x INT) IS 'Returns a 0 if not NULL'; COMMENT ON FUNCTION macros.zerowhennull(x INT) IS NULL; COMMENT ON LIBRARY MyFunctions IS 'In development'; COMMENT ON LIBRARY MyFunctions IS NULL; COMMENT ON NODE initiator IS 'Initiator node'; COMMENT ON NODE initiator IS NULL; COMMENT ON PROJECTION customer_dimension_vmart_node01 IS 'Test data'; COMMENT ON PROJECTION customer_dimension_vmart_node01 IS NULL; COMMENT ON COLUMN customer_dimension_vmart_node01.customer_name IS 'Last name only'; COMMENT ON COLUMN customer_dimension_vmart_node01.customer_name IS NULL; COMMENT ON SCHEMA public IS 'All users can access this schema'; COMMENT ON SCHEMA public IS NULL; COMMENT ON SEQUENCE prom_seq IS 'Promotion codes'; COMMENT ON SEQUENCE prom_seq IS NULL; COMMENT ON TABLE promotion_dimension IS '2011 Promotions'; COMMENT ON TABLE promotion_dimension IS NULL; COMMENT ON COLUMN store.store_sales_fact.transaction_time IS 'GMT'; COMMENT ON COLUMN store.store_sales_fact.transaction_time IS NULL; COMMENT ON TRANSFORM FUNCTION macros.zerowhennull(x INT) IS 'Returns a 0 if not NULL'; COMMENT ON TRANSFORM FUNCTION macros.zerowhennull(x INT) IS NULL; COMMENT ON VIEW curr_month_ship IS 'Shipping data for the current month'; COMMENT ON VIEW curr_month_ship IS NULL; sqlfluff-3.4.2/test/fixtures/dialects/vertica/comment_on.yml000066400000000000000000000220711503426445100243060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 855a8a73207f67f4fab7ed3ab54b18272cb39dc59df5580696c3a62e9e879a3d file: - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - keyword: FUNCTION - function_name: function_name_identifier: APPROXIMATE_MEDIAN - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: FLOAT end_bracket: ) - keyword: IS - quoted_literal: "'alias of APPROXIMATE_PERCENTILE with 0.5 as its parameter'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - keyword: FUNCTION - function_name: function_name_identifier: APPROXIMATE_MEDIAN - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: FLOAT end_bracket: ) - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ANALYTIC - keyword: FUNCTION - function_name: function_name_identifier: an_rank - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: IS - quoted_literal: "'built from the AnalyticFunctions library'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ANALYTIC - keyword: FUNCTION - function_name: function_name_identifier: an_rank - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_x - keyword: 'ON' - object_reference: naked_identifier: promotion_dimension - keyword: IS - quoted_literal: "'Primary key'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_x - keyword: 'ON' - object_reference: naked_identifier: promotion_dimension - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FUNCTION - function_name: naked_identifier: macros dot: . function_name_identifier: zerowhennull - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: INT end_bracket: ) - keyword: IS - quoted_literal: "'Returns a 0 if not NULL'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FUNCTION - function_name: naked_identifier: macros dot: . 
function_name_identifier: zerowhennull - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: INT end_bracket: ) - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: LIBRARY - object_reference: naked_identifier: MyFunctions - keyword: IS - quoted_literal: "'In development'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: LIBRARY - object_reference: naked_identifier: MyFunctions - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: NODE - parameter: initiator - keyword: IS - quoted_literal: "'Initiator node'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: NODE - parameter: initiator - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PROJECTION - table_reference: naked_identifier: customer_dimension_vmart_node01 - keyword: IS - quoted_literal: "'Test data'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PROJECTION - table_reference: naked_identifier: customer_dimension_vmart_node01 - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: customer_dimension_vmart_node01 - dot: . - naked_identifier: customer_name - keyword: IS - quoted_literal: "'Last name only'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: customer_dimension_vmart_node01 - dot: . - naked_identifier: customer_name - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: public - keyword: IS - quoted_literal: "'All users can access this schema'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: public - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SEQUENCE - object_reference: naked_identifier: prom_seq - keyword: IS - quoted_literal: "'Promotion codes'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SEQUENCE - object_reference: naked_identifier: prom_seq - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: promotion_dimension - keyword: IS - quoted_literal: "'2011 Promotions'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: promotion_dimension - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: store - dot: . - naked_identifier: store_sales_fact - dot: . - naked_identifier: transaction_time - keyword: IS - quoted_literal: "'GMT'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: store - dot: . 
- naked_identifier: store_sales_fact - dot: . - naked_identifier: transaction_time - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TRANSFORM - keyword: FUNCTION - function_name: naked_identifier: macros dot: . function_name_identifier: zerowhennull - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: INT end_bracket: ) - keyword: IS - quoted_literal: "'Returns a 0 if not NULL'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TRANSFORM - keyword: FUNCTION - function_name: naked_identifier: macros dot: . function_name_identifier: zerowhennull - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: INT end_bracket: ) - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: VIEW - table_reference: naked_identifier: curr_month_ship - keyword: IS - quoted_literal: "'Shipping data for the current month'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: VIEW - table_reference: naked_identifier: curr_month_ship - keyword: IS - keyword: 'NULL' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/copy.sql000066400000000000000000000015271503426445100231230ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/copy/examples/ -- Some functionality isn't covered yet, so I commented it COPY public.customer_dimension ( customer_since FORMAT 'YYYY' ) FROM STDIN DELIMITER ',' NULL AS 'null' ENCLOSED BY '"'; COPY sampletab FROM '/home/dbadmin/one.dat', 'home/dbadmin/two.dat'; COPY myTable FROM 'webhdfs:///mydirectory/ofmanyfiles/*.dat'; COPY myTable FROM 'webhdfs:///mydirectory/*_[0-9]'; COPY myTable FROM 'webhdfs:///data/sales/01/*.dat', 'webhdfs:///data/sales/02/*.dat', 'webhdfs:///data/sales/historical.dat'; COPY t FROM 'webhdfs:///opt/data/file1.dat'; COPY t FROM 'webhdfs://testNS/opt/data/file2.csv'; COPY t FROM 's3://AWS_DataLake/*' ORC; COPY names ( first, middle FILLER VARCHAR(20), last -- , full AS first||' '||middle||' '||last ) FROM STDIN; sqlfluff-3.4.2/test/fixtures/dialects/vertica/copy.yml000066400000000000000000000072221503426445100231230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6af1160b5519660d3a63334855a4a8f8d05ee20948549beab870ef5dfaff018b file: - statement: copy_statement: - keyword: COPY - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: customer_dimension - bracketed: start_bracket: ( copy_column_options: column_reference: naked_identifier: customer_since copy_options_for_columns: keyword: FORMAT quoted_literal: "'YYYY'" end_bracket: ) - keyword: FROM - keyword: STDIN - copy_options: copy_options_for_columns: - keyword: DELIMITER - quoted_literal: "','" - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: sampletab - keyword: FROM - quoted_literal: "'/home/dbadmin/one.dat'" - comma: ',' - quoted_literal: "'home/dbadmin/two.dat'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: myTable - keyword: FROM - quoted_literal: "'webhdfs:///mydirectory/ofmanyfiles/*.dat'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: myTable - keyword: FROM - quoted_literal: "'webhdfs:///mydirectory/*_[0-9]'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: myTable - keyword: FROM - quoted_literal: "'webhdfs:///data/sales/01/*.dat'" - comma: ',' - quoted_literal: "'webhdfs:///data/sales/02/*.dat'" - comma: ',' - quoted_literal: "'webhdfs:///data/sales/historical.dat'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: t - keyword: FROM - quoted_literal: "'webhdfs:///opt/data/file1.dat'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: t - keyword: FROM - quoted_literal: "'webhdfs://testNS/opt/data/file2.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: t - keyword: FROM - quoted_literal: "'s3://AWS_DataLake/*'" - keyword: ORC - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: names - bracketed: - start_bracket: ( - copy_column_options: column_reference: naked_identifier: first - comma: ',' - copy_column_options: column_reference: naked_identifier: middle copy_options_for_columns: keyword: FILLER data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - comma: ',' - copy_column_options: column_reference: naked_identifier: last - end_bracket: ) - keyword: FROM - keyword: STDIN - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_projection.sql000066400000000000000000000006101503426445100256400ustar00rootroot00000000000000CREATE PROJECTION public.employee_dimension_super AS SELECT * FROM public.employee_dimension ORDER BY employee_key SEGMENTED BY hash(employee_key) ALL NODES; CREATE PROJECTION store.store_dimension_proj (storekey, name, city, state) AS SELECT store_key, store_name, store_city, store_state FROM store.store_dimension UNSEGMENTED ALL NODES; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_projection.yml000066400000000000000000000063151503426445100256520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d9d047907d529440ecdc5b92785147d403b3794fbcd9aa70429c9659807177a8 file: - statement: create_projection_statement: - keyword: CREATE - keyword: PROJECTION - table_reference: - naked_identifier: public - dot: . - naked_identifier: employee_dimension_super - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: employee_dimension - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_key - segmentedby_clause: - keyword: SEGMENTED - keyword: BY - function: function_name: function_name_identifier: hash function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: employee_key end_bracket: ) - keyword: ALL - keyword: NODES - statement_terminator: ; - statement: create_projection_statement: - keyword: CREATE - keyword: PROJECTION - table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension_proj - bracketed: - start_bracket: ( - column_reference: naked_identifier: storekey - comma: ',' - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: state - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store_key - comma: ',' - select_clause_element: column_reference: naked_identifier: store_name - comma: ',' - select_clause_element: column_reference: naked_identifier: store_city - comma: ',' - select_clause_element: column_reference: naked_identifier: store_state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension - segmentedby_clause: - keyword: UNSEGMENTED - keyword: ALL - keyword: NODES - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_schema.sql000066400000000000000000000001501503426445100247230ustar00rootroot00000000000000CREATE SCHEMA s1; CREATE SCHEMA IF NOT EXISTS s2; CREATE SCHEMA s3 DEFAULT INCLUDE SCHEMA PRIVILEGES; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_schema.yml000066400000000000000000000020711503426445100247310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3e238bdfeceb1322ad34c88f4cc41efcdc21d408e25200c2b9723d4c30c522f6 file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s2 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s3 - keyword: DEFAULT - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table.sql000066400000000000000000000016771503426445100245710ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/create-statements/create-table/#examples -- Some functionality isn't covered yet, so I commented it CREATE TABLE public.Premium_Customer ( ID IDENTITY, lname varchar(25), fname varchar(25), store_membership_card int ); CREATE TABLE orders( orderkey INT ENCODING AUTO, custkey INT, prodkey ARRAY[VARCHAR(10)], orderprices ARRAY[DECIMAL(12,2)], orderdate DATE ); CREATE TABLE orders( orderkey INT ENCODING AUTO, custkey INT, prodkey ARRAY[VARCHAR(10)], orderprices ARRAY[DECIMAL(12,2)], orderdate DATE ) partition by orderdate::date group by CALENDAR_HIERARCHY_DAY(orderdate::DATE, 3, 2) REORGANIZE; -- CREATE TABLE inventory -- (store INT, products ROW(name VARCHAR, code VARCHAR)); CREATE TEMPORARY TABLE tempDelete (a int, b int); CREATE TEMPORARY TABLE tempPreserve (a int, b int) ON COMMIT PRESERVE ROWS; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table.yml000066400000000000000000000153771503426445100245750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aa7138fe0757fd77b284c8d6b28fa0a4db39b8eda0686953b3b6b5ecad288a5c file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: Premium_Customer - bracketed: - start_bracket: ( - column_reference: naked_identifier: ID - data_type: data_type_identifier: IDENTITY - comma: ',' - column_reference: naked_identifier: lname - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '25' end_bracket: ) - comma: ',' - column_reference: naked_identifier: fname - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '25' end_bracket: ) - comma: ',' - column_reference: naked_identifier: store_membership_card - data_type: keyword: int - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: orderkey - data_type: keyword: INT - column_encoding: keyword: ENCODING encoding_type: AUTO - comma: ',' - column_reference: naked_identifier: custkey - data_type: keyword: INT - comma: ',' - column_reference: naked_identifier: prodkey - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: orderprices - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '12' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: orderdate - data_type: keyword: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: orderkey - data_type: keyword: INT - column_encoding: keyword: ENCODING encoding_type: AUTO - comma: ',' - column_reference: naked_identifier: custkey - data_type: keyword: INT - comma: ',' - column_reference: naked_identifier: prodkey - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: orderprices - data_type: array_type: keyword: ARRAY start_square_bracket: '[' data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '12' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: orderdate - data_type: keyword: DATE - end_bracket: ) - partitionby_clause: - keyword: partition - keyword: by - expression: cast_expression: column_reference: naked_identifier: orderdate casting_operator: '::' data_type: keyword: date - groupby_clause: - keyword: group - keyword: by - expression: function: function_name: function_name_identifier: CALENDAR_HIERARCHY_DAY function_contents: bracketed: - start_bracket: ( - expression: cast_expression: column_reference: naked_identifier: orderdate casting_operator: '::' data_type: keyword: DATE - comma: ',' - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - keyword: REORGANIZE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tempDelete - 
bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: int - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: tempPreserve - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: int - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table_as.sql000066400000000000000000000003111503426445100252340ustar00rootroot00000000000000CREATE TABLE cust_basic_profile AS SELECT customer_key, customer_gender, customer_age, marital_status, annual_income, occupation FROM customer_dimension WHERE customer_age>18 AND customer_gender !=''; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table_as.yml000066400000000000000000000041711503426445100252460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1cec959c8cbb94d1cd25132cf91f134494e8c41823ef81b6b6e1fa77e1bb2218 file: statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: cust_basic_profile - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_gender - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_age - comma: ',' - select_clause_element: column_reference: naked_identifier: marital_status - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_income - comma: ',' - select_clause_element: column_reference: naked_identifier: occupation from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_age - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '18' - binary_operator: AND - column_reference: naked_identifier: customer_gender - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - quoted_literal: "''" statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table_like.sql000066400000000000000000000001551503426445100255630ustar00rootroot00000000000000CREATE TABLE All_Customers like Premium_Customer; CREATE TABLE newstates LIKE states INCLUDING PROJECTIONS; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_table_like.yml000066400000000000000000000017371503426445100255740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d53cda26a6930b6ac0a0fb8da204254ecf1dabc0319e1bb600f76bd373732d50 file: - statement: create_table_like_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: All_Customers - keyword: like - table_reference: naked_identifier: Premium_Customer - statement_terminator: ; - statement: create_table_like_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newstates - keyword: LIKE - table_reference: naked_identifier: states - like_option_segment: - keyword: INCLUDING - keyword: PROJECTIONS - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_view.sql000066400000000000000000000003041503426445100244360ustar00rootroot00000000000000CREATE VIEW temp_t0 AS SELECT * from t0_p1 UNION ALL SELECT * from t0_p2 UNION ALL SELECT * from t0_p3 UNION ALL SELECT * from t0_p4 UNION ALL SELECT * from t0_p5; sqlfluff-3.4.2/test/fixtures/dialects/vertica/create_view.yml000066400000000000000000000060331503426445100244450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0db732adecf3da212c3020a4b911199734dd9d5bf21b382703ffa5db22285781 file: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: temp_t0 - keyword: AS - set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0_p1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0_p2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0_p3 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0_p4 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t0_p5 statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/datatypes.sql000066400000000000000000000026751503426445100241540ustar00rootroot00000000000000-- binary create table a ( a BINARY, b VARBINARY, ba LONG VARBINARY ); -- Boolean create table b ( a boolean ); -- Character / Long create table c ( a char, aa char(7), b varchar, bb varchar(10), c long varchar, cc long varchar(100000) ); -- Date / Time create table d ( a date, b TIME, c TIME WITH TIME ZONE, d TIMESTAMP, e DATETIME, f SMALLDATETIME, g TIMESTAMP 
WITH TIME ZONE, h INTERVAL, i INTERVAL DAY TO SECOND, j INTERVAL YEAR TO MONTH ); -- Approximate numeric create table e( a double precision, b float, bb float(7), bbb float (7), c float8, d real ); -- Binary create table f( a BINARY, b VARBINARY, c LONG VARBINARY, d BYTEA, e RAW ); -- Exact numeric create table g( a INTEGER, b INT, c BIGINT, d INT8, e SMALLINT, f TINYINT, g DECIMAL, gg DECIMAL(5), ggg DECIMAL(5, 2), h NUMERIC, hh NUMERIC(5), hhh NUMERIC(5, 2), hhhh NUMERIC (5, 2), i NUMBER, ii NUMBER(5), iii NUMBER(5, 2), j MONEY, jj MONEY(5), jjj MONEY(5, 2) ); -- UUID create table h( a uuid ); -- Spatial create table i( a GEOMETRY, aa GEOMETRY(10), b GEOGRAPHY, bb GEOGRAPHY(10) ); -- Arrays create table p ( a array[integer], b array[varchar(50)] -- It should be covered in the future -- bb array[varchar(50), 100], -- bbb array[varchar(50)](100), ); sqlfluff-3.4.2/test/fixtures/dialects/vertica/datatypes.yml000066400000000000000000000310671503426445100241530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9161ddb2a56e8fd5cc2ada0f788444a01cfb93ed8c26b6487310667e95c9702e file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: BINARY - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: VARBINARY - comma: ',' - column_reference: naked_identifier: ba - data_type: - keyword: LONG - keyword: VARBINARY - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: b - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: boolean end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: c - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: char - comma: ',' - column_reference: naked_identifier: aa - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: bb - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: - keyword: long - keyword: varchar - comma: ',' - column_reference: naked_identifier: cc - data_type: - keyword: long - keyword: varchar - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100000' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: d - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: date - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: TIME - comma: ',' - column_reference: naked_identifier: c - data_type: - keyword: TIME - keyword: WITH - keyword: TIME - keyword: ZONE - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: 
TIMESTAMP - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: DATETIME - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: SMALLDATETIME - comma: ',' - column_reference: naked_identifier: g - data_type: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: INTERVAL - comma: ',' - column_reference: naked_identifier: i - data_type: - keyword: INTERVAL - keyword: DAY - keyword: TO - keyword: SECOND - comma: ',' - column_reference: naked_identifier: j - data_type: - keyword: INTERVAL - keyword: YEAR - keyword: TO - keyword: MONTH - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: e - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: - keyword: double - keyword: precision - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: float - comma: ',' - column_reference: naked_identifier: bb - data_type: keyword: float bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: bbb - data_type: keyword: float bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: float8 - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: real - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: f - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: BINARY - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: VARBINARY - comma: ',' - column_reference: naked_identifier: c - data_type: - keyword: LONG - keyword: VARBINARY - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: BYTEA - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: RAW - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: g - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: INT - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: BIGINT - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: INT8 - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: SMALLINT - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: TINYINT - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: DECIMAL - comma: ',' - column_reference: naked_identifier: gg - data_type: keyword: DECIMAL bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: ggg - data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: NUMERIC - comma: ',' - column_reference: naked_identifier: hh - data_type: keyword: NUMERIC bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: 
naked_identifier: hhh - data_type: keyword: NUMERIC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: hhhh - data_type: keyword: NUMERIC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: i - data_type: keyword: NUMBER - comma: ',' - column_reference: naked_identifier: ii - data_type: keyword: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: iii - data_type: keyword: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: j - data_type: keyword: MONEY - comma: ',' - column_reference: naked_identifier: jj - data_type: keyword: MONEY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: jjj - data_type: keyword: MONEY bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: h - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: uuid end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: i - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: GEOMETRY - comma: ',' - column_reference: naked_identifier: aa - data_type: keyword: GEOMETRY bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: GEOGRAPHY - comma: ',' - column_reference: naked_identifier: bb - data_type: keyword: GEOGRAPHY bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: p - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: array_type: keyword: array start_square_bracket: '[' data_type: keyword: integer end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: b - data_type: array_type: keyword: array start_square_bracket: '[' data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/datetime_units.sql000066400000000000000000000046331503426445100251700ustar00rootroot00000000000000 select t1.field, extract(MILLENNIUM FROM t1.sometime) AS a from t1; select t1.field, extract(CENTURY FROM t1.sometime) AS a from t1; select t1.field, extract(DECADE FROM t1.sometime) AS a from t1; select t1.field, extract(EPOCH FROM t1.sometime) AS a from t1; select t1.field, extract(YEAR FROM t1.sometime) AS a from t1; select t1.field, extract(ISOYEAR FROM t1.sometime) AS a from t1; select t1.field, extract(QUARTER FROM t1.sometime) AS a from t1; select t1.field, extract(MONTH FROM t1.sometime) AS a from t1; select t1.field, extract(WEEK FROM t1.sometime) AS 
a from t1; select t1.field, extract(ISOWEEK FROM t1.sometime) AS a from t1; select t1.field, extract(ISODOW FROM t1.sometime) AS a from t1; select t1.field, extract(DOW FROM t1.sometime) AS a from t1; select t1.field, extract(DOY FROM t1.sometime) AS a from t1; select t1.field, extract(DAY FROM t1.sometime) AS a from t1; select t1.field, extract(HOUR FROM t1.sometime) AS a from t1; select t1.field, extract(MINUTE FROM t1.sometime) AS a from t1; select t1.field, extract(SECOND FROM t1.sometime) AS a from t1; select t1.field, extract(MILLISECONDS FROM t1.sometime) AS a from t1; select t1.field, extract(MICROSECONDS FROM t1.sometime) AS a from t1; select t1.field, extract(TIME ZONE FROM t1.sometime) AS a from t1; select t1.field, extract(TIMEZONE_HOUR FROM t1.sometime) AS a from t1; select t1.field, extract(TIMEZONE_MINUTE FROM t1.sometime) AS a from t1; SELECT t1.field, DATE '2001-09-28' + 7 + t1.data AS a FROM t1; SELECT t1.field, NOW() + INTERVAL '1 HOUR' + t1.data AS a FROM t1; SELECT t1.field, t1.data + TIME '03:00' AS a FROM t1; SELECT t1.field, INTERVAL '1 DAY' + INTERVAL '1 HOUR 2 MINUTES' + t1.data AS a FROM t1; SELECT t1.field, TIMESTAMP '2001-09-28 01:00' + INTERVAL '23 HOURS' + t1.data AS a FROM t1; SELECT t1.field, TIME '01:00' + INTERVAL '3S' + t1.data AS a FROM t1; SELECT t1.field, - INTERVAL '23 HOURS' + t1.data AS a FROM t1; SELECT t1.field, INTERVAL '1 HOUR' / 1.5 + t1.data AS a FROM t1; SELECT t1.field, INTERVAL '1' HOUR * 3.5 + t1.data AS a FROM t1; SELECT t1.field, TIMESTAMP WITH TIME ZONE '2005-04-02 12:00:00-07' + INTERVAL '24 HOURS' + t1.data AS a FROM t1; sqlfluff-3.4.2/test/fixtures/dialects/vertica/datetime_units.yml000066400000000000000000001022471503426445100251720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: af0b72f6733018a58d976ae0f205fa6610ad29269df023308fa23af385a78e44 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: MILLENNIUM keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: CENTURY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: DECADE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: EPOCH keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: YEAR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: ISOYEAR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: QUARTER keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: MONTH keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: WEEK keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: ISOWEEK keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: ISODOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: DOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: DOY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: MINUTE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: SECOND keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: MILLISECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: MICROSECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: - start_bracket: ( - expression: keyword: TIME - expression: column_reference: naked_identifier: ZONE - keyword: FROM - expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: TIMEZONE_HOUR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: TIMEZONE_MINUTE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: DATE - date_constructor_literal: "'2001-09-28'" - binary_operator: + - numeric_literal: '7' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NOW function_contents: bracketed: start_bracket: ( end_bracket: ) - binary_operator: + - keyword: INTERVAL - date_constructor_literal: "'1 HOUR'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data binary_operator: + keyword: TIME date_constructor_literal: "'03:00'" alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: INTERVAL - date_constructor_literal: "'1 DAY'" - binary_operator: + - keyword: INTERVAL - date_constructor_literal: "'1 HOUR 2 MINUTES'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: TIMESTAMP - date_constructor_literal: "'2001-09-28 01:00'" - binary_operator: + - keyword: INTERVAL - date_constructor_literal: "'23 HOURS'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: TIME - date_constructor_literal: "'01:00'" - binary_operator: + - keyword: INTERVAL - date_constructor_literal: "'3S'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: sign_indicator: '-' keyword: INTERVAL date_constructor_literal: "'23 HOURS'" binary_operator: + column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: INTERVAL - date_constructor_literal: "'1 HOUR'" - binary_operator: / - numeric_literal: '1.5' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - keyword: INTERVAL - date_constructor_literal: "'1'" - keyword: HOUR - binary_operator: '*' - numeric_literal: '3.5' - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: expression: - data_type: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE - quoted_literal: "'2005-04-02 12:00:00-07'" - binary_operator: + - keyword: INTERVAL - date_constructor_literal: "'24 HOURS'" - binary_operator: + - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: data alias_expression: alias_operator: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/delete.sql000066400000000000000000000004121503426445100234030ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/delete/#examples DELETE FROM temp1; DELETE FROM retail.customer WHERE state IN ('MA', 'NH'); DELETE FROM new_addresses WHERE new_cust_id IN (SELECT cust_id FROM addresses WHERE address='New Address'); sqlfluff-3.4.2/test/fixtures/dialects/vertica/delete.yml000066400000000000000000000051401503426445100234100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ee18adab611181da87af591ac42728e827b886e84ffad63be1c5ef56903b845 file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: temp1 - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: retail - dot: . - naked_identifier: customer where_clause: keyword: WHERE expression: column_reference: naked_identifier: state keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'MA'" - comma: ',' - quoted_literal: "'NH'" - end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: new_addresses where_clause: keyword: WHERE expression: column_reference: naked_identifier: new_cust_id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: cust_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: addresses where_clause: keyword: WHERE expression: column_reference: naked_identifier: address comparison_operator: raw_comparison_operator: '=' quoted_literal: "'New Address'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/drop_view.sql000066400000000000000000000000571503426445100241440ustar00rootroot00000000000000DROP VIEW myview; DROP VIEW IF EXISTS myview; sqlfluff-3.4.2/test/fixtures/dialects/vertica/drop_view.yml000066400000000000000000000013621503426445100241460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b08b2bd7aea8ed808ac6fc056d84333cc1e958c4826f96c307e6a0589ceac4e4 file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: myview - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/escape.sql000066400000000000000000000003201503426445100233770ustar00rootroot00000000000000SELECT replace(some_field, e'\r\n', ', ') FROM table_name; SELECT replace(some_field, E'\r\n', ', ') FROM table_name; SELECT E'\''; SELECT E''''; SELECT E'''\''; SELECT E'\\\''''; SELECT E' \\ '' \\'; sqlfluff-3.4.2/test/fixtures/dialects/vertica/escape.yml000066400000000000000000000057061503426445100234160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8059bc0b23b0f5b7633dea2c5728afbdcebcbb9aac059d4869d0750376c34dc9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: replace function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: some_field - comma: ',' - expression: quoted_literal: "e'\\r\\n'" - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: replace function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: some_field - comma: ',' - expression: quoted_literal: "E'\\r\\n'" - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'''\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\\\\\''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\n\n\\\\\n''\n\\\\'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/flex_functions.sql000066400000000000000000000003721503426445100251740ustar00rootroot00000000000000select applicant_verification_id, etl_created_at, etl_updated_at, mapkeys(some_data) over ( partition by col_1, col2 ) as ( json_table_keys ) from json_table sqlfluff-3.4.2/test/fixtures/dialects/vertica/flex_functions.yml000066400000000000000000000042601503426445100251760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5c7a6a062150880babc30c11c32b00fabd4f4b08b1017ea3c0942126be81eb54 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: applicant_verification_id - comma: ',' - select_clause_element: column_reference: naked_identifier: etl_created_at - comma: ',' - select_clause_element: column_reference: naked_identifier: etl_updated_at - comma: ',' - select_clause_element: function: function_name: function_name_identifier: mapkeys function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: some_data end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col_1 - comma: ',' - expression: column_reference: naked_identifier: col2 end_bracket: ) alias_expression: alias_operator: keyword: as bracketed: start_bracket: ( identifier_list: naked_identifier: json_table_keys end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: json_table sqlfluff-3.4.2/test/fixtures/dialects/vertica/grant_privileges.sql000066400000000000000000000003111503426445100255030ustar00rootroot00000000000000GRANT USAGE ON SCHEMA online_sales TO Joe; GRANT ALL PRIVILEGES ON TABLE customer_dimension TO Joe; GRANT ALL PRIVILEGES EXTEND ON TABLE customer_dimension TO Joe; GRANT ALL PRIVILEGES ON ship TO Joe; sqlfluff-3.4.2/test/fixtures/dialects/vertica/grant_privileges.yml000066400000000000000000000031601503426445100255120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6f5550a83e6157b9c9b4ecd251f4505c40fa52cd55e87f914fae578c4a30b4b7 file: - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: online_sales - keyword: TO - role_reference: naked_identifier: Joe - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: TABLE - object_reference: naked_identifier: customer_dimension - keyword: TO - role_reference: naked_identifier: Joe - statement_terminator: ; - statement: alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: - keyword: ALL - keyword: PRIVILEGES - keyword: EXTEND - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: customer_dimension - keyword: TO - role_reference: naked_identifier: Joe - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - object_reference: naked_identifier: ship - keyword: TO - role_reference: naked_identifier: Joe - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/group_by.sql000066400000000000000000000007651503426445100240020ustar00rootroot00000000000000SELECT employee_last_name, SUM(vacation_days) FROM employee_dimension WHERE employee_last_name ILIKE 'S%' GROUP BY employee_last_name; SELECT vendor_region, MAX(deal_size) AS "Biggest Deal" FROM vendor_dimension GROUP BY vendor_region; SELECT vendor_region, MAX(deal_size) as "Biggest Deal" FROM vendor_dimension GROUP BY vendor_region HAVING MAX(deal_size) > 900000; SELECT department, grants, SUM(apply_sum(grant_values)) FROM employees GROUP BY grants, department; sqlfluff-3.4.2/test/fixtures/dialects/vertica/group_by.yml000066400000000000000000000127221503426445100240000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 14a9cba2d414c0fd3fc9b6933c552b94ecb38a26ce107e3f7d1fa7f2795e4cf7 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_last_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: vacation_days end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee_dimension where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_last_name keyword: ILIKE quoted_literal: "'S%'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: employee_last_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: vendor_region - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: deal_size end_bracket: ) alias_expression: alias_operator: keyword: AS quoted_identifier: '"Biggest Deal"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: vendor_dimension groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: vendor_region - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: vendor_region - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: deal_size end_bracket: ) alias_expression: alias_operator: keyword: as quoted_identifier: '"Biggest Deal"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: vendor_dimension groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: vendor_region having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: deal_size end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '900000' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department - comma: ',' - select_clause_element: column_reference: naked_identifier: grants - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: apply_sum function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: grant_values end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: grants - comma: ',' - column_reference: naked_identifier: department - statement_terminator: ; 
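A note on how each of these fixture pairs fits together: the `.sql` file holds Vertica statements, and the sibling `.yml` pins the parse tree SQLFluff produces for them, guarded by the `_hash` field that the test suite recomputes (hence the repeated instruction to regenerate with `python test/generate_parse_fixture_yml.py` rather than edit by hand). A minimal sketch of inspecting the same round-trip interactively through SQLFluff's high-level Python API — the statement is taken from delete.sql above, and the exact shape of the returned structure should be treated as version-dependent:

import sqlfluff

# Parse one of the fixture statements with the vertica dialect; a
# statement the dialect cannot parse raises an error rather than
# returning a tree.
tree = sqlfluff.parse("DELETE FROM temp1;", dialect="vertica")

# The nesting mirrors the segment names recorded in the .yml fixtures,
# e.g. delete_statement -> from_clause -> table_reference.
print(tree)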
sqlfluff-3.4.2/test/fixtures/dialects/vertica/insert.sql000066400000000000000000000005371503426445100234550ustar00rootroot00000000000000INSERT INTO t1 VALUES (101, 102, 103, 104); INSERT INTO customer VALUES (10, 'male', 'DPR', 'MA', 35); INSERT INTO start_time VALUES (12, 'film','05:10:00:01'); INSERT INTO retail.t1 (C0, C1) VALUES (1, 1001); INSERT INTO films SELECT * FROM tmp_films WHERE date_prod < '2004-05-07'; INSERT INTO t1 (col1, col2) (SELECT 'abc', mycolumn FROM mytable); sqlfluff-3.4.2/test/fixtures/dialects/vertica/insert.yml000066400000000000000000000101431503426445100234510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d6e551923293e16de309bc1eb7a3b0663ffaf8ce1b189a64028c3dea0773f460 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '101' - comma: ',' - numeric_literal: '102' - comma: ',' - numeric_literal: '103' - comma: ',' - numeric_literal: '104' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: customer - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - quoted_literal: "'male'" - comma: ',' - quoted_literal: "'DPR'" - comma: ',' - quoted_literal: "'MA'" - comma: ',' - numeric_literal: '35' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: start_time - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '12' - comma: ',' - quoted_literal: "'film'" - comma: ',' - quoted_literal: "'05:10:00:01'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: retail - dot: . 
- naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: C0 - comma: ',' - column_reference: naked_identifier: C1 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1001' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: films - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_films where_clause: keyword: WHERE expression: column_reference: naked_identifier: date_prod comparison_operator: raw_comparison_operator: < quoted_literal: "'2004-05-07'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'abc'" - comma: ',' - select_clause_element: column_reference: naked_identifier: mycolumn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/join_no_space.sql000066400000000000000000000002161503426445100247510ustar00rootroot00000000000000-- Note: missing space before ON SELECT * FROM "my_table2" INNER JOIN "my_database"."my_schema"."my_table"ON ("my_table2".foo = "my_table".foo) sqlfluff-3.4.2/test/fixtures/dialects/vertica/join_no_space.yml000066400000000000000000000033451503426445100247600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 035e5deaa786c505125b234894eff38e45deeaebc72d8dd2b29c345d09871420 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"my_table2"' join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - quoted_identifier: '"my_database"' - dot: . - quoted_identifier: '"my_schema"' - dot: . - quoted_identifier: '"my_table"' - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: quoted_identifier: '"my_table2"' dot: . naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"my_table"' dot: .
naked_identifier: foo end_bracket: ) sqlfluff-3.4.2/test/fixtures/dialects/vertica/join_types.sql000066400000000000000000000100201503426445100243200ustar00rootroot00000000000000-- inner join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee INNER JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL INNER JOIN department ON employee.deptno = department.deptno; -- left join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee LEFT OUTER JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL LEFT OUTER JOIN department ON employee.deptno = department.deptno; -- right join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee RIGHT OUTER JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL RIGHT OUTER JOIN department ON employee.deptno = department.deptno; -- full join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee FULL OUTER JOIN department ON employee.deptno = department.deptno; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee NATURAL FULL OUTER JOIN department ON employee.deptno = department.deptno; -- cross join SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee CROSS JOIN department; SELECT employee.id, employee.name, employee.deptno, department.deptname FROM employee, department; -- semi join SELECT /*+ syntactic_join */ product_dimension.product_description AS product_description FROM (public.product_dimension AS product_dimension/*+projs('public.product_dimension')*/ SEMI JOIN /*+Distrib(F,R),JType(H)*/ (SELECT inventory_fact.qty_in_stock AS qty_in_stock FROM public.inventory_fact AS inventory_fact/*+projs('public.inventory_fact')*/) AS subQ_1 ON (product_dimension.product_key = subQ_1.qty_in_stock)); -- nullaware anti join SELECT /*+ syntactic_join */ product_dimension.product_description AS product_description FROM (public.product_dimension AS product_dimension/*+projs('public.product_dimension')*/ NULLAWARE ANTI JOIN /*+Distrib(L,B),JType(H)*/ (SELECT inventory_fact.qty_in_stock AS qty_in_stock FROM public.inventory_fact AS inventory_fact/*+projs('public.inventory_fact')*/) AS subQ_1 ON (product_dimension.product_key = subQ_1.qty_in_stock)); -- semiall join SELECT /*+ syntactic_join */ product_dimension.product_key AS product_key, product_dimension.product_description AS product_description FROM (public.product_dimension AS product_dimension/*+projs('public.product_dimension')*/ SEMIALL JOIN /*+Distrib(F,B),JType(H)*/ (SELECT inventory_fact.product_key AS product_key FROM public.inventory_fact AS inventory_fact/*+projs('public.inventory_fact')*/) AS subQ_1 ON (product_dimension.product_key > subQ_1.product_key)); -- anti join SELECT /*+ syntactic_join */ 
product_dimension.product_key AS product_key, product_dimension.product_description AS product_description FROM (public.product_dimension AS product_dimension/*+projs('public.product_dimension')*/ ANTI JOIN /*+Distrib(F,L),JType(H)*/ (SELECT inventory_fact.product_key AS "inventory_fact.product_key" FROM public.inventory_fact AS inventory_fact/*+projs('public.inventory_fact')*/) AS subQ_1 ON (subQ_1."inventory_fact.product_key" = product_dimension.product_key)); sqlfluff-3.4.2/test/fixtures/dialects/vertica/join_types.yml000066400000000000000000001034401503426445100243330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 242536300beae69849759bdc141c47c9a1a5931495e7932de2c78938b089e0cb file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . 
- naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . 
- naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: department - dot: . - naked_identifier: deptno - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . 
- naked_identifier: deptname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: employee - dot: . - naked_identifier: deptno - comma: ',' - select_clause_element: column_reference: - naked_identifier: department - dot: . - naked_identifier: deptname from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: department - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_description alias_expression: alias_operator: keyword: AS naked_identifier: product_description from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: product_dimension alias_expression: alias_operator: keyword: AS naked_identifier: product_dimension join_clause: - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: inventory_fact - dot: . - naked_identifier: qty_in_stock alias_expression: alias_operator: keyword: AS naked_identifier: qty_in_stock from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: inventory_fact alias_expression: alias_operator: keyword: AS naked_identifier: inventory_fact end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: subQ_1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: subQ_1 - dot: . - naked_identifier: qty_in_stock end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_description alias_expression: alias_operator: keyword: AS naked_identifier: product_description from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . 
- naked_identifier: product_dimension alias_expression: alias_operator: keyword: AS naked_identifier: product_dimension join_clause: - keyword: NULLAWARE - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: inventory_fact - dot: . - naked_identifier: qty_in_stock alias_expression: alias_operator: keyword: AS naked_identifier: qty_in_stock from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: inventory_fact alias_expression: alias_operator: keyword: AS naked_identifier: inventory_fact end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: subQ_1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: subQ_1 - dot: . - naked_identifier: qty_in_stock end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key alias_expression: alias_operator: keyword: AS naked_identifier: product_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_description alias_expression: alias_operator: keyword: AS naked_identifier: product_description from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: product_dimension alias_expression: alias_operator: keyword: AS naked_identifier: product_dimension join_clause: - keyword: SEMIALL - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: inventory_fact - dot: . - naked_identifier: product_key alias_expression: alias_operator: keyword: AS naked_identifier: product_key from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: inventory_fact alias_expression: alias_operator: keyword: AS naked_identifier: inventory_fact end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: subQ_1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key - comparison_operator: raw_comparison_operator: '>' - column_reference: - naked_identifier: subQ_1 - dot: . - naked_identifier: product_key end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key alias_expression: alias_operator: keyword: AS naked_identifier: product_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: product_dimension - dot: . 
- naked_identifier: product_description alias_expression: alias_operator: keyword: AS naked_identifier: product_description from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: product_dimension alias_expression: alias_operator: keyword: AS naked_identifier: product_dimension join_clause: - keyword: ANTI - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: inventory_fact - dot: . - naked_identifier: product_key alias_expression: alias_operator: keyword: AS quoted_identifier: '"inventory_fact.product_key"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: inventory_fact alias_expression: alias_operator: keyword: AS naked_identifier: inventory_fact end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: subQ_1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: subQ_1 dot: . quoted_identifier: '"inventory_fact.product_key"' - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: product_dimension - dot: . - naked_identifier: product_key end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/json_operators.sql000066400000000000000000000001621503426445100252120ustar00rootroot00000000000000select mapjsonextractor('{"data":' || col || '}' using parameters flatten_maps=false) as mapped from prepared sqlfluff-3.4.2/test/fixtures/dialects/vertica/json_operators.yml000066400000000000000000000032061503426445100252160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ee56ff437d0b214b28b55c02d2abf4530cbe76eb2d026627c8c5774f9f46c474 file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: mapjsonextractor function_contents: bracketed: - start_bracket: ( - expression: - quoted_literal: "'{\"data\":'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: col - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'}'" - keyword: using - keyword: parameters - parameter: flatten_maps - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: mapped from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: prepared sqlfluff-3.4.2/test/fixtures/dialects/vertica/limit_clause.sql000066400000000000000000000006331503426445100246200ustar00rootroot00000000000000SELECT store_region, store_city||', '||store_state location, store_name, number_of_employees FROM store.store_dimension WHERE number_of_employees <= 12 ORDER BY store_region, number_of_employees LIMIT 10; SELECT store_region, store_city||', '||store_state location, store_name, number_of_employees FROM store.store_dimension LIMIT 2 OVER (PARTITION BY store_region ORDER BY number_of_employees ASC); sqlfluff-3.4.2/test/fixtures/dialects/vertica/limit_clause.yml000066400000000000000000000077021503426445100246260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7c47446fc41af297defece1192fa0dddaf8a26964ce60caa5e167cd4cc6b9420 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store_region - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: store_city - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: store_state alias_expression: naked_identifier: location - comma: ',' - select_clause_element: column_reference: naked_identifier: store_name - comma: ',' - select_clause_element: column_reference: naked_identifier: number_of_employees from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . 
- naked_identifier: store_dimension where_clause: keyword: WHERE expression: column_reference: naked_identifier: number_of_employees comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '12' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: store_region - comma: ',' - column_reference: naked_identifier: number_of_employees limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store_region - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: store_city - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: store_state alias_expression: naked_identifier: location - comma: ',' - select_clause_element: column_reference: naked_identifier: store_name - comma: ',' - select_clause_element: column_reference: naked_identifier: number_of_employees from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension limit_clause: keyword: LIMIT numeric_literal: '2' over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: store_region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: number_of_employees - keyword: ASC end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/null_cast_with_whitespaces.sql000066400000000000000000000020141503426445100275570ustar00rootroot00000000000000/* Several valid queries where there is whitespace surrounding the Vertica cast operator (::!). Copy of Postgres cast_with_whitespaces.sql */ SELECT amount_of_honey ::! FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey ::! FLOAT FROM bear_inventory; SELECT amount_of_honey::! FLOAT FROM bear_inventory; SELECT amount_of_honey ::!FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey ::! time FROM bear_inventory; SELECT amount_of_honey ::! VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey ::! TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey ::! TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id ::! VARCHAR(512) = bi.bear_id ::!VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-3.4.2/test/fixtures/dialects/vertica/null_cast_with_whitespaces.yml000066400000000000000000000166671503426445100276040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d33ffac4570c4d1d571b58bf60d499a43ea83f3bfa2d6cf58a9dfbaf9bd6f346 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! 
data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: data_type_identifier: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey null_casting_operator: ::! data_type: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . 
- naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id null_casting_operator: ::! data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id null_casting_operator: ::! data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/null_filters.sql000066400000000000000000000006421503426445100246500ustar00rootroot00000000000000-- Check nullability tests with standard and non-standard syntax SELECT nullable_field IS NULL as standard_is_null, nullable_field ISNULL as non_standard_is_null, nullable_field IS NOT NULL as standard_not_null, nullable_field NOTNULL as non_standard_not_null FROM t_test WHERE nullable_field IS NULL OR nullable_field ISNULL OR nullable_field IS NOT NULL OR nullable_field NOTNULL sqlfluff-3.4.2/test/fixtures/dialects/vertica/null_filters.yml000066400000000000000000000050531503426445100246530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
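#
# A minimal sketch of that regeneration workflow after editing the companion
# .sql file (run from a development checkout of this repo; the trailing
# dialect name is an optional filter, and regenerating without it also works):
#
#     python test/generate_parse_fixture_yml.py vertica
#
# To eyeball the parse tree by hand first, the standard CLI can parse the
# fixture directly:
#
#     sqlfluff parse --dialect vertica \
#         test/fixtures/dialects/vertica/null_filters.sql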
_hash: c65e8649bb622ca0c092ee6651dac408914b2bbe39213b74fc320dc94e45ca5e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: IS null_literal: 'NULL' alias_expression: alias_operator: keyword: as naked_identifier: standard_is_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: ISNULL alias_expression: alias_operator: keyword: as naked_identifier: non_standard_is_null - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' alias_expression: alias_operator: keyword: as naked_identifier: standard_not_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: NOTNULL alias_expression: alias_operator: keyword: as naked_identifier: non_standard_not_null from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_test where_clause: keyword: WHERE expression: - column_reference: naked_identifier: nullable_field - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: ISNULL - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: NOTNULL sqlfluff-3.4.2/test/fixtures/dialects/vertica/position.sql000066400000000000000000000000731503426445100240100ustar00rootroot00000000000000SELECT POSITION(VARBINARY '456' IN VARBINARY '123456789'); sqlfluff-3.4.2/test/fixtures/dialects/vertica/position.yml000066400000000000000000000017351503426445100240200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7000882a69fb6a1603558056eca90a4e676e24314dbcc9a32b2f28deca1eba9 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: POSITION function_contents: bracketed: - start_bracket: ( - data_type: keyword: VARBINARY - quoted_literal: "'456'" - keyword: IN - data_type: keyword: VARBINARY - quoted_literal: "'123456789'" - end_bracket: ) statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/revoke.sql000066400000000000000000000000401503426445100234310ustar00rootroot00000000000000REVOKE SELECT ON s1.t1 FROM u3; sqlfluff-3.4.2/test/fixtures/dialects/vertica/revoke.yml000066400000000000000000000012611503426445100234410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 84fcd836b35665359a97e54f3d3ee455f75bca5f89d41ce1e9e603714763d986 file: statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: s1 - dot: . 
      - naked_identifier: t1
    - keyword: FROM
    - object_reference:
        naked_identifier: u3
  statement_terminator: ;
sqlfluff-3.4.2/test/fixtures/dialects/vertica/select.sql000066400000000000000000000104371503426445100234300ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/statements/select/
-- Some functionality isn't covered yet, so those queries are commented out below
SELECT * FROM T1
   WHERE T1.x IN (SELECT MAX(c1) FROM T2
                  EXCEPT
                  SELECT MAX(cc1) FROM T3
                  EXCEPT
                  SELECT MAX(d1) FROM T4);

SELECT user_id.id, user_name.name FROM user_name JOIN user_id ON user_name.id = user_id.id;

SELECT employee_last_name, SUM(vacation_days) FROM employee_dimension
   WHERE employee_last_name ILIKE 'S%' GROUP BY employee_last_name;

SELECT employee_last_name, MAX(annual_salary) as highest_salary FROM employee_dimension
   GROUP BY employee_last_name HAVING MAX(annual_salary) > 800000
   ORDER BY highest_salary DESC;

SELECT * FROM T1
   WHERE T1.x IN (SELECT MAX(c1) FROM T2
                  INTERSECT
                  SELECT MAX(cc1) FROM T3
                  INTERSECT
                  SELECT MAX(d1) FROM T4);

-- SELECT * INTO TABLE newTable FROM customer_dimension;

SELECT store_region, store_city||', '||store_state location, store_name, number_of_employees
   FROM store.store_dimension LIMIT 2 OVER (PARTITION BY store_region ORDER BY number_of_employees ASC);

-- SELECT uid,
--        sid,
--        ts,
--        refurl,
--        pageurl,
--        action,
--        event_name(),
--        pattern_id(),
--        match_id()
-- FROM clickstream_log
-- MATCH
--   (PARTITION BY uid, sid ORDER BY ts
--    DEFINE
--      Entry    AS RefURL  NOT ILIKE '%website2.com%' AND PageURL ILIKE '%website2.com%',
--      Onsite   AS PageURL     ILIKE '%website2.com%' AND Action='V',
--      Purchase AS PageURL     ILIKE '%website2.com%' AND Action = 'P'
--    PATTERN
--      P AS (Entry Onsite* Purchase)
--    ROWS MATCH FIRST EVENT);

-- Can't generate yml with this query for some reason
-- SELECT customer_name, customer_gender FROM customer_dimension
--    WHERE occupation='Dancer' AND customer_city = 'San Francisco' ORDER BY customer_name OFFSET 8;

SELECT PolygonPoint(geom) OVER(PARTITION BY geom) AS SEL_0 FROM t ORDER BY geog;

SELECT symbol, AVG(first_bid) as avg_bid FROM (
        SELECT symbol, slice_time, TS_FIRST_VALUE(bid1) AS first_bid
        FROM Tickstore
        WHERE symbol IN ('MSFT', 'IBM')
        TIMESERIES slice_time AS '5 seconds' OVER (PARTITION BY symbol ORDER BY ts)
        ) AS resultOfGFI
GROUP BY symbol;

(SELECT id, emp_name FROM company_a ORDER BY emp_name LIMIT 2)
   UNION ALL
(SELECT id, emp_name FROM company_b ORDER BY emp_name LIMIT 2);

SELECT DISTINCT customer_key, customer_name FROM public.customer_dimension
   WHERE customer_key IN (SELECT customer_key FROM store.store_sales_fact
      WHERE sales_dollar_amount > 500
      UNION ALL
      SELECT customer_key FROM online_sales.online_sales_fact
      WHERE sales_dollar_amount > 500)
   AND customer_state = 'CT';

SELECT DISTINCT customer_name FROM customer_dimension
   WHERE customer_region = 'East'
   AND customer_name ILIKE 'Amer%';

/* https://docs.vertica.com/24.3.x/en/sql-reference/language-elements/identifiers/
 * Unquoted SQL identifiers must begin with one of the following:
 *   * Non-Unicode letters: A–Z or a-z
 *     -- /in practice Vertica also accepts non-ASCII UTF-8 Unicode
 *        characters here, which is not well documented/
 *   * Underscore (_)
 * Subsequent characters in an identifier can be any combination of
 * the following:
 *   * Non-Unicode letters: A–Z or a-z
 *   * Underscore (_)
 *   * Digits (0–9)
 *   * Unicode letters (letters with diacriticals or not in the Latin
 *     alphabet), unsupported for model names
 *   * Dollar sign ($), unsupported for model names
 *
 * Vertica accepts **non-ASCII UTF-8 Unicode characters** for
 * table names, column names, and other identifiers,
 * extending the cases where upper/lower case distinctions are
 * ignored (case-folded) to all alphabets,
 * including Latin, Cyrillic, and Greek.
 */

-- unquoted identifiers
SELECT * FROM public.sales;
SELECT * FROM public.sales1;
SELECT * FROM public.sales_;
SELECT * FROM public.s$ales$;
SELECT * FROM public._sales;
SELECT * FROM public._1234sales;
SELECT * FROM public1.sales;
SELECT * FROM public_.sales;
SELECT * FROM p$ublic$.sales;
SELECT * FROM _public.sales;
SELECT * FROM _1234public.sales;
SELECT * FROM public1.sales1;
SELECT * FROM public1_.sales1_;
SELECT * FROM p$ublic1_$.s$ales1_$;

-- quoted identifiers
SELECT * FROM "12public"."12344gr";
SELECT * FROM "_1234public"."_1234sales";
sqlfluff-3.4.2/test/fixtures/dialects/vertica/select.yml000066400000000000000000000765061503426445100234370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0a3a08c056244c123a5681b91c70b16a691513a90febfd454573b296e17dac7b
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T1
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
          - naked_identifier: T1
          - dot: .
          - naked_identifier: x
          keyword: IN
          bracketed:
            start_bracket: (
            set_expression:
            - select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    function:
                      function_name:
                        function_name_identifier: MAX
                      function_contents:
                        bracketed:
                          start_bracket: (
                          expression:
                            column_reference:
                              naked_identifier: c1
                          end_bracket: )
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: T2
            - set_operator:
                keyword: EXCEPT
            - select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    function:
                      function_name:
                        function_name_identifier: MAX
                      function_contents:
                        bracketed:
                          start_bracket: (
                          expression:
                            column_reference:
                              naked_identifier: cc1
                          end_bracket: )
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: T3
            - set_operator:
                keyword: EXCEPT
            - select_statement:
                select_clause:
                  keyword: SELECT
                  select_clause_element:
                    function:
                      function_name:
                        function_name_identifier: MAX
                      function_contents:
                        bracketed:
                          start_bracket: (
                          expression:
                            column_reference:
                              naked_identifier: d1
                          end_bracket: )
                from_clause:
                  keyword: FROM
                  from_expression:
                    from_expression_element:
                      table_expression:
                        table_reference:
                          naked_identifier: T4
            end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: user_id
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: user_name
          - dot: .
          - naked_identifier: name
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: user_name
          join_clause:
            keyword: JOIN
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: user_id
            join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: user_name
                - dot: .
- naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: user_id - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_last_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: vacation_days end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee_dimension where_clause: keyword: WHERE expression: column_reference: naked_identifier: employee_last_name keyword: ILIKE quoted_literal: "'S%'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: employee_last_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_last_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: annual_salary end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: highest_salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee_dimension groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: employee_last_name having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: annual_salary end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '800000' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: highest_salary - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 where_clause: keyword: WHERE expression: column_reference: - naked_identifier: T1 - dot: . 
- naked_identifier: x keyword: IN bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T2 - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: cc1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T3 - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: d1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T4 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store_region - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: store_city - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: store_state alias_expression: naked_identifier: location - comma: ',' - select_clause_element: column_reference: naked_identifier: store_name - comma: ',' - select_clause_element: column_reference: naked_identifier: number_of_employees from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . 
- naked_identifier: store_dimension limit_clause: keyword: LIMIT numeric_literal: '2' over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: store_region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: number_of_employees - keyword: ASC end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PolygonPoint function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: geom end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: geom end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: SEL_0 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: geog - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: symbol - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: first_bid end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: avg_bid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: symbol - comma: ',' - select_clause_element: column_reference: naked_identifier: slice_time - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TS_FIRST_VALUE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: bid1 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: first_bid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Tickstore where_clause: keyword: WHERE expression: column_reference: naked_identifier: symbol keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'MSFT'" - comma: ',' - quoted_literal: "'IBM'" - end_bracket: ) timeseries_clause_statement: - keyword: TIMESERIES - alias_expression: naked_identifier: slice_time - keyword: AS - quoted_literal: "'5 seconds'" - over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: symbol orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ts end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: resultOfGFI groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: symbol - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: emp_name from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: company_a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: emp_name limit_clause: keyword: LIMIT numeric_literal: '2' end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: emp_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: company_b orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: emp_name limit_clause: keyword: LIMIT numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: DISTINCT - select_clause_element: column_reference: naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_key - keyword: IN - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer_key from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_sales_fact where_clause: keyword: WHERE expression: column_reference: naked_identifier: sales_dollar_amount comparison_operator: raw_comparison_operator: '>' numeric_literal: '500' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer_key from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: online_sales - dot: . - naked_identifier: online_sales_fact where_clause: keyword: WHERE expression: column_reference: naked_identifier: sales_dollar_amount comparison_operator: raw_comparison_operator: '>' numeric_literal: '500' end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: customer_state - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CT'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: column_reference: naked_identifier: customer_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_region - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'East'" - binary_operator: AND - column_reference: naked_identifier: customer_name - keyword: ILIKE - quoted_literal: "'Amer%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . 
- naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: sales1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: sales_ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: s$ales$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: _sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: _1234sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public1 - dot: . - naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public_ - dot: . - naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: p$ublic$ - dot: . - naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _public - dot: . - naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _1234public - dot: . 
- naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public1 - dot: . - naked_identifier: sales1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public1_ - dot: . - naked_identifier: sales1_ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: p$ublic1_$ - dot: . - naked_identifier: s$ales1_$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"12public"' - dot: . - quoted_identifier: '"12344gr"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"_1234public"' - dot: . - quoted_identifier: '"_1234sales"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_case_cast.sql000066400000000000000000000005611503426445100254320ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::vachar as mycol from table1; select col0, case when col1 then col2 else col3 end::int::float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as varchar) as mycol from table1; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_case_cast.yml000066400000000000000000000110471503426445100254350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
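#
# For orientation: the companion .sql file exercises PostgreSQL-style casts
# applied directly to a CASE expression, including a chained cast
# (illustrative copy of its second statement):
#
#     select col0,
#            case when col1 then col2 else col3 end::int::float as mycol
#     from table1;
#
# As the tree below shows, each '::' adds another casting_operator/data_type
# pair inside one cast_expression rather than nesting a new expression.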
_hash: c2adab05af89b83ba4d69568dec3861552641af8168370c88266578378469e75 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end casting_operator: '::' data_type: data_type_identifier: vachar alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - casting_operator: '::' - data_type: keyword: int - casting_operator: '::' - data_type: keyword: float alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: keyword: varchar end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_case_null_cast.sql000066400000000000000000000005641503426445100264670ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::!vachar as mycol from table1; select col0, case when col1 then col2 else col3 end::!int::!float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as varchar) as mycol from table1; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_case_null_cast.yml000066400000000000000000000110631503426445100264650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
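#
# Same shapes as select_case_cast.yml, but using Vertica's null-on-failure
# cast operator ::! (the cast yields NULL instead of an error when the value
# does not fit the target type), tokenised below as null_casting_operator:
#
#     select col0,
#            case when col1 then col2 else col3 end::!int::!float as mycol
#     from table1;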
_hash: b539a5ce012288a033b458425bdb559c512dff71173eba1cdc67839f7951cdcd file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end null_casting_operator: ::! data_type: data_type_identifier: vachar alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - null_casting_operator: ::! - data_type: keyword: int - null_casting_operator: ::! - data_type: keyword: float alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: keyword: varchar end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_frame_clause.sql000066400000000000000000000026001503426445100261270ustar00rootroot00000000000000-- https://docs.vertica.com/latest/en/sql-reference/functions/analytic-functions/ SELECT dev_group, product_name, users, ARGMAX(users, product_name) OVER (ORDER BY dev_group ASC) FROM service_info; SELECT dev_group, product_name, users, ARGMIN(users, product_name) OVER (ORDER BY dev_group ASC) FROM service_info; SELECT calendar_month_number_in_year Mo, SUM(product_price) Sales, AVG(SUM(product_price)) OVER (ORDER BY calendar_month_number_in_year ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)::INTEGER Average FROM product_dimension pd, date_dimension dm, inventory_fact if WHERE dm.date_key = if.date_key AND pd.product_key = if.product_key GROUP BY Mo; SELECT employee_region region, employee_key, annual_salary, RANK() OVER (PARTITION BY employee_region ORDER BY annual_salary) Rank, DENSE_RANK() OVER (PARTITION BY employee_region ORDER BY annual_salary) "Dense Rank" FROM 
employee_dimension; SELECT customer_state, customer_key, annual_income, PERCENTILE_CONT(0.5) WITHIN GROUP(ORDER BY annual_income) OVER (PARTITION BY customer_state) AS PERCENTILE_CONT FROM customer_dimension WHERE customer_state IN ('DC','WI') AND customer_key < 300 ORDER BY customer_state, customer_key; SELECT employee_last_name, annual_salary, STDDEV(annual_salary) OVER (ORDER BY hire_date) as "stddev" FROM employee_dimension WHERE job_title = 'Assistant Director'; sqlfluff-3.4.2/test/fixtures/dialects/vertica/select_frame_clause.yml000066400000000000000000000344151503426445100261420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9e8f1d1b6c8d60ecebdc2d673f4fc9ded0fdf869f9eebc03ab9e1070bd91b311 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: dev_group - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: column_reference: naked_identifier: users - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ARGMAX function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: users - comma: ',' - expression: column_reference: naked_identifier: product_name - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: dev_group - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: service_info - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: dev_group - comma: ',' - select_clause_element: column_reference: naked_identifier: product_name - comma: ',' - select_clause_element: column_reference: naked_identifier: users - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ARGMIN function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: users - comma: ',' - expression: column_reference: naked_identifier: product_name - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: dev_group - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: service_info - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: calendar_month_number_in_year alias_expression: naked_identifier: Mo - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_price end_bracket: ) alias_expression: naked_identifier: Sales - comma: ',' - select_clause_element: expression: cast_expression: function: function_name: function_name_identifier: AVG function_contents: bracketed: start_bracket: ( 
expression: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: product_price end_bracket: ) end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: calendar_month_number_in_year frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) casting_operator: '::' data_type: keyword: INTEGER alias_expression: naked_identifier: Average from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: product_dimension alias_expression: naked_identifier: pd - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: date_dimension alias_expression: naked_identifier: dm - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: inventory_fact alias_expression: naked_identifier: if where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: dm - dot: . - naked_identifier: date_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: if - dot: . - naked_identifier: date_key - binary_operator: AND - column_reference: - naked_identifier: pd - dot: . - naked_identifier: product_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: if - dot: . - naked_identifier: product_key groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Mo - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_region alias_expression: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_key - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: employee_region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_salary end_bracket: ) alias_expression: naked_identifier: Rank - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DENSE_RANK function_contents: bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: employee_region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_salary end_bracket: ) alias_expression: quoted_identifier: '"Dense Rank"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee_dimension - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_state - comma: ',' - select_clause_element: column_reference: 
naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_income - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_income end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: customer_state end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_CONT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_state - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'DC'" - comma: ',' - quoted_literal: "'WI'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: customer_key - comparison_operator: raw_comparison_operator: < - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: customer_state - comma: ',' - column_reference: naked_identifier: customer_key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: STDDEV function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: annual_salary end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: alias_operator: keyword: as quoted_identifier: '"stddev"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee_dimension where_clause: keyword: WHERE expression: column_reference: naked_identifier: job_title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Assistant Director'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/set.sql000066400000000000000000000007341503426445100227430ustar00rootroot00000000000000SET DATESTYLE TO German; SET ESCAPE_STRING_WARNING TO OFF; SET LOCALE TO en_GB; SET ROLE applogs; SET SEARCH_PATH TO store, public; SET SESSION AUTHORIZATION debuguser; SET SESSION AUTOCOMMIT TO on; SET SESSION MEMORYCAP '2G'; SET SESSION MULTIPLEACTIVERESULTSETS TO on; SET SESSION RESOURCE_POOL = ceo_pool; SET SESSION RUNTIMECAP '10 minutes'; SET SESSION TEMPSPACECAP '20G'; SET SESSION WORKLOAD analytics; SET STANDARD_CONFORMING_STRINGS TO OFF; SET TIME ZONE TO DEFAULT; sqlfluff-3.4.2/test/fixtures/dialects/vertica/set.yml000066400000000000000000000053651503426445100227520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0ff14789df29a4cff441fc4a3c0198bd94d883c907f428f140d79d42abc912ef file: - statement: set_statement: - keyword: SET - keyword: DATESTYLE - keyword: TO - parameter: German - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ESCAPE_STRING_WARNING - keyword: TO - parameter: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: LOCALE - keyword: TO - parameter: en_GB - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - parameter: applogs - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SEARCH_PATH - keyword: TO - parameter: store - comma: ',' - parameter: public - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: AUTHORIZATION - parameter: debuguser - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: AUTOCOMMIT - keyword: TO - keyword: 'on' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: MEMORYCAP - quoted_literal: "'2G'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: MULTIPLEACTIVERESULTSETS - keyword: TO - keyword: 'on' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: RESOURCE_POOL - comparison_operator: raw_comparison_operator: '=' - parameter: ceo_pool - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: RUNTIMECAP - quoted_literal: "'10 minutes'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: TEMPSPACECAP - quoted_literal: "'20G'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - keyword: WORKLOAD - parameter: analytics - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: STANDARD_CONFORMING_STRINGS - keyword: TO - parameter: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: TO - parameter: DEFAULT - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/single_quote.sql000066400000000000000000000002251503426445100246410ustar00rootroot00000000000000SELECT ''; SELECT ''''; SELECT ' '; SELECT '''aaa'''; SELECT ' '' '; -- TODO: Need to think how to fix this case in the dialect -- SELECT '\'; sqlfluff-3.4.2/test/fixtures/dialects/vertica/single_quote.yml000066400000000000000000000023451503426445100246500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
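#
# The companion .sql file exercises quote escaping in string literals: per
# standard SQL, a doubled single quote inside a literal is one literal quote
# character, so (illustrative copy of one of its statements):
#
#     SELECT '''aaa''';  -- a five-character literal: 'aaa' including quotes
#
# Literals spanning several lines keep their embedded newlines, which is why
# some quoted_literal values below are rendered with \n escapes.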
_hash: ab16c8de2a3a0bbe0d3622d15033e9c569ec1f2bdacab3a410cfa7b9b996fcc5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/subquery.sql000066400000000000000000000002731503426445100240250ustar00rootroot00000000000000INSERT INTO target_table (target_column) SELECT table1.column1 FROM table1 INNER JOIN ( SELECT table2.join_column FROM table2 ) AS temp3 ON table1.join_column = temp3.join_column sqlfluff-3.4.2/test/fixtures/dialects/vertica/subquery.yml000066400000000000000000000051661503426445100240350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aff16529f248261edab60ac321e09ce7ee0346048a6b61470a6057e9b8d2c858 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: target_table - bracketed: start_bracket: ( column_reference: naked_identifier: target_column end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table2 - dot: . - naked_identifier: join_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table2 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: temp3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: join_column - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: temp3 - dot: . - naked_identifier: join_column sqlfluff-3.4.2/test/fixtures/dialects/vertica/update_table.sql000066400000000000000000000003631503426445100245770ustar00rootroot00000000000000UPDATE fact SET price = price - cost * 80 WHERE cost > 100; UPDATE retail.customer SET state = 'NH' WHERE CID > 100; UPDATE addresses SET address='New Address' WHERE cust_id IN (SELECT new_cust_id FROM new_addresses WHERE new_address='T'); sqlfluff-3.4.2/test/fixtures/dialects/vertica/update_table.yml000066400000000000000000000063041503426445100246020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c4a427d7f4a8d4280c5ee80cd9500497e0bcae982d1b928068e02909487b03d file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: fact set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: price - binary_operator: '-' - column_reference: naked_identifier: cost - binary_operator: '*' - numeric_literal: '80' where_clause: keyword: WHERE expression: column_reference: naked_identifier: cost comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: retail - dot: . - naked_identifier: customer set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'NH'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: CID comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: addresses set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: address comparison_operator: raw_comparison_operator: '=' quoted_literal: "'New Address'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: cust_id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: new_cust_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: new_addresses where_clause: keyword: WHERE expression: column_reference: naked_identifier: new_address comparison_operator: raw_comparison_operator: '=' quoted_literal: "'T'" end_bracket: ) - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/utf8.sql000066400000000000000000000244371503426445100230440ustar00rootroot00000000000000SELECT amount+1 AS 'amount' FROM num1; SELECT höhe+1 AS 'höhe' FROM num1; SELECT amount*2 AS 'amount' FROM num1; SELECT höhe*2 AS 'höhe' FROM num1; SELECT employees.personal.name, neighbors.area FROM neighbors, employees WHERE employees.personal.address.zipcode=neighbors.area.zipcode AND neighbors.num_neighbors > 1; SELECT mitarbeiter.persönlicher.name, nachbarn.bereich FROM nachbarn, mitarbeiter WHERE mitarbeiter.persönlicher.adresse.zipcode=nachbarn.gebiet.zipcode AND nachbarn.nummer_nachbarn > 1; SELECT itemkey AS key, IMPLODE(itemprice) WITHIN GROUP (ORDER BY itemprice) AS prices FROM filtered GROUP BY itemkey ORDER BY itemkey; SELECT ключтовара AS key, IMPLODE(ценатовара) WITHIN GROUP (ORDER BY ценатовара) AS цены FROM отфильтровано GROUP BY ключтовара ORDER BY ключтовара; SELECT State, APPROXIMATE_PERCENTILE(sales USING PARAMETERS percentiles='0.5') AS median FROM allsales GROUP BY state; SELECT Χώρα, APPROXIMATE_PERCENTILE(πωλήσεις USING PARAMETERS percentiles='0.5') AS διάμεσος FROM όλεςτιςπωλήσεις GROUP BY χώρα; SELECT customer_state, customer_key, annual_income, PERCENTILE_CONT(0.5) WITHIN GROUP(ORDER BY annual_income) OVER (PARTITION BY customer_state) AS PERCENTILE_CONT FROM customer_dimension WHERE 
customer_state IN ('DC','WI') ORDER BY customer_state, customer_key; SELECT état_du_client, clé_client, revenu_annuel, PERCENTILE_CONT(0.5) WITHIN GROUP(ORDER BY revenu_annuel) OVER (PARTITION BY état_du_client) AS PERCENTILE_CONT FROM dimension_client WHERE état_du_client IN ('Provence','Сhampagne') ORDER BY état_du_client, clé_client; SELECT customer_state, customer_key, annual_income, PERCENTILE_DISC(.2) WITHIN GROUP(ORDER BY annual_income) OVER (PARTITION BY customer_state) AS PERCENTILE_DISC FROM customer_dimension WHERE customer_state IN ('DC','WI') AND customer_key < 300 ORDER BY customer_state, customer_key; SELECT état_du_client, clé_client, revenu_annuel, PERCENTILE_DISC(.2) WITHIN GROUP(ORDER BY annual_income) OVER (PARTITION BY état_du_client) AS PERCENTILE_DISC FROM dimension_client WHERE état_du_client IN ('Provence','Сhampagne') AND clé_client < 300 ORDER BY état_du_client, clé_client; SELECT customer_state, customer_key, annual_income, PERCENTILE_CONT(0.5) WITHIN GROUP(ORDER BY annual_income) OVER (PARTITION BY customer_state) AS PERCENTILE_CONT FROM customer_dimension WHERE customer_state IN ('DC','WI') AND customer_key < 300 ORDER BY customer_state, customer_key; SELECT état_du_client, clé_client, revenu_annuel, PERCENTILE_CONT(0.5) WITHIN GROUP(ORDER BY revenu_annuel) OVER (PARTITION BY état_du_client) AS PERCENTILE_CONT FROM dimension_client WHERE état_du_client IN ('Provence','Сhampagne') AND clé_client < 300 ORDER BY état_du_client, clé_client; SELECT store_region, store_city||', '||store_state location, store_name, number_of_employees FROM store.store_dimension LIMIT 2 OVER (PARTITION BY store_region ORDER BY number_of_employees ASC); SELECT регион_магазина, город_магазина||', '||область_магазина местоположение, имя_магазина, количество_сотрудников FROM магазины.измерение_магазины LIMIT 2 OVER (PARTITION BY регион_магазина ORDER BY количество_сотрудников ASC); SELECT PREDICT_LINEAR_REG(waiting USING PARAMETERS model_name='myLinearRegModel') FROM faithful ORDER BY id; SELECT PREDICT_LINEAR_REG(attente USING PARAMETERS model_name='monRegModèleLinéaire') FROM fidèle ORDER BY id; SELECT INFER_EXTERNAL_TABLE_DDL('/data/people/*.parquet' USING PARAMETERS format = 'parquet', table_name = 'employees'); SELECT INFER_EXTERNAL_TABLE_DDL('/data/άνθρωποι/*.parquet' USING PARAMETERS format = 'parquet', table_name = 'εργαζόμενοι'); SELECT PREDICT_ARIMA(temperature USING PARAMETERS model_name='arima_temp', start=100, npredictions=10) OVER(ORDER BY time) FROM temp_data; SELECT PREDICT_ARIMA(температура USING PARAMETERS model_name='arima_temp', start=100, npredictions=10) OVER(ORDER BY time) FROM временные_данные; SELECT INFER_TABLE_DDL ('/data/*.json' USING PARAMETERS table_name='restaurants', format='json', max_files=3, max_candidates=3); SELECT INFER_TABLE_DDL ('/data/*.json' USING PARAMETERS table_name='εστιατόρια', format='json', max_files=3, max_candidates=3); SELECT PURGE_TABLE('store.store_sales_fact'); SELECT PURGE_TABLE('المتجر.متجر_مبيعات_المتجر'); SELECT MSE(obs, prediction) OVER() FROM (SELECT eruptions AS obs, PREDICT_LINEAR_REG (waiting USING PARAMETERS model_name='myLinearRegModel') AS prediction FROM faithful_testing) AS PredictionOutput; SELECT MSE(наблюдения, предсказания) OVER() FROM (SELECT извержения AS наблюдения, PREDICT_LINEAR_REG (ожидания USING PARAMETERS model_name='myLinearRegModel') AS прогноз FROM верное_испытание) AS РезультатПрогноза; SELECT ps[0] as q0, ps[1] as q1, ps[2] as q2, ps[3] as q3, ps[4] as q4 FROM (SELECT APPROXIMATE_PERCENTILE(sales USING 
PARAMETERS percentiles='0, 0.25, 0.5, 0.75, 1') AS ps FROM allsales GROUP BY state) as s1; SELECT pz[0] as q0, pz[1] as q1, pz[2] as q2, pz[3] as q3, pz[4] as q4 FROM (SELECT APPROXIMATE_PERCENTILE(Verkäufe USING PARAMETERS percentiles='0, 0.25, 0.5, 0.75, 1') AS pz FROM alleVerkäufe GROUP BY Staat) as s1; SELECT id.name, major, GPA FROM students WHERE id = ROW('alice',119, ARRAY['alice@example.com','ap16@cs.example.edu']); SELECT ид.имя, курс, СРБАЛЛ FROM студенты WHERE ид = ROW('алиса',119, ARRAY['alice@example.com','ap16@cs.example.edu']); SELECT E'first part o' 'f a long line'; SELECT E'πρώτο μέρος μι' 'ας μακράς γραμμής'; SELECT STRING_TO_ARRAY(name USING PARAMETERS collection_delimiter=' ') FROM employee; SELECT STRING_TO_ARRAY(имя USING PARAMETERS collection_delimiter=' ') FROM сотрудники; -- ALTER SCHEMA block ALTER SCHEMA ms OWNER TO dbadmin CASCADE; ALTER SCHEMA επιμελητεία OWNER TO διαχειριστής CASCADE; ALTER SCHEMA логистика OWNER TO алиса CASCADE; ALTER SCHEMA s1, s2 RENAME TO s3, s4; ALTER SCHEMA εμπορικός, s2 RENAME TO продажи, s4; -- ALTER TABLE block ALTER TABLE public.store_orders ADD COLUMN expected_ship_date date; ALTER TABLE public.κατάστημα_παραγγελίες ADD COLUMN αναμενόμενη_ημερομηνία_αποστολής date; ALTER TABLE public.заказы_магазина ADD COLUMN ожиддаемая_дата_отгрузки date; ALTER TABLE t33 OWNER TO Alice; ALTER TABLE επιμελητεία OWNER TO διαχειριστής; ALTER TABLE заказы OWNER TO алиса; -- ARRAY block SELECT (ARRAY['مسؤل', 'διαχειριστής', 'логистика', 'd', 'e'])[1]; -- Cast w/ whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT ποσότητα_μελιού :: FLOAT FROM αρκούδα_αποθήκη; SELECT количество_мёда :: FLOAT FROM медвежий_склад; -- COMMENT ON block COMMENT ON AGGREGATE FUNCTION APPROXIMATE_MEDIAN(x FLOAT) IS 'alias of APPROXIMATE_PERCENTILE with 0.5 as its parameter'; COMMENT ON AGGREGATE FUNCTION APPROXIMATE_MEDIAN(x FLOAT) IS 'ψευδώνυμο APPROXIMATE_PERCENTILE με 0,5 ως παράμετρό του'; COMMENT ON AGGREGATE FUNCTION APPROXIMATE_MEDIAN(x FLOAT) IS 'псевдоним APPROXIMATE_PERCENTILE с 0,5 в качестве параметра'; COMMENT ON SCHEMA public IS 'All users can access this schema'; COMMENT ON SCHEMA public IS 'Όλοι οι χρήστες έχουν πρόσβαση σε αυτό το σχήμα'; COMMENT ON SCHEMA public IS 'Все пользователи могут получить доступ к этой схеме'; -- COPY block COPY public.customer_dimension ( customer_since FORMAT 'YYYY' ) FROM STDIN DELIMITER ',' NULL AS 'null' ENCLOSED BY '"'; COPY παραγγελίες.παραγγελίες_ανά_ημέρα ( πελάτη_αφού FORMAT 'YYYY' ) FROM STDIN DELIMITER ',' NULL AS 'null' ENCLOSED BY '"'; COPY заказы.заказы_на_день ( клиент_с_даты FORMAT 'YYYY' ) FROM STDIN DELIMITER ',' NULL AS 'null' ENCLOSED BY '"'; -- CREATE PROJECTION block CREATE PROJECTION public.employee_dimension_super AS SELECT * FROM public.employee_dimension ORDER BY employee_key SEGMENTED BY hash(employee_key) ALL NODES; CREATE PROJECTION εμπορικός.παραγγελίες_ανά_ημέρα AS SELECT * FROM εμπορικός.παραγγελίες ORDER BY employee_key SEGMENTED BY hash(employee_key) ALL NODES; CREATE PROJECTION продажи.продажи_на_по_клиенту AS SELECT * FROM продажи.продажи_на_сегодня ORDER BY клиент SEGMENTED BY hash(клиент) ALL NODES; -- CREATE SCHEMA block CREATE SCHEMA s3 DEFAULT INCLUDE SCHEMA PRIVILEGES; CREATE SCHEMA εμπορικός DEFAULT INCLUDE SCHEMA PRIVILEGES; CREATE SCHEMA продажи DEFAULT INCLUDE SCHEMA PRIVILEGES; -- unquoted identifiers SELECT * FROM логистика.εμπορικός; SELECT * FROM логистика.εμπορικός1; SELECT * FROM логистика.εμπορικός_; SELECT * FROM логистика.s$ales$; SELECT * FROM
логистика._εμπορικός; SELECT * FROM логистика._1234εμπορικός; SELECT * FROM логистика1.εμπορικός; SELECT * FROM логистика_.εμπορικός; SELECT * FROM p$ublic$.εμπορικός; SELECT * FROM _логистика.εμπορικός; SELECT * FROM _1234логистика.εμπορικός; SELECT * FROM логистика1.εμπορικός1; SELECT * FROM логистика1_.εμπορικός1_; SELECT * FROM p$ublic1_$.s$ales1_$; -- quoted identifiers SELECT * FROM "12логистика"."12344εμπορικός"; SELECT * FROM "_1234логистика"."_1234εμπορικός"; sqlfluff-3.4.2/test/fixtures/dialects/vertica/utf8.yml000066400000000000000000002500211503426445100230340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 98e2195318f7224754ce6e522918264b87333348c65b157cf3c7b35fe1b6dc7e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: amount binary_operator: + numeric_literal: '1' alias_expression: alias_operator: keyword: AS quoted_identifier: "'amount'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: num1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: höhe binary_operator: + numeric_literal: '1' alias_expression: alias_operator: keyword: AS quoted_identifier: "'höhe'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: num1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: amount binary_operator: '*' numeric_literal: '2' alias_expression: alias_operator: keyword: AS quoted_identifier: "'amount'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: num1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: höhe binary_operator: '*' numeric_literal: '2' alias_expression: alias_operator: keyword: AS quoted_identifier: "'höhe'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: num1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: employees - dot: . - naked_identifier: personal - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: neighbors - dot: . - naked_identifier: area from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: neighbors - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: employees - dot: . - naked_identifier: personal - dot: . - naked_identifier: address - dot: . - naked_identifier: zipcode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: neighbors - dot: . 
- naked_identifier: area - dot: . - naked_identifier: zipcode - binary_operator: AND - column_reference: - naked_identifier: neighbors - dot: . - naked_identifier: num_neighbors - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: mitarbeiter - dot: . - naked_identifier: persönlicher - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: nachbarn - dot: . - naked_identifier: bereich from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nachbarn - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mitarbeiter where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: mitarbeiter - dot: . - naked_identifier: persönlicher - dot: . - naked_identifier: adresse - dot: . - naked_identifier: zipcode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: nachbarn - dot: . - naked_identifier: gebiet - dot: . - naked_identifier: zipcode - binary_operator: AND - column_reference: - naked_identifier: nachbarn - dot: . - naked_identifier: nummer_nachbarn - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: itemkey alias_expression: alias_operator: keyword: AS naked_identifier: key - comma: ',' - select_clause_element: function: function_name: function_name_identifier: IMPLODE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: itemprice end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: itemprice end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: prices from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: filtered groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: itemkey orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: itemkey - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ключтовара alias_expression: alias_operator: keyword: AS naked_identifier: key - comma: ',' - select_clause_element: function: function_name: function_name_identifier: IMPLODE function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: ценатовара end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ценатовара end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: цены from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: отфильтровано groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ключтовара orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ключтовара - 
statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: State - comma: ',' - select_clause_element: function: function_name: function_name_identifier: APPROXIMATE_PERCENTILE function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: sales - keyword: USING - keyword: PARAMETERS - parameter: percentiles - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0.5'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: median from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: allsales groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: Χώρα - comma: ',' - select_clause_element: function: function_name: function_name_identifier: APPROXIMATE_PERCENTILE function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: πωλήσεις - keyword: USING - keyword: PARAMETERS - parameter: percentiles - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0.5'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: διάμεσος from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: όλεςτιςπωλήσεις groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: χώρα - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_state - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_income - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_income end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: customer_state end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_CONT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: column_reference: naked_identifier: customer_state keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'DC'" - comma: ',' - quoted_literal: "'WI'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: customer_state - comma: ',' - column_reference: naked_identifier: customer_key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: état_du_client - comma: ',' - select_clause_element: column_reference: naked_identifier: clé_client - comma: ',' - select_clause_element: column_reference: naked_identifier: revenu_annuel - comma: ',' - 
select_clause_element: function: function_name: function_name_identifier: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: revenu_annuel end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: état_du_client end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_CONT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dimension_client where_clause: keyword: WHERE expression: column_reference: naked_identifier: état_du_client keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'Provence'" - comma: ',' - quoted_literal: "'Сhampagne'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: état_du_client - comma: ',' - column_reference: naked_identifier: clé_client - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_state - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_income - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_DISC function_contents: bracketed: start_bracket: ( expression: numeric_literal: '.2' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_income end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: customer_state end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_DISC from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_state - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'DC'" - comma: ',' - quoted_literal: "'WI'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: customer_key - comparison_operator: raw_comparison_operator: < - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: customer_state - comma: ',' - column_reference: naked_identifier: customer_key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: état_du_client - comma: ',' - select_clause_element: column_reference: naked_identifier: clé_client - comma: ',' - select_clause_element: column_reference: naked_identifier: revenu_annuel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_DISC function_contents: bracketed: start_bracket: ( expression: numeric_literal: '.2' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: 
start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_income end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: état_du_client end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_DISC from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dimension_client where_clause: keyword: WHERE expression: - column_reference: naked_identifier: état_du_client - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Provence'" - comma: ',' - quoted_literal: "'Сhampagne'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: clé_client - comparison_operator: raw_comparison_operator: < - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: état_du_client - comma: ',' - column_reference: naked_identifier: clé_client - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_state - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_key - comma: ',' - select_clause_element: column_reference: naked_identifier: annual_income - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: annual_income end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: customer_state end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_CONT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_state - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'DC'" - comma: ',' - quoted_literal: "'WI'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: customer_key - comparison_operator: raw_comparison_operator: < - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: customer_state - comma: ',' - column_reference: naked_identifier: customer_key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: état_du_client - comma: ',' - select_clause_element: column_reference: naked_identifier: clé_client - comma: ',' - select_clause_element: column_reference: naked_identifier: revenu_annuel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PERCENTILE_CONT function_contents: bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: 
revenu_annuel end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: état_du_client end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PERCENTILE_CONT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dimension_client where_clause: keyword: WHERE expression: - column_reference: naked_identifier: état_du_client - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Provence'" - comma: ',' - quoted_literal: "'Сhampagne'" - end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: clé_client - comparison_operator: raw_comparison_operator: < - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: état_du_client - comma: ',' - column_reference: naked_identifier: clé_client - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store_region - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: store_city - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: store_state alias_expression: naked_identifier: location - comma: ',' - select_clause_element: column_reference: naked_identifier: store_name - comma: ',' - select_clause_element: column_reference: naked_identifier: number_of_employees from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension limit_clause: keyword: LIMIT numeric_literal: '2' over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: store_region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: number_of_employees - keyword: ASC end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: регион_магазина - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: город_магазина - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: область_магазина alias_expression: naked_identifier: местоположение - comma: ',' - select_clause_element: column_reference: naked_identifier: имя_магазина - comma: ',' - select_clause_element: column_reference: naked_identifier: количество_сотрудников from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: магазины - dot: . 
- naked_identifier: измерение_магазины limit_clause: keyword: LIMIT numeric_literal: '2' over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: регион_магазина orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: количество_сотрудников - keyword: ASC end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PREDICT_LINEAR_REG function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: waiting - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myLinearRegModel'" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: faithful orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PREDICT_LINEAR_REG function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: attente - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'monRegModèleLinéaire'" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: fidèle orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: INFER_EXTERNAL_TABLE_DDL function_contents: bracketed: - start_bracket: ( - quoted_literal: "'/data/people/*.parquet'" - keyword: USING - keyword: PARAMETERS - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - comma: ',' - parameter: table_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'employees'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: INFER_EXTERNAL_TABLE_DDL function_contents: bracketed: - start_bracket: ( - quoted_literal: "'/data/άνθρωποι/*.parquet'" - keyword: USING - keyword: PARAMETERS - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'parquet'" - comma: ',' - parameter: table_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'εργαζόμενοι'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PREDICT_ARIMA function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: temperature - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arima_temp'" - comma: ',' - parameter: start - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - comma: ',' - parameter: npredictions - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - end_bracket: ) 
over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: temp_data - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PREDICT_ARIMA function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: температура - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arima_temp'" - comma: ',' - parameter: start - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - comma: ',' - parameter: npredictions - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: временные_данные - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: INFER_TABLE_DDL function_contents: bracketed: - start_bracket: ( - quoted_literal: "'/data/*.json'" - keyword: USING - keyword: PARAMETERS - parameter: table_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'restaurants'" - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'json'" - comma: ',' - parameter: max_files - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - comma: ',' - parameter: max_candidates - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: INFER_TABLE_DDL function_contents: bracketed: - start_bracket: ( - quoted_literal: "'/data/*.json'" - keyword: USING - keyword: PARAMETERS - parameter: table_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'εστιατόρια'" - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'json'" - comma: ',' - parameter: max_files - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - comma: ',' - parameter: max_candidates - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PURGE_TABLE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'store.store_sales_fact'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: PURGE_TABLE function_contents: bracketed: start_bracket: ( expression: quoted_literal: "'المتجر.متجر_مبيعات_المتجر'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: 
function_name_identifier: MSE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: obs - comma: ',' - expression: column_reference: naked_identifier: prediction - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: eruptions alias_expression: alias_operator: keyword: AS naked_identifier: obs - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PREDICT_LINEAR_REG function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: waiting - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myLinearRegModel'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: prediction from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: faithful_testing end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: PredictionOutput - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MSE function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: наблюдения - comma: ',' - expression: column_reference: naked_identifier: предсказания - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: извержения alias_expression: alias_operator: keyword: AS naked_identifier: наблюдения - comma: ',' - select_clause_element: function: function_name: function_name_identifier: PREDICT_LINEAR_REG function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: ожидания - keyword: USING - keyword: PARAMETERS - parameter: model_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myLinearRegModel'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: прогноз from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: верное_испытание end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: РезультатПрогноза - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: ps array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q0 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: ps array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q1 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: ps array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q2 - comma: ',' - 
select_clause_element: expression: column_reference: naked_identifier: ps array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q3 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: ps array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: APPROXIMATE_PERCENTILE function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: sales - keyword: USING - keyword: PARAMETERS - parameter: percentiles - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0, 0.25, 0.5, 0.75, 1'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: ps from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: allsales groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: state end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: s1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: pz array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q0 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: pz array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q1 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: pz array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q2 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: pz array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q3 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: pz array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' alias_expression: alias_operator: keyword: as naked_identifier: q4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: APPROXIMATE_PERCENTILE function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: Verkäufe - keyword: USING - keyword: PARAMETERS - parameter: percentiles - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0, 0.25, 0.5, 0.75, 1'" - end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: pz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: alleVerkäufe groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: Staat end_bracket: ) alias_expression: alias_operator: keyword: as 
naked_identifier: s1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: id - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: major - comma: ',' - select_clause_element: column_reference: naked_identifier: GPA from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: students where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'alice'" - comma: ',' - expression: numeric_literal: '119' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'alice@example.com'" - comma: ',' - quoted_literal: "'ap16@cs.example.edu'" - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: ид - dot: . - naked_identifier: имя - comma: ',' - select_clause_element: column_reference: naked_identifier: курс - comma: ',' - select_clause_element: column_reference: naked_identifier: СРБАЛЛ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: студенты where_clause: keyword: WHERE expression: column_reference: naked_identifier: ид comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: ROW function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'алиса'" - comma: ',' - expression: numeric_literal: '119' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'alice@example.com'" - comma: ',' - quoted_literal: "'ap16@cs.example.edu'" - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'first part o'" alias_expression: quoted_identifier: "'f a long line'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'πρώτο μέρος μι'" alias_expression: quoted_identifier: "'ας μακράς γραμμής'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: STRING_TO_ARRAY function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: name - keyword: USING - keyword: PARAMETERS - parameter: collection_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "' '" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employee - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: STRING_TO_ARRAY function_contents: bracketed: - start_bracket: ( - column_reference: naked_identifier: имя - keyword: USING - keyword: PARAMETERS - parameter: collection_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "' '" - end_bracket: ) 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: сотрудники - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: ms - keyword: OWNER - keyword: TO - role_reference: naked_identifier: dbadmin - keyword: CASCADE - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: επιμελητεία - keyword: OWNER - keyword: TO - role_reference: naked_identifier: διαχειριστής - keyword: CASCADE - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: логистика - keyword: OWNER - keyword: TO - role_reference: naked_identifier: алиса - keyword: CASCADE - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - comma: ',' - schema_reference: naked_identifier: s2 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: s3 - comma: ',' - schema_reference: naked_identifier: s4 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: εμπορικός - comma: ',' - schema_reference: naked_identifier: s2 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: продажи - comma: ',' - schema_reference: naked_identifier: s4 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: store_orders - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: expected_ship_date - data_type: keyword: date - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: κατάστημα_παραγγελίες - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: αναμενόμενη_ημερομηνία_αποστολής - data_type: keyword: date - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: заказы_магазина - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: ожиддаемая_дата_отгрузки - data_type: keyword: date - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t33 - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: Alice - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: επιμελητεία - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: διαχειριστής - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: заказы - alter_table_action_segment: - keyword: OWNER - keyword: TO - parameter: алиса - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'مسؤل'" - comma: ',' - quoted_literal: "'διαχειριστής'" - comma: ',' - quoted_literal: "'логистика'" - comma: ',' - quoted_literal: "'d'" - comma: ',' - quoted_literal: "'e'" - end_square_bracket: ']' end_bracket: ) array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: ποσότητα_μελιού casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: αρκούδα_αποθήκη - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: количество_мёда casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: медвежий_склад - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - keyword: FUNCTION - function_name: function_name_identifier: APPROXIMATE_MEDIAN - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: FLOAT end_bracket: ) - keyword: IS - quoted_literal: "'alias of APPROXIMATE_PERCENTILE with 0.5 as its parameter'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - keyword: FUNCTION - function_name: function_name_identifier: APPROXIMATE_MEDIAN - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: keyword: FLOAT end_bracket: ) - keyword: IS - quoted_literal: "'ψευδώνυμο APPROXIMATE_PERCENTILE με 0,5 ως παράμετρό του'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - keyword: FUNCTION - function_name: function_name_identifier: APPROXIMATE_MEDIAN - function_parameter_list: 
bracketed: start_bracket: ( parameter: x data_type: keyword: FLOAT end_bracket: ) - keyword: IS - quoted_literal: "'псевдоним APPROXIMATE_PERCENTILE с 0,5 в качестве параметра'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: public - keyword: IS - quoted_literal: "'All users can access this schema'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: public - keyword: IS - quoted_literal: "'Όλοι οι χρήστες έχουν πρόσβαση σε αυτό το σχήμα'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: public - keyword: IS - quoted_literal: "'Все пользователи могут получить доступ к этой схеме'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: - naked_identifier: public - dot: . - naked_identifier: customer_dimension - bracketed: start_bracket: ( copy_column_options: column_reference: naked_identifier: customer_since copy_options_for_columns: keyword: FORMAT quoted_literal: "'YYYY'" end_bracket: ) - keyword: FROM - keyword: STDIN - copy_options: copy_options_for_columns: - keyword: DELIMITER - quoted_literal: "','" - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: - naked_identifier: παραγγελίες - dot: . - naked_identifier: παραγγελίες_ανά_ημέρα - bracketed: start_bracket: ( copy_column_options: column_reference: naked_identifier: πελάτη_αφού copy_options_for_columns: keyword: FORMAT quoted_literal: "'YYYY'" end_bracket: ) - keyword: FROM - keyword: STDIN - copy_options: copy_options_for_columns: - keyword: DELIMITER - quoted_literal: "','" - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: - naked_identifier: заказы - dot: . - naked_identifier: заказы_на_день - bracketed: start_bracket: ( copy_column_options: column_reference: naked_identifier: клиент_с_даты copy_options_for_columns: keyword: FORMAT quoted_literal: "'YYYY'" end_bracket: ) - keyword: FROM - keyword: STDIN - copy_options: copy_options_for_columns: - keyword: DELIMITER - quoted_literal: "','" - keyword: 'NULL' - keyword: AS - quoted_literal: "'null'" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - statement_terminator: ; - statement: create_projection_statement: - keyword: CREATE - keyword: PROJECTION - table_reference: - naked_identifier: public - dot: . - naked_identifier: employee_dimension_super - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . 
- naked_identifier: employee_dimension - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_key - segmentedby_clause: - keyword: SEGMENTED - keyword: BY - function: function_name: function_name_identifier: hash function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: employee_key end_bracket: ) - keyword: ALL - keyword: NODES - statement_terminator: ; - statement: create_projection_statement: - keyword: CREATE - keyword: PROJECTION - table_reference: - naked_identifier: εμπορικός - dot: . - naked_identifier: παραγγελίες_ανά_ημέρα - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: εμπορικός - dot: . - naked_identifier: παραγγελίες - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_key - segmentedby_clause: - keyword: SEGMENTED - keyword: BY - function: function_name: function_name_identifier: hash function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: employee_key end_bracket: ) - keyword: ALL - keyword: NODES - statement_terminator: ; - statement: create_projection_statement: - keyword: CREATE - keyword: PROJECTION - table_reference: - naked_identifier: продажи - dot: . - naked_identifier: продажи_на_по_клиенту - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: продажи - dot: . - naked_identifier: продажи_на_сегодня - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: клиент - segmentedby_clause: - keyword: SEGMENTED - keyword: BY - function: function_name: function_name_identifier: hash function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: клиент end_bracket: ) - keyword: ALL - keyword: NODES - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s3 - keyword: DEFAULT - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: εμπορικός - keyword: DEFAULT - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: продажи - keyword: DEFAULT - schema_privileges_segment: - keyword: INCLUDE - keyword: SCHEMA - keyword: PRIVILEGES - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . 
- naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . - naked_identifier: εμπορικός1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . - naked_identifier: εμπορικός_ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . - naked_identifier: s$ales$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . - naked_identifier: _εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика - dot: . - naked_identifier: _1234εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика1 - dot: . - naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика_ - dot: . - naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: p$ublic$ - dot: . - naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _логистика - dot: . - naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: _1234логистика - dot: . 
- naked_identifier: εμπορικός - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика1 - dot: . - naked_identifier: εμπορικός1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: логистика1_ - dot: . - naked_identifier: εμπορικός1_ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: p$ublic1_$ - dot: . - naked_identifier: s$ales1_$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"12логистика"' - dot: . - quoted_identifier: '"12344εμπορικός"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"_1234логистика"' - dot: . - quoted_identifier: '"_1234εμπορικός"' - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/with.sql000066400000000000000000000035561503426445100231300ustar00rootroot00000000000000-- define WITH clause WITH revenue ( vkey, total_revenue ) AS ( SELECT vendor_key, SUM(total_order_cost) FROM store.store_orders_fact GROUP BY vendor_key ORDER BY 1) -- End WITH clause -- primary query SELECT v.vendor_name, v.vendor_address, v.vendor_city, r.total_revenue FROM vendor_dimension v JOIN revenue r ON v.vendor_key = r.vkey WHERE r.total_revenue = (SELECT MAX(total_revenue) FROM revenue ) ORDER BY vendor_name; WITH -- query sale amounts for each region regional_sales (region, total_sales) AS ( SELECT sd.store_region, SUM(of.total_order_cost) AS total_sales FROM store.store_dimension sd JOIN store.store_orders_fact of ON sd.store_key = of.store_key GROUP BY store_region ), -- query previous result set top_regions AS ( SELECT region, total_sales FROM regional_sales ORDER BY total_sales DESC LIMIT 3 ) -- primary query -- aggregate sales in top_regions result set SELECT sd.store_region AS region, pd.department_description AS department, SUM(of.total_order_cost) AS product_sales FROM store.store_orders_fact of JOIN store.store_dimension sd ON sd.store_key = of.store_key JOIN public.product_dimension pd ON of.product_key = pd.product_key WHERE sd.store_region IN (SELECT region FROM top_regions) GROUP BY ROLLUP (region, department) ORDER BY region, product_sales DESC, GROUPING_ID(); INSERT INTO total_store_sales WITH store_sales AS ( SELECT sd.store_key, sd.store_region::VARCHAR(20), SUM (of.total_order_cost) FROM store.store_dimension sd JOIN store.store_orders_fact of ON sd.store_key = of.store_key GROUP BY sd.store_region, sd.store_key ORDER BY sd.store_region, sd.store_key) SELECT * FROM store_sales; WITH 
RECURSIVE nums (n) AS ( SELECT 1 -- non-recursive (base) term UNION ALL SELECT n+1 FROM nums -- recursive term ) SELECT n FROM nums; -- primary query sqlfluff-3.4.2/test/fixtures/dialects/vertica/with.yml000066400000000000000000000506231503426445100231270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c43875273f3e7c3a9af3f05d66bc400a7c69770f5740483a8b7466bad4f67448 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: revenue cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: vkey - comma: ',' - naked_identifier: total_revenue end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: vendor_key - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_order_cost end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_orders_fact groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: vendor_key orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: v - dot: . - naked_identifier: vendor_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: v - dot: . - naked_identifier: vendor_address - comma: ',' - select_clause_element: column_reference: - naked_identifier: v - dot: . - naked_identifier: vendor_city - comma: ',' - select_clause_element: column_reference: - naked_identifier: r - dot: . - naked_identifier: total_revenue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: vendor_dimension alias_expression: naked_identifier: v join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: revenue alias_expression: naked_identifier: r join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: v - dot: . - naked_identifier: vendor_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: r - dot: . - naked_identifier: vkey where_clause: keyword: WHERE expression: column_reference: - naked_identifier: r - dot: . 
- naked_identifier: total_revenue comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_revenue end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: revenue end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: vendor_name - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: regional_sales cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: region - comma: ',' - naked_identifier: total_sales end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: of - dot: . - naked_identifier: total_order_cost end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: total_sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension alias_expression: naked_identifier: sd join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_orders_fact alias_expression: naked_identifier: of join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: of - dot: . - naked_identifier: store_key groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: store_region end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: top_regions keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: total_sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: regional_sales orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: total_sales - keyword: DESC limit_clause: keyword: LIMIT numeric_literal: '3' end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region alias_expression: alias_operator: keyword: AS naked_identifier: region - comma: ',' - select_clause_element: column_reference: - naked_identifier: pd - dot: . - naked_identifier: department_description alias_expression: alias_operator: keyword: AS naked_identifier: department - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: of - dot: . 
- naked_identifier: total_order_cost end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: product_sales from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_orders_fact alias_expression: naked_identifier: of - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_dimension alias_expression: naked_identifier: sd join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: of - dot: . - naked_identifier: store_key - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: product_dimension alias_expression: naked_identifier: pd join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: of - dot: . - naked_identifier: product_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: pd - dot: . - naked_identifier: product_key where_clause: keyword: WHERE expression: column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: region from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: top_regions end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - expression: function: function_name: function_name_identifier: ROLLUP function_contents: bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: department - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: region - comma: ',' - column_reference: naked_identifier: product_sales - keyword: DESC - comma: ',' - expression: function: function_name: function_name_identifier: GROUPING_ID function_contents: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: total_store_sales - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: store_sales keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM function_contents: bracketed: start_bracket: ( expression: column_reference: - naked_identifier: of - dot: . - naked_identifier: total_order_cost end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . 
- naked_identifier: store_dimension alias_expression: naked_identifier: sd join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: store - dot: . - naked_identifier: store_orders_fact alias_expression: naked_identifier: of join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: of - dot: . - naked_identifier: store_key groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region - comma: ',' - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_region - comma: ',' - column_reference: - naked_identifier: sd - dot: . - naked_identifier: store_key end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: naked_identifier: nums cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: n end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: n binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nums end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: n from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nums - statement_terminator: ; sqlfluff-3.4.2/test/fixtures/dialects/vertica/within_group.sql000066400000000000000000000004451503426445100246650ustar00rootroot00000000000000WITH cd AS (SELECT DISTINCT (customer_city) city, customer_state, customer_region FROM customer_dimension) SELECT customer_region Region, LISTAGG(city||', '||customer_state USING PARAMETERS separator=' | ') WITHIN GROUP (ORDER BY city) CityAndState FROM cd GROUP BY region ORDER BY region; sqlfluff-3.4.2/test/fixtures/dialects/vertica/within_group.yml000066400000000000000000000074551503426445100246770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 64f17930a7b5c76be25ed21da73629733ffd94e246a83ca3c396e5b7369350ae file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cd keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: DISTINCT - select_clause_element: expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: customer_city end_bracket: ) alias_expression: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_state - comma: ',' - select_clause_element: column_reference: naked_identifier: customer_region from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_dimension end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_region alias_expression: naked_identifier: Region - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG function_contents: bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: city - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "', '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: customer_state - keyword: USING - keyword: PARAMETERS - parameter: separator - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "' | '" - end_bracket: ) within_group_clause_statement: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city end_bracket: ) alias_expression: naked_identifier: CityAndState from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cd groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: region orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: region statement_terminator: ; sqlfluff-3.4.2/test/fixtures/lexer/000077500000000000000000000000001503426445100173155ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/lexer/basic.sql000066400000000000000000000000521503426445100211140ustar00rootroot00000000000000SELECT a.id, a.name FROM tbl as a sqlfluff-3.4.2/test/fixtures/lexer/block_comment.sql000066400000000000000000000002711503426445100226520ustar00rootroot00000000000000SELECT a.id, /* Block comment with ending */ a.something, a.name, /* Block comment on newlines */ /* Some block comments go over multiple lines */ FROM tbl as a sqlfluff-3.4.2/test/fixtures/lexer/dummy.md000066400000000000000000000000721503426445100207710ustar00rootroot00000000000000# this is a dummy file for testing detection of sql files sqlfluff-3.4.2/test/fixtures/lexer/inline_comment.sql000066400000000000000000000001571503426445100230410ustar00rootroot00000000000000SELECT a.id, -- This is an inline comment -- Sometimes they're on a new line a.name FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/000077500000000000000000000000001503426445100174735ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/.gitignore000066400000000000000000000000431503426445100214600ustar00rootroot00000000000000# Results of fixed tests *_fix.sql 
sqlfluff-3.4.2/test/fixtures/linter/autofix/000077500000000000000000000000001503426445100211525ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/README.md000066400000000000000000000011611503426445100224300ustar00rootroot00000000000000# Automated linter fix tests The `autofix` directory contains the files for automated linter fix tests. The structure is: - First level is folders for each `dialect` (e.g. `ansi`, `mysql`). - Second level is a series of folders for each test. Typically these are of the form `001_test_description`, to help contributors understand the purpose of the test. - Within that folder there will be a `before.sql` file, an `after.sql` file, and a config file named `test-config.yml`. Additionally, if a `violations.json` file is provided, it will be used to check that the relevant violations are found in the first place. sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/000077500000000000000000000000001503426445100221045ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/001_long_line/000077500000000000000000000000001503426445100244325ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/001_long_line/after.sql000066400000000000000000000016031503426445100262540ustar00rootroot00000000000000WITH all_upstream_matches AS ( SELECT ROW_NUMBER() OVER ( PARTITION BY low_business_type, low_size_label, low_gender_label, low_age_label ORDER BY business_type DESC, size_label DESC, gender_label DESC, age_label DESC ) AS rownum, business_type FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type AND size_label = low_size_label AND gender_label = low_gender_label AND age_label = low_age_label) ) SELECT business_type, user_counts FROM acceptable_buckets UNION ALL SELECT business_type, user_counts FROM substituted_buckets sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/001_long_line/before.sql000066400000000000000000000012351503426445100264160ustar00rootroot00000000000000WITH all_upstream_matches AS ( SELECT ROW_NUMBER() OVER (PARTITION BY low_business_type, low_size_label, low_gender_label, low_age_label ORDER BY business_type DESC, size_label DESC, gender_label DESC, age_label DESC) AS rownum, business_type FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type AND size_label = low_size_label AND gender_label = low_gender_label AND age_label = low_age_label) ) SELECT business_type, user_counts FROM acceptable_buckets UNION ALL SELECT business_type, user_counts FROM substituted_buckets sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml000066400000000000000000000000411503426445100273720ustar00rootroot00000000000000test-config: rules: - LT05 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/001_long_line/violations.json000066400000000000000000000001661503426445100275170ustar00rootroot00000000000000{ "violations":{ "linting":{ "LT05": [ [3, 9] ] } } } sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/002_indentation/000077500000000000000000000000001503426445100250015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/002_indentation/after.sql000066400000000000000000000010641503426445100266240ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + ( a.with + a.hanging_indent ) as actually_ok, a.line + ( a.with + a.bad_hanging_indent ) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example,
a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/002_indentation/before.sql000066400000000000000000000010201503426445100267550ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + (a.with + a.hanging_indent) as actually_ok, a.line + (a.with + a.bad_hanging_indent) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml000066400000000000000000000000411503426445100277410ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/003_long_line/000077500000000000000000000000001503426445100244345ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/003_long_line/after.sql000066400000000000000000000003641503426445100262610ustar00rootroot00000000000000SELECT GREATEST(1, 2 + 7, SQRT(a.long_variable_name_of_some_kind)) AS first_one, GREATEST( 2 / 3.4322348982348, 5 + 6, SQRT(a.nother_long_variable_name_of_some_kind) ) AS second_one FROM this_other_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/003_long_line/before.sql000066400000000000000000000003161503426445100264170ustar00rootroot00000000000000SELECT GREATEST(1, 2 + 7, SQRT(a.long_variable_name_of_some_kind)) AS first_one, GREATEST(2 / 3.4322348982348, 5 + 6, SQRT(a.nother_long_variable_name_of_some_kind)) AS second_one FROM this_other_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml000066400000000000000000000000411503426445100273740ustar00rootroot00000000000000test-config: rules: - LT05 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/004_indentation/000077500000000000000000000000001503426445100250035ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/004_indentation/after.sql000066400000000000000000000016761503426445100266370ustar00rootroot00000000000000WITH audience_counts AS ( SELECT user_id, list_id, COUNT(email_id) AS audience FROM lists_emails AS list_emails WHERE list_emails.active != 'D' GROUP BY user_id, list_id ) SELECT user_id, list_id, audience, CASE WHEN audience > 0 AND audience <= 200 THEN '< 200' WHEN audience > 200 AND audience <= 3000 -- NB: This one is a hanging indent, which should be modified. AND audience <= 2000 THEN '200 - 2,000' WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000' WHEN audience > 10000 AND audience <= 50000 THEN '10,000 - 50,000' WHEN audience > 50000 AND audience <= 500000 THEN '50,000 - 500,000' WHEN audience > 500000 THEN '> 500,000' END AS size_bucket FROM audience_counts JOIN gdpr_safe_users USING (user_id) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/004_indentation/before.sql000066400000000000000000000017041503426445100267700ustar00rootroot00000000000000WITH audience_counts AS ( SELECT user_id, list_id, COUNT(email_id) AS audience FROM lists_emails AS list_emails WHERE list_emails.active != 'D' GROUP BY user_id, list_id) SELECT user_id, list_id, audience, CASE WHEN audience > 0 AND audience <= 200 THEN '< 200' WHEN audience > 200 AND audience <= 3000 -- NB: This one is a hanging indent, which should be modified. 
AND audience <= 2000 THEN '200 - 2,000' WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000' WHEN audience > 10000 AND audience <= 50000 THEN '10,000 - 50,000' WHEN audience > 50000 AND audience <= 500000 THEN '50,000 - 500,000' WHEN audience > 500000 THEN '> 500,000' END AS size_bucket FROM audience_counts JOIN gdpr_safe_users USING (user_id) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml000066400000000000000000000000411503426445100277430ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/005_function_spacing/000077500000000000000000000000001503426445100260215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/005_function_spacing/after.sql000066400000000000000000000001371503426445100276440ustar00rootroot00000000000000SELECT min(col_a) as foo, max /* a really obnoxious comment */ (col_b) as bar FROM tbl sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/005_function_spacing/before.sql000066400000000000000000000001401503426445100277770ustar00rootroot00000000000000SELECT min (col_a) as foo, max /* a really obnoxious comment */ (col_b) as bar FROM tbl sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml000066400000000000000000000000411503426445100307610ustar00rootroot00000000000000test-config: rules: - LT06 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/006_indentation/000077500000000000000000000000001503426445100250055ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/006_indentation/after.sql000066400000000000000000000012751503426445100266340ustar00rootroot00000000000000insert overwrite into forecast_daily_product_base with orders_monthly as ( select period, status, region, forecast_id, value from forecast_monthly where metric = 'orders' ), penetrations_monthly as ( select period, status, region, product_category, forecast_id, value from forecast_monthly where metric = 'penetration' union all -- Add in the dry penetrations as 1.0 select period, status, region, 'dry' as product_category, forecast_id, 1.0 as value from forecast_monthly where metric = 'orders' ) select * from orders_monthly inner join penetrations_monthly using(period, status, region, forecast_id) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/006_indentation/before.sql000066400000000000000000000013751503426445100267760ustar00rootroot00000000000000insert overwrite into forecast_daily_product_base with orders_monthly as ( select period, status, region, forecast_id, value from forecast_monthly where metric = 'orders' ), penetrations_monthly as ( select period, status, region, product_category, forecast_id, value from forecast_monthly where metric = 'penetration' union all -- Add in the dry penetrations as 1.0 select period, status, region, 'dry' as product_category, forecast_id, 1.0 as value from forecast_monthly where metric = 'orders' ) select * from orders_monthly inner join penetrations_monthly using(period, status, region, forecast_id) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml000066400000000000000000000000411503426445100277450ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/007_with_clause/000077500000000000000000000000001503426445100250015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql000066400000000000000000000001621503426445100266220ustar00rootroot00000000000000-- Dealing with complicated 
indents before with clauses. WITH cte as ( select a from tbla ) select a from cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/007_with_clause/before.sql000066400000000000000000000002061503426445100267620ustar00rootroot00000000000000-- Dealing with complicated indents before with clauses. WITH cte as ( select a from tbla) select a from cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml000066400000000000000000000000541503426445100277450ustar00rootroot00000000000000test-config: rules: - LT07 - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/000077500000000000000000000000001503426445100274245ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/after.sql000066400000000000000000000013371503426445100312520ustar00rootroot00000000000000SELECT COUNT(1) AS campaign_count, state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient) is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator, this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. SAFE_DIVIDE( SAFE_MULTIPLY( CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su) ), STDDEV_POP(uses_small_subject_line) ) FROM global_actions_states sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/before.sql000066400000000000000000000012031503426445100314060ustar00rootroot00000000000000SELECT COUNT(1) AS campaign_count, state_user_v_peer_open ,business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient) is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator, this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line.
,SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/test-config.yml000066400000000000000000000000671503426445100323740ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 - LT04 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_with_clause/000077500000000000000000000000001503426445100250025ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_with_clause/after.sql000066400000000000000000000001111503426445100266150ustar00rootroot00000000000000with a as (select 1), b as (select 2) select * from a join b using (z) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_with_clause/before.sql000066400000000000000000000001041503426445100267600ustar00rootroot00000000000000with a as(select 1), b as(select 2) select * from a join b using(z) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml000066400000000000000000000000541503426445100277460ustar00rootroot00000000000000test-config: rules: - LT01 - LT08 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/000077500000000000000000000000001503426445100272445ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff000066400000000000000000000001071503426445100310650ustar00rootroot00000000000000[sqlfluff:rules:capitalisation.keywords] capitalisation_policy = lower sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/after.sql000066400000000000000000000042541503426445100310730ustar00rootroot00000000000000select credits.*, min(party.created_datetime) as first_party_created_datetime, listagg(distinct party.product_category_code, ', ') as party_product_category_codes, listagg(distinct party.product_category_name, ', ') as party_product_category_names, listagg(distinct party.party_type_id, ', ') as party_type_ids, listagg(distinct party.party_type, ', ') as party_types, listagg(distinct party.party_action_id, ', ') as party_action_ids, listagg(distinct party.party_action, ', ') as party_actions, listagg(distinct party.party_incident_id, ', ') as party_incident_ids, listagg(distinct party.incident, ', ') as party_incidents, listagg(distinct party.product_party_package_id, ', ') as party_product_party_package_ids, listagg(distinct party.product_party_party_type, ', ') as party_product_party_party_types from ( select created, party_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, NULLIF(SUBSTRING(regexp_substr(cr.description,'Ticket ref: [0-9]*'), 13), '') ::INT as ticket_id, case when cr.description like 'Requesting Punter: %' then left( SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18), length(SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18) )-1) else null end as punter_name, cr.description, party_reason_id, car.description as reason from {{ ref("party_default__party_transaction") }} cr join {{ ref("party_default__party_reason") }} car using (party_reason_id) ) group by 1,2,3,4,5,6,7,8,9 union select created, mgm_big_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, null as ticket_id, null as punter_name, null as description, null as reason_id, 'raf' as reason, null as first_party_created_datetime, null as party_product_category_codes, null as party_product_category_names, null as 
party_type_ids, null as party_types, null as party_action_ids, null as party_actions, null as party_incident_ids, null as party_incidents, null as party_product_party_package_ids, -- NULL as party_product_party_product_types, null as party_product_party_party_types from {{ ref("party_default__mgm_big_transaction") }} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/before.sql000066400000000000000000000042541503426445100312340ustar00rootroot00000000000000SELECT credits.*, min(party.created_datetime) as first_party_created_datetime, listagg(distinct party.product_category_code, ', ') as party_product_category_codes, listagg(distinct party.product_category_name, ', ') as party_product_category_names, listagg(distinct party.party_type_id, ', ') as party_type_ids, listagg(distinct party.party_type, ', ') as party_types, listagg(distinct party.party_action_id, ', ') as party_action_ids, listagg(distinct party.party_action, ', ') as party_actions, listagg(distinct party.party_incident_id, ', ') as party_incident_ids, listagg(distinct party.incident, ', ') as party_incidents, listagg(distinct party.product_party_package_id, ', ') as party_product_party_package_ids, listagg(distinct party.product_party_party_type, ', ') as party_product_party_party_types FROM ( SELECT created, party_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, NULLIF(SUBSTRING(regexp_substr(cr.description,'Ticket ref: [0-9]*'), 13), '') ::INT as ticket_id, case when cr.description like 'Requesting Punter: %' then left( SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18), length(SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18) )-1) else null end as punter_name, cr.description, party_reason_id, car.description as reason from {{ ref("party_default__party_transaction") }} cr join {{ ref("party_default__party_reason") }} car using (party_reason_id) ) group by 1,2,3,4,5,6,7,8,9 UNION SELECT created, mgm_big_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, null as ticket_id, null as punter_name, null as description, null as reason_id, 'raf' as reason, NULL as first_party_created_datetime, NULL as party_product_category_codes, NULL as party_product_category_names, NULL as party_type_ids, NULL as party_types, NULL as party_action_ids, NULL as party_actions, NULL as party_incident_ids, NULL as party_incidents, NULL as party_product_party_package_ids, -- NULL as party_product_party_product_types, NULL as party_product_party_party_types from {{ ref("party_default__mgm_big_transaction") }} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml000066400000000000000000000000541503426445100322100ustar00rootroot00000000000000test-config: rules: - CP01 - CP04 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/000077500000000000000000000000001503426445100260105ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/after.sql000066400000000000000000000003541503426445100276340ustar00rootroot00000000000000with my_cte as ( select 1 ) , that_cte as ( select 1 ), -- This Comment should stick to the CTE other_cte as ( select 1 ), this_cte as (select 1), final_cte as ( select 1 ) select * from my_cte cross join other_cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/before.sql000066400000000000000000000003471503426445100277770ustar00rootroot00000000000000with my_cte as ( select 1 ) , 
that_cte as ( select 1 ), -- This Comment should stick to the CTE other_cte as ( select 1 ), this_cte as (select 1), final_cte as ( select 1 ) select * from my_cte cross join other_cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml000066400000000000000000000000411503426445100307500ustar00rootroot00000000000000test-config: rules: - LT08 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/011_indentation/000077500000000000000000000000001503426445100250015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/011_indentation/after.sql000066400000000000000000000003511503426445100266220ustar00rootroot00000000000000SELECT a {# My Comment #} , b {% for i in [1, 2, 3] %} , c_{{i}} + 42 AS the_meaning_of_li{{ 'f' * i }} {% endfor %} , boo {% for i in [1, 2, 3] %} , d_{{i}} {% endfor %} FROM my_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/011_indentation/before.sql000066400000000000000000000003461503426445100267670ustar00rootroot00000000000000SELECT a {# My Comment #} , b {% for i in [1, 2, 3] %} , c_{{i}}+42 AS the_meaning_of_li{{ 'f' * i }} {% endfor %} , boo {% for i in [1, 2, 3] %} , d_{{i}} {% endfor %} FROM my_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml000066400000000000000000000000541503426445100277450ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/012_templating/000077500000000000000000000000001503426445100246325ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/012_templating/.sqlfluff000066400000000000000000000001611503426445100264530ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] test_expression_1=4+5+6 test_expression_2=+2+ test_expression_3=barfoo as bf, sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/012_templating/after.sql000066400000000000000000000004431503426445100264550ustar00rootroot00000000000000-- Templated query aimed to stress the fixing of templated sections. SELECT foo, 1 + 2 + 3 + 4 + 5 as bar1, 1 + {{test_expression_1}} + 3 as bar2, 1 {{test_expression_2}} 3 as bar3, {% if 1 == 0 %} {{test_expression_3}} {% endif %} foobar FROM example_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/012_templating/before.sql000066400000000000000000000004251503426445100266160ustar00rootroot00000000000000-- Templated query aimed to stress the fixing of templated sections.
SELECT foo, 1+2+3+4+5 as bar1, 1+{{test_expression_1}}+3 as bar2, 1{{test_expression_2}}3 as bar3, {% if 1 == 0 %} {{test_expression_3}} {% endif %} foobar FROM example_table sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml000066400000000000000000000000411503426445100275720ustar00rootroot00000000000000test-config: rules: - LT01 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/013_order_by_explicit/000077500000000000000000000000001503426445100261755ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/013_order_by_explicit/after.sql000066400000000000000000000000471503426445100300200ustar00rootroot00000000000000SELECT * FROM t ORDER BY a DESC, b ASC sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/013_order_by_explicit/before.sql000066400000000000000000000000431503426445100301550ustar00rootroot00000000000000SELECT * FROM t ORDER BY a DESC, b sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml000066400000000000000000000000411503426445100311350ustar00rootroot00000000000000test-config: rules: - AM03 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/000077500000000000000000000000001503426445100323725ustar00rootroot00000000000000after.sql000066400000000000000000000000451503426445100341340ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030SELECT FLOOR(dt), COUNT(*) FROM test before.sql000066400000000000000000000000451503426445100342750ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030SELECT FLOOR(dt) ,count(*) FROM test test-config.yml000066400000000000000000000000541503426445100352570ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030test-config: rules: - LT01 - CP03 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/000077500000000000000000000000001503426445100274635ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/after.sql000066400000000000000000000001001503426445100312740ustar00rootroot00000000000000 {%- set x = 42 %} SELECT 1, 2; {% set x = 42 %} SELECT 1, 2 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/before.sql000066400000000000000000000001001503426445100314350ustar00rootroot00000000000000 {%- set x = 42 %} SELECT 1, 2; {% set x = 42 %} SELECT 1, 2 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml000066400000000000000000000000411503426445100324230ustar00rootroot00000000000000test-config: rules: - LT09 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/000077500000000000000000000000001503426445100276765ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/after.sql000066400000000000000000000005301503426445100315160ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/before.sql000066400000000000000000000004501503426445100316600ustar00rootroot00000000000000-- This file combines 
product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml000066400000000000000000000000541503426445100326420ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/000077500000000000000000000000001503426445100277605ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/after.sql000066400000000000000000000005411503426445100316020ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last %} UNION ALL {% endif %} {% endfor %} ORDER BY 1 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/before.sql000066400000000000000000000004611503426445100317440ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last %} UNION ALL {% endif %} {% endfor %} ORDER BY 1 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml000066400000000000000000000000541503426445100327240ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/000077500000000000000000000000001503426445100275475ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/.sqlfluff000066400000000000000000000006451503426445100313770ustar00rootroot00000000000000[sqlfluff] # TODO: This setting defaults to true. I had to set it false in order to allow a # rule (LT02) to indent a templated table name. Technically, indenting templated # code is not "touching" templated code, but in order for SQLFluff to detect # this and allow the fixes to be applied using default settings, we'd need to # tweak some of the anchor and create logic for LintResult. 
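# Illustrative note (an inference from this fixture, not documented
# SQLFluff behaviour): with the default of true, the LT02 fix that
# indents the templated "FROM {{ product }}" line in before.sql would be
# filtered out as touching a templated section, so the fixed output in
# after.sql could never be produced.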
ignore_templated_areas = false sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/after.sql000066400000000000000000000004351503426445100313730ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = ['table1'] %} {% for product in products %} SELECT brand, country_code FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/before.sql000066400000000000000000000004031503426445100315270ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = ['table1'] %} {% for product in products %} SELECT brand, country_code FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/test-config.yml000066400000000000000000000000411503426445100325070ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/000077500000000000000000000000001503426445100276475ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/.sqlfluff000066400000000000000000000000651503426445100314730ustar00rootroot00000000000000[sqlfluff:layout:type:comma] line_position = leading sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/after.sql000066400000000000000000000003741503426445100314750ustar00rootroot00000000000000WITH first_cte AS (SELECT id , one FROM first) , second_cte AS (SELECT id , two FROM {{ source('schema', 'table') }} ) SELECT id , one , two FROM first_cte LEFT JOIN second_cte ON first_cte.id = second_cte.id; sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/before.sql000066400000000000000000000003731503426445100316350ustar00rootroot00000000000000WITH first_cte AS (SELECT id , one FROM first), second_cte AS (SELECT id , two FROM {{ source('schema', 'table') }} ) SELECT id , one , two FROM first_cte LEFT JOIN second_cte ON first_cte.id = second_cte.id; sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml000066400000000000000000000000411503426445100326070ustar00rootroot00000000000000test-config: rules: - LT04 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/000077500000000000000000000000001503426445100261555ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/after.sql000066400000000000000000000000121503426445100277700ustar00rootroot00000000000000select *, sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/before.sql000066400000000000000000000000121503426445100301310ustar00rootroot00000000000000select *, sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml000066400000000000000000000000411503426445100311150ustar00rootroot00000000000000test-config: rules: - LT01 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/000077500000000000000000000000001503426445100262025ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql000066400000000000000000000007421503426445100300270ustar00rootroot00000000000000--noqa: disable=ST06 SELECT DISTINCT TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') AS the_date, a AS b FROM table1; SELECT col_a AS a, col_b b, --noqa: 
disable=AL02 col_c c, col_d AS d, --noqa: enable=AL02 col_e AS e, col_f AS f, col_g g, --noqa col_h AS h, col_i i, --noqa:AL02 col_j AS j, col_k AS k, --noqa:AL03 col_l AS l, col_m AS m, col_n n, --noqa: disable=all col_o o, col_p AS p --noqa: enable=all FROM foo sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql000066400000000000000000000007041503426445100301660ustar00rootroot00000000000000--noqa: disable=ST06 SELECT DISTINCT TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') as the_date, a AS b FROM table1; SELECT col_a a, col_b b, --noqa: disable=AL02 col_c c, col_d d, --noqa: enable=AL02 col_e e, col_f f, col_g g, --noqa col_h h, col_i i, --noqa:AL02 col_j j, col_k k, --noqa:AL03 col_l l, col_m m, col_n n, --noqa: disable=all col_o o, col_p p --noqa: enable=all FROM foo sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml000066400000000000000000000000671503426445100311520ustar00rootroot00000000000000test-config: rules: - CP01 - AL02 - ST06 022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/000077500000000000000000000000001503426445100353455ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansiafter.sql000066400000000000000000000001031503426445100371610ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqlSELECT ID, DataDate, COALESCE(a, 1) AS CoalesceOutput FROM temp1 before.sql000066400000000000000000000001051503426445100373240ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqlSELECT ID , COALESCE(a, 1) AS CoalesceOutput , DataDate FROM temp1 test-config.yml000066400000000000000000000000541503426445100403110ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqltest-config: rules: - LT04 - ST06 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/000077500000000000000000000000001503426445100256215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/.sqlfluff000066400000000000000000000000401503426445100274360ustar00rootroot00000000000000[sqlfluff] max_line_length = 70 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/after.sql000066400000000000000000000003401503426445100274400ustar00rootroot00000000000000SELECT * FROM superverylongtablenamereallyreally1 WHERE long_varname_to_trigger_Rule_LT05_id in ( SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL ) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/before.sql000066400000000000000000000003261503426445100276050ustar00rootroot00000000000000SELECT * FROM superverylongtablenamereallyreally1 WHERE long_varname_to_trigger_Rule_LT05_id in (SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/test-config.yml000066400000000000000000000003241503426445100305650ustar00rootroot00000000000000test-config: # NOTE: LT02 is included in this test case because the fix for # LT05 doesn't really make sense without it as the existing # query is poorly indented. 
rules: - LT02 - LT05 - LT10 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/000077500000000000000000000000001503426445100274215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/.sqlfluff000066400000000000000000000001521503426445100312420ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] par_wrap = {% macro par_wrap() %} ( col ) AS col {% endmacro %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql000066400000000000000000000002731503426445100312450ustar00rootroot00000000000000-- Templated query aimed to test the BaseRule.remove_templated_errors() -- function's behavior of not modifying templated sections. SELECT {{ par_wrap() }} , line_two AS line_two sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/before.sql000066400000000000000000000002701503426445100314030ustar00rootroot00000000000000-- Templated query aimed to test the BaseRule.remove_templated_errors() -- function's behavior of not modifying templated sections. SELECT {{ par_wrap() }} , line_two as line_two sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml000066400000000000000000000001021503426445100323570ustar00rootroot00000000000000test-config: rules: - LT02 - CP01 - LT04 - ST06 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/000077500000000000000000000000001503426445100271525ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/after.sql000066400000000000000000000000521503426445100307710ustar00rootroot00000000000000select foo, bar as test from baz; sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/before.sql000066400000000000000000000001671503426445100311410ustar00rootroot00000000000000select foo, case when bar is not null then bar else null end as test from baz; sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/test-config.yml000066400000000000000000000000541503426445100321160ustar00rootroot00000000000000test-config: rules: - ST01 - ST02 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/000077500000000000000000000000001503426445100323165ustar00rootroot00000000000000.sqlfluff000066400000000000000000000000651503426445100340630ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes[sqlfluff:rules:aliasing.forbid] force_enable = true after.sql000066400000000000000000000005551503426445100340660ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixesSELECT abs(round(foo_bar_report.metricx-xxx_yyy_report.metricx)) as col_c_rel_diff, abs( ( round(foo_bar_report.metricx-xxx_yyy_report.metricx) /foo_bar_report.metricx ) *100 ) as metric_x_rel_diff FROM foo_bar_report LEFT JOIN xxx_yyy_report ON foo_bar_report.event_date = xxx_yyy_report.event_date; before.sql000066400000000000000000000003301503426445100342160ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixesSELECT abs(round(a.metricx-b.metricx)) as col_c_rel_diff, abs((round(a.metricx-b.metricx)/a.metricx)*100) as metric_x_rel_diff FROM foo_bar_report a LEFT JOIN xxx_yyy_report b ON a.event_date = b.event_date; 
test-config.yml000066400000000000000000000000671503426445100352070ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixestest-config: rules: - LT02 - LT05 - AL07 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/000077500000000000000000000000001503426445100301025ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/after.sql000066400000000000000000000000601503426445100317200ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/before.sql000066400000000000000000000000631503426445100320640ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/test-config.yml000066400000000000000000000000671503426445100330520ustar00rootroot00000000000000test-config: rules: - LT02 - LT07 - LT08 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/000077500000000000000000000000001503426445100332735ustar00rootroot00000000000000after.sql000066400000000000000000000003521503426445100350360ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cteWITH {% for i in [0, 1] %} {% if i == 0 %} cte0 AS ( SELECT 1 ), {% else %} cte1 AS ( SELECT 2 ) {% endif %} {% endfor %} SELECT * FROM cte0 UNION SELECT * FROM cte1 before.sql000066400000000000000000000003301503426445100351730ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cteWITH {% for i in [0, 1] %} {% if i == 0 %} cte0 AS ( SELECT 1), {% else %} cte1 AS ( SELECT 2) {% endif %} {% endfor %} SELECT * FROM cte0 UNION SELECT * FROM cte1 test-config.yml000066400000000000000000000000541503426445100361600ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_ctetest-config: rules: - LT02 - LT07 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/000077500000000000000000000000001503426445100325445ustar00rootroot00000000000000after.sql000066400000000000000000000001261503426445100343060ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cteWITH {% if true %} cte AS ( SELECT 2 ) {% endif %} SELECT * FROM cte before.sql000066400000000000000000000001221503426445100344430ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cteWITH {% if true %} cte AS ( SELECT 2 ) {% endif %} SELECT * FROM cte test-config.yml000066400000000000000000000000541503426445100354310ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_ctetest-config: rules: - LT02 - LT07 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/000077500000000000000000000000001503426445100275335ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/after.sql000066400000000000000000000000601503426445100313510ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte 
sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/before.sql000066400000000000000000000000701503426445100315130ustar00rootroot00000000000000 WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/test-config.yml000066400000000000000000000000541503426445100324770ustar00rootroot00000000000000test-config: rules: - LT02 - LT07 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/028_leading_comma_with_jinja/000077500000000000000000000000001503426445100274625ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/028_leading_comma_with_jinja/after.sql000066400000000000000000000002651503426445100313070ustar00rootroot00000000000000{% set isENTER = true %} SELECT myt.c1 {% if isENTER %} , myt.c2 {% endif %} , myt.dt, coalesce(myt.c3, 0) as c3, coalesce(myt.c4, 0) as c4 from myt sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/028_leading_comma_with_jinja/before.sql000066400000000000000000000002671503426445100314520ustar00rootroot00000000000000{% set isENTER = true %} SELECT myt.c1 {% if isENTER %} , myt.c2 {% endif %} , coalesce(myt.c3, 0) as c3 , coalesce(myt.c4, 0) as c4 , myt.dt from myt sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/028_leading_comma_with_jinja/test-config.yml000066400000000000000000000000541503426445100324260ustar00rootroot00000000000000test-config: rules: - LT04 - ST06 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/029_indentation_join_reorder_LT02_ST09/000077500000000000000000000000001503426445100310735ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/029_indentation_join_reorder_LT02_ST09/.sqlfluff000066400000000000000000000006071503426445100327210ustar00rootroot00000000000000[sqlfluff:indentation] indent_unit = space tab_space_size = 2 indented_joins = true indented_using_on = true template_blocks_indent = false [sqlfluff:templater:jinja:macros] snapshot_date = {% macro snapshot_date(delta_days=0, format_as="DATE") %}{% if format_as == 'DATE' %}{{ "CAST('2022-09-23' AS DATE)" }}{% else %}{{ "CAST('2022-09-23T00-00' AS TIMESTAMP)" }}{% endif %}{% endmacro %} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/029_indentation_join_reorder_LT02_ST09/after.sql000066400000000000000000000002341503426445100327140ustar00rootroot00000000000000SELECT table_a.* FROM schema.table_a INNER JOIN schema.table_b ON table_a.col_a = table_b.col_a AND table_b.col_b = {{ snapshot_date() }} sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/029_indentation_join_reorder_LT02_ST09/before.sql000066400000000000000000000002261503426445100330560ustar00rootroot00000000000000SELECT table_a.* FROM schema.table_a INNER JOIN schema.table_b ON table_b.col_a = table_a.col_a AND table_b.col_b = {{ snapshot_date() }} test-config.yml000066400000000000000000000000541503426445100337600ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/029_indentation_join_reorder_LT02_ST09test-config: rules: - LT02 - ST09 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/000077500000000000000000000000001503426445100267365ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/after.sql000066400000000000000000000003211503426445100305540ustar00rootroot00000000000000{% set x = "col" %} -- We find the error with the subquery and then have to dump it again -- due to the template SELECT * FROM A_TABLE INNER JOIN ( SELECT *, {{ x }} FROM B_TABLE ) USING (SOME_COLUMN) 
sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/before.sql000066400000000000000000000003211503426445100307150ustar00rootroot00000000000000{% set x = "col" %} -- We find the error with the subquery and then have to dump it again -- due to the template SELECT * FROM A_TABLE INNER JOIN ( SELECT *, {{ x }} FROM B_TABLE ) USING (SOME_COLUMN) sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/test-config.yml000066400000000000000000000000541503426445100317020ustar00rootroot00000000000000test-config: rules: - ST05 - JJ01 sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/000077500000000000000000000000001503426445100327505ustar00rootroot00000000000000.sqlfluff000066400000000000000000000002431503426445100345130ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable[sqlfluff] dialect = ansi [sqlfluff:templater:jinja] load_macros_from_path = test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros after.sql000066400000000000000000000000271503426445100345120ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variableSELECT id FROM records before.sql000066400000000000000000000000271503426445100346530ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variableSELECT id FROM records sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros/000077500000000000000000000000001503426445100342345ustar00rootroot00000000000000utils.sql000066400000000000000000000000271503426445100360350ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros{% include sql_file %} test-config.yml000066400000000000000000000000411503426445100356310ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variabletest-config: rules: - LT09 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/000077500000000000000000000000001503426445100230015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/001_templating/000077500000000000000000000000001503426445100255255ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff000066400000000000000000000002371503426445100273520ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] project=marketing_segmentation dataset=dataset label_prob_threshold=0.8 [sqlfluff:indentation] allow_implicit_indents=True sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/001_templating/after.sql000066400000000000000000000003071503426445100273470ustar00rootroot00000000000000select * from `{{project}}.{{dataset}}.user_labels_with_probs` where prob_max >= {{label_prob_threshold}} --- only focus on 3 segments and label_str not in ('marketing_maven', 'growth_services') sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/001_templating/before.sql000066400000000000000000000003031503426445100275040ustar00rootroot00000000000000select * from `{{project}}.{{dataset}}.user_labels_with_probs` where prob_max >={{label_prob_threshold}} --- only focus on 3 segments and label_str not in ('marketing_maven', 'growth_services') 
sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml000066400000000000000000000000671503426445100304750ustar00rootroot00000000000000test-config: rules: - LT02 - LT01 - LT12 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/002_templating/000077500000000000000000000000001503426445100255265ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff000066400000000000000000000002701503426445100273500ustar00rootroot00000000000000[sqlfluff] max_line_length = 50 [sqlfluff:templater:jinja:context] considered_actions=['ussl', 'ups', 'upt'] corr_states="suvpo\n ,biz_type" dst=my_dataset gcp=my_project metric=open sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/002_templating/after.sql000066400000000000000000000006211503426445100273470ustar00rootroot00000000000000-- A subset of the hairy test. SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} , aaa( bbb( ccc({{metric}}_r, {{action}}), ddd({{metric}}_r) ), eee({{action}}) ) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp}}.{{dst}}.gas` GROUP BY {{corr_states}} sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/002_templating/before.sql000066400000000000000000000004561503426445100275160ustar00rootroot00000000000000-- A subset of the hairy test. SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} ,aaa(bbb(ccc({{metric}}_r, {{action}}), ddd({{metric}}_r)), eee({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp}}.{{dst}}.gas` GROUP BY {{corr_states}} sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml000066400000000000000000000001021503426445100304640ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 - LT05 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/003_templating/000077500000000000000000000000001503426445100255275ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/003_templating/.sqlfluff000066400000000000000000000003011503426445100273440ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] considered_actions=['uses_small_subject_line', 'uses_personal_subject', 'uses_personal_to'] corr_states="state_user_v_peer_open\n ,business_type" metric=open sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/003_templating/after.sql000066400000000000000000000012451503426445100273530ustar00rootroot00000000000000-- A subset of the hairy test. -- NOTE: This is not perfect, but reflects -- functionality as at Nov 2020. In future -- the logic should be updated to lint this -- better. -- Force indentation linting. -- sqlfluff: indentation: template_blocks_indent: force SELECT {{corr_states}} {% for action in considered_actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/003_templating/before.sql000066400000000000000000000012151503426445100275110ustar00rootroot00000000000000-- A subset of the hairy test. -- NOTE: This is not perfect, but reflects -- functionality as at Nov 2020. In future -- the logic should be updated to lint this -- better. -- Force indentation linting. 
-- sqlfluff: indentation: template_blocks_indent: force SELECT {{corr_states}} {% for action in considered_actions %} ,{{metric}}_{{action}} ,campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml000066400000000000000000000000671503426445100304770ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/004_templating/000077500000000000000000000000001503426445100255305ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/004_templating/.sqlfluff000066400000000000000000000005161503426445100273550ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] action_states="has_used_small_subject_line\n ,has_used_personal_subject\n ,has_used_personal_to\n" considered_actions=['uses_small_subject_line', 'uses_personal_subject', 'uses_personal_to'] corr_states="state_user_v_peer_open\n ,business_type" dataset=my_dataset gcp_project=my_project metric=open sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/004_templating/after.sql000066400000000000000000000053071503426445100273570ustar00rootroot00000000000000 /* A nice hairy templated query to really stretch and test templating and fixing. This file should fail the safety checks, and so the position of the templated tokens shouldn't move. */ WITH raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} , SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` GROUP BY {{corr_states}} ), {% for action in considered_actions %} {{action}}_raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count_{{action}}, {{corr_states}} -- NOTE: The LT02 fix routine behaves a little strangely here around the templated -- code, specifically the indentation of STDDEV_POP and preceding comments. This -- is a bug currently with no obvious solution. 
, SAFE_DIVIDE( SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}}) ) AS {{metric}}_{{action}} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` WHERE {{action}} != -1 GROUP BY {{corr_states}} ), {% endfor %} new_raw_effect_sizes AS ( SELECT {{corr_states}} {% for action in considered_actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} ), imputed_effect_sizes AS ( SELECT {{corr_states}} , o.campaign_count AS campaign_count {% for action in considered_actions %} , COALESCE(IF(IS_NAN(o.{{metric}}_{{action}}), 0, o.{{metric}}_{{action}}), 0) AS {{metric}}_{{action}} , COALESCE(IF(IS_NAN(n.{{metric}}_{{action}}), 0, n.{{metric}}_{{action}}), 0) AS new_{{metric}}_{{action}} , n.campaign_count_{{action}} {% endfor %} FROM raw_effect_sizes o JOIN new_raw_effect_sizes n USING ({{corr_states}}) ), action_states AS ( SELECT {{action_states}} FROM `{{gcp_project}}.{{dataset}}.global_state_space` GROUP BY {{action_states}}) SELECT imputed_effect_sizes.*, {{action_states}} FROM imputed_effect_sizes CROSS JOIN action_states sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/004_templating/before.sql000066400000000000000000000045631503426445100275230ustar00rootroot00000000000000 /* A nice hairy templated query to really stretch and test templating and fixing. This file should fail the safety checks, and so the position of the templated tokens shouldn't move. */ WITH raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} ,SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` GROUP BY {{corr_states}} ), {% for action in considered_actions %} {{action}}_raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count_{{action}}, {{corr_states}} -- NOTE: The LT02 fix routine behaves a little strangely here around the templated -- code, specifically the indentation of STDDEV_POP and preceding comments. This -- is a bug currently with no obvious solution. 
,SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` WHERE {{action}} != -1 GROUP BY {{corr_states}} ), {% endfor %} new_raw_effect_sizes AS ( SELECT {{corr_states}} {% for action in considered_actions %} ,{{metric}}_{{action}} ,campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} ), imputed_effect_sizes AS ( SELECT {{corr_states}} ,o.campaign_count AS campaign_count {% for action in considered_actions %} ,COALESCE(IF(IS_NAN(o.{{metric}}_{{action}}), 0, o.{{metric}}_{{action}}), 0) AS {{metric}}_{{action}} ,COALESCE(IF(IS_NAN(n.{{metric}}_{{action}}), 0, n.{{metric}}_{{action}}), 0) AS new_{{metric}}_{{action}} ,n.campaign_count_{{action}} {% endfor %} FROM raw_effect_sizes o JOIN new_raw_effect_sizes n USING ({{corr_states}}) ), action_states AS ( SELECT {{action_states}} FROM `{{gcp_project}}.{{dataset}}.global_state_space` GROUP BY {{action_states}}) SELECT imputed_effect_sizes.*, {{action_states}} FROM imputed_effect_sizes CROSS JOIN action_states sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml000066400000000000000000000000671503426445100305000ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/000077500000000000000000000000001503426445100264055ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/after.sql000066400000000000000000000001201503426445100302200ustar00rootroot00000000000000SELECT category, value FROM table1, UNNEST(1, 2, 3) AS category sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/before.sql000066400000000000000000000001141503426445100303640ustar00rootroot00000000000000SELECT category, value FROM table1, UNNEST(1, 2, 3) AS category sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml000066400000000000000000000000541503426445100313510ustar00rootroot00000000000000test-config: rules: - LT02 - AL05 sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/000077500000000000000000000000001503426445100277435ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff000066400000000000000000000002251503426445100315650ustar00rootroot00000000000000[sqlfluff] dialect = bigquery ignore = templating fix_even_unparsable = True [sqlfluff:rules:capitalisation.keywords] capitalisation_policy = upper sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql000066400000000000000000000013151503426445100315650ustar00rootroot00000000000000SELECT * EXCEPT ({% include query %}) FROM ( SELECT tbl1.*, row_number() OVER ( PARTITION BY tbl1.the_name, {{ context_columns | join(', ') }} ORDER BY created_at DESC ) AS rnk {% if context_columns | default("abc") == "abc" %} FROM tbl1 {% endif %} INNER JOIN tbl2 ON tbl1.the_name = tbl2.the_name AND tbl1.run_id = tbl2.run_id WHERE {{ run_rnk }} = {% include "foobar.sql" %} ) {% if +level - -level + level.level + level + level["key"] >= 0 %} WHERE rnk = 1 {% endif %} 
sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/before.sql000066400000000000000000000007741503426445100317360ustar00rootroot00000000000000select * except({% include query %}) from ( select tbl1.*, row_number() over (partition by tbl1.the_name, {{ context_columns | join(', ') }} order by created_at desc) rnk {% if context_columns | default("abc") == "abc" %} from tbl1 {% endif %} inner join tbl2 on tbl1.the_name = tbl2.the_name and tbl1.run_id = tbl2.run_id where {{ run_rnk }} = {% include "foobar.sql" %} ) {% if +level - -level + level.level + level + level["key"] >= 0 %} where rnk = 1 {% endif %} sqlfluff-3.4.2/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml000066400000000000000000000001301503426445100327020ustar00rootroot00000000000000test-config: rules: - LT02 - LT01 - CP01 - AL02 - LT05 - LT09 sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/000077500000000000000000000000001503426445100231435ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/001_semi_structured/000077500000000000000000000000001503426445100267445ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff000066400000000000000000000001121503426445100305610ustar00rootroot00000000000000[sqlfluff:rules:capitalisation.identifiers] capitalisation_policy = lower sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/001_semi_structured/after.sql000066400000000000000000000001751503426445100305710ustar00rootroot00000000000000select value:data:to::string AS to_phone_number, value:data:from::string AS from_phone_number FROM a.b.ticket_audits sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/001_semi_structured/before.sql000066400000000000000000000001751503426445100307320ustar00rootroot00000000000000select value:data:to::string AS TO_PHONE_NUMBER, value:data:from::string AS FROM_PHONE_NUMBER FROM a.b.ticket_audits sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml000066400000000000000000000000411503426445100317040ustar00rootroot00000000000000test-config: rules: - CP02 sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/000077500000000000000000000000001503426445100320055ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff000066400000000000000000000012361503426445100336320ustar00rootroot00000000000000[sqlfluff] dialect = snowflake templater = jinja exclude_rules = AL01,LT05,AL07,ST06,ST01 output_line_length = 120 max_line_length = 120 [sqlfluff:layout:type:binary_operator] line_position = leading [sqlfluff:layout:type:comparison_operator] line_position = leading [sqlfluff:indentation] tab_space_size = 2 [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = upper [sqlfluff:rules:AL03] # Column expressions allow_scalar = False [sqlfluff:rules:capitalisation.identifiers] # Unquoted identifiers extended_capitalisation_policy = lower [sqlfluff:rules:capitalisation.functions] # Function names extended_capitalisation_policy = lower sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql000066400000000000000000000004131503426445100336250ustar00rootroot00000000000000MERGE INTO foo.bar AS tgt USING ( SELECT foo::DATE AS bar FROM foo.bar WHERE split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR', 'FOO') ) AS src ON src.foo = tgt.foo WHEN MATCHED THEN 
UPDATE SET tgt.foo = src.foo; sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/before.sql000066400000000000000000000003671503426445100337760ustar00rootroot00000000000000merge into foo.bar as tgt using ( select foo::DATE as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; test-config.yml000066400000000000000000000001301503426445100346650ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damagingtest-config: rules: - LT01 - LT02 - LT03 - CP01 - LT09 - CV06 sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/000077500000000000000000000000001503426445100320065ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff000066400000000000000000000001121503426445100336230ustar00rootroot00000000000000[sqlfluff] dialect = snowflake [sqlfluff:indentation] tab_space_size = 2 sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql000066400000000000000000000000401503426445100336220ustar00rootroot00000000000000set cutoff = ( select foo ); sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/before.sql000066400000000000000000000000431503426445100337660ustar00rootroot00000000000000set cutoff = (select foo ); test-config.yml000066400000000000000000000000541503426445100346730ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damagingtest-config: rules: - LT02 - LT09 sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/000077500000000000000000000000001503426445100221355ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/001_alias_aligning/000077500000000000000000000000001503426445100254565ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/001_alias_aligning/.sqlfluff000066400000000000000000000001621503426445100273000ustar00rootroot00000000000000[sqlfluff:layout:type:alias_operator] spacing_before = align align_within = select_clause align_scope = bracketed sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/001_alias_aligning/after.sql000066400000000000000000000003561503426445100273040ustar00rootroot00000000000000SELECT test.d AS fourth_column, test.te AS fifth_column, first_column = test.a, second_column = test.b, third_column_long_name = (test.a + test.b) / 2 FROM foo AS test sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/001_alias_aligning/before.sql000066400000000000000000000002671503426445100274460ustar00rootroot00000000000000SELECT test.d fourth_column, test.te AS fifth_column, first_column = test.a, second_column=test.b, third_column_long_name = (test.a + test.b) / 2 FROM foo AS test sqlfluff-3.4.2/test/fixtures/linter/autofix/tsql/001_alias_aligning/test-config.yml000066400000000000000000000000671503426445100304260ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - AL02 sqlfluff-3.4.2/test/fixtures/linter/block_comment_errors.sql000066400000000000000000000005161503426445100244260ustar00rootroot00000000000000select my_field from my_table_asdlfkhsldfjkhsadlfjksahdf /* this is a long multi-line comment that seeks dajkfhsalkjfdhs to explain why we are selecting from this table sfafdjkafdksajdfhsdajkfhsakldjfhsakldjfhksdajf rather than another table 
dsafdafsdfsadfsadfasdfasfdsadfasdfa fdsaf sdafa sadf asdf sdfa sdf asdf */ my_table sqlfluff-3.4.2/test/fixtures/linter/block_comment_errors_2.sql000066400000000000000000000002531503426445100246450ustar00rootroot00000000000000/*********************************************************************************** ************************************************************************************/ sqlfluff-3.4.2/test/fixtures/linter/block_comment_errors_3.sql000066400000000000000000000001271503426445100246460ustar00rootroot00000000000000/********************************************************************************* */ sqlfluff-3.4.2/test/fixtures/linter/column_references.sql000066400000000000000000000002301503426445100237070ustar00rootroot00000000000000select a, b.c, d.g, f as f1, f1 + 1 as f2 from z as a JOIN d using(f) where f2 > 1 -- NB: `f` appears in the USING clause and so shouldn't fail on RF02 sqlfluff-3.4.2/test/fixtures/linter/column_references_bare_function.sql000066400000000000000000000001771503426445100266150ustar00rootroot00000000000000select ta.column_a, current_timestamp as column_b, tb.column_c from table_a as ta join table_b as tb using(id) sqlfluff-3.4.2/test/fixtures/linter/comma_errors.sql000066400000000000000000000001251503426445100227020ustar00rootroot00000000000000-- Checking leading/trailing commas Select a , b , c d , e, f FROM g sqlfluff-3.4.2/test/fixtures/linter/diffquality/000077500000000000000000000000001503426445100220145ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/diffquality/.sqlfluff000066400000000000000000000000321503426445100236320ustar00rootroot00000000000000[sqlfluff] ignore=parsing sqlfluff-3.4.2/test/fixtures/linter/diffquality/parse_error.sql000066400000000000000000000000071503426445100250550ustar00rootroot00000000000000SELECT sqlfluff-3.4.2/test/fixtures/linter/discovery_file.txt000066400000000000000000000001211503426445100232440ustar00rootroot00000000000000-- This is a text file to test discovery of configured file extensions. SELECT 1 sqlfluff-3.4.2/test/fixtures/linter/discovery_file.txt.j2000066400000000000000000000001651503426445100235560ustar00rootroot00000000000000-- This is a text file to test discovery of configured file extensions with multiple parts, e.g. ".sql.j2".
SELECT 1 sqlfluff-3.4.2/test/fixtures/linter/encoding-utf-8-sig.sql000066400000000000000000000000331503426445100235170ustar00rootroot00000000000000select a from b sqlfluff-3.4.2/test/fixtures/linter/encoding-utf-8.sql000066400000000000000000000000301503426445100227340ustar00rootroot00000000000000select a from b sqlfluff-3.4.2/test/fixtures/linter/exit_codes/000077500000000000000000000000001503426445100216215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_a/000077500000000000000000000000001503426445100241135ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_a/1_pass.sql000066400000000000000000000000141503426445100260150ustar00rootroot00000000000000select a, b sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_a/2_fail.sql000066400000000000000000000000251503426445100257650ustar00rootroot00000000000000select a, B FROM FOO sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_b/000077500000000000000000000000001503426445100241145ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_b/1_fail.sql000066400000000000000000000000251503426445100257650ustar00rootroot00000000000000select a, B from FOO sqlfluff-3.4.2/test/fixtures/linter/exit_codes/multifile_b/2_pass.sql000066400000000000000000000000141503426445100260170ustar00rootroot00000000000000select a, b sqlfluff-3.4.2/test/fixtures/linter/heavy_templating.sql000066400000000000000000000004631503426445100235570ustar00rootroot00000000000000{% set properties = { "id": "id", "type": "post_type", "channel_id": "episode_id" } %} select {% for prop, col in properties.items() %} {% if not loop.first %} , {% endif %} {{prop}} as {{ col}} {% endfor %} from {{ ref("snowplow_events_dev") }} sqlfluff-3.4.2/test/fixtures/linter/identifier_capitalisation.sql000066400000000000000000000001071503426445100254200ustar00rootroot00000000000000-- sqlfluff:warnings:CP01 select foo BAR FROM tbla, tblB, TBLC sqlfluff-3.4.2/test/fixtures/linter/indentation_error_contained.sql000066400000000000000000000002561503426445100257700ustar00rootroot00000000000000-- Line 4 of this query has a closing bracket indent which we should test handling of. 
SELECT user_id FROM ( SELECT c.user_id AS user_id FROM c ) sqlfluff-3.4.2/test/fixtures/linter/indentation_error_hard.sql000066400000000000000000000010201503426445100247300ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + (a.with + a.hanging_indent) as actually_ok, a.line + (a.with + a.bad_hanging_indent) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/indentation_error_simple.sql000066400000000000000000000001141503426445100253060ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/indentation_errors.sql000066400000000000000000000002101503426445100241150ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, -- tabs and spaces, 2 spaces a.trailing_spaces, a.tabs_alone_after_spaces FROM tbl as a sqlfluff-3.4.2/test/fixtures/linter/jinja_spacing.sql000066400000000000000000000001601503426445100230100ustar00rootroot00000000000000-- This file is for testing source only fixes -- sqlfluff:templater:jinja:context:foo:bar SELECT * FROM {{foo}} sqlfluff-3.4.2/test/fixtures/linter/jinja_variants/000077500000000000000000000000001503426445100224755ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/jinja_variants/simple_CP01.sql000066400000000000000000000001511503426445100252270ustar00rootroot00000000000000-- sqlfluff:render_variant_limit:10 select 1 AS foo, {% if 1 > 2 %}2 AS boo{% else %}3 AS boo{% endif %} sqlfluff-3.4.2/test/fixtures/linter/multiple_files/000077500000000000000000000000001503426445100225105ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/multiple_files/passing.1.sql000066400000000000000000000000211503426445100250250ustar00rootroot00000000000000select a from b; sqlfluff-3.4.2/test/fixtures/linter/multiple_files/passing.2.sql000066400000000000000000000000211503426445100250260ustar00rootroot00000000000000select a from b; sqlfluff-3.4.2/test/fixtures/linter/multiple_files/passing.3.sql000066400000000000000000000000211503426445100250270ustar00rootroot00000000000000select a from b; sqlfluff-3.4.2/test/fixtures/linter/multiple_sql_errors.sql000066400000000000000000000027431503426445100243300ustar00rootroot00000000000000WITH organizations AS ( SELECT id, organization_key FROM {{ ref('platform_stg__organizations') }} ), accounts AS ( SELECT app_key, organization_id FROM {{ ref('platform_stg__accounts') }} ), owners_packages AS ( SELECT owner_id, owner_type, package_id, created_at, updated_at, started_at FROM {{ ref('platform_stg__owners_packages') }} ), owner_packages_organization_app AS ( SELECT accounts.app_key AS store_id, package_id, owners_packages.created_at, owners_packages.updated_at, owners_packages.started_at FROM owners_packages INNER JOIN organizations ON organizations.organization_key = owner_id INNER JOIN accounts ON accounts.organization_id = organizations.id WHERE LOWER(owner_type) = 'organization' AND app_key IS NOT NULL ), owner_packages_app AS ( SELECT owner_id AS store_id, package_id, created_at, updated_at, started_at FROM owners_packages WHERE LOWER(owner_type) = 'store' ), owner_packages_store_view AS ( SELECT store_id, package_id, created_at, updated_at, started_at FROM owner_packages_organization_app UNION ALL SELECT store_id, package_id, created_at, updated_at, started_at FROM 
owner_packages_app ) SELECT * FROM owner_packages_store_view sqlfluff-3.4.2/test/fixtures/linter/operator_errors.sql000066400000000000000000000002601503426445100234410ustar00rootroot00000000000000SELECT a.a + a.b AS good, a.a - a.b AS bad_1, a.a * a.b AS bad_2, a.b / a.a AS bad_3, 2+(3+6)+7 AS bad_4, a.b AND a.a AS good_4 FROM tbl AS a sqlfluff-3.4.2/test/fixtures/linter/operator_errors_ignore.sql000066400000000000000000000006371503426445100250140ustar00rootroot00000000000000/* This is a file to test the inline ignoring of certain rules. Errors should be found in line 10, but not on line 9. Line 10 has rules ignored, but there are rules which *arent* ignored, which are still present. No errors should be found on line 8 at all. */ SELECT a.a + a.b AS good, a.a-a.b AS bad_1, -- noqa a.a*a.b AS bad_2, -- noqa: LT01, LT03 a.a*a.b AS bad_3 -- noqa: LT03 FROM tbl AS a sqlfluff-3.4.2/test/fixtures/linter/operator_errors_negative.sql000066400000000000000000000001141503426445100253210ustar00rootroot00000000000000SELECT a - b AS c, -2 AS d, a - b AS e, 4-7 AS f FROM tbl sqlfluff-3.4.2/test/fixtures/linter/parse_error.sql000066400000000000000000000000071503426445100225340ustar00rootroot00000000000000SELECT sqlfluff-3.4.2/test/fixtures/linter/parse_error_2.sql000066400000000000000000000000721503426445100227570ustar00rootroot00000000000000SELECT a as b, 42, `multiple_x` as c from cte sqlfluff-3.4.2/test/fixtures/linter/parse_lex_error.sql000066400000000000000000000005011503426445100234030ustar00rootroot00000000000000-- file with both parsing and lexing errors. -- Used for checking ignore functionality and -- the ability to work around issues. SELECT a.id, -- 3 Spaces a.name, a.training_spaces, some_function(SELECT LIMIT WHERE BY ORDER) AS not_parsable, another_function(🤷‍♀️) AS not_lexable FROM tbl AS a sqlfluff-3.4.2/test/fixtures/linter/passing.sql000066400000000000000000000000201503426445100216500ustar00rootroot00000000000000select a from b sqlfluff-3.4.2/test/fixtures/linter/passing_cap_extension.SQL000066400000000000000000000000201503426445100244270ustar00rootroot00000000000000select a from b sqlfluff-3.4.2/test/fixtures/linter/select_distinct_group_by.sql000066400000000000000000000000541503426445100253010ustar00rootroot00000000000000sELECT distinct a, b FROM tbl GROUP BY a, b sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/000077500000000000000000000000001503426445100225215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/.sqlfluffignore000066400000000000000000000000301503426445100255410ustar00rootroot00000000000000# Ignore path_a path_a/ sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/README.md000066400000000000000000000010441503426445100237770ustar00rootroot00000000000000# SQLFluff ignore file tests In this folder there are five queries. Only one of them should be found as the others are all covered by various ignore files. * `path_a/query_a.sql` is ignored by the root `.sqlfluffignore`, by ignoring the whole of `path_a/`. * `path_b/query_b.sql` is *not ignored*. * `path_b/query_c.sql` is ignored by name in `path_b/.sqlfluffignore`. * `path_b/query_d.sql` is ignored by name in `path_b/.sqlfluff`. * `path_c/query_e.sql` is ignored by the `pyproject.toml` config file which ignores the whole of `path_c/`. 
sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_a/000077500000000000000000000000001503426445100237555ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_a/query_a.sql000066400000000000000000000000121503426445100261340ustar00rootroot00000000000000SELECT 1+2sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/000077500000000000000000000000001503426445100237565ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/.sqlfluff000066400000000000000000000000621503426445100255770ustar00rootroot00000000000000[sqlfluff] ignore_paths = query_d.sql,query_e.sql sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/.sqlfluffignore000066400000000000000000000000551503426445100270050ustar00rootroot00000000000000# Ignore query_c query_c.sql query_c_fix.sql sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/query_b.sql000066400000000000000000000000121503426445100261360ustar00rootroot00000000000000SELECT 1+2sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/query_c.sql000066400000000000000000000000121503426445100261370ustar00rootroot00000000000000SELECT 1+2sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_b/query_d.sql000066400000000000000000000000111503426445100261370ustar00rootroot00000000000000SELECT 1 sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_c/000077500000000000000000000000001503426445100237575ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/path_c/query_e.sql000066400000000000000000000000111503426445100261410ustar00rootroot00000000000000SELECT 1 sqlfluff-3.4.2/test/fixtures/linter/sqlfluffignore/pyproject.toml000066400000000000000000000000671503426445100254400ustar00rootroot00000000000000[tool.sqlfluff.core] ignore_paths = [ "path_c/", ] sqlfluff-3.4.2/test/fixtures/linter/whitespace_errors.sql000066400000000000000000000002761503426445100237510ustar00rootroot00000000000000SELECT a.id , -- Comma with leading spaces a.name, a.training_spaces , -- Comma on newline, trailing spaces but with comment! 
a.normal_comma, a.should_work FROM tbl as a sqlfluff-3.4.2/test/fixtures/rules/000077500000000000000000000000001503426445100173305ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/rules/R001_global_config_test.yml000066400000000000000000000003501503426445100243770ustar00rootroot00000000000000rule: R001 configs: core: dialect: exasol tc1: pass_str: | create table if not exists tab.xxx (col1 varchar(10)) configs: core: dialect: ansi tc2: pass_str: | create table tab.xxx (col1 varchar(10)) sqlfluff-3.4.2/test/fixtures/rules/custom/000077500000000000000000000000001503426445100206425ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/rules/custom/L000.py000066400000000000000000000001571503426445100216320ustar00rootroot00000000000000"""Test std rule import.""" class Rule_L000: """Test std rule import.""" groups = ("all",) pass sqlfluff-3.4.2/test/fixtures/rules/custom/S000.py000066400000000000000000000001571503426445100216410ustar00rootroot00000000000000"""Test std rule import.""" class Rule_S000: """Test std rule import.""" groups = ("all",) pass sqlfluff-3.4.2/test/fixtures/rules/custom/bad_rule_name/000077500000000000000000000000001503426445100234175ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/rules/custom/bad_rule_name/E000.py000066400000000000000000000001751503426445100244000ustar00rootroot00000000000000"""Test std rule import.""" class E000: """This will fail to import because it does not start with Rule_.""" pass sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/000077500000000000000000000000001503426445100223275ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL01.yml000066400000000000000000000113411503426445100235070ustar00rootroot00000000000000rule: AL01 test_fail_default_explicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 foo fix_str: select foo.bar from table1 AS foo test_fail_explicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 foo fix_str: select foo.bar from table1 AS foo configs: rules: aliasing.table: aliasing: explicit test_fail_implicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 AS foo fix_str: select foo.bar from table1 foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from (select 1 as bar)foo fix_str: select foo.bar from (select 1 as bar) AS foo test_fail_implicit_alias_space: # No unnecessary whitespace added when fixing implicit aliasing fail_str: select foo.bar from (select 1 as bar) foo fix_str: select foo.bar from (select 1 as bar) AS foo test_fail_implicit_alias_explicit: # Test when explicitly setting explicit fail_str: select foo.bar from (select 1 as bar) foo fix_str: select foo.bar from (select 1 as bar) AS foo configs: rules: aliasing.table: aliasing: explicit test_fail_implicit_alias_implicit: # Test implicit fail_str: select foo.bar from (select 1 as bar) AS foo fix_str: select foo.bar from (select 1 as bar) foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias_implicit_multiple: # Test implicit with multiple tables fail_str: select foo.bar from (select 1 as bar) AS bar, (select 1 as foo) AS foo fix_str: select foo.bar from (select 1 as bar) bar, (select 1 as foo) foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias_implicit_newline: # NOTE: Even when removing by a newline, we should still remove any duplicate # whitespace. 
fail_str: | select foo.bar from (select 1 as bar) AS foo fix_str: | select foo.bar from (select 1 as bar) foo configs: rules: aliasing.table: aliasing: implicit test_fail_default_explicit_alias_merge: # Add whitespace when fixing implicit aliasing fail_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; fix_str: | MERGE dataset.inventory AS t USING dataset.newarrivals AS s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery test_fail_explicit_alias_merge: # Add whitespace when fixing implicit aliasing fail_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; fix_str: | MERGE dataset.inventory AS t USING dataset.newarrivals AS s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery rules: aliasing.table: aliasing: explicit test_pass_implicit_alias_merge: # Add whitespace when fixing implicit aliasing pass_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery rules: aliasing.table: aliasing: implicit test_alias_expression_4492: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/4492 fail_str: SELECT voo.a FROM foo voo fix_str: SELECT voo.a FROM foo AS voo configs: core: dialect: snowflake layout: type: alias_expression: spacing_before: align test_alias_expression_4089: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/4089 fail_str: SELECT RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank FROM (values ('Amsterdam', 1), ('London', 2)) Cities(Name, Id) fix_str: SELECT RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank FROM (values ('Amsterdam', 1), ('London', 2)) AS Cities(Name, Id) configs: layout: type: alias_expression: spacing_before: align test_pass_alias_expression_oracle_tables: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/5038 # Skip aliasing with 'AS' for Oracle table expressions pass_str: SELECT base.id, base.customer_id FROM {{ ref('customers') }} base configs: core: dialect: oracle sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL02.yml000066400000000000000000000043471503426445100235200ustar00rootroot00000000000000rule: AL02 issue_561: # Test for https://github.com/sqlfluff/sqlfluff/issues/561 pass_str: | select array_agg(catalog_item_id) within group (order by product_position asc) over (partition by (event_id, shelf_position)) as shelf_catalog_items from x configs: core: dialect: snowflake test_fail_explicit_column_default: # Test explicit column alias fail_str: select 1 bar from table1 b fix_str: select 1 AS bar from table1 b test_fail_explicit_column_explicit: # Test explicit column alias fail_str: select 1 bar from table1 b fix_str: select 1 AS bar from table1 b configs: rules: aliasing.column: aliasing: explicit test_fail_explicit_column_implicit: # Test explicit column alias fail_str: select 1 AS bar from table1 b fix_str: select 1 bar from table1 b configs: rules: aliasing.column: aliasing: implicit test_pass_tsql_alternative_alias: # Test explicit column alias pass_str: select alias1 = col1 configs: &tsql_dialect core: dialect: tsql test_pass_tsql_alternative_alias_no_spaces: # Test explicit column alias pass_str: 
select alias1=col1 configs: *tsql_dialect test_fail_alias_ending_equals: # Test explicit column alias doesn't catch false positives fail_str: select col1 "example=" fix_str: select col1 AS "example=" test_fail_alias_ending_raw_equals: # Test explicit column alias doesn't catch false positives fail_str: select col1 raw_equals fix_str: select col1 AS raw_equals test_fail_alias_expression_oracle_columns: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/5038 # Skip aliasing with 'AS' for Oracle table expressions # Still alias the columns with 'AS' fail_str: SELECT base.id id, base.customer_id cust_id FROM {{ ref('customers') }} base fix_str: SELECT base.id AS id, base.customer_id AS cust_id FROM {{ ref('customers') }} base configs: core: dialect: oracle test_pass_bigquery_replace_clause_implicit: pass_str: | select * replace (concat(name, "_new") AS name_test) FROM my_db.test; configs: core: dialect: bigquery rules: aliasing.column: aliasing: implicit sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL02_LT01.yml000066400000000000000000000023211503426445100242460ustar00rootroot00000000000000rule: AL02, LT01 test_alias_expression_align_4515_1: # Test more failing alias expressions fail_str: | select test a from example_table fix_str: | select test AS a from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_alias_expression_align_4515_2: # Test more failing alias expressions fail_str: | select test a, test b from example_table fix_str: | select test AS a, test AS b from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_alias_expression_align_4515_3: # Test more failing alias expressions fail_str: | select testy_testy_testy a, test b from example_table fix_str: | select testy_testy_testy AS a, test AS b from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL03.yml000066400000000000000000000056331503426445100235200ustar00rootroot00000000000000rule: AL03 test_pass_column_exp_without_alias_1: pass_str: SELECT *, foo from blah test_pass_column_exp_without_alias_2: # AL03 fix with https://github.com/sqlfluff/sqlfluff/issues/449 pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo) test_pass_column_exp_without_alias_allow_scalar_true: # Don't expect alias if allow_scalar = True (default) pass_str: SELECT 1 from blah test_fail_column_exp_without_alias: fail_str: SELECT upper(foo), bar from blah # Casting (via "::TYPE" syntax) has no effect on column output naming # and AL03 therefore shouldn't be applied test_pass_column_exp_without_alias_if_only_cast: pass_str: SELECT foo_col::VARCHAR(28) , bar from blah test_pass_column_exp_without_alias_if_only_cast_inc_double_cast: pass_str: SELECT foo_col::INT::VARCHAR , bar from blah # No catch useless brackets # output column name is unchanged test_pass_column_exp_without_alias_if_bracketed: pass_str: SELECT (foo_col::INT)::VARCHAR , bar from blah test_fail_column_exp_without_alias_and_cast_fn: fail_str: SELECT CAST(foo_col AS INT)::VARCHAR , bar from blah test_fail_column_exp_without_alias_allow_scalar_false: # Expect alias if allow_scalar = False fail_str: SELECT 1 from blah configs: rules: allow_scalar: false test_pass_column_exp_with_alias: pass_str: SELECT upper(foo) as foo_up, bar from blah test_pass_function_emits: # Don't 
expect alias if allow_scalar = True (default) pass_str: SELECT json_extract(json_str, '$.AFIELD', '$.BFIELD') emits (cola char(1), colb char(1)) FROM table1 configs: core: dialect: exasol test_fail_cte_no_column_list: fail_str: | WITH cte AS ( SELECT col_a, min(col_b) FROM my_table GROUP BY 1 ) SELECT a, b FROM cte test_pass_cte_column_list: pass_str: | WITH cte(a, b) AS ( SELECT col_a, min(col_b) FROM my_table GROUP BY 1 ) SELECT a, b FROM cte test_pass_duckdb_columns_expression: pass_str: | SELECT COLUMNS(c -> c LIKE '%num%'), 1 AS x FROM numbers; configs: core: dialect: duckdb test_pass_duckdb_nested_columns_expression: pass_str: | SELECT MIN(COLUMNS(c -> c LIKE '%num%')), 1 AS x FROM numbers; configs: core: dialect: duckdb test_pass_duckdb_exclude_expression: pass_str: | select * exclude (y), 6 as z from tabx; configs: core: dialect: duckdb test_pass_duckdb_replace_expression: pass_str: | select * replace (3 as x), 6 as z from tabx; configs: core: dialect: duckdb test_fail_subquery_without_alias: fail_str: | SELECT (SELECT MAX(t.col) AS m FROM t), (SELECT MAX(t.col2) AS m FROM t) FROM tbl; test_pass_subquery_with_alias: pass_str: | SELECT (SELECT MAX(t.col) AS m FROM t) AS a, (SELECT MAX(t.col2) AS m FROM t) AS b FROM tbl; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL04.yml000066400000000000000000000033011503426445100235070ustar00rootroot00000000000000rule: AL04 test_fail_exactly_once_duplicated_aliases: # duplicate aliases fail_str: select 1 from table_1 as a join table_2 as a using(pk) test_fail_two_duplicated_aliases: fail_str: | select 1 from table_1 as a join table_2 as a on a.pk = b.pk join table_3 as b on a.pk = b.pk join table_4 as b on b.pk = b.pk test_fail_subquery: fail_str: | SELECT 1 FROM ( select 1 from table_1 as a join table_2 as a on a.pk = b.pk join table_3 as b on a.pk = b.pk join table_4 as b on b.pk = b.pk ) test_pass_subquery: # This query should pass as the different 'a' # aliases are in different subquery levels. pass_str: | SELECT 1 FROM ( select 1 from table_1 as a join table_2 as b on a.pk = b.pk ) AS a test_pass_bigquery_function: pass_str: | SELECT gcpproject.functions.timestamp_parsing(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl configs: core: dialect: bigquery test_pass_tsql_table_variable: pass_str: | select @someVar = someColumn from @someTableVar configs: core: dialect: tsql test_fail_subquery_same_alias: fail_str: | SELECT * FROM tbl AS t WHERE t.val IN (SELECT t.val FROM tbl2 AS t) test_fail_subquery_alias_matches_base_table: fail_str: | SELECT * FROM tbl WHERE val IN (SELECT tbl.val FROM tbl2 AS tbl) test_fail_subquery_table_matches_base_table: fail_str: | SELECT * FROM tbl WHERE val IN (SELECT tbl.val FROM tbl) test_fail_subquery_join_matches_base_table: fail_str: | SELECT tbl.val FROM tbl LEFT JOIN (SELECT tbl.val FROM tbl2 AS tbl); sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL05.yml000066400000000000000000000647331503426445100235300ustar00rootroot00000000000000rule: AL05 test_fail_table_alias_not_referenced_1: # Aliases not referenced. fail_str: SELECT * FROM my_tbl AS foo fix_str: SELECT * FROM my_tbl test_fail_table_alias_not_referenced_1_subquery: # Aliases not referenced. 
fail_str: SELECT * FROM (SELECT * FROM my_tbl AS foo) fix_str: SELECT * FROM (SELECT * FROM my_tbl) test_pass_table_alias_referenced_subquery: pass_str: SELECT * FROM (SELECT foo.bar FROM my_tbl AS foo) test_pass_table_alias_referenced: pass_str: SELECT * FROM my_tbl AS foo JOIN other_tbl on other_tbl.x = foo.x test_pass_unaliased_table_referenced: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/449 pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo) test_ignore_bigquery_value_table_functions: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/356 pass_str: | select * from unnest(generate_timestamp_array( '2020-01-01', '2020-01-30', interval 1 day)) as ts configs: core: dialect: bigquery test_ignore_postgres_value_table_functions: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3051 pass_str: | SELECT json_build_object( 'name', 'ticket_status', 'type', 'enum', 'values', json_agg(status_name) ) FROM unnest(enum_range(NULL::my_enum)) AS status_name; configs: core: dialect: postgres test_ignore_postgres_value_table_functions_generate_series: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3462 pass_str: | SELECT date_trunc('day', dd):: timestamp with time zone FROM generate_series ( '2022-02-01'::timestamp , NOW()::timestamp , '1 day'::interval ) dd ; configs: core: dialect: postgres test_fail_table_alias_not_referenced_2: # Similar to test_1, but with implicit alias. fail_str: SELECT * FROM my_tbl foo fix_str: SELECT * FROM my_tbl test_fail_table_alias_not_referenced_2_subquery: # Aliases not referenced. fail_str: SELECT * FROM (SELECT * FROM my_tbl foo) fix_str: SELECT * FROM (SELECT * FROM my_tbl) test_pass_subquery_alias_not_referenced: pass_str: select * from (select 1 as a) subquery test_pass_bigquery_unaliased_table_with_hyphens: # Test non-quoted table name containing hyphens: https://github.com/sqlfluff/sqlfluff/issues/895 # This is more of a smoke test to exercise the # ObjectReferenceSegment.extract_reference() function, which is used by AL05 # and in turn calls HyphenatedObjectReferenceSegment.iter_raw_references(). pass_str: | select * from project-a.dataset-b.table-c configs: core: dialect: bigquery test_pass_bigquery_aliased_table_with_ticks_referenced: # Test ambiguous column reference caused by use of BigQuery structure fields. # Here, 'et2' could either be a schema name or a table name. # https://github.com/sqlfluff/sqlfluff/issues/1079 pass_str: | SELECT et2.txn.amount FROM `example_dataset2.example_table2` AS et2 configs: core: dialect: bigquery test_pass_tsql_object_reference_override: # T-SQL Overrides the ObjectReferenceSegment so needs to have the _level_to_int # static method set (as a static method!) or rule AL05 fails. 
# https://github.com/sqlfluff/sqlfluff/issues/1669 pass_str: SELECT a FROM b configs: core: dialect: tsql test_pass_subselect_uses_alias_1: pass_str: | SELECT col1, ( SELECT count(*) FROM base WHERE a.col2 = base.col2 ) FROM without_dup AS a test_pass_subselect_uses_alias_2: pass_str: | select COL_A , COL_B from INSERTS INS where COL_B != (select max(COL_B) from INSERTS X where INS.COL_A = X.COL_A) test_pass_subselect_uses_alias_3: pass_str: | SELECT col_1 FROM table_a AS a WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE a.col_4 = b.col_1) test_ansi_function_not_table_parameter: fail_str: | SELECT TO_JSON_STRING(t) FROM my_table AS t fix_str: | SELECT TO_JSON_STRING(t) FROM my_table test_bigquery_function_takes_tablealias_parameter: pass_str: | SELECT TO_JSON_STRING(t) FROM my_table AS t configs: core: dialect: bigquery test_bigquery_function_takes_tablealias_column_parameter: pass_str: | SELECT TO_JSON_STRING(t.c) FROM my_table AS t configs: core: dialect: bigquery test_bigquery_function_takes_tablealias_column_struct_parameter: pass_str: | SELECT TO_JSON_STRING(t.c.structure) FROM my_table AS t configs: core: dialect: bigquery test_snowflake_delete_cte: fail_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3 AS MT3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 fix_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 configs: core: dialect: snowflake test_pass_exasol_values_clause: pass_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: exasol test_fail_exasol_values_clause: fail_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) fix_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: exasol test_pass_sparksql_values_clause: pass_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: sparksql test_fail_sparksql_values_clause: fail_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) fix_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: sparksql test_pass_snowflake_values: # Tests a fix for issue 3301. pass_str: | SELECT thing_1 , thing_2 FROM VALUES ( 'foo', 'bar') , ( 'foo', 'bar') my_table_alias(thing_1, thing_2) configs: core: dialect: snowflake test_pass_tsql_values_clause_in_parentheses: # Tests a fix for issue 3522. In tsql, the parentheses surrounding "values" are # required (otherwise syntax error). SQLFluff was incorrectly complaining that # the alias 't' was unused. pass_str: | SELECT * FROM (VALUES ('a1', 'b1'), ('a2', 'b2'), ('a3', 'b3')) t(a,b) configs: core: dialect: tsql test_pass_join_on_expression_in_parentheses: pass_str: | SELECT table1.c1 FROM table1 AS tbl1 INNER JOIN table2 AS tbl2 ON (tbl2.col2 = tbl1.col2) INNER JOIN table3 AS tbl3 ON (tbl3.col3 = tbl2.col3) test_pass_bigquery_qualify_clause: pass_str: | SELECT * FROM table1 AS tbl1 INNER JOIN tbl2 AS tbl2 WHERE TRUE QUALIFY ROW_NUMBER() OVER ( PARTITION BY tbl1.col1 ORDER BY tbl2.col3 ) = 1 configs: core: dialect: bigquery test_pass_bigquery_nested_inner_join: pass_str: | with abh as ( select ceb.emailaddresskey, dac.accountkey from table2 as dac inner join table3 as ceb on ceb.col2 = dac.col2 ) select col1 from table1 as abg inner join abh on abg.col1 = abh.col1 configs: core: dialect: bigquery test_fail_snowflake_flatten_function: # Tests a fix for issue 3178. 
fail_str: | SELECT r.rec:foo::string, value:bar::string FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) AS x fix_str: | SELECT r.rec:foo::string, value:bar::string FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) configs: core: dialect: snowflake test_pass_derived_query_requires_alias_1: # Case 1: Simple derived query pass_str: | SELECT * FROM ( SELECT 1 ) as a test_pass_derived_query_requires_alias_2: # Case 2: Derived query uses set operation (UNION) pass_str: | SELECT * FROM ( SELECT col FROM dbo.tab UNION SELECT -1 AS col ) AS a test_pass_derived_query_requires_alias_3: # Case 3: Derived query includes a WITH statement pass_str: | SELECT * FROM ( WITH foo AS ( SELECT col FROM dbo.tab ) SELECT * FROM foo ) AS a test_pass_redshift_semi_structured_op: # Redshift _requires_ aliasing when doing semi-structured operations. # https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unnest # The logic here should be that if references _overlap_ (i.e. some # aliases refer to other tables in the same FROM clause), then none of # those aliases should be flagged as unused. pass_str: | SELECT tt.resource_id FROM top_table AS tt , tt.nested_column AS co configs: core: dialect: redshift test_pass_postgres_values_clause: pass_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t (x, y) configs: core: dialect: postgres # Quoted identifiers case-sensitive tests test_pass_naked_select_single_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as 'A' test_fail_naked_select_single_quoted_table_wrong_case: fail_str: | SELECT a.col1 FROM tab1 as 'a' fix_str: | SELECT a.col1 FROM tab1 test_pass_naked_select_single_quoted_table_ci: pass_str: | SELECT a.col1 FROM tab1 as 'a' configs: rules: aliasing.unused: alias_case_check: case_insensitive test_pass_naked_select_single_quoted_table_qcs_nup: pass_str: | SELECT a.col_1 FROM table_a AS 'A' configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_upper test_fail_naked_select_single_quoted_table_wrong_case_qcs_nup: fail_str: | SELECT a.col_1 FROM table_a AS 'a' fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_upper test_pass_naked_select_single_quoted_table_qcs_nlow: pass_str: | SELECT a.col_1 FROM table_a AS 'a' configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_lower test_fail_naked_select_single_quoted_table_wrong_case_qcs_nlow: fail_str: | SELECT a.col_1 FROM table_a AS 'A' fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_lower test_pass_naked_select_single_quoted_table_lower_cs: pass_str: | SELECT a.col_1 FROM table_a AS 'a' configs: rules: aliasing.unused: alias_case_check: case_sensitive test_pass_naked_select_single_quoted_table_upper_cs: pass_str: | SELECT A.col_1 FROM table_a AS 'A' configs: rules: aliasing.unused: alias_case_check: case_sensitive test_fail_naked_select_single_quoted_table_wrong_case_cs: fail_str: | SELECT a.col_1 FROM table_a AS 'A' fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: case_sensitive test_pass_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as "A" test_pass_quoted_select_naked_table: pass_str: | SELECT "A".col1 FROM tab1 as a test_pass_naked_select_quoted_table_subquery: pass_str: | SELECT col_1 FROM table_a AS "A" WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE a.col_4 = b.col_1) test_pass_quoted_select_naked_table_subquery: pass_str: | SELECT col_1 FROM table_a AS a WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE "A".col_4 = b.col_1)
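# Editor's note: the single- and double-quoted blocks around this point exercise
# the `alias_case_check` option. A rough sketch of the semantics implied by the
# pass/fail pairs themselves (inferred from these fixtures, not quoted from the
# rule documentation):
#   case_insensitive      - naked and quoted names always compare caselessly,
#                           so a naked reference `a` matches an alias 'A'.
#   quoted_cs_naked_upper - quoted aliases compare case-sensitively; naked
#                           references fold to upper case, so `a` matches 'A'
#                           but not 'a'.
#   quoted_cs_naked_lower - the same, but naked references fold to lower case,
#                           so `a` matches 'a' but not 'A'.
#   case_sensitive        - raw comparison; `a` matches only an alias spelled 'a'.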
test_pass_naked_select_double_quoted_table_ci: pass_str: | SELECT a.col1 FROM tab1 as "a" configs: rules: aliasing.unused: alias_case_check: case_insensitive test_pass_naked_select_double_quoted_table_qcs_nup: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_upper test_fail_naked_select_double_quoted_table_wrong_case_qcs_nup: fail_str: | SELECT a.col_1 FROM table_a AS "a" fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_upper test_pass_naked_select_double_quoted_table_qcs_nlow: pass_str: | SELECT a.col_1 FROM table_a AS "a" configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_lower test_fail_naked_select_double_quoted_table_wrong_case_qcs_nlow: fail_str: | SELECT a.col_1 FROM table_a AS "A" fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: quoted_cs_naked_lower test_pass_naked_select_double_quoted_table_lower_cs: pass_str: | SELECT a.col_1 FROM table_a AS "a" configs: rules: aliasing.unused: alias_case_check: case_sensitive test_pass_naked_select_double_quoted_table_upper_cs: pass_str: | SELECT A.col_1 FROM table_a AS "A" configs: rules: aliasing.unused: alias_case_check: case_sensitive test_fail_naked_select_double_quoted_table_wrong_case_cs: fail_str: | SELECT a.col_1 FROM table_a AS "A" fix_str: | SELECT a.col_1 FROM table_a configs: rules: aliasing.unused: alias_case_check: case_sensitive test_pass_athena_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as "A" configs: core: dialect: athena test_pass_athena_quoted_select_naked_table: pass_str: | SELECT "A".col1 FROM tab1 as a configs: core: dialect: athena test_pass_athena_naked_select_back_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as `A` configs: core: dialect: athena test_pass_athena_back_quoted_select_naked_table: pass_str: | SELECT `A`.col1 FROM tab1 as a configs: core: dialect: athena test_pass_bigquery_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as `A` configs: core: dialect: bigquery test_pass_bigquery_quoted_select_naked_table: pass_str: | SELECT `A`.col1 FROM tab1 as a configs: core: dialect: bigquery test_pass_bigquery_quoted_escapes: pass_str: | SELECT `\`a`.col1 FROM tab1 as `\`A` configs: core: dialect: bigquery test_pass_clickhouse_naked_select_quoted_table: pass_str: | SELECT a.col1, b.col2 FROM tab1 as `a` CROSS JOIN tab2 as "b" configs: core: dialect: clickhouse test_pass_clickhouse_quoted_select_naked_table: pass_str: | SELECT `a`.col1, "b".col2 FROM tab1 as a CROSS JOIN tab2 as b configs: core: dialect: clickhouse test_pass_clickhouse_quoted_escapes: pass_str: | SELECT "\"`a`""".col1, FROM tab1 as `"\`a``"` configs: core: dialect: clickhouse test_fail_clickhouse_naked_select_quoted_table_cs: fail_str: | SELECT a.col1 FROM tab1 as `A` fix_str: | SELECT a.col1 FROM tab1 configs: core: dialect: clickhouse test_fail_clickhouse_quoted_select_naked_table_cs: fail_str: | SELECT `A`.col1 FROM tab1 as a fix_str: | SELECT `A`.col1 FROM tab1 configs: core: dialect: clickhouse test_pass_db2_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: db2 test_fail_db2_naked_select_double_quoted_table_wrong_case: fail_str: | SELECT a.col_1 FROM table_a AS "a" fix_str: | SELECT a.col_1 FROM table_a configs: core: dialect: db2 test_pass_duckdb_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: duckdb
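# DuckDB resolves identifiers case-insensitively by default, which is why the
# naked reference `a` and the differently-cased quoted alias "A" above still
# count as a match without any `alias_case_check` override.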
test_pass_duckdb_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS A configs: core: dialect: duckdb test_pass_duckdb_quoted_select_single_quote_table: pass_str: | SELECT "'""a""'".col_1 FROM table_a AS '''"A"''' configs: core: dialect: duckdb test_fail_duckdb_single_quote_select_naked_table: fail_str: | SELECT 'a'[1] FROM table_a AS A fix_str: | SELECT 'a'[1] FROM table_a configs: core: dialect: duckdb test_pass_exasol_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: exasol test_pass_exasol_quoted_select_naked_table: pass_str: | SELECT "A".col_1 FROM table_a AS a configs: core: dialect: exasol test_fail_exasol_quoted_select_naked_table_wrong_case: fail_str: | SELECT "a".col_1 FROM table_a AS A fix_str: | SELECT "a".col_1 FROM table_a configs: core: dialect: exasol test_pass_exasol_quoted_select_single_quote_table: pass_str: | SELECT "'""A""'".col_1 FROM table_a AS '''"A"''' configs: core: dialect: exasol test_pass_hive_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as `A` configs: core: dialect: hive test_pass_hive_quoted_select_naked_table: pass_str: | SELECT `a`.col1 FROM tab1 as A configs: core: dialect: hive test_pass_mysql_quoted_select_quoted_table: pass_str: | SELECT `nih`.`userID` FROM `flight_notification_item_history` AS `nih` configs: core: dialect: mysql test_pass_mysql_naked_select_quoted_table: pass_str: | SELECT nih.`userID` FROM `flight_notification_item_history` AS `nih` configs: core: dialect: mysql test_pass_mysql_quoted_select_naked_table: pass_str: | SELECT `nih`.`userID` FROM `flight_notification_item_history` AS nih configs: core: dialect: mysql test_pass_oracle_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: oracle test_fail_oracle_naked_select_double_quoted_table_wrong_case: fail_str: | SELECT a.col_1 FROM table_a AS "a" fix_str: | SELECT a.col_1 FROM table_a configs: core: dialect: oracle test_pass_postgres_naked_select_double_quoted_table: pass_str: | SELECT A.col_1 FROM table_a AS "a" configs: core: dialect: postgres test_fail_postgres_naked_select_double_quoted_table: fail_str: | SELECT a.col_1 FROM table_a AS "A" fix_str: | SELECT a.col_1 FROM table_a configs: core: dialect: postgres test_pass_postgres_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS a configs: core: dialect: postgres test_pass_postgres_quoted_select_single_quote_table: pass_str: | SELECT "A""".col_1 FROM table_a AS "A""" configs: core: dialect: postgres test_pass_redshift_naked_select_double_quoted_table: pass_str: | SELECT A.col_1 FROM table_a AS "a" configs: core: dialect: redshift test_fail_redshift_naked_select_double_quoted_table: fail_str: | SELECT a.col_1 FROM table_a AS "A" fix_str: | SELECT a.col_1 FROM table_a configs: core: dialect: redshift test_pass_redshift_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS a configs: core: dialect: redshift test_pass_redshift_quoted_select_single_quote_table: pass_str: | SELECT "A""".col_1 FROM table_a AS "A""" configs: core: dialect: redshift test_pass_snowflake_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: snowflake test_fail_snowflake_naked_select_double_quoted_table_wrong_case: fail_str: | SELECT a.col_1 FROM table_a AS "a" fix_str: | SELECT a.col_1 FROM table_a configs: core: dialect: snowflake test_pass_sparksql_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS 
`A` configs: core: dialect: sparksql test_pass_sparksql_quoted_select_naked_table: pass_str: | SELECT `a`.col_1 FROM table_a AS A configs: core: dialect: sparksql test_pass_sqlite_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as `a` configs: core: dialect: sqlite test_pass_sqlite_quoted_select_naked_table: pass_str: | SELECT `a`.col1 FROM tab1 as a configs: core: dialect: sqlite test_pass_sqlite_naked_select_quoted_table_subquery: pass_str: | SELECT col_1 FROM table_a AS `a` WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE a.col_4 = b.col_1) configs: core: dialect: sqlite test_pass_sqlite_quoted_select_naked_table_subquery: pass_str: | SELECT col_1 FROM table_a AS a WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE `a`.col_4 = b.col_1) configs: core: dialect: sqlite test_pass_sqlite_different_escaped_quotes: pass_str: | SELECT "`'a""".id, '`''a"'.id FROM table_a AS ```'A"` configs: core: dialect: sqlite test_pass_teradata_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: teradata test_pass_teradata_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS A configs: core: dialect: teradata test_pass_trino_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: trino test_pass_trino_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS A configs: core: dialect: trino test_pass_tsql_naked_select_quoted_table: pass_str: | SELECT a.col1 FROM tab1 as [a] configs: core: dialect: tsql test_pass_tsql_quoted_select_naked_table: pass_str: | SELECT [a].col1 FROM tab1 as a configs: core: dialect: tsql test_pass_vertica_naked_select_double_quoted_table: pass_str: | SELECT a.col_1 FROM table_a AS "A" configs: core: dialect: vertica test_pass_vertica_quoted_select_naked_table: pass_str: | SELECT "a".col_1 FROM table_a AS A configs: core: dialect: vertica # Tables referenced multiple times without a select reference test_pass_repeat_referenced_table: pass_str: | SELECT ROW_NUMBER() OVER(PARTITION BY a.object_id ORDER BY a.object_id) FROM sys.objects a CROSS JOIN sys.objects b CROSS JOIN sys.objects c test_pass_case_insensitive: pass_str: | SELECT a.foo , b.bar FROM foo.baz a INNER JOIN foo.qux B ON a.thing = b.thing ; test_pass_quoted_case_insensitive: pass_str: | SELECT a.foo , b.bar FROM foo.bar a INNER JOIN foo.qux 'B' ON a.thing = b.thing ; test_pass_postgres_different_quoted_case_insensitive: pass_str: | SELECT a.col1 , B.id FROM prd.tbl1 a INNER JOIN tbl1 "b" ON a.col1 = B.id ; configs: core: dialect: postgres test_pass_snowflake_flatten_lateral: pass_str: | SELECT a.test1, a.test2, b.test3, f.test4 FROM table1 AS a, LATERAL flatten(input => some_field) AS b, LATERAL flatten(input => b.value) AS c, LATERAL flatten(input => c.value) AS d, LATERAL flatten(input => d.value) AS e, LATERAL flatten(input => e.value) AS f; configs: core: dialect: snowflake test_fail_snowflake_flatten_lateral: fail_str: | SELECT a.test1, a.test2, b.test3 FROM table1 AS a, LATERAL flatten(input => some_field) AS b, LATERAL flatten(input => b.value) AS c, LATERAL flatten(input => c.value) AS d, LATERAL flatten(input => d.value) AS e, LATERAL flatten(input => e.value) AS f; fix_str: | SELECT a.test1, a.test2, b.test3 FROM table1 AS a, LATERAL flatten(input => some_field) AS b, LATERAL flatten(input => b.value) AS c, LATERAL flatten(input => c.value) AS d, LATERAL flatten(input => d.value) AS e, LATERAL flatten(input => e.value); configs: core: dialect: snowflake 
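# Note on the two flatten cases above: each LATERAL FLATTEN consumes the
# previous alias via `input =>`, so aliases b through e all count as
# referenced; only the final alias `f`, which nothing reads, is unused and
# is removed by the fix.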
test_pass_bigquery_cross_join_unnest: pass_str: | with sequences as ( select [0, 1, 1, 2] as some_numbers union all select [2, 4, 8] ) select num from sequences as s cross join unnest(s.some_numbers) as num configs: core: dialect: bigquery test_pass_bigquery_cross_join_array: pass_str: | with table_arr as (select [1,2,4,2] as arr) SELECT arr FROM table_arr t, t.arr configs: core: dialect: bigquery test_pass_redshift_qualify_follows_from: pass_str: | SELECT * FROM #store_sales AS ss QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 configs: core: dialect: redshift # Redshift requires an alias when a qualify clause follows from test_fail_redshift_qualify_follows_from: fail_str: | SELECT * FROM #store as s INNER JOIN #store_sales AS ss QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 fix_str: | SELECT * FROM #store INNER JOIN #store_sales AS ss QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 configs: core: dialect: redshift test_fail_redshift_qualify_does_not_follows_from: fail_str: | SELECT * FROM #store as s INNER JOIN #store_sales AS ss WHERE col = 1 QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 fix_str: | SELECT * FROM #store INNER JOIN #store_sales WHERE col = 1 QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 configs: core: dialect: redshift test_pass_postgres_jsonb_array_elements_text: # https://github.com/sqlfluff/sqlfluff/issues/4623 pass_str: | SELECT t FROM jsonb_array_elements_text('["orange","banana","watermelon"]') AS t; configs: core: dialect: postgres test_pass_postgres_json_array_elements: # https://github.com/sqlfluff/sqlfluff/issues/4623 pass_str: | SELECT a FROM json_array_elements('[{"id": "a1"}]') AS a, json_array_elements('[{"id": "A1"}, {"id": "A2"}]') AS b WHERE lower(a ->> 'id') = lower(b ->> 'id'); configs: core: dialect: postgres test_pass_bigquery_unnest: # https://github.com/sqlfluff/sqlfluff/issues/4623 pass_str: | with sequences as ( select [0, 1, 1, 2] as some_numbers union all select [2, 4, 8] ) select num from sequences as s cross join unnest(s.some_numbers) as num configs: core: dialect: bigquery test_pass_redshift_array_value: # https://github.com/sqlfluff/sqlfluff/issues/4623 pass_str: | SELECT my_column, my_array_value FROM my_schema.my_table AS t, t.super_array AS my_array_value configs: core: dialect: redshift sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL05_CV12.yml000066400000000000000000000007451503426445100242540ustar00rootroot00000000000000rule: AL05,CV12 test_rule_fix_conflict_deleted_where_reference: fail_str: | select a.a from table_a as a left join table_b as b where a.id = b.id fix_str: | select a.a from table_a as a left join table_b as b ON a.id = b.id test_rule_fix_conflict_modified_where_reference: fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE foo.x = bar.y AND foo.x = 3 fix_str: | SELECT foo.a, bar.b FROM foo JOIN bar ON foo.x = bar.y WHERE foo.x = 3 sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL06.yml000066400000000000000000000034201503426445100235130ustar00rootroot00000000000000rule: AL06 test_pass_no_config: pass_str: | select x.a, x_2.b from x left join x as x_2 on x.foreign_key = x.foreign_key test_fail_alias_too_short: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: 
min_alias_length: 4 test_fail_alias_too_long: fail_str: | SELECT u.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(o.user_id) FROM users as u JOIN customers as customers_customers_customers on u.id = customers_customers_customers.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: max_alias_length: 10 test_fail_alias_min_and_max: fail_str: | SELECT u.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(o.user_id) FROM users as u JOIN customers as customers_customers_customers on u.id = customers_customers_customers.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: min_alias_length: 4 max_alias_length: 10 test_pass_with_config: pass_str: | SELECT users.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(latest_orders.user_id) FROM users JOIN customers as customers_customers_customers on users.id = customers_customers_customers.user_id JOIN orders as latest_orders on users.id = latest_orders.user_id; configs: rules: aliasing.length: min_alias_length: 10 max_alias_length: 30 sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL07.yml000066400000000000000000000300471503426445100235210ustar00rootroot00000000000000rule: AL07 test_pass_allow_self_join_alias: # AL07 Allow self-joins pass_str: | select x.a, x_2.b from x left join x as x_2 on x.foreign_key = x.foreign_key configs: rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_1: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs: rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_2: # AL07 order by fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id order by o.user_id desc fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id order by orders.user_id desc configs: rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_3: # AL07 order by identifier which is the same raw as an alias but refers to a column fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id order by o desc fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id order by o desc configs: rules: aliasing.forbid: force_enable: true alias_single_char_identifiers: fail_str: "select b from tbl as a" fix_str: "select b from tbl" configs: rules: aliasing.forbid: force_enable: true alias_with_wildcard_identifier: fail_str: "select * from tbl as a" fix_str: "select * from tbl" configs: rules: aliasing.forbid: force_enable: true select_from_values: pass_str: | select * from values(1, 2, 3) configs: rules: aliasing.forbid: force_enable: true select_from_table_generator: pass_str: | select * from table( generator( rowcount=>10000 ) ) configs: core: dialect: snowflake rules: 
aliasing.forbid: force_enable: true issue_635: pass_str: | select id::varchar as id, obj:userid::varchar as user_id, redemptions.value:awardedreceiptid::varchar as awarded_receipt_id from a, lateral flatten(input => a.obj:redemptions) redemptions configs: core: dialect: snowflake rules: aliasing.forbid: force_enable: true # This query was causing a runtime error in the rule. issue_239: pass_str: | WITH confusion_matrix AS ( SELECT expected_label, commerce, digital, traditional_services FROM ML.CONFUSION_MATRIX(MODEL model3, ( SELECT * FROM table1 WHERE training = 0 ))) SELECT *, commerce pct_commerce FROM confusion_matrix configs: core: dialect: bigquery # The rule was removing the aliases from this query, causing incorrect behavior. # (Aliases may not only be used in select targets; they also influence whether # multiple joins to a table are independent or not). issue_610: pass_str: | SELECT aaaaaa.c FROM aaaaaa JOIN bbbbbb AS b ON b.a = aaaaaa.id JOIN bbbbbb AS b2 ON b2.other = b.id configs: rules: aliasing.forbid: force_enable: true issue_1589: pass_str: | select * from (select random() as v from (values(1))) t1, (select max(repl) as m from data) t2, (select * from data where repl=t2.m and rnd>=t.v order by rnd limit 1) configs: rules: aliasing.forbid: force_enable: true issue_1639: fail_str: | DECLARE @VariableE date = GETDATE() CREATE TABLE #TempTable AS ( Select ColumnD from SchemaA.TableB AliasC where ColumnD >= @VariableE ) fix_str: | DECLARE @VariableE date = GETDATE() CREATE TABLE #TempTable AS ( Select ColumnD from SchemaA.TableB where ColumnD >= @VariableE ) configs: core: dialect: tsql rules: aliasing.forbid: force_enable: true test_fail_no_copy_code_out_of_template: # The rule wants to replace "t" with "foobar", but # LintFix.has_template_conflicts() correctly prevents it copying code out # of the templated region. Hence, the query is not modified. fail_str: | SELECT t.repo_id FROM {{ source_table }} AS t configs: templater: jinja: context: source_table: foobar rules: aliasing.forbid: force_enable: true test_bigquery_skip_multipart_names: pass_str: | SELECT t.col1 FROM shema1.table1 AS t configs: core: dialect: bigquery test_bigquery_force_enable: fail_str: | SELECT t.col1 FROM schema1.table1 AS t # TRICKY: The fix_str does not parse in the real BigQuery, due to backtick # requirements. That's why the rule is disabled by default. # TODO (low priority): Update this test to test for a case where the rule # produces valid SQL. fix_str: | SELECT schema1.table1.col1 FROM schema1.table1 configs: core: dialect: bigquery rules: aliasing.forbid: force_enable: true test_violation_locations: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs: rules: aliasing.forbid: force_enable: true violations: - code: AL07 description: Avoid aliases in from clauses and join conditions. 
name: aliasing.forbid warning: false start_line_no: 6 start_line_pos: 15 start_file_pos: 87 end_line_no: 6 end_line_pos: 16 end_file_pos: 88 fixes: - edit: '' end_file_pos: 88 end_line_no: 6 end_line_pos: 16 start_file_pos: 84 start_line_no: 6 start_line_pos: 12 type: delete - edit: '' end_file_pos: 84 end_line_no: 6 end_line_pos: 12 start_file_pos: 83 start_line_no: 6 start_line_pos: 11 type: delete - edit: users end_file_pos: 88 end_line_no: 6 end_line_pos: 16 start_file_pos: 87 start_line_no: 6 start_line_pos: 15 type: replace - edit: users end_file_pos: 12 end_line_no: 2 end_line_pos: 6 start_file_pos: 11 start_line_no: 2 start_line_pos: 5 type: replace - edit: users end_file_pos: 113 end_line_no: 7 end_line_pos: 25 start_file_pos: 112 start_line_no: 7 start_line_pos: 24 type: replace - edit: users end_file_pos: 150 end_line_no: 8 end_line_pos: 22 start_file_pos: 149 start_line_no: 8 start_line_pos: 21 type: replace - code: AL07 description: Avoid aliases in from clauses and join conditions. name: aliasing.forbid warning: false start_line_no: 7 start_line_pos: 19 start_file_pos: 107 end_line_no: 7 end_line_pos: 20 end_file_pos: 108 fixes: - edit: '' end_file_pos: 108 end_line_no: 7 end_line_pos: 20 start_file_pos: 104 start_line_no: 7 start_line_pos: 16 type: delete - edit: '' end_file_pos: 104 end_line_no: 7 end_line_pos: 16 start_file_pos: 103 start_line_no: 7 start_line_pos: 15 type: delete - edit: customers end_file_pos: 108 end_line_no: 7 end_line_pos: 20 start_file_pos: 107 start_line_no: 7 start_line_pos: 19 type: replace - edit: customers end_file_pos: 22 end_line_no: 3 end_line_pos: 6 start_file_pos: 21 start_line_no: 3 start_line_pos: 5 type: replace - edit: customers end_file_pos: 40 end_line_no: 4 end_line_pos: 6 start_file_pos: 39 start_line_no: 4 start_line_pos: 5 type: replace - edit: customers end_file_pos: 120 end_line_no: 7 end_line_pos: 32 start_file_pos: 119 start_line_no: 7 start_line_pos: 31 type: replace - code: AL07 description: Avoid aliases in from clauses and join conditions. 
name: aliasing.forbid warning: false start_line_no: 8 start_line_pos: 16 start_file_pos: 144 end_line_no: 8 end_line_pos: 17 end_file_pos: 145 fixes: - edit: '' end_file_pos: 145 end_line_no: 8 end_line_pos: 17 start_file_pos: 141 start_line_no: 8 start_line_pos: 13 type: delete - edit: '' end_file_pos: 141 end_line_no: 8 end_line_pos: 13 start_file_pos: 140 start_line_no: 8 start_line_pos: 12 type: delete - edit: orders end_file_pos: 145 end_line_no: 8 end_line_pos: 17 start_file_pos: 144 start_line_no: 8 start_line_pos: 16 type: replace - edit: orders end_file_pos: 63 end_line_no: 5 end_line_pos: 12 start_file_pos: 62 start_line_no: 5 start_line_pos: 11 type: replace - edit: orders end_file_pos: 157 end_line_no: 8 end_line_pos: 29 start_file_pos: 156 start_line_no: 8 start_line_pos: 28 type: replace test_fail_fix_command: # Test originally from commands_test.py fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs: rules: aliasing.forbid: force_enable: true test_fail_fix_self_aliased_table_5954: fail_str: | with foo as ( select * from vee ), bar as ( select * from baz ), final as ( select foo.col1, foo.col2, foo.col3, bar.col4, bar.col5, bar.col6, bar.col7 from foo as foo left join bar on foo.col1 = bar.col2 ) select * from final fix_str: | with foo as ( select * from vee ), bar as ( select * from baz ), final as ( select foo.col1, foo.col2, foo.col3, bar.col4, bar.col5, bar.col6, bar.col7 from foo left join bar on foo.col1 = bar.col2 ) select * from final configs: rules: aliasing.forbid: force_enable: true sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL08.yml000066400000000000000000000030761503426445100235240ustar00rootroot00000000000000rule: AL08 test_fail_references: fail_str: | select foo, foo test_fail_aliases: fail_str: | select a as foo, b as foo test_fail_alias_refs: fail_str: | select foo, b as foo test_fail_locs: fail_str: | select foo, b as foo, c as bar, bar, d foo, violations: - code: AL08 description: Reuse of column alias 'foo' from line 2. name: aliasing.unique.column warning: false fixes: [] start_line_no: 3 start_line_pos: 8 start_file_pos: 21 end_line_no: 3 end_line_pos: 11 end_file_pos: 24 - code: AL08 description: Reuse of column alias 'bar' from line 4. name: aliasing.unique.column warning: false fixes: [] start_line_no: 5 start_line_pos: 3 start_file_pos: 40 end_line_no: 5 end_line_pos: 6 end_file_pos: 43 - code: AL08 description: Reuse of column alias 'foo' from line 2. name: aliasing.unique.column warning: false fixes: [] start_line_no: 6 start_line_pos: 5 start_file_pos: 49 end_line_no: 6 end_line_pos: 8 end_file_pos: 52 test_fail_alias_quoted: fail_str: | select foo, b as "foo" configs: core: dialect: snowflake test_fail_alias_case: fail_str: | select foo, b as FOO test_fail_qualified: fail_str: | select a.foo , b as foo from a test_pass_table_names: pass_str: | select a.b, b.c, c.d from a, b, c sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AL09.yml000066400000000000000000000160611503426445100235230ustar00rootroot00000000000000rule: AL09 test_pass_no_self_alias: # Test that the rule passes when there's no self alias. 
pass_str: | select no_alias, col_b as rename_alias, max(sum) as max_sum from foo test_pass_no_self_alias_tsql: # Test that the rule passes when there's no self alias. pass_str: | select no_alias, rename_alias = col_b, max_sum = max(sum) from foo configs: &tsql_dialect core: dialect: tsql test_pass_no_self_alias_tsql_no_spaces: # Test that the rule passes when there's no self alias. pass_str: | select no_alias, col_b=rename_alias, rename_alias_2 AS col_c, max_sum=max(sum) from foo configs: *tsql_dialect test_fail_no_quotes_same_case: # When unquoted and the same case, we should always remove the alias. fail_str: | select col_a as this_alias_is_fine, col_b as col_b, COL_C as COL_C, Col_D as Col_D, col_e col_e, COL_F COL_F, Col_G Col_G from foo fix_str: | select col_a as this_alias_is_fine, col_b, COL_C, Col_D, col_e, COL_F, Col_G from foo test_fail_no_quotes_same_case_tsql: # When unquoted and the same case, we should always remove the alias. fail_str: | select this_alias_is_fine = col_a, col_b = col_b, COL_C AS COL_C, Col_D = Col_D, col_e col_e, COL_F COL_F, Col_G Col_G from foo fix_str: | select this_alias_is_fine = col_a, col_b, COL_C, Col_D, col_e, COL_F, Col_G from foo configs: *tsql_dialect test_fail_with_quotes_same_case: # When quoted and the same case, we should always remove the alias. fail_str: | select "col_a" as "this_alias_is_fine", "col_b" as "col_b", "COL_C" as "COL_C", "Col_D" as "Col_D", "col_e" "col_e", "COL_F" "COL_F", "Col_G" "Col_G" from foo fix_str: | select "col_a" as "this_alias_is_fine", "col_b", "COL_C", "Col_D", "col_e", "COL_F", "Col_G" from foo test_fail_different_case: # If the casing is different, even if the quoting is the same, we # should never propose a fix. However in most dialects we should still # flag the issue. If CP02 is active, this situation will be resolved # before an error shows. fail_str: | select col_a as this_alias_is_fine, col_b as Col_B, COL_C as col_c, Col_D as COL_D, col_e Col_e, COL_F col_f, Col_G COL_G, "col_b" as "Col_B", "COL_C" as "col_c", "Col_D" as "COL_D", "col_e" "Col_e", "COL_F" "col_f", "Col_G" "COL_G" from foo violations: - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 3 start_line_pos: 5 start_file_pos: 44 end_line_no: 3 end_line_pos: 10 end_file_pos: 49 - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 4 start_line_pos: 5 start_file_pos: 64 end_line_no: 4 end_line_pos: 10 end_file_pos: 69 - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 5 start_line_pos: 5 start_file_pos: 84 end_line_no: 5 end_line_pos: 10 end_file_pos: 89 - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 6 start_line_pos: 5 start_file_pos: 104 end_line_no: 6 end_line_pos: 10 end_file_pos: 109 - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. 
name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 7 start_line_pos: 5 start_file_pos: 121 end_line_no: 7 end_line_pos: 10 end_file_pos: 126 - code: AL09 description: >- Ambiguous self alias. Either remove unnecessary alias, or quote alias/reference to make case change explicit. name: aliasing.self_alias.column warning: false fixes: [] start_line_no: 8 start_line_pos: 5 start_file_pos: 138 end_line_no: 8 end_line_pos: 10 end_file_pos: 143 test_pass_different_case_clickhouse: # If the casing is different, even if the quoting is the same, we # should never propose a fix. In clickhouse, different cases are # always different objects, even when unquoted - so never flag # aliases as unnecessary if the casing is different. pass_str: | select col_a as this_alias_is_fine, col_b as Col_B, COL_C as col_c, Col_D as COL_D, col_e Col_e, COL_F col_f, Col_G COL_G, "col_b" as "Col_B", "COL_C" as "col_c", "Col_D" as "COL_D", "col_e" "Col_e", "COL_F" "col_f", "Col_G" "COL_G" from foo configs: core: dialect: clickhouse test_pass_different_quotes: # If the quoting is different, even if the casing is the same, we # should never fail/fix the rule. If RF06 changes the quoting we # might trigger after that, but by then the quoting will be different. pass_str: | select col_a as this_alias_is_fine, "col_b" as col_b, COL_C as "COL_C", "Col_D" as Col_D, col_e "col_e", "COL_F" COL_F, Col_G "Col_G" from foo test_pass_mysql_quoted_identifiers: pass_str: | SELECT users.email AS "Email_in_double_quotes", users.email AS "Email""with_escaped_double_quotes", users.email AS `Email_in_backticks`, users.email AS 'Email_in_single_quotes' FROM users; configs: core: dialect: mysql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM01.yml000066400000000000000000000003161503426445100235100ustar00rootroot00000000000000rule: AM01 test_pass_only_group_by: # check if using select distinct and group by pass_str: select a from b group by a test_fail_distinct_and_group_by: fail_str: select distinct a from b group by a sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM02.yml000066400000000000000000000043551503426445100235200ustar00rootroot00000000000000rule: AM02 test_pass_union_all: pass_str: | SELECT a, b FROM tbl UNION ALL SELECT c, d FROM tbl1 test_fail_bare_union: fail_str: | SELECT a, b FROM tbl UNION SELECT c, d FROM tbl1 fix_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 test_pass_union_distinct: pass_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 test_pass_union_distinct_with_comment: pass_str: | SELECT a, b FROM tbl --selecting a and b UNION DISTINCT SELECT c, d FROM tbl1 test_fail_triple_join_with_one_bad: fail_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 UNION SELECT e, f FROM tbl2 fix_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 UNION DISTINCT SELECT e, f FROM tbl2 test_fail_triple_join_with_one_bad_lowercase: fail_str: | select a, b from tbl union distinct select c, d from tbl1 union select e, f from tbl2 fix_str: | select a, b from tbl union distinct select c, d from tbl1 union distinct select e, f from tbl2 test_exasol: pass_str: | select a, b from tbl1 union select c, d from tbl2 configs: core: dialect: exasol test_exasol_union_all: pass_str: | select a, b from tbl1 union all select c, d from tbl2 configs: core: dialect: exasol test_postgres: pass_str: | select a, b from tbl1 union select c, d from tbl2 configs: core: dialect: postgres test_redshift: fail_str: | SELECT a, b FROM tbl1 UNION SELECT c, d FROM tbl2 fix_str: | 
SELECT a, b FROM tbl1 UNION DISTINCT SELECT c, d FROM tbl2 configs: core: dialect: redshift sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM03.yml000066400000000000000000000013461503426445100235160ustar00rootroot00000000000000rule: AM03 test_unspecified: pass_str: SELECT * FROM t ORDER BY a test_unspecified_unspecified: pass_str: SELECT * FROM t ORDER BY a, b test_unspecified_desc: fail_str: SELECT * FROM t ORDER BY a, b DESC fix_str: SELECT * FROM t ORDER BY a ASC, b DESC test_asc_desc: pass_str: SELECT * FROM t ORDER BY a ASC, b DESC test_desc_unspecified: fail_str: SELECT * FROM t ORDER BY a DESC, b fix_str: SELECT * FROM t ORDER BY a DESC, b ASC test_desc_asc: pass_str: SELECT * FROM t ORDER BY a DESC, b ASC test_nulls_last: fail_str: SELECT * FROM t ORDER BY a DESC, b NULLS LAST fix_str: SELECT * FROM t ORDER BY a DESC, b ASC NULLS LAST test_comment: pass_str: SELECT * FROM t ORDER BY a /* Comment */ DESC, b ASC sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM04.yml000066400000000000000000000270421503426445100235200ustar00rootroot00000000000000rule: AM04 test_pass_known_number_of_result_columns_1: pass_str: select a, b from t test_fail_unknown_number_of_result_columns_1: fail_str: select * from t test_pass_known_number_of_result_columns_2: desc: Columns are specified in CTE so * in final query will return only columns specified earlier. pass_str: | with cte as ( select a, b from t ) select * from cte test_fail_unknown_number_of_result_columns_2: fail_str: | with cte as ( select * from t ) select * from cte test_pass_known_number_of_result_columns_3: pass_str: | with cte as ( select * from t ) select a, b from cte test_pass_known_number_of_result_columns_4: desc: | CTE1 has * but columns are specified in final select. CTE2 has columns specified so cte2.* in final select will return only columns specified in CTE2's body. pass_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a, cte2.* from cte1 join cte2 using (a) test_fail_unknown_number_of_result_columns_3: fail_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.*, cte2.* from cte1 join cte2 using (a) test_pass_known_number_of_result_columns_5: desc: Columns specified in subquery so * in final select will return only those columns. pass_str: | select * from ( select a, b from t ) test_fail_unknown_number_of_result_columns_4: desc: Select t.* will return unknown number of columns. fail_str: | with cte as ( select a, b from t ) select cte.*, t.* from cte1 join t using (a) test_fail_unknown_number_of_result_columns_5: desc: Select t_alias.* will return unknown number of columns since the * is used in subquery. fail_str: | with cte as ( select a, b from t ) select cte.*, t_alias.* from cte1 join (select * from t) as t_alias using (a) test_pass_known_number_of_result_columns_6: desc: Select t_alias.* will return known number of columns since they are defined in subquery. 
pass_str: | select t_alias.* from cte1 join (select a from t) as t_alias using (a) test_fail_unknown_number_of_result_columns_6: fail_str: | select t_alias.* from t1 join (select * from t) as t_alias using (a) test_pass_known_number_of_result_columns_7: pass_str: | with cte as ( select a, b from t ) select cte.*, t_alias.a from cte1 join (select * from t) as t_alias using (a) test_fail_unknown_number_of_result_columns_7: fail_str: select *, t.*, t.a, b from t test_pass_known_number_of_result_columns_8: pass_str: | select a from t1 union all select b from t2 test_fail_unknown_number_of_result_columns_8: fail_str: | select a from t1 union all select * from t2 test_fail_unknown_number_of_result_columns_9: fail_str: | select * from t1 union all select b from t2 test_fail_unknown_number_of_result_columns_10: fail_str: | with cte as ( select * from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_9: pass_str: | with cte as ( select a from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_10: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT customers.name, cte_orders.* FROM customers, cte_orders WHERE clients.id = orders.clientId test_pass_known_number_of_result_columns_11: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT * FROM cte_orders AS orders test_fail_unknown_number_of_result_columns_11: fail_str: | WITH cte_orders AS ( SELECT * FROM orders ) SELECT * FROM cte_orders AS orders test_fail_unknown_number_of_result_columns_12: desc: CTE is unused. We select * from orders table which means it's unknown what columns will be returned. fail_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT * FROM orders AS cte_orders test_fail_unknown_number_of_result_columns_13: fail_str: SELECT p.* FROM races, UNNEST(participants) AS p test_pass_known_number_of_result_columns_12: pass_str: SELECT p FROM races, UNNEST(participants) AS p test_fail_unknown_number_of_result_columns_14: fail_str: SELECT * FROM a JOIN b test_fail_unknown_number_of_result_columns_15: desc: We know what columns will cte return but we don't know what columns will be returned from joined table b. fail_str: | WITH cte AS ( SELECT a FROM t ) SELECT * FROM cte JOIN b test_pass_known_number_of_result_columns_13: desc: Both CTEs define returned columns so * in final select will return known number of columns. pass_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 JOIN cte2 test_pass_known_number_of_result_columns_14: pass_str: select a, b from `d.t` configs: core: dialect: bigquery test_fail_unknown_number_of_result_columns_16: fail_str: select * from `d.t` configs: core: dialect: bigquery test_pass_known_number_of_result_columns_15: # Issue 915: Crash on statements that don't have a SELECT pass_str: CREATE TABLE my_table (id INTEGER) test_fail_unknown_number_of_result_columns_17: # Issue 930: Infinite recursion if CTE queries itself. 
fail_str: | with hubspot__engagement_calls as ( select * from hubspot__engagement_calls ) select * from hubspot__engagement_calls test_fail_unknown_number_of_result_columns_18: # Another test for issue #930 fail_str: | with hubspot__contacts as ( select * from ANALYTICS.PUBLIC_intermediate.hubspot__contacts ), final as ( select * from hubspot__contacts where not coalesce(_fivetran_deleted, false) ) select * from final test_pass_nested_ctes_1: # Test for issue 1984 pass_str: | with a as ( with b as ( select 1 from c ) select * from b ) select * from a test_fail_nested_ctes_1: # Test for issue 1984 fail_str: | with a as ( with b as ( select * from c ) select * from b ) select * from a test_fail_nested_ctes_2: # Test for issue 1984 fail_str: | with a as ( with b as ( select 1 from t1 ), c AS ( SELECT * FROM u ) select b.*, c.* from b join c ) select * from a test_pass_nested_ctes_3: # Test for issue 1984 pass_str: with a as ( with b as ( select * from c ) select 1 from b ) select * from a test_pass_nested_ctes_4: # Test for issue 1984 pass_str: with a as ( with b as ( select * from c ) select * from b ) select 1 from a test_cte_reference_outer_5: pass_str: with a as ( select 1 from b ) select * from ( select * from a ) test_cte_tricky_nesting_6: pass_str: with b as ( select 1 from c ) select * from ( with a as ( select * from b ) select * from a ) test_nested_and_same_level_ctes_7: pass_str: with a as ( with c as ( select 1 from d ), b as ( select * from c ) select * from b ) select * from a test_nested_cte_references_outer_8: pass_str: with c as ( select 1 from d ), a as ( with b as ( select * from c ) select * from b ) select * from a test_pass_join_inside_cte_with_unqualified: pass_str: with cte as ( select * from t1 inner join t2 ) select a, b from cte; test_pass_known_number_of_columns_in_two_join_subqueries: pass_str: select * from ( select a from foo ) t1 inner join ( select b from bar ) t2; test_fail_two_join_subqueries_one_with_unknown_number_of_columns: fail_str: select * from ( select * from foo ) t1 inner join ( select b from bar ) t2; test_fail_no_source_table: fail_str: | SELECT * test_query_on_snowflake_stage: pass_str: select mycolumn1 from @public.mytable1 configs: core: dialect: snowflake test_snowflake_delete_cte: pass_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 configs: core: dialect: snowflake test_pass_exasol_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_exasol_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_exasol_values_clause: pass_str: SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) configs: core: dialect: exasol test_fail_exasol_invalid_foreign_key_from: # This query should fail (I think) because it # returns more than just the foreign key and # that could be an unknown number of columns. fail_str: | SELECT * WITH INVALID FOREIGN KEY (nr) FROM T1 REFERENCING T2 (id) configs: core: dialect: exasol test_pass_exasol_invalid_foreign_key_from: # This query should pass because it will return # just the foreign key. 
pass_str: | SELECT INVALID FOREIGN KEY (nr) FROM T1 REFERENCING T2 (id) configs: core: dialect: exasol test_pass_cte_no_select_final_statement: pass_str: WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; test_tsql_select_system_as_identifier: pass_str: | SELECT @@IDENTITY AS 'Identity' configs: core: dialect: tsql test_pass_sparksql_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: sparksql test_pass_sparksql_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: sparksql test_pass_sparksql_values_clause: pass_str: SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) configs: core: dialect: sparksql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM05.yml000066400000000000000000000153021503426445100235150ustar00rootroot00000000000000rule: AM05 # Default config test_fail_lone_join_default: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" test_fail_lone_join_lowercase_default: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" test_pass_inner_join_default: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" test_pass_left_join_default: pass_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" test_pass_right_join_default: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" test_pass_full_join_default: pass_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" test_pass_left_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" test_pass_right_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" test_pass_full_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" test_pass_cross_join_default: pass_str: "SELECT foo.a, bar.b FROM foo CROSS JOIN bar;\n" # Config = "inner" test_fail_lone_join_inner: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_fail_lone_join_lowercase_inner: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_inner_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_left_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_right_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_full_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_left_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_right_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_full_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner # Config = "outer" 
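# Under "outer", a bare JOIN (or INNER JOIN) is left as-is; only LEFT, RIGHT
# and FULL joins must carry the OUTER keyword, as the cases below show.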
test_pass_lone_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_inner_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_left_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_right_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_full_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_full_join_lowercase_outer: fail_str: "SELECT foo.a, bar.b FROM foo full join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_left_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_right_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_full_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer # Config = "both" test_fail_lone_join_both: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_lone_join_lowercase_both: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_inner_join_both: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_left_join_both: fail_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_right_join_both: fail_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_full_join_both: fail_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_full_join_lowercase_both: fail_str: "SELECT foo.a, bar.b FROM foo full join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_left_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_right_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_full_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both 
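# For reference, a minimal sketch of the equivalent project-level setting in a
# .sqlfluff config file (ini form assumed from standard SQLFluff conventions,
# mirroring the `rules: ambiguous.join:` blocks above):
#
#   [sqlfluff:rules:ambiguous.join]
#   fully_qualify_join_types = both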
sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM06.yml000066400000000000000000000317721503426445100235230ustar00rootroot00000000000000rule: AM06 test_pass_explicit_group_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; test_pass_implicit_group_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; test_pass_explicit_order_by_default: pass_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; test_pass_implicit_order_by_default: pass_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; test_fail_mix_group_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; test_pass_implicit_group_by_and_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; test_pass_explicit_group_by_and_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; test_fail_within_line_mix_group_by_and_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; test_fail_across_line_mix_group_by_and_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; test_pass_explicit_expression_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) test_fail_implicit_expression_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) test_pass_explicit_group_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_group_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_mix_group_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_order_by_custom_explicit: pass_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_order_by_custom_explicit: fail_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_group_by_and_order_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_within_line_mix_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_across_line_mix_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; configs: rules: ambiguous.column_references:
group_by_and_order_by_style: explicit test_pass_explicit_expression_order_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_expression_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_implicit_group_by_custom_implicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_explicit_group_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_pass_implicit_order_by_custom_implicit: pass_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_explicit_order_by_custom_implicit: fail_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_mix_group_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_pass_implicit_group_by_and_order_by_custom_implicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_explicit_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_within_line_mix_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_across_line_mix_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_explicit_expression_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_implicit_expression_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_consistent_snowflake: fail_str: | select a, b, c from test_table group by 1, b order by 1, 2 configs: core: dialect: snowflake rules: ambiguous.column_references: group_by_and_order_by_style: consistent test_fail_consistent_exasol: fail_str: | select a, b, c from test_table group by 1, b order by 1, 2 configs: core: dialect: exasol rules: ambiguous.column_references: group_by_and_order_by_style: consistent test_pass_window: pass_str: | SELECT field_1 , field_2 , SUM(field_3) as field_3_total , SUM(field_3) OVER (ORDER BY field_1) AS field_3_window_sum FROM table1 GROUP BY 1, 2
ORDER BY 1, 2 test_pass_window_snowflake: pass_str: | SELECT field_1 , field_2 , SUM(field_3) as field_3_total , SUM(field_3) OVER (ORDER BY field_1) AS field_3_window_sum FROM table1 GROUP BY 1, 2 ORDER BY 1, 2 configs: core: dialect: snowflake test_pass_withingroup_snowflake: pass_str: | SELECT LISTAGG(x) WITHIN GROUP (ORDER BY list_order) AS my_list FROM main GROUP BY 1 configs: core: dialect: snowflake test_pass_groupby_rollup_bigquery: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: bigquery test_fail_groupby_rollup_bigquery: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: bigquery test_pass_groupby_rollup_postgres: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: postgres test_fail_groupby_rollup_postgres: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: postgres test_pass_groupby_rollup_exasol: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: exasol test_fail_groupby_rollup_exasol: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: exasol test_pass_groupby_rollup_athena: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: athena test_fail_groupby_rollup_athena: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: athena test_pass_groupby_rollup_sparksql: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: sparksql test_fail_groupby_rollup_sparksql: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: sparksql test_pass_array_agg_bigquery: pass_str: | SELECT to_json_string(array_agg(product_id order by started_at desc)) AS products FROM purchased GROUP by 1 configs: core: dialect: bigquery test_pass_array_expression_bigquery: pass_str: | SELECT poi.country_code , poi.po_id , ARRAY( SELECT STRUCT( p.product_name , p.sku_id , p.created_at ) AS products_purchased FROM UNNEST(poi.products_purchased) AS p ORDER BY p.created_at ) AS products_purchased FROM `my_project.my_dataset.purchase_orders_products` AS poi configs: core: dialect: bigquery sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM07.yml000066400000000000000000000203031503426445100235140ustar00rootroot00000000000000rule: AM07 test_pass_known_number_of_result_columns_1: pass_str: | select a, b from t union all select c, d from k test_fail_known_number_of_result_columns_1: fail_str: | select a from t union all select c, d from k test_pass_known_number_of_result_columns_2: desc: Columns are specified in CTE so * in final query will return only columns specified 
earlier. pass_str: | with cte as ( select a, b from t ) select * from cte union select c, d from t2 test_fail_known_number_of_result_columns_2: fail_str: | with cte as ( select a, b, c from t ) select * from cte union select d, e from t test_pass_known_number_of_result_columns_3: pass_str: | with cte as ( select * from t ) select a, b from cte test_pass_known_number_of_result_columns_4: pass_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a , cte1.d , cte2.* from cte1 join cte2 using (a) union select e, f, g, h from cte3 test_fail_known_number_of_result_columns_3: fail_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a, cte2.* from cte1 join cte2 using (a) union select e, f from cte3 test_pass_known_number_of_result_columns_5: pass_str: | select * from ( select a, b from t ) union select c, d from t2 test_pass_known_number_of_result_columns_6: pass_str: | with cte2 as ( select b from t1 ) select t_alias.* from t2 join (select a from t) as t_alias using (a) union select * from cte2 test_fail_unknown_number_of_result_columns_4: fail_str: | select t_alias.* from t1 join (select a from t) as t_alias using (a) union select a,b from t2 test_pass_known_number_of_result_columns_7: pass_str: | select a from t1 union all select b from t2 test_pass_unknown_wildcard_number_of_result_columns_8: pass_str: | select a from t1 union all select * from t2 test_pass_known_number_of_result_columns_9: pass_str: | with cte as ( select a from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_10: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT customers.name, cte_orders.* FROM customers, cte_orders WHERE clients.id = orders.clientId test_pass_known_number_of_result_columns_11: pass_str: select a, b from `d.t` union all select c, d from `d.t` configs: core: dialect: bigquery test_fail_unknown_number_of_result_columns_5: fail_str: select a, b, c from `d.t` union all select c, d from `d.t` configs: core: dialect: bigquery test_pass_known_number_of_result_columns_13: # Issue 915: Crash on statements that don't have a SELECT pass_str: CREATE TABLE my_table (id INTEGER) test_pass_known_number_of_result_columns_14: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a, b FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_pass_known_number_of_result_columns_15: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT * FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_fail_unknown_number_of_result_columns_6: fail_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_pass_known_number_of_result_columns_16: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a, b FROM table2 ) SELECT * from cte2 as cte_al ) UNION SELECT e, f FROM table3 test_pass_known_number_of_result_columns_17: pass_str: | SELECT * FROM table1 as table2 UNION SELECT e, f FROM table3 test_fail_known_number_of_result_columns_7: fail_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a FROM table2 ) SELECT * from cte2 as cte_al ) UNION SELECT e, f FROM table3 test_pass_nested_ctes_1: # Test for issue 1984 pass_str: | with a as ( with b as ( select 1 from c ) select * from b ) select * from a 
union all select k from t2 test_fail_nested_ctes_1: # Test for issue 1984 fail_str: | with a as ( with b as ( select a from c ) select * from b ) select * from a union select a, b from t2 test_cte_reference_outer_2: pass_str: with a as ( select 1 from b ) select * from ( select * from a ) union select 2 from c test_cte_tricky_nesting_3: pass_str: with b as ( select 1 from c ) select * from ( with a as ( select * from b ) select * from a ) union select a from t2 test_nested_and_same_level_ctes_4: pass_str: with a as ( with c as ( select 1 from d ), b as ( select * from c ) select * from b ) select * from a union select k from t2 test_nested_cte_references_outer_5: pass_str: with c as ( select 1 from d ), a as ( with b as ( select * from c ) select * from b ) select * from a union select k from t2 test_pass_join_inside_cte_with_unqualified: pass_str: with cte as ( select * from t1 inner join t2 ) select a, b from cte union select c, d from cte2; test_pass_known_number_of_columns_in_two_join_subqueries: pass_str: select * from ( select a from foo ) t1 inner join ( select b from bar ) t2 union select c, d from t3; test_fail_two_join_subqueries_one_with_unknown_number_of_columns: fail_str: select * from ( select b from foo ) t1 inner join ( select b from bar ) t2 union select c, d, e from t3; test_query_on_snowflake_stage: pass_str: select mycolumn1 from @public.mytable1 union select mycolumn2 from table2 configs: core: dialect: snowflake test_pass_cte_no_select_final_statement: pass_str: WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; test_fail_cte_no_select_final_statement: fail_str: UPDATE sometable SET sometable.baz = mycte.bar FROM (SELECT foo, bar FROM mytable1 UNION ALL SELECT bar FROM mytable2) as k test_tsql_select_system_as_identifier: pass_str: | SELECT @@IDENTITY AS 'Identity' configs: core: dialect: tsql test_pass_dbt_union_relations_output: pass_str: | with unified_partitions as ( ( select cast("id" as INTEGER) as "id" from "dev"."main"."my_first_dbt_model" where id = 1 ) union all ( select cast("id" as INTEGER) as "id" from "dev"."main"."my_second_dbt_model" where id = 1 ) ) select * from unified_partitions sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/AM08.yml000066400000000000000000000110241503426445100235150ustar00rootroot00000000000000rule: AM08 test_pass_select_comma: # this syntax is currently not covered by AM08 pass_str: | SELECT a.*, b.* FROM a, b test_fail_missing_clause: fail_str: | SELECT foo.a, bar.b FROM foo INNER JOIN bar test_fail_missing_clause_lowercase_default: fail_str: | SELECT foo.a, bar.b FROM foo left join bar test_fail_missing_multiple_joins: fail_str: | SELECT foo.a, bar.b FROM foo left join bar on 1=2 left join baz; test_fail_missing_multiple_joins_mixed_types: fail_str: | SELECT foo.a, bar.b FROM foo left join bar on foo.x = bar.y left join baz; test_fail_missing_multiple_joins_not_last: fail_str: | SELECT foo.a, bar.b FROM foo left join bar left join baz on foo.x = bar.y; test_noop_join_where: pass_str: | SELECT foo.a, bar.b FROM foo left join bar where foo.x = bar.y; test_pass_cross_join_with_complex_where_clause_not_simplifiable: pass_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE foo.a = bar.a OR foo.x = 3; test_pass_cross_join_with_complex_where_clause_and_subqueries: pass_str: | SELECT foo.a, bar.b FROM (SELECT * FROM abc) foo JOIN (SELECT * FROM def WHERE x = 3) bar WHERE foo.a = bar.a; test_pass_inner_join_default: pass_str: | SELECT foo.a, bar.b FROM foo INNER JOIN bar ON 1=1; 
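Editor's note: the AM08 cases that follow use ON 1=1 stubs to keep the assertions minimal. As a hedged, invented example (not in the upstream fixture set), the case below makes the same point with a realistic predicate: AM08 only checks that a qualified join carries some condition, not what that condition is.

# Hypothetical editor example following the pass_str convention above.
test_pass_inner_join_realistic_condition_editor_example:
  pass_str: |
    SELECT foo.a, bar.b
    FROM foo
    INNER JOIN bar ON foo.id = bar.foo_id;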
test_pass_left_join_default: pass_str: | SELECT foo.a, bar.b FROM foo LEFT JOIN bar ON 1=1; test_pass_right_join_default: pass_str: | SELECT foo.a, bar.b FROM foo RIGHT JOIN bar ON 1=1; test_pass_full_join_default: pass_str: | SELECT foo.a, bar.b FROM foo FULL JOIN bar ON 1=1; test_pass_left_outer_join_default: pass_str: | SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar ON 1=1; test_pass_right_outer_join_default: pass_str: | SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar ON 1=1; test_pass_full_outer_join_default: pass_str: | SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar ON 1=1; test_pass_using: pass_str: | SELECT foo.id, bar.id FROM foo LEFT JOIN bar USING (id) configs: core: dialect: snowflake test_pass_cross_join_default: pass_str: | SELECT foo.a, bar.b FROM foo CROSS JOIN bar; test_pass_global_cross_join_clickhouse: pass_str: | SELECT foo.a, bar.b FROM foo GLOBAL CROSS JOIN bar; configs: core: dialect: clickhouse test_pass_positional_join_duckdb: pass_str: | SELECT foo.a, bar.b FROM foo POSITIONAL JOIN bar; configs: core: dialect: duckdb test_pass_cross_join_where_clickhouse: pass_str: | SELECT m.name AS name, g.genre AS genre FROM movies AS m CROSS JOIN genres AS g WHERE m.id = g.movie_id; configs: core: dialect: clickhouse test_pass_asof_join_clickhouse: pass_str: | SELECT t.*, q.* FROM trades t ASOF LEFT JOIN quotes q ON t.symbol = q.symbol AND t.time >= q.time; configs: core: dialect: clickhouse test_noop_delete: pass_str: | DELETE t1, t2 FROM t1 JOIN t2 JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; configs: core: dialect: mysql test_noop_cross_apply: pass_str: | SELECT DeptID, DeptName, DeptMgrID, EmpID, EmpLastName, EmpSalary FROM Departments d CROSS APPLY dbo.GetReports(d.DeptMgrID); configs: core: dialect: tsql test_noop_outer_apply: pass_str: | SELECT * FROM Department D OUTER APPLY dbo.fn_GetAllEmployeeOfADepartment(D.DepartmentID); configs: core: dialect: tsql test_pass_unnest: pass_str: | WITH Races AS ( SELECT "800M" AS Race, [ STRUCT("Rudisha" AS `Name`, [23.4, 26.3, 26.4, 26.1] AS Laps), STRUCT("Makhloufi" AS `Name`, [24.5, 25.4, 26.6, 26.1] AS Laps), STRUCT("Murphy" AS `Name`, [23.9, 26.0, 27.0, 26.0] AS Laps), STRUCT("Bosse" AS `Name`, [23.6, 26.2, 26.5, 27.1] AS Laps), STRUCT("Rotich" AS `Name`, [24.7, 25.6, 26.9, 26.4] AS Laps), STRUCT("Lewandowski" AS `Name`, [25.0, 25.7, 26.3, 27.2] AS Laps), STRUCT("Kipketer" AS `Name`, [23.2, 26.1, 27.3, 29.4] AS Laps), STRUCT("Berian" AS `Name`, [23.7, 26.1, 27.0, 29.3] AS Laps) ] AS Participants, ) SELECT Race, Participant, FROM Races AS R INNER JOIN UNNEST(R.Participants) AS Participant; configs: core: dialect: bigquery test_pass_lowercase_unnest: pass_str: | select ix, v from t left join unnest(t.value_list) as v with offset as ix configs: core: dialect: bigquery test_pass_natural_join: pass_str: | select foo.x from foo natural join bar configs: core: dialect: redshift sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP01.yml000066400000000000000000000131741503426445100235230ustar00rootroot00000000000000rule: CP01 test_fail_inconsistent_capitalisation_1: # Test that we don't have the "inconsistent" bug fail_str: SeLeCt 1 fix_str: SELECT 1 test_fail_inconsistent_capitalisation_2: fail_str: SeLeCt 1 from blah fix_str: SELECT 1 FROM blah test_fail_capitalisation_policy_lower: # Fix for https://github.com/sqlfluff/sqlfluff/issues/476 fail_str: SELECT * FROM MOO ORDER BY dt DESC fix_str: select * from MOO order by dt desc configs: rules: capitalisation.keywords: capitalisation_policy: lower 
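Editor's note: as a hedged, invented illustration (not part of the upstream fixtures), the case below extends the `lower` policy test above to show that the fix rewrites every keyword in the statement, including JOIN, ON and WHERE, while identifiers such as MOO are left untouched.

# Hypothetical editor example, mirroring the configs convention used above.
test_fail_capitalisation_policy_lower_many_keywords_editor_example:
  fail_str: SELECT a FROM MOO INNER JOIN bar ON MOO.id = bar.id WHERE a > 1
  fix_str: select a from MOO inner join bar on MOO.id = bar.id where a > 1
  configs:
    rules:
      capitalisation.keywords:
        capitalisation_policy: lower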
test_fail_capitalisation_policy_upper: # Fix for https://github.com/sqlfluff/sqlfluff/issues/476 fail_str: select * from MOO order by dt desc fix_str: SELECT * FROM MOO ORDER BY dt DESC configs: rules: capitalisation.keywords: capitalisation_policy: upper test_fail_capitalisation_policy_capitalise: # Test for capitalised casing fail_str: SELECT * FROM MOO ORDER BY dt DESC fix_str: Select * From MOO Order By dt Desc configs: rules: capitalisation.keywords: capitalisation_policy: capitalise test_fail_date_part_inconsistent_capitalisation: # Test that time unit capitalization is fixed fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR test_fail_date_part_capitalisation_policy_lower: # Test that capitalization policy is applied on time units fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: select dt + interval 2 day, interval 3 hour configs: rules: capitalisation.keywords: capitalisation_policy: lower test_fail_date_part_capitalisation_policy_upper: # Test that capitalization policy is applied on time units fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR configs: rules: capitalisation.keywords: capitalisation_policy: upper test_pass_date_part_consistent_capitalisation: # Test that correctly capitalized time units are left unchanged pass_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR test_pass_data_type_inconsistent_capitalisation: # Test that we don't have the "inconsistent" bug pass_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.keywords: capitalisation_policy: upper test_pass_bigquery_date: pass_str: SELECT DATE_ADD(date, INTERVAL 5 YEAR) AS display_date configs: core: dialect: bigquery rules: capitalisation.keywords: capitalisation_policy: upper test_pass_ignore_word: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words: select test_pass_ignore_words: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words: select,from test_pass_ignore_words_regex_simple: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: ^Se test_fail_ignore_words_regex_simple: fail_str: SeleCT 1 FrOM t_table fix_str: SeleCT 1 FROM t_table configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: ^Se test_pass_ignore_words_complex: pass_str: SeleCT 1 FrOM t_table configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: (^Se|^Fr) test_pass_ignore_templated_code_true: pass_str: | {{ "select" }} a FROM foo WHERE 1 configs: core: ignore_templated_areas: true test_fail_ignore_templated_code_false: fail_str: | {{ "select" }} a FROM foo WHERE 1 fix_str: | {{ "select" }} a from foo where 1 configs: core: ignore_templated_areas: false test_fail_snowflake_group_by_cube: fail_str: | SELECT state, city, sum((s.retail_price - p.wholesale_price) * s.quantity) AS profit FROM products AS p, sales AS s WHERE s.product_id = p.product_id GROUP BY cube (state, city) ORDER BY state, city NULLS LAST ; fix_str: | SELECT state, city, sum((s.retail_price - p.wholesale_price) * s.quantity) AS profit FROM products AS p, sales AS s WHERE s.product_id = p.product_id GROUP BY CUBE (state, city) ORDER BY state, city NULLS LAST ; configs: core: dialect: snowflake rules: capitalisation.keywords: capitalisation_policy: upper test_pass_ignore_null: pass_str: | SELECT null FROM foo 
WHERE 1 test_pass_ignore_true: pass_str: | SELECT true FROM foo WHERE 1 test_pass_ignore_false: pass_str: | SELECT false FROM foo WHERE 1 test_fail_bigquery_week: fail_str: SELECT LAST_DAY(col, WEEK(monday)) fix_str: SELECT LAST_DAY(col, WEEK(MONDAY)) configs: core: dialect: bigquery rules: capitalisation.keywords: capitalisation_policy: upper test_fail_select_lower: # Test for issue #3399, a bug in the core apply_fixes() function that surfaced # with various rules, including this one. fail_str: | select * FROM {{ source("ids","shop") }} fix_str: | SELECT * FROM {{ source("ids","shop") }} configs: core: dialect: tsql rules: capitalisation.keywords: capitalisation_policy: upper test_fail_select_lower_keyword_functions: # Test for issue #3520 fail_str: | SELECT cast(5 AS int) AS test1, coalesce(1, 2) AS test3 fix_str: | SELECT CAST(5 AS int) AS test1, COALESCE(1, 2) AS test3 configs: core: dialect: tsql rules: capitalisation.keywords: capitalisation_policy: upper sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP02.yml000066400000000000000000000242611503426445100235230ustar00rootroot00000000000000rule: CP02 test_pass_consistent_capitalisation_1: pass_str: SELECT a, b test_pass_consistent_capitalisation_2: pass_str: SELECT A, B test_pass_consistent_capitalisation_with_null: # Test that NULL is classed as a keyword and not an identifier pass_str: SELECT NULL, a test_pass_consistent_capitalisation_with_single_letter_upper: # Single-letter ambiguity: Upper vs Capitalise pass_str: SELECT A, Boo test_pass_consistent_capitalisation_with_single_word_snake: # Snake is refuted as ambiguous pass_str: SELECT apple, banana_split test_fail_consistent_capitalisation_with_single_word_pascal: # Single-word ambiguity: Pascal vs Capitalise fail_str: SELECT AppleFritter, Banana fix_str: SELECT APPLEFRITTER, BANANA test_fail_consistent_capitalisation_with_multiple_words_with_numbers: # Numbers count as part of words so following letter can be upper or lower fail_str: SELECT AppleFritter, Apple123fritter, Apple123Fritter fix_str: SELECT APPLEFRITTER, APPLE123FRITTER, APPLE123FRITTER test_pass_consistent_capitalisation_with_leading_underscore: pass_str: SELECT _a, b test_fail_inconsistent_capitalisation_lowercase_1: # Test that fixes are consistent, and that we capture # table names and aliases. fail_str: SELECT a, B AS C FROM FOO AS BAR fix_str: SELECT a, b AS c FROM foo AS bar test_fail_inconsistent_capitalisation_lowercase_2: # Test that consistency between columns and tables still counts. 
fail_str: SELECT a FROM FOO fix_str: SELECT a FROM foo test_fail_inconsistent_capitalisation_uppercase: fail_str: SELECT B, a FROM foo fix_str: SELECT B, A FROM FOO # PascalCase tests are based on this comment by @alanmcruickshank: # https://github.com/sqlfluff/sqlfluff/issues/820#issuecomment-787050507 test_pass_consistent_capitalisation_policy_pascal_1: pass_str: SELECT PascalCase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_2: pass_str: SELECT Pascalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_consistent_capitalisation_policy_pascal_3: fail_str: SELECT pascalCase fix_str: SELECT PascalCase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_4: pass_str: SELECT PasCalCaSe configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_5: pass_str: SELECT PAscalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_6: # This could be argued as not PascalCase but technically it is # unless we introduce a full dictionary of words to recognise # where word breaks are (an impossible task!). Also what about # abbreviations (e.g. NASA)? pass_str: SELECT PASCALCASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_1: fail_str: SELECT pascalcase fix_str: SELECT Pascalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_2: fail_str: SELECT pascal_case fix_str: SELECT Pascal_Case configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_3: # Similar to above, you could argue the fixed string is # Not really Pascal Case, but it's closer than it was! fail_str: SELECT pASCAL_CASE fix_str: SELECT PASCAL_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_4: fail_str: SELECT PasCalCase fix_str: SELECT pascalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: lower test_fail_consistent_capitalisation_policy_pascal_5: fail_str: SELECT PascalCaseNAME fix_str: SELECT PASCALCASENAME configs: rules: capitalisation.identifiers: extended_capitalisation_policy: upper test_fail_inconsistent_capitalisation_pascal_v_capitalise: # Pascal vs Capitalise fail_str: SELECT AppleFritter, Banana_split fix_str: SELECT AppleFritter, Banana_Split configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_camel_1: fail_str: SELECT Camelcase fix_str: SELECT camelcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: camel test_fail_inconsistent_capitalisation_policy_camel_2: fail_str: SELECT Camel_Case fix_str: SELECT camel_case configs: rules: capitalisation.identifiers: extended_capitalisation_policy: camel test_fail_inconsistent_capitalisation_policy_camel_3: # Similar to above, you could argue the fixed string is # Not really camelCase, but it's closer than it was!
fail_str: SELECT cAMEL_CASE fix_str: SELECT cAMEL_cASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: camel test_pass_policy_unquoted_identifiers_aliases_1: pass_str: SELECT a, B configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_2: pass_str: SELECT B, a configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_3: # See above comments in regards to whether this should # really be considered PascalCase (we treat as yes) pass_str: SELECT PASCAL_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_4: pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_5: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_6: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: upper unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_7: fail_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS UPPER_CASE fix_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS upper_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_8: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case FROM lower_case AS lower_case fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE FROM lower_case AS LOWER_CASE configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_column_aliases_1: pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS UPPER_CASE configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: column_aliases test_policy_unquoted_identifiers_column_aliases_2: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case FROM lower_case AS lower_case fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE FROM lower_case AS lower_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: column_aliases test_pass_ignore_word: pass_str: SELECT A, b configs: rules: capitalisation.identifiers: capitalisation_policy: upper ignore_words: b test_pass_consistent_capitalisation_properties_naked_identifier: pass_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_fail_inconsistent_capitalisation_properties_naked_identifier: # Test that fixes are consistent fail_str: SHOW TBLPROPERTIES customer (created.BY.user) fix_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_fail_inconsistent_capitalisation_properties_naked_identifier_2: fail_str: SHOW TBLPROPERTIES customer (Created.By.User) fix_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_pass_bigquery_safe_does_not_trigger: pass_str: SELECT SAFE.myFunction(1) AS col1 configs: core: dialect: bigquery
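Editor's note: a hedged, invented example (not part of the upstream fixture set) restating the default CP02 behaviour in a single case: with no `unquoted_identifiers_policy` override, an explicit `upper` policy rewrites column, table and alias identifiers alike.

# Hypothetical editor example using the same configs layout as above.
test_fail_upper_policy_all_identifiers_editor_example:
  fail_str: SELECT account_id FROM customers AS c
  fix_str: SELECT ACCOUNT_ID FROM CUSTOMERS AS C
  configs:
    rules:
      capitalisation.identifiers:
        extended_capitalisation_policy: upper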
test_pass_databricks_case_sensitive_property: pass_str: SET spark.databricks.delta.properties.defaults.enableChangeDataFeed = true; configs: core: dialect: databricks test_fail_snake_aliases: # Test for issue #5470 # similar to PascalCase, case logic defined in CP01, but tested in CP02 fail_str: | SELECT test1, test_2, testColumn3, TestColumn4, TESTCOLUMN5, TEST_COLUMN6, test_column_7_ fix_str: | SELECT test_1, test_2, test_column_3, test_column_4, testcolumn_5, test_column_6, test_column_7_ configs: core: dialect: tsql rules: capitalisation.identifiers: extended_capitalisation_policy: snake test_fail_camel_aliases: # Test for issue #5470 # similar to PascalCase, case logic defined in CP01, but tested in CP02 fail_str: | SELECT test1, test_2, testColumn3, TestColumn4, TESTCOLUMN5, TEST_COLUMN6, test_colUmn_7 fix_str: | SELECT test1, test_2, testColumn3, testColumn4, tESTCOLUMN5, tEST_cOLUMN6, test_colUmn_7 configs: core: dialect: tsql rules: capitalisation.identifiers: extended_capitalisation_policy: camel sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP02_LT01.yml000066400000000000000000000032471503426445100242640ustar00rootroot00000000000000rule: CP02,LT01 # Sanity tests test_fail_cp02_fail_lt01: fail_str: SELECT a,B fix_str: SELECT a, b test_fail_cp02_pass_lt01: fail_str: SELECT a, B fix_str: SELECT a, b test_pass_cp02_fail_lt01: fail_str: SELECT a,b fix_str: SELECT a, b test_pass_cp02_pass_lt01: pass_str: SELECT a, b # `ProblemHere` has two errors in the same location, but templating # previously caused issues with fixes. The two fixes should be # 1) Uppercase `PROBLEMHERE` # 2) Add a space after `PROBLEMHERE` and `(` test_fail_fix_cp02_lt01_with_templating_6678: fail_str: | create task ${env}_ENT_LANDING.SCHEMA_NAME.TASK_NAME warehouse=${lnd_hist_wkl_default} schedule='${repl_cdc_schedule}' as COPY INTO ${env}_ENT_LANDING.SCHEMA_NAME.ProblemHere( ONE_OR_MORE_COLUMN_NAMES_HERE ) FROM ( SELECT * FROM @${env}_ENT_COMMON.GLOBAL.FILEINGESTION_STAGE/file ) FILE_FORMAT = ( TYPE = JSON ) ON_ERROR = 'SKIP_FILE' ; fix_str: | create task ${env}_ENT_LANDING.SCHEMA_NAME.TASK_NAME warehouse = ${lnd_hist_wkl_default} schedule = '${repl_cdc_schedule}' as COPY INTO ${env}_ENT_LANDING.SCHEMA_NAME.PROBLEMHERE ( ONE_OR_MORE_COLUMN_NAMES_HERE ) FROM ( SELECT * FROM @${env}_ENT_COMMON.GLOBAL.FILEINGESTION_STAGE/file ) FILE_FORMAT = ( TYPE = JSON ) ON_ERROR = 'SKIP_FILE' ; configs: core: dialect: snowflake templater: placeholder templater: placeholder: param_style: flyway_var sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP03.yml000066400000000000000000000064521503426445100235260ustar00rootroot00000000000000rule: CP03 # Inconsistent capitalisation of functions test_fail_inconsistent_function_capitalisation_1: fail_str: SELECT MAX(id), min(id) from table fix_str: SELECT MAX(id), MIN(id) from table test_fail_inconsistent_function_capitalisation_2: fail_str: SELECT MAX(id), min(id) from table fix_str: SELECT max(id), min(id) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: lower test_bare_functions: fail_str: SELECT current_timestamp from table fix_str: SELECT CURRENT_TIMESTAMP from table configs: rules: capitalisation.functions: extended_capitalisation_policy: upper test_bare_functions_2: fail_str: SELECT current_timestamp, min(a) from table fix_str: SELECT CURRENT_TIMESTAMP, MIN(a) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: upper test_bare_functions_3: fail_str: SELECT current_timestamp, min(a) from table fix_str: SELECT 
Current_Timestamp, Min(a) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: pascal test_fail_capitalization_after_comma: fail_str: SELECT FLOOR(dt) ,count(*) FROM test fix_str: SELECT FLOOR(dt) ,COUNT(*) FROM test test_pass_fully_qualified_function_mixed_functions: pass_str: SELECT COUNT(*), project1.foo(value1) AS value2 test_pass_fully_qualified_function_pascal_case: pass_str: SELECT project1.FoO(value1) AS value2 test_pass_ignore_word: pass_str: SELECT MAX(id), min(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words: min test_pass_ignore_templated_code_true: pass_str: | SELECT {{ "greatest(a, b)" }}, GREATEST(i, j) configs: core: ignore_templated_areas: true test_fail_ignore_templated_code_false: fail_str: | SELECT {{ "greatest(a, b)" }}, GREATEST(i, j) fix_str: | SELECT {{ "greatest(a, b)" }}, greatest(i, j) configs: core: ignore_templated_areas: false test_pass_func_name_templated_literal_mix: # Issue 3022. This was actually a bug in linter.patch._iter_templated_patches(). pass_str: SELECT RO(), {{ "t" }}.func() test_pass_ignore_words_regex_simple: pass_str: SELECT MAX(id), f_test_udf(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words_regex: ^f_ test_pass_ignore_words_regex_complex: pass_str: SELECT MAX(id), f_test_udf(id), g_test_udf(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words_regex: (^f_|^g_) test_pass_ignore_words_regex_bigquery_simple: pass_str: SELECT MAX(id), project.dataset._f_test_udf(id) FROM TABLE1 configs: core: dialect: bigquery rules: capitalisation.functions: ignore_words_regex: ^_f_ test_pass_ignore_words_regex_bigquery_complex: pass_str: SELECT MAX(id), project.dataset._f_test_udf(id), `project.dataset._f_test_udf`(id) FROM TABLE1 configs: core: dialect: bigquery rules: capitalisation.functions: ignore_words_regex: (^_f_|\._f_) test_bare_functions_4: fail_str: SELECT Current_Timestamp, Min(a) from table fix_str: SELECT current_timestamp, min(a) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: snake sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP04.yml000066400000000000000000000014461503426445100235250ustar00rootroot00000000000000rule: CP04 test_fail_inconsistent_boolean_capitalisation_1: fail_str: SeLeCt true, FALSE, NULL fix_str: SeLeCt true, false, null test_fail_inconsistent_boolean_capitalisation_2: fail_str: SeLeCt TRUE, false, NULL fix_str: SeLeCt TRUE, FALSE, NULL test_pass_ignore_word: pass_str: SELECT true, FALSE, NULL configs: rules: capitalisation.literals: ignore_words: true test_fail_upper_boolean_capitalisation: fail_str: SeLeCt true, FALSE, NULL fix_str: SeLeCt TRUE, FALSE, NULL configs: rules: capitalisation.literals: capitalisation_policy: upper test_fail_lower_boolean_capitalisation: fail_str: SeLeCt TRUE, false, NULL fix_str: SeLeCt true, false, null configs: rules: capitalisation.literals: capitalisation_policy: lower sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CP05.yml000066400000000000000000000117611503426445100235270ustar00rootroot00000000000000rule: CP05 test_pass_default_consistent_lower: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did integer, name varchar(40), ts time with time zone ); test_pass_default_consistent_upper: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did INTEGER, name VARCHAR(40), ts TIME WITH TIME ZONE ); test_pass_default_consistent_capitalised: # Test that we don't have the "inconsistent" bug 
pass_str: | CREATE TABLE distributors ( did Integer, name Varchar(40), ts Time With Time Zone ); test_fail_default_consistent_pascal: # Attempting Pascal without config defaults to capitalize fail_str: | CREATE TABLE distributors ( did Integer, name VarChar(40), ts Time With Time Zone ); fix_str: | CREATE TABLE distributors ( did Integer, name Varchar(40), ts Time With Time Zone ); test_fail_data_type_inconsistent_capitalisation_1: # Test that we don't have the "inconsistent" bug fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id BIGINT); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_inconsistent_capitalisation_2: fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_inconsistent_capitalisation_3: fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id Bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: capitalise test_fail_data_type_capitalisation_policy_lower: fail_str: CREATE TABLE table1 (account_id BIGINT); fix_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_capitalisation_policy_lower_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two varchar(255)); fix_str: CREATE TABLE table1 (account_id bigint, column_two varchar(255)); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_capitalisation_policy_upper: fail_str: CREATE TABLE table1 (account_id bigint); fix_str: CREATE TABLE table1 (account_id BIGINT); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_upper_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two varchar(255)); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two VARCHAR(255)); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_capitalise: # Test for capitalised casing fail_str: CREATE TABLE table1 (account_id BIGINT); fix_str: CREATE TABLE table1 (account_id Bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: capitalise test_fail_data_type_capitalisation_policy_keywords_1: # Test cases where data types are keywords, not data_type_identifiers # See: https://github.com/sqlfluff/sqlfluff/pull/2121 fail_str: CREATE TABLE table1 (account_id BIGINT, column_two timestamp); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_keywords_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two timestamp with time zone); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP WITH TIME ZONE); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_pass_sparksql_complex_data_types: pass_str: | CREATE TABLE table_identifier( a STRUCT COMMENT 'col_comment', d MAP COMMENT 'col_comment', e ARRAY COMMENT 'col_comment' ); configs: core: dialect: sparksql rules: capitalisation.types: extended_capitalisation_policy: upper test_pass_bigquery_struct_params: pass_str: | CREATE TEMPORARY FUNCTION getTableInfo(payload STRING) RETURNS STRUCT LANGUAGE js AS ''' return 1 '''; configs: core: dialect: 
bigquery rules: capitalisation.types: extended_capitalisation_policy: upper # See https://github.com/sqlfluff/sqlfluff/issues/3277 test_pass_typless_structs_dont_trigger_rule: pass_str: | SELECT STRUCT( some_field, some_other_field ) AS col FROM table configs: core: dialect: bigquery rules: capitalisation.types: extended_capitalisation_policy: upper sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV01.yml000066400000000000000000000100221503426445100235200ustar00rootroot00000000000000rule: CV01 # tests test_pass_consistent_c_style_not_equal_to: pass_str: | SELECT * FROM X WHERE 1 != 2 test_pass_consistent_ansi_not_equal_to: pass_str: | SELECT * FROM X WHERE 1 <> 2 test_pass_consistent_less_than: pass_str: | SELECT * FROM X WHERE 1 < 2 test_pass_consistent_non_comparison: pass_str: | SELECT col1 AS "alias_<>" FROM X test_fail_consistent_c_style_not_equal_to_multi: fail_str: | SELECT * FROM X WHERE 1 != 2 AND 2 <> 1 AND 3 <> 1 fix_str: | SELECT * FROM X WHERE 1 != 2 AND 2 != 1 AND 3 != 1 test_fail_consistent_ansi_not_equal_to_multi: fail_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 != 1 AND 3 != 1 fix_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 <> 1 AND 3 <> 1 test_pass_consistent_c_style_not_equal_to_tsql: pass_str: | SELECT * FROM X WHERE 1 ! = 2 configs: core: dialect: tsql test_pass_consistent_ansi_not_equal_to_tsql: pass_str: | SELECT * FROM X WHERE 1 < -- some comment > 2 configs: core: dialect: tsql # c_style tests test_pass_c_style_not_equal_to: pass_str: | SELECT * FROM X WHERE 1 != 2 configs: rules: convention.not_equal: preferred_not_equal_style: "c_style" test_fail_c_style_not_equal_to: fail_str: | SELECT * FROM X WHERE 1 <> 2 fix_str: | SELECT * FROM X WHERE 1 != 2 configs: rules: convention.not_equal: preferred_not_equal_style: "c_style" test_pass_c_style_less_than: pass_str: | SELECT * FROM X WHERE 1 < 2 configs: rules: convention.not_equal: preferred_not_equal_style: "c_style" test_pass_c_style_non_comparison: pass_str: | SELECT col1 AS "alias_<>" FROM X configs: rules: convention.not_equal: preferred_not_equal_style: "c_style" test_fail_c_style_not_equal_to_multi: fail_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 <> 1 AND 3 != 1 fix_str: | SELECT * FROM X WHERE 1 != 2 AND 2 != 1 AND 3 != 1 configs: rules: convention.not_equal: preferred_not_equal_style: "c_style" test_pass_c_style_not_equal_to_tsql: pass_str: | SELECT * FROM X WHERE 1 ! = 2 configs: core: dialect: tsql rules: convention.not_equal: preferred_not_equal_style: "c_style" test_fail_c_style_not_equal_to_tsql: fail_str: | SELECT * FROM X WHERE 1 < -- some comment > 2 fix_str: | SELECT * FROM X WHERE 1 !
-- some comment = 2 configs: core: dialect: tsql rules: convention.not_equal: preferred_not_equal_style: "c_style" # ansi tests test_pass_ansi_not_equal_to: pass_str: | SELECT * FROM X WHERE 1 <> 2 configs: rules: convention.not_equal: preferred_not_equal_style: "ansi" test_fail_ansi_not_equal_to: fail_str: | SELECT * FROM X WHERE 1 != 2 fix_str: | SELECT * FROM X WHERE 1 <> 2 configs: rules: convention.not_equal: preferred_not_equal_style: "ansi" test_pass_ansi_less_than: pass_str: | SELECT * FROM X WHERE 1 < 2 configs: rules: convention.not_equal: preferred_not_equal_style: "ansi" test_pass_ansi_non_comparison: pass_str: | SELECT col1 AS "alias_<>" FROM X configs: rules: convention.not_equal: preferred_not_equal_style: "ansi" test_fail_ansi_not_equal_to_multi: fail_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 != 1 AND 3 != 1 fix_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 <> 1 AND 3 <> 1 configs: rules: convention.not_equal: preferred_not_equal_style: "ansi" test_pass_ansi_not_equal_to_tsql: pass_str: | SELECT * FROM X WHERE 1 < > 2 configs: core: dialect: tsql rules: convention.not_equal: preferred_not_equal_style: "ansi" test_fail_ansi_not_equal_to_tsql: fail_str: | SELECT * FROM X WHERE 1 ! -- some comment = 2 fix_str: | SELECT * FROM X WHERE 1 < -- some comment > 2 configs: core: dialect: tsql rules: convention.not_equal: preferred_not_equal_style: "ansi" sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV02.yml000066400000000000000000000005721503426445100235300ustar00rootroot00000000000000rule: CV02 test_pass_coalesce: pass_str: | SELECT coalesce(foo, 0) AS bar, FROM baz; test_fail_ifnull: fail_str: | SELECT ifnull(foo, 0) AS bar, FROM baz; fix_str: | SELECT COALESCE(foo, 0) AS bar, FROM baz; test_fail_nvl: fail_str: | SELECT nvl(foo, 0) AS bar, FROM baz; fix_str: | SELECT COALESCE(foo, 0) AS bar, FROM baz; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV03.yml000066400000000000000000000026441503426445100235330ustar00rootroot00000000000000rule: CV03 test_require_pass: pass_str: SELECT a, b, FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: require test_require_fail: fail_str: SELECT a, b FROM foo fix_str: SELECT a, b, FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: require test_forbid_pass: pass_str: SELECT a, b FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid test_forbid_fail: fail_str: SELECT a, b, FROM foo fix_str: SELECT a, b FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid test_fail_templated: # NOTE: Check no fix, because it's not safe. 
fail_str: | SELECT {% for col in ['a', 'b', 'c'] %} {{col}}, {% endfor %} FROM tbl fix_str: | SELECT {% for col in ['a', 'b', 'c'] %} {{col}}, {% endfor %} FROM tbl violations_after_fix: - code: CV03 description: Trailing comma in select statement forbidden name: "convention.select_trailing_comma" warning: false fixes: [] start_line_no: 3 start_line_pos: 16 start_file_pos: 59 end_line_no: 3 end_line_pos: 17 end_file_pos: 60 configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV04.yml000066400000000000000000000063011503426445100235260ustar00rootroot00000000000000rule: CV04 passes_on_count_star: pass_str: | select foo, count(*) from my_table group by foo passes_on_count_1: pass_str: | select foo, count(1) from my_table group by foo configs: &prefer_count_1 rules: convention.count_rows: prefer_count_1: true changes_count_0_to_count_star: fail_str: | select foo, count(0) from my_table group by foo fix_str: | select foo, count(*) from my_table group by foo passes_on_count_0: pass_str: | select foo, count(0) from my_table group by foo configs: &prefer_count_0 rules: convention.count_rows: prefer_count_0: true passes_on_count_1_if_both_present: pass_str: | select foo, count(1) from my_table group by foo configs: &prefer_both rules: convention.count_rows: prefer_count_0: true prefer_count_1: true changes_to_count_1_if_both_present: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(1) from my_table group by foo configs: *prefer_both changes_count_1_to_count_star: fail_str: | select foo, count(1) from my_table group by foo fix_str: | select foo, count(*) from my_table group by foo handles_whitespaces: fail_str: | select foo, count( 1 ) from my_table group by foo fix_str: | select foo, count( * ) from my_table group by foo changes_count_star_to_count_0: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(0) from my_table group by foo configs: *prefer_count_0 changes_count_star_to_count_1: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(1) from my_table group by foo configs: *prefer_count_1 changes_count_1_to_count_0: fail_str: | select foo, count(1) from my_table group by foo fix_str: | select foo, count(0) from my_table group by foo configs: *prefer_count_0 changes_count_0_to_count_1: fail_str: | select foo, count(0) from my_table group by foo fix_str: | select foo, count(1) from my_table group by foo configs: *prefer_count_1 changes_count_star_to_count_1_handle_new_line: fail_str: | select foo, count( * ) from my_table group by foo fix_str: | select foo, count( 1 ) from my_table group by foo configs: *prefer_count_1 no_false_positive_on_count_col: pass_str: | select foo, count(bar) from my_table no_false_positive_on_expression: pass_str: | select foo, count(1 + 10) from my_table sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV05.yml000066400000000000000000000037451503426445100235400ustar00rootroot00000000000000rule: CV05 test_is_null: pass_str: | SELECT a FROM foo WHERE a IS NULL test_is_not_null: pass_str: | SELECT a FROM foo WHERE a IS NOT NULL test_not_equals_null_upper: fail_str: | SELECT a FROM foo WHERE a <> NULL fix_str: | SELECT a FROM foo WHERE a IS NOT NULL test_not_equals_null_multi_nulls: fail_str: | SELECT a FROM foo WHERE a <> NULL AND b != NULL AND c = 'foo' fix_str: | SELECT a FROM foo WHERE a IS NOT NULL AND b IS NOT NULL AND c = 'foo' test_not_equals_null_lower: fail_str: | 
SELECT a FROM foo WHERE a <> null fix_str: | SELECT a FROM foo WHERE a is not null test_equals_null_spaces: fail_str: | SELECT a FROM foo WHERE a = NULL fix_str: | SELECT a FROM foo WHERE a IS NULL test_equals_null_no_spaces: fail_str: | SELECT a FROM foo WHERE a=NULL fix_str: | SELECT a FROM foo WHERE a IS NULL test_complex_case_1: fail_str: | SELECT a FROM foo WHERE a = b or (c > d or e = NULL) fix_str: | SELECT a FROM foo WHERE a = b or (c > d or e IS NULL) test_set_clause: pass_str: | UPDATE table1 SET col = NULL WHERE col = "" test_bigquery_set_options: pass_str: | ALTER TABLE table SET OPTIONS (expiration_timestamp = NULL) ; configs: core: dialect: bigquery test_tsql_exec_clause: pass_str: | exec something @param1 = 'blah', @param2 = 'blah', @param3 = null, @param4 = 'blah'; configs: core: dialect: tsql test_tsql_alternate_alias_syntax: pass_str: | select name = null from t configs: core: dialect: tsql test_exclude_constraint: pass_str: | alter table abc add constraint xyz exclude (field WITH =); configs: core: dialect: postgres test_mysql_system_variable: pass_str: | SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0; configs: core: dialect: mysql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV06.yml000066400000000000000000000233011503426445100235270ustar00rootroot00000000000000rule: CV06 test_pass_semi_colon_same_line_default: pass_str: | SELECT a FROM foo; test_pass_semi_colon_custom_newline: pass_str: | SELECT a FROM foo; configs: rules: convention.terminator: multiline_newline: true test_fail_semi_colon_same_line_custom_newline: fail_str: | SELECT a FROM foo; fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: multiline_newline: true test_pass_no_semi_colon_default: pass_str: | SELECT a FROM foo test_pass_no_semi_colon_custom_newline: pass_str: | SELECT a FROM foo configs: rules: convention.terminator: multiline_newline: true test_fail_no_semi_colon_custom_require: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo; configs: rules: convention.terminator: require_final_semicolon: true test_fail_no_semi_colon_custom_require_oneline: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_no_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_pass_multi_statement_semi_colon_default: pass_str: | SELECT a FROM foo; SELECT b FROM bar; test_pass_multi_statement_semi_colon_custom_oneline: pass_str: | SELECT a FROM foo; SELECT b FROM bar; configs: rules: convention.terminator: multiline_newline: true test_fail_multi_statement_semi_colon_custom_multiline: fail_str: | SELECT a FROM foo; SELECT b FROM bar; fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: multiline_newline: true test_pass_multi_statement_no_trailing_semi_colon_default: pass_str: | SELECT a FROM foo; SELECT b FROM bar test_fail_multi_statement_no_trailing_semi_colon_custom_require: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo; SELECT b FROM bar; configs: rules: convention.terminator: require_final_semicolon: true test_fail_multi_statement_no_trailing_semi_colon_custom_require_oneline: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo; SELECT b FROM bar; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true
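Editor's note: a hedged, invented example (not part of the upstream fixture set) to underline what `multiline_newline` keys off: the rule appears to consider whether the final statement spans multiple lines, so a one-line statement keeps its semicolon inline and passes unchanged even with the option enabled.

# Hypothetical editor example following the configs convention above.
test_pass_one_line_statement_inline_semi_colon_editor_example:
  pass_str: |
    SELECT 1 + 1;
  configs:
    rules:
      convention.terminator:
        multiline_newline: true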
test_fail_multi_statement_no_trailing_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_space_semi_colon_default: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo; test_fail_newline_semi_colon_default: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo; test_pass_newline_semi_colon_custom_newline: pass_str: | SELECT a FROM foo ; configs: rules: convention.terminator: multiline_newline: true test_fail_multi_statement_semi_colon_default: fail_str: | SELECT a FROM foo ; SELECT b FROM bar ; fix_str: | SELECT a FROM foo; SELECT b FROM bar; test_fail_multi_statement_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo ; SELECT b FROM bar ; fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_multiple_newlines_semi_colon_custom_require_newline: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_final_semi_colon_same_line_inline_comment: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo; -- inline comment configs: rules: convention.terminator: require_final_semicolon: true test_fail_final_semi_colon_same_line_inline_comment_custom_oneline: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo; -- inline comment configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_final_semi_colon_newline_inline_comment_custom_multiline: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_same_line_inline_comment: fail_str: | SELECT a FROM foo -- inline comment ; fix_str: | SELECT a FROM foo; -- inline comment test_fail_same_line_multiple_inline_comment: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo; -- inline comment #1 -- inline comment #2 test_pass_newline_inline_comment: pass_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_inline_comment: fail_str: | SELECT a FROM foo -- inline comment ; fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_multiple_inline_comments_custom_oneline: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo; -- inline comment #1 -- inline comment #2 configs: rules: convention.terminator: multiline_newline: true test_fail_newline_multiple_inline_comments_custom_multiline: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo -- inline comment #1 ; -- inline comment #2 configs: rules: convention.terminator: multiline_newline: true test_fail_newline_trailing_inline_comment: fail_str: | SELECT a FROM foo ; -- inline comment fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_preceding_block_comment_custom_oneline: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar; /* multiline comment */ 
configs: rules: convention.terminator: multiline_newline: true test_fail_newline_preceding_block_comment_custom_multiline: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_trailing_block_comment: fail_str: | SELECT foo FROM bar; /* multiline comment */ fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_block_comment_semi_colon_before: fail_str: | SELECT foo FROM bar; /* multiline comment */ fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_block_comment_semi_colon_after: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_create_table: # https://github.com/sqlfluff/sqlfluff/issues/2268 fail_str: | CREATE TABLE my_table ( id INTEGER ); fix_str: | CREATE TABLE my_table ( id INTEGER ) ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_create_table_inline_comment: fail_str: | CREATE TABLE my_table ( id INTEGER ); --inline comment fix_str: | CREATE TABLE my_table ( id INTEGER ) --inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_whitespace_after_simple_select: fail_str: | SELECT 1 ; fix_str: | SELECT 1; test_fail_whitespace_after_snowflake_set: fail_str: | SET foo = (SELECT foo FROM foo.foo) ; fix_str: | SET foo = (SELECT foo FROM foo.foo); configs: core: dialect: snowflake test_fail_templated_fix_crosses_block_boundary: # The rule wants to move the semicolon to the same line as the SELECT, but # the core linter prevents it because it crosses a template block boundary. 
fail_str: | {% if True %} SELECT 1 {% else %} SELECT 2 {% endif %} ; configs: rules: convention.terminator: require_final_semicolon: true test_pass_empty_file: pass_str: "" test_pass_empty_file_with_require_final_semicolon: pass_str: "" configs: rules: convention.terminator: require_final_semicolon: true test_pass_file_with_only_comments: pass_str: | -- just an empty file test_pass_file_with_only_comments_with_require_final_semicolon: pass_str: | -- just an empty file configs: rules: convention.terminator: require_final_semicolon: true sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV07.yml000066400000000000000000000035241503426445100235350ustar00rootroot00000000000000rule: CV07 test_pass_no_outer_brackets: pass_str: | SELECT foo FROM bar test_fail_outer_brackets: fail_str: | (SELECT foo FROM bar) fix_str: | SELECT foo FROM bar test_fail_outer_brackets_inner_subquery: fail_str: | (SELECT foo FROM (select * from bar)) fix_str: | SELECT foo FROM (select * from bar) test_pass_set_statement_brackets: pass_str: | (SELECT 1) UNION (SELECT 1) test_pass_no_outer_brackets_tsql: pass_str: | SELECT foo FROM bar configs: core: dialect: tsql test_fail_outer_brackets_tsql: fail_str: | (SELECT foo FROM bar) fix_str: | SELECT foo FROM bar configs: core: dialect: tsql test_fail_outer_brackets_inner_subquery_tsql: fail_str: | (SELECT foo FROM (select * from bar)) fix_str: | SELECT foo FROM (select * from bar) configs: core: dialect: tsql test_pass_begin_end_statement_brackets_tsql: pass_str: | BEGIN (SELECT 1) END configs: core: dialect: tsql test_fail_leading_trailing_whitespace: # This previously caused the post-fix parse check to fail. fail_str: "(\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n);\n" # Yes, the formatting looks bad, but that's because we're only running CV07 # here. In the real world, other rules will tidy up the formatting. fix_str: "\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n;\n" test_fail_leading_whitespace_and_comment: fail_str: "( -- This\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n)\n" # Yes, the formatting looks bad, but that's because we're only running CV07 # here. In the real world, other rules will tidy up the formatting. 
fix_str: " -- This\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n\n" sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV08.yml000066400000000000000000000053571503426445100235440ustar00rootroot00000000000000rule: CV08 test_fail_right_join: fail_str: | SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id; test_pass_left_join: pass_str: | SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id; test_pass_inner_join: pass_str: | SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id; test_fail_right_and_right_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id RIGHT JOIN baz ON foo.baz_id = baz.id; test_fail_right_and_left_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id LEFT JOIN baz ON foo.baz_id = baz.id; test_fail_right_and_inner_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id INNER JOIN baz ON foo.baz_id = baz.id; test_pass_left_inner_join: pass_str: | SELECT foo.col1, bar.col2, baz.col3 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id INNER JOIN baz ON foo.baz_id = baz.id; test_fail_subquery_right_join: fail_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id ); test_pass_subquery_left_join: pass_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id ); test_pass_subquery_inner_join: pass_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id ); test_fail_with_right_join: fail_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; test_pass_with_left_join: pass_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; test_pass_with_inner_join: pass_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV09.yml000066400000000000000000000120371503426445100235360ustar00rootroot00000000000000rule: CV09 test_pass_default_none: pass_str: | SELECT col1 FROM None test_fail_deny_word: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_fail_deny_word_case_difference1: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: Deprecated_Table test_fail_deny_word_case_difference2: fail_str: | SELECT col1 FROM Deprecated_Table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_fail_multiple_deny_words1: fail_str: | SELECT myOldFunction(col1) FROM table1 configs: rules: convention.blocked_words: blocked_words: deprecated_table,myoldFunction test_fail_multiple_deny_words2: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: deprecated_table,myoldFunction test_pass_not_complete_match: pass_str: | SELECT col1 FROM deprecated_table1 configs: rules: convention.blocked_words: blocked_words: deprecated_table test_pass_is_comment: pass_str: | -- deprecated_table SELECT col1 FROM new_table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_pass_in_comment: pass_str: | -- This used to use the deprecated_table SELECT col1 FROM new_table configs: rules: convention.blocked_words: 
blocked_words: deprecated_table test_fail_bool: fail_str: | CREATE TABLE myschema.t1 (a BOOL); configs: core: dialect: exasol rules: convention.blocked_words: blocked_words: bool test_pass_bool: pass_str: | CREATE TABLE myschema.t1 (a BOOLEAN); configs: core: dialect: exasol rules: convention.blocked_words: blocked_words: bool test_pass_bigquery: pass_str: | SELECT * FROM `owner.schema.table_2022_07_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: ^.*(2022_06_01|2022_05_01).*$ test_fail_bigquery: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: ^.*(2022_06_01|2022_05_01).*$ test_fail_bigquery2: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: .*(2022_06_01|2022_05_01).* test_fail_bigquery3: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (2022_06_01|2022_05_01) test_pass_comment_word1: pass_str: | SELECT * FROM table1 -- TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_word2: pass_str: | SELECT * FROM table1 # TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_word3: pass_str: | SELECT * FROM table1 /* TABLESAMPLE SYSTEM (.05 PERCENT) */ configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_regex1: pass_str: | SELECT * FROM table1 -- TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_comment_regex2: pass_str: | SELECT * FROM table1 # TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_comment_regex3: pass_str: | SELECT * FROM table1 /* TABLESAMPLE SYSTEM (.05 PERCENT) */ configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_match_source1: pass_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: true rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: true test_pass_match_source2: pass_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: false rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: false test_fail_match_source1: fail_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: false rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: true sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV10.yml000066400000000000000000000224021503426445100235230ustar00rootroot00000000000000rule: CV10 test_fail_result_of_fix_is_valid_bigquery: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: bigquery test_fail_result_of_fix_is_valid_hive: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: hive test_fail_result_of_fix_is_valid_mysql: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" 
configs: core: dialect: mysql test_fail_result_of_fix_is_valid_sparksql: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: sparksql test_pass_preferred_triple_quotes: pass_str: | SELECT """some_string""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_alternate_triple_quotes: fail_str: | SELECT '''some_string''' fix_str: | SELECT """some_string""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_unnecessary_escaping: fail_str: | SELECT 'unnecessary \"\"escaping', "unnecessary \'\'escaping" fix_str: | SELECT 'unnecessary ""escaping', "unnecessary ''escaping" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_bigquery_string_prefixes: fail_str: | SELECT r'some_string', b'some_string', R'some_string', B'some_string' fix_str: | SELECT r"some_string", b"some_string", R"some_string", B"some_string" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_bigquery_string_prefixes_when_style_is_consistent: fail_str: | SELECT r'some_string', b"some_string" fix_str: | SELECT r'some_string', b'some_string' configs: core: dialect: bigquery test_fail_triple_quoted_strings_with_quotes_in_them: fail_str: | SELECT """Strings with "" in them""", '''Strings with "" in them''' fix_str: | SELECT """Strings with "" in them""", """Strings with "" in them""" configs: core: dialect: bigquery test_fail_triple_quoted_strings_dont_remove_escapes_single_quotes: fail_str: | SELECT """Strings escaped quotes \" and \' in them""", '''Strings escaped quotes \" and \' in them''' fix_str: | SELECT '''Strings escaped quotes \" and \' in them''', '''Strings escaped quotes \" and \' in them''' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: single_quotes test_fail_triple_quoted_strings_dont_remove_escapes_double_quotes: fail_str: | SELECT """Strings escaped quotes \" and \' in them""", '''Strings escaped quotes \" and \' in them''' fix_str: | SELECT """Strings escaped quotes \" and \' in them""", """Strings escaped quotes \" and \' in them""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_edge_case_triple_quoted_string_ending_with_double_quote: # Test that a trailing preferred quote in triple quote scenario doesn't break fail_str: | SELECT '''Here's a "''', '''Here's a " ''' fix_str: | SELECT '''Here's a "''', """Here's a " """ configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_lots_of_quotes: # Test that we can handle complex quoting scenarios pass_str: | SELECT '\\""', "\\''" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_lots_of_quotes: # Test that we can handle complex quoting scenarios fail_str: | SELECT 'Lots of \\\\\\\\\'quotes\'' fix_str: | SELECT "Lots of \\\\\\\\'quotes'" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_quote_replace_in_raw_strings: # Test that we can handle complex quoting scenarios fail_str: | SELECT r'Tricky "quote', r'Not-so-tricky \"quote' fix_str: | SELECT r'Tricky "quote',
r"Not-so-tricky \"quote" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_dollar_quoted_strings_are_ignored: # Test that we don't interfere with dollar quoted strings pass_str: | SELECT 'some string', $$some_other_string$$ configs: core: dialect: postgres rules: convention.quoted_literals: force_enable: true preferred_quoted_literal_style: single_quotes test_pass_date_constructor_strings_are_ignored_1: # Test that we don't interfere with date constructor strings pass_str: | SELECT "quoted string", DATE'some string' test_pass_date_constructor_strings_are_ignored_2: # Test that we don't interfere with date constructor strings pass_str: | SELECT DATE'some string' configs: rules: convention.quoted_literals: force_enable: true preferred_quoted_literal_style: double_quotes test_pass_empty_string: pass_str: | SELECT "" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_empty_string: fail_str: | SELECT '' fix_str: | SELECT "" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_simple: pass_str: | SELECT "{{ 'a string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_simple: fail_str: | SELECT '{{ "a string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_complex: pass_str: | SELECT "this_is_a_lintable_{{ 'string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_complex: fail_str: | SELECT 'this_is_a_lintable_{{ "string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_with_multiple_templates: pass_str: | SELECT "this_{{ 'is' }}_{{ 'a_lintable' }}_{{ 'string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_with_multiple_templates: fail_str: | SELECT 'this_{{ "is" }}_{{ "a_lintable" }}_{{ "string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_inside_blocks: fail_str: | SELECT {% if true %} '{{ "another_templated_string" }}' {% endif %} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_fully_templated_quoted_literals_are_ignored: pass_str: | SELECT {{ "'a_non_lintable_string'" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_1: pass_str: | SELECT '{{ "string' FROM table1" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_2: pass_str: | {{ "SELECT 'stri" -}}ng' FROM table1 configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: 
double_quotes test_pass_prefix_chars_are_correctly_detected_as_unlintable: pass_str: | SELECT r{{ "''" }}, r{{ "'project' FROM table1" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV11.yml000066400000000000000000000224571503426445100235360ustar00rootroot00000000000000rule: CV11 test_pass_cast: pass_str: | select cast(1 as varchar) as bar from foo; test_pass_casting_operator: pass_str: | select 1::varchar as bar from foo; test_pass_multi_casting_operator: pass_str: | select 1::int::varchar as bar from foo; test_pass_convert: pass_str: | select convert(varchar, 1) as bar from foo; test_pass_3_argument_convert: pass_str: | select convert(varchar, 1, 126) as bar from foo; # maybe someday we can have fixes for cast and convert with comments test_pass_convert_with_comment: pass_str: | select convert( -- convert the value int, /* to an integer */ 1) as bar; test_pass_cast_with_comment: pass_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; test_fail_cast_with_comment_when_config_is_set_to_convert: fail_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; configs: rules: convention.casting_style: preferred_type_casting_style: convert test_fail_cast_with_comment_when_config_is_set_to_shorthand: fail_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_3_argument_convert_when_config_is_set_to_cast: fail_str: | select convert(varchar, 1, 126) as bar from foo; configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_3_argument_convert_when_config_is_set_to_shorthand: fail_str: | select convert(varchar, 1, 126) as bar from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_inconsistent_type_casting_prior_convert: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1) as bar, convert(text, convert(int, 100)), convert(text, 10) as coo from foo; test_fail_inconsistent_type_casting_with_comment: fail_str: | select cast(10 as text) as coo, convert( -- Convert the value int, /* to an integer */ 1) as bar, 100::int::text from foo; fix_str: | select cast(10 as text) as coo, convert( -- Convert the value int, /* to an integer */ 1) as bar, cast(cast(100 as int) as text) from foo; test_fail_inconsistent_type_casting_prior_cast: fail_str: | select cast(10 as text) as coo, convert(int, 1) as bar, 100::int::text, from foo; fix_str: | select cast(10 as text) as coo, cast(1 as int) as bar, cast(cast(100 as int) as text), from foo; test_fail_inconsistent_type_casting_prior_cast_3_arguments_convert: fail_str: | select cast(10 as text) as coo, convert(int, 1, 126) as bar, 100::int::text from foo; fix_str: | select cast(10 as text) as coo, convert(int, 1, 126) as bar, cast(cast(100 as int) as text) from foo; test_fail_inconsistent_type_casting_prior_convert_cast_with_comment: fail_str: | select convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo, 100::int::text from foo; fix_str: | select convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo, convert(text, convert(int, 100)) from foo; test_fail_inconsistent_type_casting_prior_shorthand: fail_str: | select 100::int::text, cast(10 as text) as coo, convert(int, 1) as bar from foo; 
fix_str: | select 100::int::text, 10::text as coo, 1::int as bar from foo; test_fail_inconsistent_type_casting_prior_shorthand_3_arguments_convert: fail_str: | select 100::int::text, convert(int, 1, 126) as bar, cast(10 as text) as coo from foo; fix_str: | select 100::int::text, convert(int, 1, 126) as bar, 10::text as coo from foo; test_fail_inconsistent_type_casting_prior_shorthand_cast_with_comment: fail_str: | select 100::int::text, convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo from foo; fix_str: | select 100::int::text, 126::int as bar, cast( 1 /* cast the value to an integer */ as int) as coo from foo; test_fail_inconsistent_type_casting_when_config_cast: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select cast(1 as int) as bar, cast(cast(100 as int) as text), cast(10 as text) as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_inconsistent_type_casting_3_arguments_convert_when_config_cast: fail_str: | select convert(int, 1, 126) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1, 126) as bar, cast(cast(100 as int) as text), cast(10 as text) as coo from foo; violations_after_fix: - code: CV11 description: Used type casting style is different from the preferred type casting style. name: "convention.casting_style" warning: false fixes: [] start_line_no: 2 start_line_pos: 5 start_file_pos: 11 end_line_no: 2 end_line_pos: 25 end_file_pos: 31 configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_inconsistent_type_casting_when_config_convert: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1) as bar, convert(text, convert(int, 100)), convert(text, 10) as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: convert test_fail_inconsistent_type_casting_when_config_shorthand: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select 1::int as bar, 100::int::text, 10::text as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_inconsistent_type_casting_3_arguments_convert_when_config_shorthand: fail_str: | select convert(int, 1, 126) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1, 126) as bar, 100::int::text, 10::text as coo from foo; violations_after_fix: - code: CV11 description: Used type casting style is different from the preferred type casting style. 
name: "convention.casting_style" warning: false fixes: [] start_line_no: 2 start_line_pos: 5 start_file_pos: 11 end_line_no: 2 end_line_pos: 25 end_file_pos: 31 configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_pass_when_dialect_is_teradata: pass_str: | select convert(varchar, 1) as bar from foo; configs: core: dialect: teradata test_fail_parenthesize_expression_when_config_shorthand_from_cast: fail_str: | select id::int, cast(calendar_date||' 11:00:00' as timestamp) as calendar_datetime from foo; fix_str: | select id::int, (calendar_date||' 11:00:00')::timestamp as calendar_datetime from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_parenthesize_expression_when_config_shorthand_from_convert: fail_str: | select id::int, convert(timestamp, calendar_date||' 11:00:00') as calendar_datetime from foo; fix_str: | select id::int, (calendar_date||' 11:00:00')::timestamp as calendar_datetime from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_snowflake_semi_structured_cast_4453: # https://github.com/sqlfluff/sqlfluff/issues/4453 fail_str: | select (trim(value:Longitude::varchar))::double as longitude; select col:a.b:c::varchar as bar; fix_str: | select cast((trim(cast(value:Longitude as varchar))) as double) as longitude; select cast(col:a.b:c as varchar) as bar; configs: core: dialect: snowflake rules: convention.casting_style: preferred_type_casting_style: cast test_pass_macro_cast: # If a cast is in a templated section, it shouldn't # mean the ones in the file are inconsistent. pass_str: | {% macro cast_macro(col) %} CAST(col as int) {% endmacro %} select a, {{ cast_macro('b') }}, c::date as d from tbl sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/CV12.yml000066400000000000000000000061161503426445100235310ustar00rootroot00000000000000rule: CV12 test_noop_select_comma: # this syntax is currently not covered by CV12 pass_str: | SELECT a.*, b.* FROM a, b test_noop_no_where_clause: pass_str: | SELECT foo.a, bar.b FROM foo JOIN bar test_noop_delete: pass_str: | DELETE t1, t2 FROM t1 JOIN t2 JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; configs: core: dialect: mysql test_noop_condition: pass_str: | SELECT foo.a, bar.b FROM foo LEFT JOIN bar ON foo.x = bar.x test_noop_crossjoin: pass_str: | SELECT foo.a, bar.b FROM foo CROSS JOIN bar WHERE bar.x > 3 test_noop_using: pass_str: | SELECT foo.id, bar.id FROM foo LEFT JOIN bar USING (id) configs: core: dialect: snowflake test_noop_global_cross_join_clickhouse: pass_str: | SELECT foo.a, bar.b FROM foo GLOBAL CROSS JOIN bar; configs: core: dialect: clickhouse test_noop_positional_join_duckdb: pass_str: | SELECT foo.a, bar.b FROM foo POSITIONAL JOIN bar WHERE bar.x > 3; configs: core: dialect: duckdb test_noop_outer_apply_tsql: pass_str: | SELECT foo.a, bar.b FROM foo OUTER APPLY bar(foo.a) AS bar WHERE bar.x > 3; configs: core: dialect: tsql test_fail_unqualified: # ambiguous, fail without fix fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE a = b test_fail_missing_clause: fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE foo.x = bar.y fix_str: | SELECT foo.a, bar.b FROM foo JOIN bar ON foo.x = bar.y test_fail_missing_clause_and_stmt: fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE foo.x = bar.y AND foo.x = 3 fix_str: | SELECT foo.a, bar.b FROM foo JOIN bar ON foo.x = bar.y WHERE foo.x = 3 test_fail_missing_clause_and_stmt_bracketed: fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE (foo.x = 
bar.y) AND foo.t = 3 fix_str: | SELECT foo.a, bar.b FROM foo JOIN bar ON (foo.x = bar.y) WHERE foo.t = 3 test_fail_missing_clause_two_joins: fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar JOIN baz WHERE foo.x = bar.y AND foo.x = baz.t AND foo.c = 3 fix_str: | SELECT foo.a, bar.b FROM foo JOIN bar ON foo.x = bar.y JOIN baz ON foo.x = baz.t WHERE foo.c = 3 test_fail_not_simplifable: # Fail without fix fail_str: | SELECT foo.a, bar.b FROM foo JOIN bar WHERE (foo.x = bar.y) OR foo.x != foo.z test_fail_missing_clause_and_stmt_subquery: fail_str: | SELECT foo.a, bar.b FROM (SELECT a, x FROM t1) AS foo JOIN (SELECT b, y FROM t2) AS bar WHERE foo.x = bar.y AND foo.x = 3 fix_str: | SELECT foo.a, bar.b FROM (SELECT a, x FROM t1) AS foo JOIN (SELECT b, y FROM t2) AS bar ON foo.x = bar.y WHERE foo.x = 3 test_fail_missing_clause_and_stmt_qualified: fail_str: | SELECT foo.a, bar.b FROM schema.foo JOIN schema.bar WHERE schema.foo.x = schema.bar.y AND schema.foo.x = 3 fix_str: | SELECT foo.a, bar.b FROM schema.foo JOIN schema.bar ON schema.foo.x = schema.bar.y WHERE schema.foo.x = 3 test_fail_join_with_bracketed_join: fail_str: | SELECT * FROM bar JOIN ( foo1 JOIN foo2 ON (foo1.id = foo2.id) ) WHERE bar.id = foo1.id; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/JJ01.yml000066400000000000000000000035221503426445100235200ustar00rootroot00000000000000rule: JJ01 test_simple: pass_str: SELECT 1 from {{ ref('foo') }} test_simple_modified: # Test that the plus/minus notation works fine. pass_str: SELECT 1 from {%+ if true -%} foo {%- endif %} test_simple_modified_fail: # Test that the plus/minus notation works fine. fail_str: SELECT 1 from {%+if true-%} {{ref('foo')}} {%-endif%} fix_str: SELECT 1 from {%+ if true -%} {{ ref('foo') }} {%- endif %} test_fail_jinja_tags_no_space: fail_str: SELECT 1 from {{ref('foo')}} fix_str: SELECT 1 from {{ ref('foo') }} test_fail_jinja_tags_multiple_spaces: fail_str: SELECT 1 from {{ ref('foo') }} fix_str: SELECT 1 from {{ ref('foo') }} test_fail_jinja_tags_no_space_2: fail_str: SELECT 1 from {{+ref('foo')-}} fix_str: SELECT 1 from {{+ ref('foo') -}} test_pass_newlines: # It's ok if there are newlines. pass_str: SELECT 1 from {{ ref('foo') }} test_fail_templated_segment_contains_leading_literal: fail_str: | SELECT user_id FROM `{{"gcp_project"}}.{{"dataset"}}.campaign_performance` fix_str: | SELECT user_id FROM `{{ "gcp_project" }}.{{ "dataset" }}.campaign_performance` configs: core: dialect: bigquery test_fail_segment_contains_multiple_templated_slices_last_one_bad: fail_str: CREATE TABLE `{{ "project" }}.{{ "dataset" }}.{{"table"}}` fix_str: CREATE TABLE `{{ "project" }}.{{ "dataset" }}.{{ "table" }}` configs: core: dialect: bigquery test_fail_jinja_tags_no_space_no_content: fail_str: SELECT {{""-}}1 fix_str: SELECT {{ "" -}}1 test_fail_jinja_tags_across_segment_boundaries: fail_str: SELECT a{{-"1 + b"}}2 fix_str: SELECT a{{- "1 + b" }}2 test_pass_python_templater: pass_str: SELECT * FROM hello.{my_table}; configs: core: templater: python templater: python: context: my_table: foo sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-alignment.yml000066400000000000000000000243761503426445100255220ustar00rootroot00000000000000rule: LT01 test_excess_space_without_align_alias: fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo configs: # This is the default config but we're being explicit # here for testing. 
layout: type: alias_expression: spacing_before: single test_excess_space_with_align_alias: # NOTE: The config here shouldn't move the table alias fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar configs: &align_alias layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_missing_keyword_with_align_alias: fail_str: | SELECT a first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo fix_str: | SELECT a first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo configs: *align_alias test_skip_alias_with_align_alias: fail_str: | SELECT a , b , (a + b) / 2 FROM foo fix_str: | SELECT a, b, (a + b) / 2 FROM foo configs: *align_alias test_excess_space_with_align_alias_wider: # NOTE: The config here SHOULD move the table alias # NOTE: The combined LT01 also fixes the missing space # between `USING` and `(a)`. fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS first_table JOIN my_tbl AS second_table USING(a) fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS first_table JOIN my_tbl AS second_table USING (a) configs: &align_alias_wider layout: type: alias_expression: spacing_before: align align_within: select_statement align_scope: bracketed test_align_alias_boundary: # The alias inside the expression shouldn't move. fail_str: | SELECT a AS first_column, (SELECT b AS c) AS second_column fix_str: | SELECT a AS first_column, (SELECT b AS c) AS second_column configs: *align_alias test_align_alias_inline_pass: # The aliases on the same line shouldn't panic. pass_str: SELECT a AS b, c AS d FROM tbl configs: *align_alias test_align_alias_inline_fail: # The aliases on the same line shouldn't panic. fail_str: SELECT a AS b , c AS d FROM tbl fix_str: SELECT a AS b, c AS d FROM tbl configs: *align_alias # Test the alignments also for the tsql = operator test_excess_space_without_align_alias_tsql: fail_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo fix_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo configs: # This is the default config but we're being explicit # here for testing. core: dialect: tsql layout: type: alias_operator: spacing_before: single test_excess_space_with_align_alias_tsql: # NOTE: The config here shouldn't move the table alias fail_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo AS bar fix_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo AS bar configs: &tsql_align_alias core: dialect: tsql layout: type: alias_operator: spacing_before: align align_within: select_clause align_scope: bracketed test_missing_keyword_with_align_alias_tsql: fail_str: | SELECT first_column a, second_column = b, third_column = (a + b) / 2 FROM foo fix_str: | SELECT first_column a, second_column = b, third_column = (a + b) / 2 FROM foo configs: *tsql_align_alias test_excess_space_with_align_alias_wider_tsql: # NOTE: The config here SHOULD move the table alias # NOTE: The combined LT01 also fixes the missing space # between `USING` and `(a)`. 
fail_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo AS first_table JOIN my_tbl AS second_table USING(a) fix_str: | SELECT first_column = a, second_column = b, third_column = (a + b) / 2 FROM foo AS first_table JOIN my_tbl AS second_table USING (a) configs: &tsql_align_alias_wider core: dialect: tsql layout: type: alias_operator: spacing_before: align align_within: select_statement align_scope: bracketed test_align_alias_boundary_tsql: # The alias inside the expression shouldn't move. fail_str: | SELECT first_column = a, second_column = (SELECT b AS c) fix_str: | SELECT first_column = a, second_column = (SELECT b AS c) configs: *tsql_align_alias test_align_alias_inline_pass_tsql: # The aliases on the same line shouldn't panic. pass_str: SELECT a = b, c = d FROM tbl configs: *tsql_align_alias test_align_alias_inline_fail_tsql: # The aliases on the same line shouldn't panic. fail_str: SELECT a = b , c = d FROM tbl fix_str: SELECT a = b, c = d FROM tbl configs: *tsql_align_alias test_excess_space_with_align_alias_tsql_mixed: # NOTE: The config here shouldn't move the table alias fail_str: | SELECT a AS first_column, second_column = b, third_column_long_name = (a + b) / 2 FROM foo fix_str: | SELECT a AS first_column, second_column = b, third_column_long_name = (a + b) / 2 FROM foo configs: *tsql_align_alias test_align_multiple_a: # https://github.com/sqlfluff/sqlfluff/issues/4023 fail_str: | CREATE TABLE tbl ( foo VARCHAR(25) NOT NULL, barbar INT NULL ) fix_str: | CREATE TABLE tbl ( foo VARCHAR(25) NOT NULL, barbar INT NULL ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_b: # If there are multiple options on the same line, choose the first # to align with (i.e. `not null` rather than `unique`). 
# https://github.com/sqlfluff/sqlfluff/issues/4023 # https://github.com/sqlfluff/sqlfluff/pull/5238 fail_str: | create table tab ( foo varchar(25) not null, barbar int not null unique ) fix_str: | create table tab ( foo varchar(25) not null, barbar int not null unique ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_multiple_lines_a: # If there are multiple options on several lines, always choose the first # to align with, related to https://github.com/sqlfluff/sqlfluff/pull/5823 fail_str: | create table tab ( foo varchar(25) not null, barbar int not null unique, barbara varchar(5) not null primary key ) fix_str: | create table tab ( foo varchar(25) not null, barbar int not null unique, barbara varchar(5) not null primary key ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_multiple_lines_b: # If there are multiple options only on one of several lines, still align # correctly, related to https://github.com/sqlfluff/sqlfluff/pull/5823 pass_str: | CREATE TABLE foo ( x INT NOT NULL PRIMARY KEY, y INT NULL, z INT NULL ); configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_all: fail_str: | create table tab1 ( a varchar(25) not null, b int not null unique references tab2(b), c varchar(5) not null primary key references tab2(c) ) fix_str: | create table tab1 ( a varchar(25) not null, b int not null unique references tab2 (b), c varchar(5) not null primary key references tab2 (c) ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_operators: # Unrealistic but more stretching with several lines which # have different numbers of the same target. 
fail_str: | select 1+123+12345 as foo, 123456+1+12+56 as bar, 1234+123456789 as baz from t fix_str: | select 1 + 123 + 12345 as foo, 123456 + 1 + 12 + 56 as bar, 1234 + 123456789 as baz from t configs: layout: type: binary_operator: spacing_before: align align_within: select_statement sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-brackets.yml000066400000000000000000000031071503426445100253270ustar00rootroot00000000000000rule: LT01 test_pass_parenthesis_block_isolated: pass_str: | SELECT * FROM (SELECT 1 AS C1) AS T1; test_pass_parenthesis_block_isolated_template: pass_str: | {{ 'SELECT * FROM (SELECT 1 AS C1) AS T1;' }} configs: core: ignore_templated_areas: false test_fail_parenthesis_block_not_isolated: fail_str: | SELECT * FROM(SELECT 1 AS C1)AS T1; fix_str: | SELECT * FROM (SELECT 1 AS C1) AS T1; test_fail_parenthesis_block_not_isolated_templated: fail_str: | {{ 'SELECT * FROM(SELECT 1 AS C1)AS T1;' }} configs: core: ignore_templated_areas: false test_pass_parenthesis_function: pass_str: | SELECT foo(5) FROM T1; test_pass_snowflake_match_condition: pass_str: | select * from table1 asof join table2 match_condition(t1 > t2) on pk1 = pk2; configs: core: dialect: snowflake test_fail_snowflake_match_condition: fail_str: | select table1.pk1, table1.t1 from table1 asof join table2 match_condition (t1 > t2) on table1.pk1 = table2.pk2; fix_str: | select table1.pk1, table1.t1 from table1 asof join table2 match_condition(t1 > t2) on table1.pk1 = table2.pk2; configs: core: dialect: snowflake test_pass_ansi_bracketed_data_types: pass_str: | CREATE TABLE fractest (c1 TIME(2), c2 DATETIME(2), c3 TIMESTAMP(2)); test_pass_mysql_bracketed_data_types: pass_str: | CREATE TABLE fractest (c1 TIME(2), c2 DATETIME(2), c3 TIMESTAMP(2)); configs: core: dialect: mysql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-commas.yml000066400000000000000000000022151503426445100250070ustar00rootroot00000000000000rule: LT01 test_fail_whitespace_before_comma: fail_str: SELECT 1 ,4 fix_str: SELECT 1, 4 test_fail_whitespace_before_comma_template: fail_str: | {{ 'SELECT 1 ,4' }} configs: core: ignore_templated_areas: false test_pass_errors_only_in_templated_and_ignore: pass_str: | {{ 'SELECT 1 ,4' }}, 5, 6 configs: core: ignore_templated_areas: true test_fail_errors_only_in_non_templated_and_ignore: fail_str: | {{ 'SELECT 1, 4' }}, 5 , 6 fix_str: | {{ 'SELECT 1, 4' }}, 5, 6 configs: core: ignore_templated_areas: true test_pass_single_whitespace_after_comma: pass_str: SELECT 1, 4 test_pass_single_whitespace_after_comma_template: pass_str: | {{ 'SELECT 1, 4' }} configs: core: ignore_templated_areas: false test_fail_multiple_whitespace_after_comma: fail_str: SELECT 1, 4 fix_str: SELECT 1, 4 test_fail_no_whitespace_after_comma: fail_str: SELECT 1,4 fix_str: SELECT 1, 4 test_fail_no_whitespace_after_comma_2: fail_str: SELECT FLOOR(dt) ,count(*) FROM test fix_str: SELECT FLOOR(dt), count(*) FROM test test_pass_bigquery_trailing_comma: pass_str: SELECT 1, 2, sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-excessive.yml000066400000000000000000000223111503426445100255250ustar00rootroot00000000000000rule: LT01 test_basic: pass_str: SELECT 1 test_basic_template: pass_str: | {{ 'SELECT 1' }} configs: core: ignore_templated_areas: false test_basic_fix: fail_str: SELECT 1 fix_str: SELECT 1 test_basic_fail_template: fail_str: | {{ 'SELECT 1' }} configs: core: ignore_templated_areas: false test_simple_fix: fail_str: | select 1 + 2 + 3 + 4 -- Comment from foo fix_str: | select 1 + 2 + 3 + 4 -- Comment from foo 
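# --- illustrative sketch (not part of the fixture suite) -------------------
# A minimal runnable example of driving LT01 from Python, assuming
# sqlfluff's documented simple API. Each lint result is a dict whose keys
# mirror the violations metadata used in these fixtures (code, description,
# position fields).
import sqlfluff

# Mirrors test_fail_whitespace_before_comma from LT01-commas.yml above.
for violation in sqlfluff.lint("SELECT 1 ,4\n", rules=["LT01"]):
    print(violation["code"], violation["description"])
# ---------------------------------------------------------------------------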
test_identifier_fix: fail_str: | SELECT [thistable] . [col] FROM [thisdatabase] . [thisschema] . [thistable] fix_str: | SELECT [thistable].[col] FROM [thisdatabase].[thisschema].[thistable] configs: core: dialect: tsql test_comparison_operator_fix: fail_str: | SELECT foo FROM bar WHERE baz > = 10; fix_str: | SELECT foo FROM bar WHERE baz >= 10; configs: core: dialect: tsql test_comparison_operator_pass: pass_str: | SELECT foo FROM bar WHERE baz >= 10; configs: core: dialect: tsql test_casting_operator_fix: fail_str: | SELECT '1' :: INT; fix_str: | SELECT '1'::INT; configs: core: dialect: postgres test_casting_operator_pass: pass_str: | SELECT '1'::INT; configs: core: dialect: postgres test_fix_tsql_spaced_chars: fail_str: | SELECT col1 FROM table1 WHERE 1 > = 1 fix_str: | SELECT col1 FROM table1 WHERE 1 >= 1 configs: core: dialect: tsql # Check CASE Statement parses with newlines properly # See https://github.com/sqlfluff/sqlfluff/issues/2495 test_pass_postgres_case_statement: pass_str: | SELECT a, CASE WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'other' END AS b FROM test; configs: core: dialect: postgres test_excess_space_cast: fail_str: | select '1' :: INT as id1, '2'::int as id2 from table_a fix_str: | select '1'::INT as id1, '2'::int as id2 from table_a test_redshift_at_time_zone: pass_str: | SELECT date_w_tz[0] AT TIME ZONE 'Etc/UTC' AS bar FROM foo configs: core: dialect: redshift test_pass_snowflake_semi_structured: pass_str: "SELECT to_array(a.b:c) FROM d" configs: core: dialect: snowflake test_fail_snowflake_semi_structured_single: fail_str: | SELECT to_array(a.b : c) as d, e : f : g::string as h FROM j fix_str: | SELECT to_array(a.b:c) as d, e:f:g::string as h FROM j configs: core: dialect: snowflake test_fail_snowflake_semi_structured_multi: fail_str: | SELECT to_array(a.b : c) as d, e : f : g::string as h FROM j fix_str: | SELECT to_array(a.b:c) as d, e:f:g::string as h FROM j configs: core: dialect: snowflake test_pass_bigquery_specific: # Test a selection of bigquery specific spacings work. # Specifically EXCEPT & qualified functions. 
pass_str: | SELECT * EXCEPT (order_id); SELECT NET.HOST(LOWER(url)) AS host FROM urls; configs: core: dialect: bigquery test_pass_bigquery_specific_arrays_1: # An example of _no whitespace_ after an array type pass_str: | SELECT ARRAY[1, 2, 3] AS floats; configs: core: dialect: bigquery test_pass_bigquery_specific_arrays_2: # An example of _whitespace_ after an array type pass_str: | CREATE TEMPORARY FUNCTION DoSomething(param1 STRING, param2 STRING) RETURNS ARRAY LANGUAGE js AS """Some JS"""; SELECT DoSomething(col1) FROM table1 configs: core: dialect: bigquery test_pass_bigquery_array_function: # Test spacing of Array Generator function brackets pass_str: | SELECT ARRAY(SELECT 1 FROM table1); configs: core: dialect: bigquery test_pass_bigquery_specific_structs: # Test spacing of complex STRUCT brackets pass_str: | create table testing.array_struct_tbl ( address_array_of_nested_structs ARRAY, col2 STRING>> ) configs: core: dialect: bigquery test_pass_bigquery_specific_struct_access: # Test spacing of function access pass_str: | SELECT testFunction(a).b AS field, testFunction(a).* AS wildcard, testFunction(a).b.c AS field_with_field, testFunction(a).b.* AS field_with_wildcard, testFunction(a)[OFFSET(0)].* AS field_with_offset_wildcard, testFunction(a)[SAFE_OFFSET(0)].* AS field_with_safe_offset_wildcard, testFunction(a)[ORDINAL(1)].* AS field_with_ordinal_wildcard, testFunction(a)[ORDINAL(1)].a AS field_with_ordinal_field FROM table1 configs: core: dialect: bigquery test_pass_bigquery_struct_function_no_spaces: # Test struct function does not flag for missing spaces # e.g. doesn't flag `STRUCT()` as should be `STRUCT ()` pass_str: | SELECT TO_JSON(STRUCT()), TO_JSON(STRUCT(1, 2, 3)), STRUCT(1, 2, 3) FROM table1 configs: core: dialect: bigquery test_postgres_datatype: # https://github.com/sqlfluff/sqlfluff/issues/4521 # https://github.com/sqlfluff/sqlfluff/issues/4565 pass_str: | select 1::NUMERIC(3, 1), 2::double precision, '2020-01-01'::timestamp with time zone, 'foo'::character varying, B'10101'::bit(3), B'10101'::bit varying(3), B'10101'::bit varying configs: core: dialect: postgres test_redshift_datatype: pass_str: | select 1::NUMERIC(3, 1), 2::double precision, '2020-01-01'::timestamp with time zone, 'foo'::character varying, 'foo'::character varying(MAX), 'foo'::character varying(255), '10101'::binary varying(6) configs: core: dialect: redshift test_bigquery_datatype: pass_str: | select 1::NUMERIC(3, 1) configs: core: dialect: bigquery test_athena_datatype: pass_str: | select 1::DECIMAL(3, 1), 'foo'::VARCHAR(4), 'bar'::CHAR(3), col1::STRUCT, col2::ARRAY, '2020-01-01'::timestamp with time zone configs: core: dialect: athena test_hive_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), col1::STRUCT, col2::ARRAY, col3::ARRAY[4] configs: core: dialect: hive test_sqlite_datatype: pass_str: | select 1::double precision, 1::DECIMAL(10, 5), 1::unsigned big int, 'foo'::varying character(255), 'foo'::character(20), 'foo'::nvarchar(200) configs: core: dialect: sqlite test_sparksql_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::CHAR(3), col1::STRUCT, col2::ARRAY configs: core: dialect: sparksql test_exasol_datatype: pass_str: | select 1::double precision, 1::DECIMAL(3, 1), 1::NUMERIC(3, 1), 'bar'::VARCHAR(2000 CHAR), col1::INTERVAL DAY(2) TO SECOND(1) configs: core: dialect: exasol test_teradata_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::CHAR(3) configs: core: dialect: teradata 
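# --- illustrative sketch (not part of the fixture suite) -------------------
# The dialect setting matters for the casting and datatype cases in this
# file. A minimal sketch assuming sqlfluff's documented simple API; it
# mirrors test_casting_operator_fix earlier in this file.
import sqlfluff

print(sqlfluff.fix("SELECT '1' :: INT;\n", dialect="postgres", rules=["LT01"]))
# -> SELECT '1'::INT;
# ---------------------------------------------------------------------------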
test_tsql_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::character varying(3) configs: core: dialect: tsql test_snowflake_match_pattern: # Check that the spacing within the pattern isn't changed. # The MATCH_RECOGNIZE & PATTERN keywords however act as keywords and not as functions # therefore _should_ have a space after them. # See: https://docs.snowflake.com/en/sql-reference/constructs/match_recognize pass_str: | select * from stock_price_history match_recognize ( partition by company order by price_date measures match_number() as match_number, first(price_date) as start_date, last(price_date) as end_date, count(*) as rows_in_sequence, count(row_with_price_decrease.*) as num_decreases, count(row_with_price_increase.*) as num_increases one row per match after match skip to last row_with_price_increase pattern ((A | B){5} C+) define row_with_price_decrease as price < lag(price), row_with_price_increase as price > lag(price) ) order by company, match_number; configs: core: dialect: snowflake test_hive_set_statement: # This should use ColonDelimiter so it shouldn't have spacing around it. pass_str: | set hivevar:cat = "Chloe"; configs: core: dialect: hive test_spark_set_statement: pass_str: | SET -v; configs: core: dialect: sparksql test_clickhouse_system_path: # We shouldn't introduce extra spaces within the path. pass_str: | SYSTEM RELOAD MODEL /model/path; configs: core: dialect: clickhouse sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-functions.yml000066400000000000000000000013371503426445100255440ustar00rootroot00000000000000rule: LT01 test_pass_bare_function: pass_str: COMMENT ON FUNCTION x IS 'y'; configs: core: dialect: postgres test_fail_spaced_function: fail_str: COMMENT ON FUNCTION x (foo) IS 'y'; fix_str: COMMENT ON FUNCTION x(foo) IS 'y'; configs: core: dialect: postgres test_pass_rebalance_hint: pass_str: SELECT /*+ REBALANCE */ 1 AS a FROM xxx.yyy; configs: core: dialect: sparksql test_pass_drop_function_go: pass_str: | DROP FUNCTION IF EXISTS INTERNAL_ETL.DIL_md_ScdTest GO configs: core: dialect: tsql test_pass_select_hint: pass_str: | select /*+ repartition(200) */ one, two from mytable configs: core: dialect: sparksql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-literals.yml000066400000000000000000000104331503426445100253500ustar00rootroot00000000000000rule: LT01 test_pass_simple_select: pass_str: "SELECT 'foo'" test_pass_expression: # Test that brackets don't trigger it pass_str: "SELECT ('foo' || 'bar') as buzz" test_fail_as: fail_str: | SELECT 'foo'AS bar FROM foo fix_str: | SELECT 'foo' AS bar FROM foo test_fail_expression: fail_str: "SELECT ('foo'||'bar') as buzz" fix_str: "SELECT ('foo' || 'bar') as buzz" test_pass_comma: pass_str: | SELECT col1, 'string literal' AS new_column_literal, CASE WHEN col2 IN ('a', 'b') THEN 'Y' ELSE 'N' END AS new_column_case FROM some_table WHERE col2 IN ('a', 'b', 'c', 'd'); test_pass_semicolon: pass_str: | ALTER SESSION SET TIMEZONE = 'UTC'; configs: core: dialect: snowflake test_pass_bigquery_udf_triple_single_quote: pass_str: | CREATE TEMPORARY FUNCTION a() LANGUAGE js AS ''' CODE GOES HERE '''; configs: core: dialect: bigquery test_pass_bigquery_udf_triple_double_quote: pass_str: | CREATE TEMPORARY FUNCTION a() LANGUAGE js AS """ CODE GOES HERE """; configs: core: dialect: bigquery test_pass_ansi_single_quote: pass_str: "SELECT a + 'b' + 'c' FROM tbl;" test_fail_ansi_single_quote: fail_str: "SELECT a +'b'+ 'c' FROM tbl;" fix_str: "SELECT a + 'b' + 'c' FROM tbl;" 
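# --- illustrative sketch (not part of the fixture suite) -------------------
# The unicode-literal cases that follow differ only by dialect: T-SQL lexes
# N'b' as a single literal, while the ANSI dialect treats the N as a
# separate token, so LT01 spaces the same text differently. A minimal
# sketch assuming sqlfluff's documented simple API.
import sqlfluff

print(sqlfluff.fix("SELECT a +N'b'+N'c' FROM tbl;\n", dialect="tsql", rules=["LT01"]))
# -> SELECT a + N'b' + N'c' FROM tbl;
# ---------------------------------------------------------------------------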
test_pass_tsql_unicode_single_quote: pass_str: "SELECT a + N'b' + N'c' FROM tbl;" configs: core: dialect: tsql test_fail_tsql_unicode_single_quote: fail_str: "SELECT a +N'b'+N'c' FROM tbl;" fix_str: "SELECT a + N'b' + N'c' FROM tbl;" configs: core: dialect: tsql test_fail_ansi_unicode_single_quote: fail_str: "SELECT a + N'b' + N'c' FROM tbl;" fix_str: "SELECT a + N 'b' + N 'c' FROM tbl;" configs: core: dialect: ansi test_pass_casting_expression: pass_str: "SELECT my_date = '2022-01-01'::DATE AS is_current FROM t;" test_fail_bigquery_casting: fail_str: "SELECT DATE'2007-01-01';" fix_str: "SELECT DATE '2007-01-01';" configs: core: dialect: bigquery test_fail_teradata_casting_type1: fail_str: "SELECT DATE'2007-01-01' AS the_date;" fix_str: "SELECT DATE '2007-01-01' AS the_date;" configs: core: dialect: teradata test_pass_teradata_casting_type2: fail_str: "SELECT '9999-12-31'(DATE);" fix_str: "SELECT '9999-12-31' (DATE);" configs: core: dialect: teradata test_pass_sparksql_ansi_interval_minus: pass_str: SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col; configs: core: dialect: sparksql test_pass_sparksql_multi_units_interval_minus: pass_str: SELECT INTERVAL 2 HOUR -'3' MINUTE AS col; configs: core: dialect: sparksql test_fail_old_python_test: fail_str: SELECT a +'b'+'c' FROM tbl; fix_str: SELECT a + 'b' + 'c' FROM tbl; violations: - code: LT01 description: Expected single whitespace between binary operator '+' and quoted literal. name: layout.spacing warning: false start_file_pos: 10 start_line_no: 1 start_line_pos: 11 end_file_pos: 13 end_line_no: 1 end_line_pos: 14 fixes: - edit: ' ' end_file_pos: 10 end_line_no: 1 end_line_pos: 11 start_file_pos: 10 start_line_no: 1 start_line_pos: 11 type: create_after - code: LT01 description: Expected single whitespace between quoted literal and binary operator '+'. name: layout.spacing warning: false start_file_pos: 13 start_line_no: 1 start_line_pos: 14 end_file_pos: 14 end_line_no: 1 end_line_pos: 15 fixes: - edit: ' ' end_file_pos: 13 end_line_no: 1 end_line_pos: 14 start_file_pos: 13 start_line_no: 1 start_line_pos: 14 type: create_after - code: LT01 description: Expected single whitespace between binary operator '+' and quoted literal. 
name: layout.spacing warning: false start_file_pos: 14 start_line_no: 1 start_line_pos: 15 end_file_pos: 17 end_line_no: 1 end_line_pos: 18 fixes: - edit: ' ' end_file_pos: 14 end_line_no: 1 end_line_pos: 15 start_file_pos: 14 start_line_no: 1 start_line_pos: 15 type: create_after sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-missing.yml000066400000000000000000000026121503426445100252020ustar00rootroot00000000000000rule: LT01 test_fail_no_space_after_using_clause: fail_str: select * from a JOIN b USING(x) fix_str: select * from a JOIN b USING (x) test_pass_newline_after_using_clause: # Check LT01 passes if there's a newline between pass_str: | select * from a JOIN b USING (x) test_fail_cte_no_space_after_as: # Check fixing of single space rule when space is missing fail_str: WITH a AS(select 1) select * from a fix_str: WITH a AS (select 1) select * from a test_fail_multiple_spaces_after_as: # Check fixing of single space rule on multiple spaces fail_str: WITH a AS (select 1) select * from a fix_str: WITH a AS (select 1) select * from a test_fail_cte_newline_after_as: # Check fixing of replacing newline with space fail_str: | WITH a AS ( select 1 ) select * from a fix_str: | WITH a AS ( select 1 ) select * from a test_fail_cte_newline_and_spaces_after_as: # Check stripping newlines and extra whitespace fail_str: | WITH a AS ( select 1 ) select * from a fix_str: | WITH a AS ( select 1 ) select * from a test_pass_cte_comment_after_as: # https://github.com/sqlfluff/sqlfluff/issues/6125 # When handling the 'single:inline' constraint, # prohibit stripping newlines after comment segments. pass_str: | WITH a AS -- comment ( select 1 ) select * from a sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-operators.yml000066400000000000000000000060011503426445100255430ustar00rootroot00000000000000rule: LT01 test_pass_brackets: # Test that we don't fail * operators in brackets pass_str: "SELECT COUNT(*) FROM tbl\n\n" test_pass_expression: # Github Bug #207 pass_str: | select field, date(field_1) - date(field_2) as diff from table test_fail_expression: # Github Bug #207 fail_str: | select field, date(field_1)-date(field_2) as diff from table fix_str: | select field, date(field_1) - date(field_2) as diff from table # Check we don't get false alarms with newlines, or sign indicators # ------------------- test_pass_newline_1: pass_str: | SELECT 1 + 2 test_pass_newline_2: pass_str: | SELECT 1 + 2 test_pass_newline_£: pass_str: | SELECT 1 + 2 test_pass_sign_indicators: pass_str: SELECT 1, +2, -4 test_pass_tilde: pass_str: SELECT ~1 # ------------------- fail_simple: fail_str: "SELECT 1+2" fix_str: "SELECT 1 + 2" pass_bigquery_hyphen: # hyphenated table reference should not fail pass_str: SELECT col_foo FROM foo-bar.foo.bar configs: core: dialect: bigquery pass_sparksql_ansi_interval_minus: pass_str: SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col; configs: core: dialect: sparksql test_pass_sparksql_multi_units_interval_minus: pass_str: SELECT INTERVAL -2 HOUR '3' MINUTE AS col; configs: core: dialect: sparksql pass_tsql_assignment_operator: # Test that we fix the outer whitespace but don't add any in between + and =. 
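# (`+=` is T-SQL's compound assignment operator, so the expected fix below
# adds space around the operator as a whole and never between `+` and `=`.)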
fail_str: SET @param1+=1 fix_str: SET @param1 += 1 configs: core: dialect: tsql pass_concat_string: pass_str: SELECT 'barry' || 'pollard' test_pass_placeholder_spacing: # Test for spacing issues around placeholders # https://github.com/sqlfluff/sqlfluff/issues/4253 pass_str: | {% set is_dev_environment = true %} SELECT * FROM table WHERE some_col IS TRUE {% if is_dev_environment %} AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 7 DAY) {% else %} AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 30 DAY) {% endif %} AND TRUE ; fail_bigquery_whitespaces_in_function_reference: fail_str: SELECT dataset . AddFourAndDivide(5, 10) fix_str: SELECT dataset.AddFourAndDivide(5, 10) configs: core: dialect: bigquery pass_bigquery_safe_prefix_function: # SAFE prefix to function calls should not fail pass_str: SELECT SAFE.STRING(JSON '1') configs: core: dialect: bigquery fail_bigquery_safe_prefix_function: # Check that additional whitespaces introduced by # https://github.com/sqlfluff/sqlfluff/issues/4645 # get fixed. fail_str: SELECT SAFE . STRING(JSON '1') fix_str: SELECT SAFE.STRING(JSON '1') configs: core: dialect: bigquery pass_sparksql_file_literal: pass_str: ADD JAR path/to/some.jar; configs: core: dialect: sparksql pass_sqlite_column_path_operator: pass_str: SELECT a -> 'b'; configs: core: dialect: sqlite sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT01-trailing.yml000066400000000000000000000014601503426445100253420ustar00rootroot00000000000000rule: LT01 test_fail_trailing_whitespace: fail_str: "SELECT 1 \n" fix_str: "SELECT 1\n" test_fail_trailing_whitespace_on_initial_blank_line: fail_str: " \nSELECT 1 \n" fix_str: "\nSELECT 1\n" test_pass_trailing_whitespace_before_template_code: pass_str: | SELECT {% for elem in ["a", "b"] %} {{ elem }}, {% endfor %} 0 test_fail_trailing_whitespace_and_whitespace_control: fail_str: "{%- set temp = 'temp' -%}\n\nSELECT\n 1, \n 2,\n" fix_str: "{%- set temp = 'temp' -%}\n\nSELECT\n 1,\n 2,\n" test_pass_macro_trailing: pass_str: | {% macro foo(bar) %} {{bar}} {% endmacro %} with base as ( select a, b, {{ foo(1) }} as c from tblb ) select * from tbl sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT02-indent.yml000066400000000000000000001463561503426445100250310ustar00rootroot00000000000000rule: LT02 test_fail_reindent_first_line_1: fail_str: " SELECT 1" fix_str: SELECT 1 violations: - code: LT02 description: First line should not be indented. name: layout.indent warning: false start_line_no: 1 start_line_pos: 1 start_file_pos: 0 end_line_no: 1 end_line_pos: 6 end_file_pos: 5 fixes: - edit: "" end_file_pos: 5 end_line_no: 1 end_line_pos: 6 start_file_pos: 0 start_line_no: 1 start_line_pos: 1 type: delete test_fail_reindent_first_line_2: # Github Bug #99. Python2 Issues with fixing LT02 fail_str: " select 1 from tbl;" fix_str: select 1 from tbl; test_pass_indentation_of_comments_1: # Github Bug #203 # Comments should be aligned to the following line. pass_str: | SELECT -- Compute the thing (a + b) AS c FROM acceptable_buckets test_pass_indentation_of_comments_2: # Comments should be aligned to the following line. pass_str: | SELECT user_id FROM age_data JOIN audience_size USING (user_id, list_id) -- We LEFT JOIN because blah LEFT JOIN verts USING (user_id) test_fail_tab_indentation: # Using tabs as indents works fail_str: | SELECT a, b FROM my_tbl fix_str: | SELECT a, b FROM my_tbl configs: indentation: indent_unit: tab violations: - code: LT02 description: Expected indent of 1 tabs. 
name: layout.indent warning: false start_line_no: 3 start_line_pos: 1 start_file_pos: 11 end_line_no: 3 end_line_pos: 2 end_file_pos: 12 fixes: - edit: "\n\t" end_file_pos: 11 end_line_no: 3 end_line_pos: 1 start_file_pos: 10 start_line_no: 2 start_line_pos: 4 type: replace test_pass_indented_joins_default: # Configurable indents work. # a) default pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) test_pass_indented_joins_false: # b) specific pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false test_pass_indented_joins_true: # c) specific True, but passing pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true test_fail_indented_joins_true_fix: # d) specific True, but failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true violations: - code: LT02 description: Expected indent of 4 spaces. name: layout.indent warning: false start_line_no: 3 start_line_pos: 1 start_file_pos: 27 end_line_no: 3 end_line_pos: 5 end_file_pos: 31 fixes: - edit: "\n " end_file_pos: 27 end_line_no: 3 end_line_pos: 1 start_file_pos: 26 start_line_no: 2 start_line_pos: 12 type: replace test_fail_indented_joins_false_fix: # e) specific False, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false test_pass_indented_using_on_default: # Configurable using_on indents work. # 2.a) default pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) test_pass_indented_using_on_true: # 2.b) specific pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: true test_pass_indented_using_on_false: # 2.c) specific False, but passing pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: false test_fail_indented_using_on_false: # 2.d) specific False, but failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: false test_fail_indented_joins_using_on_true: # 2.e) specific True, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: true test_fail_indented_joins_using_on_false: # 2.f) specific false for both, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false indented_using_on: false test_fail_indented_using_on_merge_statment_default: # indented_using_on also covers MERGE INTO statements fail_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 fix_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 test_pass_indented_using_on_merge_statment_false: # indented_using_on also covers MERGE INTO statements pass_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 configs: indentation: indented_using_on: false test_pass_indented_on_contents_default: # Test 
indented_on_contents when default (true) pass_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true test_fail_indented_on_contents_true: # Test indented_on_contents when true (default) fail_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true fix_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true configs: indentation: indented_on_contents: true test_pass_indented_on_contents_false: # Test indented_on_contents when false (non-default) pass_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true configs: indentation: indented_on_contents: false test_fail_indented_on_contents_default_fix_a: # Default config for indented_on_contents is true fail_str: | SELECT * FROM t1 JOIN t2 ON true AND true fix_str: | SELECT * FROM t1 JOIN t2 ON true AND true test_fail_indented_on_contents_default_fix_b: # Default config for indented_on_contents is true. # This is an alternate interpretation of untaken indents. fail_str: | SELECT * FROM t1 JOIN t2 ON true AND true fix_str: | SELECT * FROM t1 JOIN t2 ON true AND true test_fail_indented_on_contents_false_fix: fail_str: | SELECT t1.a, t2.b FROM t1 JOIN t2 ON true AND true fix_str: | SELECT t1.a, t2.b FROM t1 JOIN t2 ON true AND true configs: indentation: indented_on_contents: false test_pass_indented_from_with_comment: pass_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_pass_ignored_comment: # Test ignoring comments entirely. # https://github.com/sqlfluff/sqlfluff/issues/3311 pass_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) configs: indentation: ignore_comment_lines: true test_pass_indented_from_with_comment_alternate: # This shows the alternative position of comments being allowed. pass_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_fail_indented_from_with_comment_fix: # This shows the fix still returns to the primary location. fail_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) fix_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_fail_indented_multi_line_comment: fail_str: | SELECT business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the customer_type FROM global_actions_states fix_str: | SELECT business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the customer_type FROM global_actions_states test_jinja_with_disbalanced_pairs: # The range(3) -%} results in swallowing the \n # N.B. The way LT02 handles this is questionable, # and this test seals in that behaviour. pass_str: | SELECT cohort_month {% for i in range(3) -%} , {{ i }} AS index_{{ i }} {% endfor -%} , TRUE AS overall FROM orders test_fail_attempted_hanger_fix: # Check messy hanger correction. fail_str: | SELECT coalesce(foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl test_fail_possible_hanger_fix: # Same note as above, but with a messier example.
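# (A "hanger" is a hanging indent, where continuation lines align with a
# token on the opening line rather than with a standard indent step; the
# fixes here rewrite them as conventional indents.)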
fail_str: | SELECT coalesce(foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl test_fail_consecutive_hangers: fail_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' fix_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' test_fail_consecutive_hangers_implicit: # NOTE: The allowed implicit indent in the WHERE clause, # but by default they're not enabled. fail_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' fix_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' configs: indentation: allow_implicit_indents: true test_fail_clean_reindent_fix: # A "clean" indent is where the previous line ends with an # indent token (as per this example). We should use the # default approach and indent by 1 step. # NOTE: That because the indent opened before "coalesce" # isn't closed before the end of the line, we force an # additional indent before it. fail_str: | SELECT coalesce( foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl # https://github.com/sqlfluff/sqlfluff/issues/643 test_pass_indent_snowflake: pass_str: | with source_data as ( select * from {{ source('source_name', 'xxx_yyy_zzz') }} ) select * from source_data configs: core: dialect: snowflake # https://github.com/sqlfluff/sqlfluff/issues/643 test_pass_indent_indent_bigquery: pass_str: | with source_data as ( select * from {{ source('source_name', 'xxx_yyy_zzz') }} ) select * from source_data configs: core: dialect: bigquery test_jinja_indent_templated_table_name_a: fail_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} fix_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} # Like test_jinja_indent_1_a but "FROM" table not initially # indented. 
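# ("test_jinja_indent_1_a" appears to be an earlier name for
# test_jinja_indent_templated_table_name_a above.)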
test_jinja_indent_templated_table_name_b: fail_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} fix_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} test_jinja_nested_blocks: fail_str: | WITH raw_effect_sizes AS ( SELECT {% for action in ['a'] %} {% if True %} rate_su_{{action}}, {% endif %} {% endfor %} ) SELECT 1 fix_str: | WITH raw_effect_sizes AS ( SELECT {% for action in ['a'] %} {% if True %} rate_su_{{action}}, {% endif %} {% endfor %} ) SELECT 1 # LIMIT, QUALIFY, and WINDOW both indent test_limit_and_qualify_and_window_indent: fail_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) fix_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) configs: core: dialect: bigquery # LIMIT, QUALIFY and WINDOW both acceptable on single line test_limit_and_qualify_and_window_single_line: pass_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) configs: core: dialect: bigquery # By default CTEs should not be indented test_pass_cte: pass_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 configs: core: dialect: bigquery # CTEs can be configured to be indented test_fail_indented_cte: fail_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 fix_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 configs: core: dialect: bigquery indentation: indented_ctes: true # Exasol LUA script test_exasol_script: pass_str: | CREATE OR REPLACE LUA SCRIPT ASCRIPT (APARAM) RETURNS ROWCOUNT AS res = 1 suc = true if not suc then error("ERROR") end return res / configs: core: dialect: exasol test_pass_tsql_else_if: pass_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql test_fail_tsql_else_if: fail_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; fix_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql test_fail_tsql_else_if_successive: fail_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; fix_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql # TSQL function test_tsql_function: fail_str: | CREATE FUNCTION dbo.isoweek (@DATE datetime) RETURNS int WITH EXECUTE AS CALLER AS BEGIN DECLARE @ISOweek int; SET @ISOweek = DATEPART(wk, @DATE) + 1 - DATEPART(wk, CAST(DATEPART(yy, @DATE) AS char(4)) + '0104'); --Special cases Jan 1-3 may belong to the previous year IF (@ISOweek = 0) SET @ISOweek = dbo.ISOWEEK(CAST(DATEPART(yy, @DATE) - 1 AS char(4) ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1; --Special case Dec 29-31 may belong to the next year IF ((DATEPART(mm, @DATE) = 12) AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28)) SET @ISOweek = 1; RETURN(@ISOweek); END; GO fix_str: | CREATE FUNCTION 
dbo.isoweek (@DATE datetime) RETURNS int WITH EXECUTE AS CALLER AS BEGIN DECLARE @ISOweek int; SET @ISOweek = DATEPART(wk, @DATE) + 1 - DATEPART(wk, CAST(DATEPART(yy, @DATE) AS char(4)) + '0104'); --Special cases Jan 1-3 may belong to the previous year IF (@ISOweek = 0) SET @ISOweek = dbo.ISOWEEK(CAST( DATEPART(yy, @DATE) - 1 AS char(4) ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1; --Special case Dec 29-31 may belong to the next year IF ( (DATEPART(mm, @DATE) = 12) AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28) ) SET @ISOweek = 1; RETURN(@ISOweek); END; GO configs: core: dialect: tsql test_pass_ignore_templated_whitespace: pass_str: | SELECT c1, {{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_1: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_2: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_3: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_pass_ignore_templated_whitespace_4: # Note the newline after c2. This causes "AS other_id" to be on a different # line in templated space, but not raw space. LT02 should ignore lines like # this. pass_str: | SELECT c1, {{ " c2\n" }} AS other_id FROM my_table test_pass_ignore_templated_newline_not_last_line: pass_str: | select * from {{ "\n\nmy_table" }} inner join my_table2 using (id) test_pass_ignore_templated_newline_last_line: pass_str: | select * from {{ "\n\nmy_table" }} test_fail_fix_template_indentation_1: fail_str: | SELECT c1, {{ "c2" }} fix_str: | SELECT c1, {{ "c2" }} test_fail_fix_template_indentation_2: fail_str: | with first_join as ( select {{ "c1" }}, c2 from helper {{ "group by 1" }} ) select * from first_join fix_str: | with first_join as ( select {{ "c1" }}, c2 from helper {{ "group by 1" }} ) select * from first_join test_pass_tsql_update_indent: pass_str: | update Extracts.itt_parm_base set DateF = convert(varchar, @from_date, 112), DateT = convert(varchar, @to_date, 112) configs: core: dialect: tsql test_fail_tsql_declare_indent: fail_str: | DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; fix_str: | DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; configs: core: dialect: tsql test_pass_tsql_set_indent: pass_str: | SET @prv_qtr_1st_dt = CAST(@last_qtr_yr AS VARCHAR(4)) + '-' + CAST(@last_qtr_first_mn AS VARCHAR(2)) + '-01' configs: core: dialect: tsql test_pass_tsql_set_indent_multiple_params: pass_str: | SET @param1 = 1, @param2 = 2 configs: core: dialect: tsql test_pass_tsql_if_indent: pass_str: | IF 1 > 1 AND 2 < 2 SELECT 1; configs: core: dialect: tsql test_pass_exasol_func_indent: pass_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6), p2 VARCHAR(10) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 IS NOT NULL AND p2 IS NOT NULL THEN IF p1 = 1 THEN res:= 'Hello World'; ELSE IF p2 = 3 THEN res:= 'ABC'; END IF; res:= 'WOHOOOO'; END IF; END IF; RETURN res; END schem.func; / configs: core: dialect: exasol test_fail_fix_exa_func_format: fail_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 = 1 THEN res:= 'Hello World'; END IF; RETURN res; END
schem.func; / fix_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 = 1 THEN res:= 'Hello World'; END IF; RETURN res; END schem.func; / configs: core: dialect: exasol test_pass_tsql_index_indent: pass_str: | CREATE UNIQUE INDEX AK_UnitMeasure_Name ON Production.UnitMeasure(Name); configs: core: dialect: tsql test_pass_tsql_statistics_indent: pass_str: | CREATE STATISTICS [stat_ccode] ON [dbo].[CodeValues]([ccode]); configs: core: dialect: tsql test_fail_snowflake_merge_statement: fail_str: | merge into foo.bar as tgt using ( select foo::date as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; fix_str: | merge into foo.bar as tgt using ( select foo::date as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; configs: core: dialect: snowflake test_fail_hanging_indents_convert_to_normal_indent: # This takes advantage of new indent treatment in 2.0.x fail_str: | SELECT a.line + (a.with + a.hanging_indent) as actually_not_ok, FROM tbl as a fix_str: | SELECT a.line + ( a.with + a.hanging_indent ) as actually_not_ok, FROM tbl as a test_fail_hanging_indents_fix_mixed_indents: # The tab is removed. fail_str: | SELECT a.line + ( a.something_indented_well + least( a.good_example, -- there is a tab here a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a fix_str: | SELECT a.line + ( a.something_indented_well + least( a.good_example, -- there is a tab here a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a test_pass_indented_procedure_parameters: pass_str: | CREATE OR ALTER PROCEDURE some_procedure @param1 int AS SELECT * FROM dbo configs: core: dialect: tsql test_fail_unindented_procedure_parameters: fail_str: | CREATE OR ALTER PROCEDURE someOtherProcedure @param1 nvarchar(100), @param2 nvarchar(20) AS SELECT * FROM dbo fix_str: | CREATE OR ALTER PROCEDURE someOtherProcedure @param1 nvarchar(100), @param2 nvarchar(20) AS SELECT * FROM dbo configs: core: dialect: tsql test_tsql_bubble_up_newline_after_fix: # Tests issue 3303, where an LT02 fix leaves a newline as the final child # segment that has to be "bubbled up" two levels to avoid violating the # _is_code_or_meta() check in core/parser/segments/base.py. 
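# See https://github.com/sqlfluff/sqlfluff/issues/3303.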
fail_str: | create procedure name as begin drop table if exists #something end fix_str: | create procedure name as begin drop table if exists #something end configs: core: dialect: tsql test_tsql_cross_apply_indentation: # Test for behavior in issue #3672 pass_str: | SELECT table1.col, table2.col FROM table1 CROSS APPLY ( VALUES ((1), (2)) ) AS table2(col) INNER JOIN table3 ON table1.col = table3.col; configs: core: dialect: tsql test_tsql_cross_join_indentation: # Test for behavior in issue #3672 pass_str: | SELECT table1.col, table2.col FROM table1 CROSS JOIN table2 INNER JOIN table3 ON table1.col = table3.col; configs: core: dialect: tsql test_tsql_nested_join: # Test for behavior prior to issue #3672 fail_str: | SELECT table1.col, table2.col FROM table1 INNER JOIN table2 INNER JOIN table3 ON table1.col = table2.col AND table1.col = table3.col; fix_str: | SELECT table1.col, table2.col FROM table1 INNER JOIN table2 INNER JOIN table3 ON table1.col = table2.col AND table1.col = table3.col; configs: core: dialect: tsql test_tsql_outer_apply_indentation: # Test for behavior in issue #3685 pass_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col configs: core: dialect: tsql test_tsql_outer_apply_indentation_fix: # Test for behavior in issue #3685 fail_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col fix_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col configs: core: dialect: tsql test_fail_consuming_whitespace_a: # Test that this works even with tags which consume whitespace. fail_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} fix_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_fail_consuming_whitespace_b: # Additional test to make sure that crazy things don't happen # with the first newline. fail_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} fix_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_pass_consuming_whitespace_stable: # Test for stability in fixes with loops and consuming tags. # https://github.com/sqlfluff/sqlfluff/issues/3185 pass_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_fail_trailing_comments: # Additional test to make sure that crazy things don't happen # with the first newline. 
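# (The trailing comment-only lines after the final select target should keep
# a stable, sensible indent when fixed.)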
fail_str: | SELECT 1 -- foo -- bar fix_str: | SELECT 1 -- foo -- bar test_fail_case_statement: # Test for issue with case statement indentation: # https://github.com/sqlfluff/sqlfluff/issues/3836 fail_str: | SELECT foo , CASE WHEN 1 = 1 THEN 2 END AS example FROM tbl fix_str: | SELECT foo , CASE WHEN 1 = 1 THEN 2 END AS example FROM tbl configs: indentation: tab_space_size: 2 test_pass_templated_case_statement: # Test for template block in case statement indentation # https://github.com/sqlfluff/sqlfluff/issues/3988 pass_str: | {%- set json_keys = ["a", "b", "c"] -%} with dummy as ( select {% for json_key in json_keys -%} case when 1 = 1 {% if json_key in ["b"] %} then 0 {% else %} then 1 {% endif %} else null end as {{ json_key }}_suffix{% if not loop.last %}, {% endif %} {% endfor %} ) select * from dummy test_pass_jinja_tag_multiline: # Test that jinja block tags which contain newlines # aren't linted, because we can't reliably fix them. # The default fixing routine would only moving the # start of the tag, which is ok but potentially strange. # TODO: At some point we should find a better solution for # this. pass_str: | SELECT 1, {{ "my_jinja_tag_with_odd_indents" }}, 2, {% if True %} 3, -- NOTE: indented because within block {% endif %} 4 test_pass_trailing_inline_noqa: pass_str: | SELECT col1, col2 FROM table1 -- noqa: CV09 test_pass_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. pass_str: | SELECT * FROM foo WHERE a AND b configs: indentation: allow_implicit_indents: true test_fail_deny_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. fail_str: | SELECT * FROM foo WHERE a AND b fix_str: | SELECT * FROM foo WHERE a AND b configs: indentation: allow_implicit_indents: false test_pass_templated_newlines: # NOTE: The macro has many newlines in it, # and the calling of it is indented. Check that # this doesn't panic. pass_str: | {% macro my_macro() %} macro + with_newlines {% endmacro %} SELECT {{ my_macro() }} as awkward_indentation FROM foo test_fail_fix_beside_templated: # Check that templated code checks aren't too aggressive. # https://github.com/sqlfluff/sqlfluff/issues/4215 fail_str: | {% if False %} SELECT 1 {% else %} SELECT c FROM t WHERE c < 0 {% endif %} fix_str: | {% if False %} SELECT 1 {% else %} SELECT c FROM t WHERE c < 0 {% endif %} test_pass_block_comment: # Check that subsequent block comment lines are ok to be indented. # https://github.com/sqlfluff/sqlfluff/issues/4224 pass_str: | SELECT /* This comment is unusually indented - and contains - even more indents */ foo FROM bar test_fix_block_comment: # Check other comments are still fixed. # https://github.com/sqlfluff/sqlfluff/issues/4224 fail_str: | SELECT -- bad -- good foo, /* bad */ foo_bad, /* long comment which should keep indent - including this */ good_foo, /* and this this is ok this is NOT ok */ bar FROM tbl fix_str: | SELECT -- bad -- good foo, /* bad */ foo_bad, /* long comment which should keep indent - including this */ good_foo, /* and this this is ok this is NOT ok */ bar FROM tbl test_fail_case_else_end_clause: # Checks linting of missing newline in CASE statement. # More specifically this is a case of a multi-dedent # not being handled properly when one of the indents # it covers is taken, but the other is untaken. 
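# (A "taken" indent point is one where a newline actually occurs in the
# source; an "untaken" one has no newline, which is what trips up the
# multi-dedent handling here.)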
# https://github.com/sqlfluff/sqlfluff/issues/4222 fail_str: | select case when a then 'abc' when b then 'def' else 'ghi' end as field, bar from foo fix_str: | select case when a then 'abc' when b then 'def' else 'ghi' end as field, bar from foo test_fail_hard_templated_indents: # Test for consumed initial indents and consumed line indents. # https://github.com/sqlfluff/sqlfluff/issues/4230 # NOTE: We're using a block indentation indicator because the # test query has initial leading whitespace. # https://yaml.org/spec/1.2.2/#8111-block-indentation-indicator fail_str: |2 {%- if true -%} SELECT * FROM {{ "t1" }} {%- endif %} fix_str: |2 {%- if true -%} SELECT * FROM {{ "t1" }} {%- endif %} test_fail_fix_consistency_around_comments: # Check that comments don't make fixes inconsistent. # https://github.com/sqlfluff/sqlfluff/issues/4223 fail_str: | select case when a then b end as foo, case when a -- bar then b end as bar from c fix_str: | select case when a then b end as foo, case when a -- bar then b end as bar from c test_fail_coverage_indent_trough: # This test primarily tests the handling of closing trough indents fail_str: | WITH bar as (SELECT 1 FROM foo) SELECT a FROM bar fix_str: | WITH bar as ( SELECT 1 FROM foo ) SELECT a FROM bar test_pass_combined_comment_impulses: # This tests issue #4252 # https://github.com/sqlfluff/sqlfluff/issues/4252 pass_str: | WITH cte AS ( SELECT * FROM ( SELECT * FROM table WHERE NOT bool_column AND NOT bool_column AND some_column >= 1 -- This is a comment ) ), SELECT * FROM cte ; SELECT * FROM table3 ; test_indented_comment_tsql: # TSQL redefines the block_comment. This checks that is done correctly. # https://github.com/sqlfluff/sqlfluff/issues/4249 pass_str: | /* Author: tester Create date: 2021-03-16 */ SELECT 1 AS a configs: core: dialect: tsql test_pass_join_comment_indents_1: # https://github.com/sqlfluff/sqlfluff/issues/4291 pass_str: | select * from a left join b -- comment on (a.x = b.x) test_pass_join_comment_indents_2: # https://github.com/sqlfluff/sqlfluff/issues/4291 pass_str: | select * from a left join b -- comment on (a.x = b.x) test_comment_effect_indents_default: # https://github.com/sqlfluff/sqlfluff/issues/4294 fail_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE fix_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE test_comment_effect_indents_implicit: # https://github.com/sqlfluff/sqlfluff/issues/4294 fail_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE fix_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE configs: indentation: allow_implicit_indents: true test_untaken_negative_1: # https://github.com/sqlfluff/sqlfluff/issues/4234 fail_str: | CREATE TABLE mytable AS (SELECT id, user_id FROM another_table ) ; fix_str: | CREATE TABLE mytable AS ( SELECT id, user_id FROM another_table ) ; test_untaken_negative_2: # https://github.com/sqlfluff/sqlfluff/issues/4234 fail_str: | WITH m AS (SELECT firstCol , secondCol FROM dbo.myTable ) SELECT * FROM m fix_str: | WITH m AS ( SELECT firstCol , secondCol FROM dbo.myTable ) SELECT * FROM m test_untaken_negative_implicit: # NOTE: Check that implicit indents don't # apply before single brackets. 
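# (The bracketed expression opens its own indent, so no implicit indent
# should be attributed to the keyword that precedes the bracket.)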
pass_str: | SELECT * FROM foo WHERE ( a = b ) GROUP BY a configs: indentation: allow_implicit_indents: true test_fail_mixed_tabs_and_spaces: # NOTE: This used to be L002 (rather than L003) fail_str: "SELECT\n \t 1" fix_str: "SELECT\n 1" test_fix_implicit_indents_4467_a: # https://github.com/sqlfluff/sqlfluff/issues/4467 fail_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b fix_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b configs: indentation: allow_implicit_indents: true test_fix_implicit_indents_4467_b: # https://github.com/sqlfluff/sqlfluff/issues/4467 pass_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b configs: indentation: allow_implicit_indents: true tab_space_size: 2 test_fix_macro_indents_4367: # https://github.com/sqlfluff/sqlfluff/issues/4367 fail_str: | {% macro my_macro(col) %} {{ col }} {% endmacro %} SELECT something, {{ my_macro("mycol") }}, something_else FROM mytable fix_str: | {% macro my_macro(col) %} {{ col }} {% endmacro %} SELECT something, {{ my_macro("mycol") }}, something_else FROM mytable test_fix_untaken_positive_4433: # https://github.com/sqlfluff/sqlfluff/issues/4433 fail_str: | CREATE TABLE mytable AS (SELECT id, user_id FROM another_table WHERE TRUE ) ; fix_str: | CREATE TABLE mytable AS ( SELECT id, user_id FROM another_table WHERE TRUE ) ; test_implicit_case_4542: # https://github.com/sqlfluff/sqlfluff/issues/4542 pass_str: | select a, case when b is null then 0 else 1 end as c from my_table; configs: indentation: allow_implicit_indents: true test_indented_joins_4484: # https://github.com/sqlfluff/sqlfluff/issues/4484 pass_str: | select * from table_1 inner join table_2 on table_1.key = table_2.key inner join table_3 on table_2.key = table_3.key configs: indentation: indented_joins: true test_tsql_where_implicit_4559: # https://github.com/sqlfluff/sqlfluff/issues/4559 pass_str: | SELECT t.col1 WHERE t.col2 = 'foo' AND t.col3 = 'bar' configs: core: dialect: tsql indentation: allow_implicit_indents: true test_jinja_nested_tracking: # This tests the caching features of BlockTracker # in the lexer. If that's not functioning properly # the indentation of the nested jinja blocks in this # query will likely fail. 
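# (The nested {% for %} / {% if %} blocks below only indent consistently if
# each templated block is recognised as the same block on every loop pass.)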
pass_str: | SELECT * FROM {% for action in ['a', 'b'] %} {% if loop.first %} {{action}}_var {% else %} JOIN {{action}}_var USING (c, d, e) {% endif %} {% endfor %} test_configure_no_indent_before_then_4589: # THEN can be configured to not be indented pass_str: | SELECT a, CASE WHEN b >= 42 THEN 1 ELSE 0 END AS c FROM some_table configs: core: dialect: ansi indentation: indented_then: false test_bigquery_insert_statement_values_clause: pass_str: | INSERT dataset.inventory (product, quantity) VALUES("top load washer", 10); configs: core: dialect: bigquery test_bigquery_merge_statement_values_clause: fail_str: | MERGE dataset.detailedinventory AS t USING dataset.inventory AS s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, FALSE); fix_str: | MERGE dataset.detailedinventory AS t USING dataset.inventory AS s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, FALSE); configs: core: dialect: bigquery test_fail_issue_4680: # NOTE: It doesn't reindent the second clause, but the important # thing is that we don't get an exception. fail_str: | SELECT col1 FROM table WHERE {% if true %} col1 > 1 {% else %} col1 > 0 {% endif %} fix_str: | SELECT col1 FROM table WHERE {% if true %} col1 > 1 {% else %} col1 > 0 {% endif %} test_implicit_indent_when: fail_str: | SELECT col1, CASE WHEN col2 = 1 THEN col2 + 1 END AS col2 FROM table1 fix_str: | SELECT col1, CASE WHEN col2 = 1 THEN col2 + 1 END AS col2 FROM table1 configs: indentation: allow_implicit_indents: true indented_then: false test_implicit_indent_nested_when: fail_str: | SELECT col1, CASE WHEN col2 = 1 THEN CASE WHEN col2 = 2 THEN col2 + 1 END END AS col2 FROM table1 fix_str: | SELECT col1, CASE WHEN col2 = 1 THEN CASE WHEN col2 = 2 THEN col2 + 1 END END AS col2 FROM table1 configs: indentation: allow_implicit_indents: true indented_then: false indented_then_contents: false test_fail_issue_4745: fail_str: | with {% for a in [1, 2, 3] %}{% for b in ['C'] %} {{ b }}_fill_{{ a }} as ( select * from data ), {% endfor %}{% endfor %} select 1 fix_str: | with {% for a in [1, 2, 3] %}{% for b in ['C'] %} {{ b }}_fill_{{ a }} as ( select * from data ), {% endfor %}{% endfor %} select 1 test_pass_trailing_comment_1: # NOTE: This checks that we allow the alternative placement of comments pass_str: | select bar -- comment from foo test_pass_trailing_comment_2: # NOTE: This checks that we allow the alternative placement of comments pass_str: | select bar /* comment with more lines */ from foo test_pass_issue_4582: # https://github.com/sqlfluff/sqlfluff/issues/4582 pass_str: | select a.col /* Multi line comment 1 */ from a /* Multi line comment 2 */ inner join b on a.id = b.id; select a.col /* Single line comment 1 */ from a /* Single line comment 2 */ inner join b on a.id = b.id test_pass_issue_4540: # https://github.com/sqlfluff/sqlfluff/issues/4540 pass_str: | with cte as ( select a from b qualify row_number() over ( partition by a ) = 1 ) select a from cte qualify row_number() over ( partition by a ) = 1; configs: core: dialect: snowflake test_pass_closed_bracketed_implicit: pass_str: | select * from a where (b = a) and (c = d) configs: indentation: allow_implicit_indents: true 
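# Editor's sketch: a hypothetical minimal case, assuming (as in
# test_pass_closed_bracketed_implicit above) that a clause kept entirely on
# one line raises no indent issues under implicit indents. The test name and
# SQL are illustrative and have not been run against the harness.
test_pass_single_line_where_implicit:
  pass_str: |
    select *
    from a
    where b = a
  configs:
    indentation:
      allow_implicit_indents: true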
test_fix_unclosed_bracketed_implicit: fail_str: | select * from a where (b = a and c = d) fix_str: | select * from a where ( b = a and c = d ) configs: indentation: allow_implicit_indents: true test_pass_implicit_where: pass_str: | SELECT a FROM b WHERE c = d AND e = f ; configs: indentation: allow_implicit_indents: true test_pass_templated_join: # See: https://github.com/sqlfluff/sqlfluff/issues/5290 pass_str: | select * from a {% if True %} left join b using(x) {% endif %} test_whitespace_control_issue_5277: # https://github.com/sqlfluff/sqlfluff/issues/5277 fail_str: | WITH a AS ( SELECT * FROM tbl WHERE TRUE {% if True -%} AND b > (SELECT 1 FROM {{ this }}) {%- endif %} ) select * from a fix_str: | WITH a AS ( SELECT * FROM tbl WHERE TRUE {% if True -%} AND b > (SELECT 1 FROM {{ this }}) {%- endif %} ) select * from a test_inconsistent_indent: # In specific circumstances the indentation algorithm can behave # unexpectedly, this is a test case to catch one, where the # fix was unexpected. # https://github.com/sqlfluff/sqlfluff/issues/5277 fail_str: | WITH x AS ( SELECT o.p AS p {% for action in ["a", "b"] %} , n.campaign_count_{{ action }} {% endfor %} FROM o ) SELECT * FROM x fix_str: | WITH x AS ( SELECT o.p AS p {% for action in ["a", "b"] %} , n.campaign_count_{{ action }} {% endfor %} FROM o ) SELECT * FROM x test_pass_loop_indent_0: # NOTE: This biguqery test is designed to be a kind of base case # for the next few cases. Bigquery allows a trailing comma in a # select so is a good candidate for this. pass_str: | select {% for i in range(1, 3) %} 1, {% endfor %} from foo configs: core: dialect: bigquery test_pass_loop_indent_1: pass_str: | select {% for i in range(1, 3) %} {% for j in range(1, 3) %} 1 {% if not loop.last %},{% endif %} {% endfor %} {% if not loop.last %},{% endif %} {% endfor %} from foo test_pass_loop_indent_2: pass_str: | select {% for i in range(1, 3) %} 1 {% if not loop.last %},{% endif %} {% endfor %} from foo test_pass_loop_indent_3: pass_str: | select {% for i in range(1, 3) %} 1 {% if not loop.last %} , {% endif %} {% endfor %} from foo test_pass_loop_indent_4: pass_str: | select {% for i in range(1, 3) %} 1 {%- if not loop.last -%} , {%- endif -%} {% endfor %} from foo test_pass_loop_indent_5: pass_str: | select {% for i in range(1, 3) %} {% if not loop.first %},{% endif %} 1 {% endfor %} from foo test_pass_loop_indent_6: pass_str: | select {% for i in range(1, 3) %} {% if not loop.first %},{% endif %} {% for j in range(1, 3) %} {% if not loop.first %},{% endif %} 1 {% endfor %} {% endfor %} from foo test_fail_sqlite_update_returning: fail_str: | UPDATE foo SET updated = now() WHERE bar = $1 RETURNING updated; fix_str: | UPDATE foo SET updated = now() WHERE bar = $1 RETURNING updated; configs: core: dialect: sqlite test_fail_postgres_update_returning: fail_str: | UPDATE foo SET updated = now() WHERE bar = '' RETURNING updated; fix_str: | UPDATE foo SET updated = now() WHERE bar = '' RETURNING updated; configs: core: dialect: postgres test_fail_postgres_insert_returning: fail_str: | INSERT INTO foo (updated) VALUES (now()) RETURNING updated; fix_str: | INSERT INTO foo (updated) VALUES (now()) RETURNING updated; configs: core: dialect: postgres test_fail_postgres_on_conflict_returning: fail_str: | INSERT INTO foo ( id, bar ) VALUES ( $1, $2 ) ON CONFLICT (id) DO UPDATE SET bar = $2; fix_str: | INSERT INTO foo ( id, bar ) VALUES ( $1, $2 ) ON CONFLICT (id) DO UPDATE SET bar = $2; configs: core: dialect: postgres test_fail_ansi_update: fail_str: | UPDATE foo 
SET updated = now() WHERE bar = ''; fix_str: | UPDATE foo SET updated = now() WHERE bar = ''; configs: core: dialect: ansi test_fail_exasol_update: fail_str: | UPDATE foo SET updated = now() WHERE bar = ''; fix_str: | UPDATE foo SET updated = now() WHERE bar = ''; configs: core: dialect: exasol test_fail_mysql_update: fail_str: | UPDATE foo SET updated = now() WHERE bar = ''; fix_str: | UPDATE foo SET updated = now() WHERE bar = ''; configs: core: dialect: mysql test_fail_teradata_update: fail_str: | UPDATE foo SET updated = now() WHERE bar = ''; fix_str: | UPDATE foo SET updated = now() WHERE bar = ''; configs: core: dialect: teradata test_fail_tsql_update: fail_str: | UPDATE foo SET updated = now() WHERE bar = ''; fix_str: | UPDATE foo SET updated = now() WHERE bar = ''; configs: core: dialect: tsql test_jinja_value_error_6378: # https://github.com/sqlfluff/sqlfluff/issues/6378 pass_str: | CREATE OR REPLACE TRANSIENT TABLE TEST AS WITH abc AS ( SELECT spine.some_id AS some_id {% for cols in each_table_cols -%} {%- set src_loop = loop -%} {%- for c in cols -%} , t{{ src_loop.index }}."{{ c }}" {% endfor %} {%- endfor %} FROM tbl AS spine {% for t in tables %} LEFT JOIN {{ t }} AS t{{ loop.index }} ON spine.some_id = t{{ loop.index }}.some_id {% endfor %} ) SELECT * FROM abc; configs: core: templater: jinja indentation: template_blocks_indent: false templater: jinja: context: tables: ["tbl_a", "tbl_b", "tbl_c"] each_table_cols: [["aa", "ab", "ac"], ["ba", "bb", "bc"], ["ca", "cb", "cc"]] sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT02-tab-space.yml000066400000000000000000000030431503426445100253700ustar00rootroot00000000000000rule: LT02 spaces_pass_default: pass_str: "SELECT\n 1" spaces_fail: fail_str: "SELECT\n 1" fix_str: "SELECT\n\t1" configs: rules: indent_unit: tab spaces_fail_custom_tab_space_size: fail_str: "SELECT\n MAX(\n a\n )" fix_str: "SELECT\n\tMAX(\n\t\ta\n\t)" configs: rules: indent_unit: tab tab_space_size: 2 tabs_fail_default: fail_str: "SELECT\n\t\t1\n" fix_str: "SELECT\n 1\n" tabs_fail_default_set_tab_space_size: fail_str: "SELECT\n\t\t1\n" fix_str: "SELECT\n 1\n" configs: rules: tab_space_size: 2 tabs_pass: pass_str: "SELECT\n\t1" configs: rules: indent_unit: tab indented_comments: pass_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam indented_comments_default_config: fail_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam # The rule will only fix the indent before the select targets. # Here tab indent is replaced with spaces. fix_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam indented_comments_tab_config: fail_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam # The rule will only fix the indent before the select targets. # Here spaces indent is replaced with tab. fix_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam configs: rules: indent_unit: tab sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT03.yml000066400000000000000000000107001503426445100235320ustar00rootroot00000000000000rule: LT03 passes_on_before_default: pass_str: | select a + b from foo fails_on_after_default: fail_str: | select a + b from foo fix_str: | select a + b from foo fails_on_after_default_on_and: fail_str: | select a AND b from foo fix_str: | select a AND b from foo fails_on_after_with_comment_order_preserved: fail_str: | select a AND -- comment1! -- comment2! b from foo fix_str: | select a -- comment1! -- comment2! 
AND b from foo passes_on_before_explicit: pass_str: | select a + b from foo configs: &operator_after layout: type: binary_operator: line_position: leading comparison_operator: line_position: leading fails_on_after_explicit: fail_str: | select a + b from foo fix_str: | select a + b from foo configs: *operator_after fails_on_after_multi_explicit: fail_str: | SELECT * FROM foo WHERE (g > z) AND ( (a = 'a' AND b = 'b') OR (a = 'a' AND b = 'e') OR (a = 'c' AND b = 'g') OR (a = 'c' AND b = 'e') OR 1 = 1 ); fix_str: | SELECT * FROM foo WHERE (g > z) AND ( (a = 'a' AND b = 'b') OR (a = 'a' AND b = 'e') OR (a = 'c' AND b = 'g') OR (a = 'c' AND b = 'e') OR 1 = 1 ); configs: *operator_after fails_on_before_override: fail_str: | select a + b from foo fix_str: | select a + b from foo configs: &operator_before layout: type: binary_operator: line_position: trailing comparison_operator: line_position: trailing passes_on_after_override: pass_str: | select a + b from foo configs: *operator_before fails_on_before_override_with_comment_order: fail_str: | select a -- comment1! -- comment2! -- comment3! + b from foo fix_str: | select a + -- comment1! -- comment2! -- comment3! b from foo configs: *operator_before fails_on_after_override_with_comment_order: fail_str: | select a + -- comment1! -- comment2! -- comment3! b from foo fix_str: | select a -- comment1! -- comment2! -- comment3! + b from foo configs: *operator_after # Fix the different variations of problematic comments in a leading case. fails_leading_with_comments: fail_str: | SELECT 1 + /* foo */ 2, 1 + -- foo 2, 1 /* foo */ + 2, 1 /* foo */ + -- foo /* foo */ 2 fix_str: | SELECT 1 /* foo */ + 2, 1 -- foo + 2, 1 /* foo */ + 2, 1 /* foo */ -- foo /* foo */ + 2 configs: *operator_after # Fix the different variations of problematic comments in a trailing case. fails_trailing_with_comments: fail_str: | SELECT 1 + /* foo */ 2, 1 -- foo + 2, 1 /* foo */ + 2, 1 -- foo /* foo */ + /* foo */ 2 fix_str: | SELECT 1 + /* foo */ 2, 1 + -- foo 2, 1 + /* foo */ 2, 1 + -- foo /* foo */ /* foo */ 2 configs: *operator_before passes_templated_newline: pass_str: | {% macro binary_literal(expression) %} X'{{ expression }}' {% endmacro %} select * from my_table where a = {{ binary_literal("0000") }} fails_templated_code_non_templated_newline: fail_str: | {% macro binary_literal(expression) %} X'{{ expression }}' {% endmacro %} select * from my_table where a = {{ binary_literal("0000") }} passes_operator_alone_on_line: # Special case: An operator on a line by itself is always okay. pass_str: | SELECT 'asdf' || 'jklm' fixes_tuple_error_issue: # https://github.com/sqlfluff/sqlfluff/issues/4184 # NB: This one isn't fixable. fail_str: | select * from foo where c is not null and -- comment {% if true -%}a >= b and -- comment. 
{% endif %} true configs: indentation: template_blocks_indent: false sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT04.yml000066400000000000000000000176531503426445100235510ustar00rootroot00000000000000rule: LT04 leading_comma_violations: fail_str: | SELECT a , b FROM c fix_str: | SELECT a, b FROM c leading_comma_violation_with_inline_comment: fail_str: | SELECT a , b -- inline comment , c /* non inline comment */ , d FROM e fix_str: | SELECT a, b, -- inline comment c, /* non inline comment */ d FROM e leading_commas_allowed: pass_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading leading_commas_allowed_with_header: pass_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading leading_comma_violations_in_with_statement: fail_str: | WITH cte_1 as ( SELECT * FROM table_1 ) , cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 fix_str: | WITH cte_1 as ( SELECT * FROM table_1 ), cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 leading_commas_allowed_in_with_statement: pass_str: | WITH cte_1 as ( SELECT * FROM table_1 ) , cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 configs: layout: type: comma: line_position: leading trailing_comma_violations: fail_str: | SELECT a, b FROM c fix_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading trailing_commas_allowed: pass_str: | SELECT a, b FROM c trailing_comma_fixing_removes_extra_whitespace: fail_str: | SELECT field_1 , field_2 ,field_3, field_4, field_5 FROM a fix_str: | SELECT field_1, field_2, field_3, field_4, field_5 FROM a leading_comma_fixing_flows_around_comments: fail_str: | SELECT a.foo -- another comment , a.baz, -- another comment a.bar, -- This is an important comment with awkward line spacing a.foobar /* Which might also be followed by a multiline one */ , a.barfoo FROM a WHERE a.field4 in ( '1', '2', '3' ,'4' ) fix_str: | SELECT a.foo -- another comment , a.baz -- another comment , a.bar -- This is an important comment with awkward line spacing , a.foobar /* Which might also be followed by a multiline one */ , a.barfoo FROM a WHERE a.field4 in ( '1' , '2' , '3' ,'4' ) configs: layout: type: comma: line_position: leading trailing_comma_move_past_several_comment_lines: fail_str: | SELECT COUNT(1) AS campaign_count, state_user_v_peer_open ,business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. ,SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states fix_str: | SELECT COUNT(1) AS campaign_count, state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. 
SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states configs: core: # Set runaway_limit=2 to verify the fix only requires one pass. In an # earlier version, the comma before "SAFE_DIVIDE()" was being moved one # line per pass. Too lazy! runaway_limit: 2 leading_comma_move_past_several_comment_lines: fail_str: | SELECT COUNT(1) AS campaign_count ,state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states fix_str: | SELECT COUNT(1) AS campaign_count ,state_user_v_peer_open , business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. , SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states configs: core: # Set runaway_limit=2 to verify the fix only requires one pass. In an # earlier version for the trailing comma case, commas were being moved # "through" comment blocks one line per pass. Too lazy! runaway_limit: 2 layout: type: comma: line_position: leading leading_comma_with_templated_column_1: fail_str: | SELECT c1, {{ "c2" }} AS days_since FROM logs fix_str: | SELECT c1 , {{ "c2" }} AS days_since FROM logs configs: layout: type: comma: line_position: leading leading_comma_with_templated_column_2: pass_str: | SELECT c1 , {{ " c2" }} AS days_since FROM logs configs: layout: type: comma: line_position: leading trailing_comma_with_templated_column_1: fail_str: | SELECT {{ "c1" }} , c2 AS days_since FROM logs fix_str: | SELECT {{ "c1" }}, c2 AS days_since FROM logs trailing_comma_with_templated_column_2: pass_str: | SELECT {{ "c1 " }}, c2 AS days_since FROM logs leading_comma_fix_mixed_indent: # See: https://github.com/sqlfluff/sqlfluff/issues/4255 # NOTE: Undisturbed mixed indent. fail_str: | select B ,C from A fix_str: | select B, C from A configs: layout: type: comma: line_position: trailing trailing_comma_fix_mixed_indent: # See: https://github.com/sqlfluff/sqlfluff/issues/4255 # NOTE: Undisturbed mixed indent. 
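# (LT04 only relocates the comma; the pre-existing mix of tabs and spaces in
# the indent is deliberately left alone for LT02 to handle.)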
fail_str: | select B, C from A fix_str: | select B , C from A configs: layout: type: comma: line_position: leading trailing_comma_no_space_comment: fail_str: | SELECT a--comment , b FROM t fix_str: | SELECT a,--comment b FROM t sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT05.yml000066400000000000000000000574501503426445100235510ustar00rootroot00000000000000rule: LT05 test_pass_line_too_long_config_override: # Long lines (with config override) pass_str: "SELECT COUNT(*) FROM tbl\n" configs: core: max_line_length: 30 test_fail_line_too_long_with_comments_1: # Check we move comments correctly fail_str: "SELECT 1 -- Some Comment\n" fix_str: "-- Some Comment\nSELECT 1\n" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_1_after: # Check we move comments correctly fail_str: "SELECT 1 -- Some Comment\n" fix_str: "SELECT 1\n-- Some Comment\n" configs: core: max_line_length: 18 indentation: trailing_comments: after test_fail_line_too_long_with_comments_1_no_newline: # Check we move comments correctly, and that it # still works when there isn't a trailing newline. # https://github.com/sqlfluff/sqlfluff/issues/4386 fail_str: "SELECT 1 -- Some Comment" fix_str: "-- Some Comment\nSELECT 1" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_2: # Check we can add newlines after dedents (with an indent). # NOTE: That for LT05, we don't repair the initial indent # but that the following lines will be fixed as though it # has been corrected. Ideally LT02 would have been run _first_ # on this file. fail_str: " SELECT COUNT(*) FROM tbl\n" fix_str: " SELECT COUNT(*)\nFROM tbl\n" configs: core: max_line_length: 20 test_fail_line_too_long_with_comments_3: # Check priority of fixes fail_str: "SELECT COUNT(*) FROM tbl -- Some Comment\n" fix_str: "-- Some Comment\nSELECT COUNT(*)\nFROM tbl\n" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_4: # In this case, the inline comment is NOT on a line by itself (note the # leading comma), but even if we move it onto a line by itself, it's still # too long. In this case, the rule should do nothing, otherwise it triggers # an endless cycle of "fixes" that simply keeps adding blank lines. fail_str: | SELECT c1 ,-- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. c2 configs: core: max_line_length: 80 test_pass_line_too_long_with_comments_ignore_comment_lines: # Same case as above, but should pass as ignore_comment_lines is set to true pass_str: | SELECT c1 ,-- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. 
c2 configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: true test_fail_line_too_long_only_comments: # Check long lines that are only comments are linted correctly fail_str: "-- Some really long comments on their own line\n\nSELECT 1" configs: core: max_line_length: 18 test_fail_line_too_long_handling_indents: # Check we handle indents nicely fail_str: "SELECT 12345\n" fix_str: "SELECT\n 12345\n" configs: core: max_line_length: 10 test_pass_line_too_long_ignore_comments_true: # Check we can ignore comments if we want pass_str: "SELECT 1\n-- Some long comment over 10 characters\n" configs: core: max_line_length: 10 rules: layout.long_lines: ignore_comment_lines: true test_pass_line_too_long_ignore_comments_false: # Check we still pick up long comments if we don't want to ignore fail_str: "SELECT 1\n-- Some long comment over 10 characters\n" configs: core: max_line_length: 10 rules: layout.long_lines: ignore_comment_lines: false test_compute_line_length_before_template_expansion_1: # Line 3 is fine before expansion. Too long after expansion is NOT considered # a violation. pass_str: | SELECT user_id FROM `{{bi_ecommerce_orders}}` {{table_at_job_start}} configs: core: dialect: bigquery templater: jinja: context: table_at_job_start: FOR SYSTEM_TIME AS OF CAST('2021-03-02T01:22:59+00:00' AS TIMESTAMP) bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders test_compute_line_length_before_template_expansion_2: # Line 3 is too long before expansion. It's fine after expansion, but the rule # does not look at that. fail_str: | SELECT user_id FROM `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}} fix_str: | SELECT user_id FROM `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}} violations_after_fix: # Even after fixing, the final line is still too long. - description: Line is too long (86 > 80). name: layout.long_lines warning: false fixes: [] start_line_no: 4 start_line_pos: 9 start_file_pos: 78 end_line_no: 4 end_line_pos: 11 end_file_pos: 80 configs: core: dialect: bigquery templater: jinja: context: bi_ecommerce_orders_bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders table_alias_table_alias_table_alias_table_alias_table_alias_table_alias: t test_long_jinja_comment: fail_str: | SELECT * {# comment #} {# ........................................................................... #} FROM table configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: false test_long_jinja_comment_ignore: # A Jinja comment is a comment. pass_str: | SELECT * {# comment #} {# ........................................................................... 
#} FROM table configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: true test_for_loop: # A Jinja for loop pass_str: | {% for elem in 'foo' %} SELECT '{{ elem }}' FROM table1; SELECT '{{ elem }}' FROM table2; {% endfor %} test_for_loop_repeating_elements_starts_with_literal: # A Jinja for loop with repeating elements (that are difficult to match) # but starting with a literal that can be used to match pass_str: | {% set elements = 'foo' %} SELECT CASE {% for elem in elements %} WHEN '{{ elem }}' = '' THEN 1 WHEN '{{ elem }}' = '' THEN 1 {% endfor %} END test_for_loop_starting_with_templated_piece: # A Jinja for loop starting with non-literals # But unique parts can be used to match pass_str: | {% set elements = 'foo' %} {% set when = 'WHEN' %} SELECT CASE {% for elem in elements %} {{ when }} '{{ elem }}' = '' THEN 1 {{ when }} '{{ elem }}' = '' THEN 2 {% endfor %} END test_for_loop_fail_complex_match: # A Jinja for loop starting with non-literals # But non-unique parts which therefore cannot # be used to match pass_str: | {% set elements = 'foo' %} {% set when = 'WHEN' %} SELECT CASE {% for elem in elements %} {{ when }} '{{ elem }}' = '' THEN 1 {{ when }} '{{ elem }}' = '' THEN 1 {% endfor %} END test_for_loop_fail_simple_match: # If for loop only contains literals it should still pass pass_str: | {% set elements = 'foo' %} SELECT CASE {% for elem in elements %} WHEN 'f' THEN a {% endfor %} END test_set_statement: # A Jinja set statement pass_str: | {% set statement = "SELECT 1 from table1;" %} {{ statement }}{{ statement }} configs: core: max_line_length: 80 test_issue_1666_line_too_long_unfixable_jinja: # Note the trailing space at the end of line 1. This is a necessary part of # the test, because the space (which is passed through to the output) was # "tricking" LT05 into trying to split the line, then encountering an internal # error. 
fail_str: "{{ config (schema='bronze', materialized='view', sort =['id','number'], dist = 'all', tags =['longlonglonglonglong']) }} \n\nselect 1\n" test_fail_ignore_comment_clauses_1: # Too long, comment clause not ignored fail_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) test_fail_ignore_comment_clauses_2: # Too long even after ignoring comment clause fail_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', colaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccddddddddddddddddeeeeeeeeeeeeeee2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) configs: rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses: pass_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) configs: rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_teradata: pass_str: | comment on table sandbox_db.Org_Descendant is 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'; configs: core: dialect: teradata rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_exasol: pass_str: | CREATE TABLE IF NOT EXISTS SCHEM.TAB ( ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name' ) COMMENT IS 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'; configs: core: dialect: exasol rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_snowflake: pass_str: | CREATE TABLE foo_table (bar INTEGER) COMMENT = 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length' configs: core: dialect: snowflake rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_postgres: pass_str: | CREATE TABLE IF NOT EXISTS foo ( id UUID DEFAULT uuid_generate_v4() PRIMARY KEY, name TEXT NOT NULL ); COMMENT ON TABLE foo IS 'Windows Phone 8, however, was never able to overcome a long string of disappointments for Microsoft. '; configs: core: dialect: postgres rules: layout.long_lines: ignore_comment_clauses: true test_fail_templated_comment_line: fail_str: | SELECT * {# ........................................................................... #} FROM table configs: templater: jinja: context: {} test_pass_ignore_templated_comment_lines: # NOTE: This is potentially a behaviour change in 2.0.0. 
# This was erroneously using the `ignore_comment_clauses` # config when this query contains no comment clauses. pass_str: | SELECT * {# ........................................................................... #} FROM table configs: rules: layout.long_lines: ignore_comment_lines: true templater: jinja: context: {} test_fail_operator_precedence_1: # Make sure we split at the + operator. fail_str: | select ISNULL(count, '0') * 10000 + ISNULL(planned, 100) from blah fix_str: | select ISNULL(count, '0') * 10000 + ISNULL(planned, 100) from blah configs: core: max_line_length: 30 test_fail_operator_precedence_2: # Make sure we split at the AND operator. fail_str: | select recommendation_list[ORDINAL(1)] = 'uses_small_subject_line' AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64) from blah fix_str: | select recommendation_list[ORDINAL(1)] = 'uses_small_subject_line' AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64) from blah configs: core: max_line_length: 120 test_fail_operator_precedence_3: # Stretching cases for operators and comma fail_str: | select a, b + c, long_name + long_name * long_name - long_name as foo, long_name AND long_name OR long_name OR long_name as bar from blah fix_str: | select a, b + c, long_name + long_name * long_name - long_name as foo, long_name AND long_name OR long_name OR long_name as bar from blah configs: core: max_line_length: 30 test_pass_long_multiline_jinja: # None of the lines are longer than 30 # but the whole tag is. It shouldn't # cause issues. pass_str: | select {{ 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 }} from blah configs: core: max_line_length: 30 test_fail_long_inline_statement: # Tests that breaks happen between clauses properly fail_str: | select distinct a + b from c join d using (e) where f = g and h = i order by j fix_str: | select distinct a + b from c join d using (e) where f = g and h = i order by j configs: core: max_line_length: 50 test_pass_check_off_1: # Tests that we can disable the check (using 0). pass_str: | select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl configs: core: max_line_length: 0 test_pass_check_off_2: # Tests that we can disable the check (using -1). pass_str: | select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl configs: core: max_line_length: -1 test_comment_move_mid_query: fail_str: | select my_long_long_line as foo -- with some comment from foo fix_str: | select -- with some comment my_long_long_line as foo from foo configs: core: max_line_length: 40 test_fix_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. 
fail_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field fix_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field configs: core: max_line_length: 45 indentation: allow_implicit_indents: true test_fix_no_implicit_indent: # Test explicitly preventing implicit indents. fail_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field fix_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field configs: core: max_line_length: 45 indentation: allow_implicit_indents: false test_fix_window_function: # https://github.com/sqlfluff/sqlfluff/issues/4292 fail_str: | select * from t qualify a = coalesce( first_value(iff(b = 'none', null, a)) ignore nulls over (partition by c order by d desc), first_value(a) respect nulls over (partition by c order by d desc) ) fix_str: | select * from t qualify a = coalesce( first_value( iff(b = 'none', null, a) ) ignore nulls over (partition by c order by d desc), first_value(a) respect nulls over (partition by c order by d desc) ) configs: core: max_line_length: 50 dialect: snowflake test_fail_do_not_fix_noqa: # https://github.com/sqlfluff/sqlfluff/issues/4248 # NOTE: No fix_str, because this should be unfixable. fail_str: | SELECT col1, col2, col3 FROM really_really_really_really_really_really_long_schema_name.TABLE1 -- noqa: L014 test_block_comment_single_line_noqa: # Ignore long line in block comment pass_str: | /* A very long single-line block comment which is ignored --noqa: LT05 */ /* Another single-line block comment which is also ignored. I'm too lazy to specify which rule to ignore though. -- noqa */ /* A very long multi-line block comment which is ignored -- noqa: LT05 */ /* noqa: disable=all */ -- A noqa by itself in a block comment is followed /* noqa: enable=all */ /* -- noqa: disable=all */ -- A noqa in a block comment preceded by a single line comment marker (--) is followed /* noqa: enable=all */ /* Ignore warnings for a bit! -- noqa: disable=all */ -- If the noqa is accompanied by preceding text in a block comment it -- must be preceded by a single-line comment marker, and it must be -- the last part of the line. 
/* noqa: enable=all */ select 1 configs: core: max_line_length: 30 test_fail_block_comment_single_line_noqa: # Don't ignore long line in single-line block comment fail_str: | /* A very long single-line block comment which is not ignored */ configs: core: max_line_length: 30 test_fail_block_comment_multi_line_noqa: # Don't ignore long line in multi-line block comment fail_str: | /* A very long multi-line block comment which is not ignored */ configs: core: max_line_length: 30 test_fail_block_comment_noqa_following_preceding_text_noqa: # Don't follow noqa if it follows preceding text and is not # immediately preceded by a single line comment marker (--) fail_str: | /* This won't work: noqa: disable=all */ -- A noqa will NOT be followed if it follows preceding text, and is -- not immediately preceded by a single-line comment marker (--). /* noqa: enable=all */ configs: core: max_line_length: 30 test_fail_block_comment_noqa_not_at_end_of_line_noqa: # Don't follow noqa if it is not at the end of the line fail_str: | /* -- noqa: disable=all Invalid noqa declaration */ -- A noqa will NOT be followed if it is not the last part of the line. /* noqa: enable=all */ select 1 configs: core: max_line_length: 55 test_operator_precedence: fail_str: | SELECT * FROM foo left join abcdef_abcd_details on foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1 fix_str: | SELECT * FROM foo left join abcdef_abcd_details on foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1 configs: core: max_line_length: 100 dialect: snowflake test_long_functions_and_aliases: # https://github.com/sqlfluff/sqlfluff/issues/4033 fail_str: | SELECT my_function(col1 + col2, arg2, arg3) over (partition by col3, col4 order by col5 rows between unbounded preceding and current row) as my_relatively_long_alias, my_other_function(col6, col7 + col8, arg4) as my_other_relatively_long_alias, my_expression_function(col6, col7 + col8, arg4) = col9 + col10 as another_relatively_long_alias FROM my_table fix_str: | SELECT my_function(col1 + col2, arg2, arg3) over ( partition by col3, col4 order by col5 rows between unbounded preceding and current row ) as my_relatively_long_alias, my_other_function(col6, col7 + col8, arg4) as my_other_relatively_long_alias, my_expression_function(col6, col7 + col8, arg4) = col9 + col10 as another_relatively_long_alias FROM my_table test_order_by_rebreak_span: # This tests that we can correctly rebreak "order by" expressions. fail_str: | select * from ( select tbl1.*, row_number() over ( partition by tbl1.the_name, {{ ['a', 'b', 'c', 'd'] | join(', ') }} order by created_at desc ) rnk from foo inner join tbl2 on tbl1.the_name = tbl2.the_name ) fix_str: | select * from ( select tbl1.*, row_number() over ( partition by tbl1.the_name, {{ ['a', 'b', 'c', 'd'] | join(', ') }} order by created_at desc ) rnk from foo inner join tbl2 on tbl1.the_name = tbl2.the_name ) test_trailing_semicolon_moves: # This checks that we don't move the semicolon or the comma. fail_str: | SELECT my_very_long_field, FROM foo ORDER BY my_very_long_field; fix_str: | SELECT my_very_long_field, FROM foo ORDER BY my_very_long_field; configs: core: dialect: bigquery max_line_length: 20 # After fixing there are still issues, but we're still keeping # the comma and semicolon where they are. violations_after_fix: - description: Line is too long (23 > 20).
name: layout.long_lines warning: false fixes: [] start_line_no: 2 start_line_pos: 5 start_file_pos: 11 end_line_no: 2 end_line_pos: 23 end_file_pos: 29 - description: Line is too long (23 > 20). name: layout.long_lines warning: false fixes: [] start_line_no: 5 start_line_pos: 5 start_file_pos: 53 end_line_no: 5 end_line_pos: 23 end_file_pos: 71 test_pass_window_function: # Test that we don't flag too eagerly on window functions. pass_str: | select col, rank() over ( partition by a, b, c order by d desc ) as rnk from foo test_fail_no_fix_long_templated: # Test we fail but don't try and fix a long templated line fail_str: | select '{{ "', '".join(["foo", "bar", "whatever", "whatever", "whatever", "whatever"]) }}' sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT06.yml000066400000000000000000000005261503426445100235420ustar00rootroot00000000000000rule: LT06 passing_example: pass_str: SELECT SUM(1) passing_example_window_function: pass_str: SELECT AVG(c) OVER (PARTITION BY a) simple_fail: fail_str: SELECT SUM (1) fix_str: SELECT SUM(1) complex_fail_1: fail_str: SELECT SUM /* SOMETHING */ (1) complex_fail_2: fail_str: | SELECT SUM -- COMMENT (1) sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT07.yml000066400000000000000000000053301503426445100235410ustar00rootroot00000000000000rule: LT07 test_pass_with_clause_closing_aligned: # with statement indentation pass_str: | with cte as ( select 1 ) select * from cte test_pass_with_clause_closing_aligned_whitespace_consumption_a: pass_str: | with cte as ( select 1 {{- ' from i_consume_whitespace ' -}} ) select * from cte test_pass_with_clause_closing_aligned_whitespace_consumption_b: pass_str: | with cte as ( select 1 {#- I'm a comment which consumes whitespace -#} ) select * from cte test_pass_with_clause_closing_aligned_whitespace_consumption_c: pass_str: | with cte as ( select 1 {%- if False -%}{%- endif -%} ) select * from cte test_fix_with_clause_closing_aligned_whitespace_consumption_d: fail_str: | with cte as ( select 1 {%- if False -%}{%- endif -%}) select * from cte fix_str: | with cte as ( select 1 {%- if False -%}{%- endif -%} ) select * from cte test_pass_with_clause_closing_oneline: # with statement oneline pass_str: with cte as (select 1) select * from cte test_pass_with_clause_closing_misaligned_indentation: # Fix with statement indentation pass_str: | with cte as ( select 1 ) select * from cte test_pass_with_clause_closing_misaligned_negative_indentation: # Fix with statement that has negative indentation pass_str: |2 with cte as ( select 1 ) select * from cte test_move_parenthesis_to_next_line: fail_str: | with cte_1 as ( select foo from tbl_1) -- Foobar select cte_1.foo from cte_1 fix_str: | with cte_1 as ( select foo from tbl_1 ) -- Foobar select cte_1.foo from cte_1 test_pass_cte_with_column_list: # Issue 2851: Ignore the CTE column list, only check the query. 
pass_str: | with search_path (node_ids, total_time) as ( select 1 ) select * from search_path test_pass_with_clause_closing_misaligned_indentation_in_templated_block: pass_str: | with {% if true %} cte as ( select 1 ) {% else %} cte as ( select 2 ) {% endif %} select * from cte test_move_parenthesis_to_next_line_in_templated_block: fail_str: | with {% if true %} cte as ( select 1) {% endif %} select * from cte fix_str: | with {% if true %} cte as ( select 1 ) {% endif %} select * from cte test_pass_templated_clauses: pass_str: | with {% for tbl in ['a', 'b'] %} {{ tbl }} as ( SELECT 1 ), {% endfor %} final as ( SELECT 1 ) select * from final join a using (x) join b using (x) sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT08.yml000066400000000000000000000105751503426445100235510ustar00rootroot00000000000000rule: LT08 test_pass_blank_line_after_cte_trailing_comma: # Test cases for LT08, both leading and trailing commas. pass_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_pass_blank_line_after_cte_leading_comma: pass_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_each_cte: fail_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_cte_before_comment: fail_str: | with my_cte as ( select 1 ), -- Comment other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), -- Comment other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_cte_and_comment: # Issue #2136 fail_str: | WITH mycte AS ( SELECT col FROM my_table ) /* cte comment */ SELECT col FROM mycte fix_str: | WITH mycte AS ( SELECT col FROM my_table ) /* cte comment */ SELECT col FROM mycte test_fail_no_blank_line_after_last_cte_trailing_comma: fail_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_last_cte_leading_comma: fail_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_oneline_cte_leading_comma: # Fixes oneline cte with leading comma style fail_str: | with my_cte as (select 1), other_cte as (select 1) select * from my_cte cross join other_cte fix_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte # NOTE: we're using the global comma position config configs: layout: type: comma: line_position: leading test_fail_cte_floating_comma: # Fixes cte with a floating comma fail_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte fix_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte test_pass_column_name_definition: # Issue #2136 pass_str: | with recursive t(n) as ( select 1 union all select n + 1 from t ) select n from t limit 100; test_pass_column_name_definition_multiple: # Issue #3474 pass_str: | WITH cte_1 AS ( SELECT 1 AS var ), cte_2 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; 
test_fail_column_name_definition_newline: fail_str: | WITH cte_1 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; fix_str: | WITH cte_1 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; test_fail_column_name_definition_comment: fail_str: | WITH cte_1 (var) AS /* random comment */ ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; fix_str: | WITH cte_1 (var) AS /* random comment */ ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; test_pass_recursive_with_argument_list: pass_str: | WITH RECURSIVE my_cte (n) AS ( select 1 ) select * from my_cte test_pass_recursive_with_argument_list_postgres: pass_str: | WITH RECURSIVE my_cte (n) AS ( select 1 ) select * from my_cte configs: core: dialect: postgres sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT09.yml000066400000000000000000000171051503426445100235460ustar00rootroot00000000000000rule: LT09 test_single_select_target_and_no_newline_between_select_and_select_target: pass_str: select a from x test_single_wildcard_select_target_and_no_newline_between_select_and_select_target_1: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_no_newline_between_select_and_select_target_2: pass_str: | select * from x test_single_select_target_and_newline_after_select_target_1: pass_str: | select * from x test_single_select_target_and_newline_after_select_target_2: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_select_target_and_newline_before_select_target: fail_str: | select a from x fix_str: | select a from x test_multiple_select_targets_on_newlines_and_newline_after_select: pass_str: | select a, b, c from x test_single_wildcard_select_target_and_newline_before_select_target_1: pass_str: | select * from x test_single_wildcard_select_target_and_newline_before_select_target_2: pass_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_1: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_2: fail_str: | select * from x fix_str: | select * from x test_multiple_select_targets_all_on_the_same_line: fail_str: | select a, b, c from x fix_str: "select\na,\nb,\nc\nfrom x\n" test_multiple_select_targets_all_on_the_same_line_plus_from_clause: fail_str: | select a, b, c from x fix_str: "select\na,\nb,\nc\nfrom x\n" test_multiple_select_targets_including_wildcard_all_on_the_same_line_plus_from_clause: fail_str: | select *, b, c from x fix_str: "select\n*,\nb,\nc\nfrom x\n" test_multiple_select_target_plus_from_clause_on_the_same_line: fail_str: | select a, b, c from x fix_str: | select a, b, c from x test_multiple_select_targets_trailing_whitespace_after_select: # TRICKY: Use explicit newlines to preserve the trailing space after "SELECT". pass_str: "SELECT \n a,\n b\nFROM t\n" test_single_select_with_comment_after_select: # Currently not autofixed because dealing with the comment is tricky. # Could be supported later. fail_str: | SELECT --some comment a test_comment_between_select_and_single_select_target: fail_str: | SELECT -- This is the user's ID. user_id FROM safe_user fix_str: | SELECT user_id -- This is the user's ID. 
FROM safe_user test_multiple_select_targets_some_newlines_missing_1: fail_str: | select a, b, c, d, e, f, g, h from x # The spaces before a, d, and h look odd, but these are places where the # select targets were already on a separate line, and the rule made no # changes. fix_str: | select a, b, c, d, e, f, g, h from x test_multiple_select_targets_some_newlines_missing_2: fail_str: | select a, b, c, d, e, f, g, h from x # The spaces before d, and h look odd, but these are places where the # select targets were already on a separate line, and the rule made no # changes. fix_str: | select a, b, c, d, e, f, g, h from x test_cte: fail_str: | WITH cte1 AS ( SELECT c1 AS c FROM t ) SELECT 1 FROM cte1 fix_str: | WITH cte1 AS ( SELECT c1 AS c FROM t ) SELECT 1 FROM cte1 test_single_newline_no_from: fail_str: | SELECT id fix_str: | SELECT id test_single_distinct_no_from: fail_str: | SELECT DISTINCT id fix_str: | SELECT DISTINCT id test_distinct_many: fail_str: | SELECT distinct a, b, c FROM my_table fix_str: | SELECT distinct a, b, c FROM my_table test_distinct_single_pass: pass_str: | SELECT distinct a FROM my_table test_distinct_single_fail_a: fail_str: | SELECT distinct a FROM my_table fix_str: | SELECT distinct a FROM my_table test_distinct_single_fail_b: fail_str: | SELECT distinct a FROM my_table fix_str: | SELECT distinct a FROM my_table test_single_select_with_no_from: fail_str: "SELECT\n 10000000\n" fix_str: "SELECT 10000000\n" test_single_select_with_no_from_previous_comment: fail_str: "SELECT\n /* test */ 10000000\n" fix_str: "SELECT 10000000 /* test */\n" test_single_select_with_comment_after_column: fail_str: | SELECT 1 -- this is a comment FROM my_table fix_str: | SELECT 1 -- this is a comment FROM my_table test_single_select_with_comment_after_column_no_space: fail_str: | SELECT 1-- this is a comment FROM my_table fix_str: | SELECT 1 -- this is a comment FROM my_table test_single_select_with_multiple_mixed_comments: fail_str: | SELECT -- previous comment 1 -- this is a comment FROM my_table fix_str: | SELECT 1 -- previous comment -- this is a comment FROM my_table test_single_select_with_comment_before: fail_str: | SELECT /* comment before */ 1 FROM my_table fix_str: | SELECT 1 /* comment before */ FROM my_table test_create_view: fail_str: | CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); fix_str: | CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); test_multiline_single: # https://github.com/sqlfluff/sqlfluff/issues/4516 pass_str: | SELECT SUM( 1 + SUM( 2 + 3 ) ) AS col FROM test_table test_multiline_expressions: # NOTE: LT09 doesn't fix the indentation, so that may still look strange here, # but we should make sure we're inserting new line breaks in the right places.
# https://github.com/sqlfluff/sqlfluff/issues/5258 fail_str: | SELECT a, b1, b2, COUNT(DISTINCT id) AS c1, COUNT(DISTINCT name) AS c2, COUNT(DISTINCT city) AS c3, COUNT( DISTINCT id) AS d1, COUNT(DISTINCT name) AS d2, COUNT(DISTINCT city) AS d3, COUNT(DISTINCT id) AS e1, COUNT(DISTINCT name) AS e2, COUNT(DISTINCT city) AS e3 FROM some_table; fix_str: | SELECT a, b1, b2, COUNT(DISTINCT id) AS c1, COUNT(DISTINCT name) AS c2, COUNT(DISTINCT city) AS c3, COUNT( DISTINCT id) AS d1, COUNT(DISTINCT name) AS d2, COUNT(DISTINCT city) AS d3, COUNT(DISTINCT id) AS e1, COUNT(DISTINCT name) AS e2, COUNT(DISTINCT city) AS e3 FROM some_table; test_pass_leading_commas: # https://github.com/sqlfluff/sqlfluff/issues/5329 # NOTE: We shouldn't even need to configure the leading commas here. # Commas shouldn't be accounted for in whether elements are on the same line. pass_str: | select a , b , c sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT10.yml000066400000000000000000000022161503426445100235330ustar00rootroot00000000000000rule: LT10 test_fail_distinct_on_next_line_1: fail_str: | SELECT DISTINCT user_id, list_id FROM safe_user fix_str: | SELECT DISTINCT user_id, list_id FROM safe_user test_fail_distinct_on_next_line_2: fail_str: | SELECT -- The table contains duplicates, so we use DISTINCT. DISTINCT user_id FROM safe_user fix_str: | SELECT DISTINCT -- The table contains duplicates, so we use DISTINCT. user_id FROM safe_user test_fail_distinct_on_next_line_3: fail_str: | select distinct abc, def from a; fix_str: | select distinct abc, def from a; test_fail_distinct_on_next_line_4: fail_str: | CREATE OR REPLACE TABLE myschema.mytable AS ( SELECT DISTINCT cola , colb FROM myschema.mytable ); fix_str: | CREATE OR REPLACE TABLE myschema.mytable AS ( SELECT DISTINCT cola , colb FROM myschema.mytable ); test_pass_distinct_on_same_line_with_select: pass_str: SELECT DISTINCT user_id FROM safe_user sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT11.yml000066400000000000000000000044301503426445100235340ustar00rootroot00000000000000rule: LT11 test_fail_simple_fix_union_all_before: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_fail_simple_fix_union_all_after: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_fail_simple_fix_union_all_before_and_after: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_pass_multiple_newlines_are_allowed: pass_str: | SELECT 'a' UNION ALL SELECT 'b' # The autofix of LT11 doesn't respect indentation of the surrounding query. # Hence, the fix result of LT11 alone looks ugly. But LT02 will fix the indentation # in a second step. # See the test below.
test_fail_fix_works_in_subqueries: fail_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) fix_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) # Test autofix after LT02 passes LT11 test_pass_fix_works_in_subqueries_after_LT02_fix: pass_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) test_fail_simple_fix_union_before_and_after: fail_str: | SELECT 'a' UNION SELECT 'b' fix_str: | SELECT 'a' UNION SELECT 'b' test_fail_simple_fix_intersect_before_and_after: fail_str: | SELECT 'a' INTERSECT SELECT 'b' fix_str: | SELECT 'a' INTERSECT SELECT 'b' test_fail_simple_fix_except_before_and_after: fail_str: | SELECT 'a' EXCEPT SELECT 'b' fix_str: | SELECT 'a' EXCEPT SELECT 'b' test_fail_simple_fix_minus_before_and_after: fail_str: | SELECT 'a' EXCEPT SELECT 'b' fix_str: | SELECT 'a' EXCEPT SELECT 'b' test_fail_simple_fix_bigquery_intersect_distinct_before_and_after: fail_str: | SELECT 'a' INTERSECT DISTINCT SELECT 'b' fix_str: | SELECT 'a' INTERSECT DISTINCT SELECT 'b' configs: core: dialect: bigquery # NOTE: We used to exclude TSQL from fixing these queries, but # the reflow logic now enables this. sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT12.yml000066400000000000000000000035441503426445100235420ustar00rootroot00000000000000rule: LT12 test_pass_single_final_newline: pass_str: "SELECT foo FROM bar\n" test_fail_no_final_newline: fail_str: "SELECT foo FROM bar" fix_str: "SELECT foo FROM bar\n" test_fail_multiple_final_newlines: fail_str: "SELECT foo FROM bar\n\n" fix_str: "SELECT foo FROM bar\n" test_pass_templated_plus_raw_newlines: pass_str: "{{ '\n\n' }}\n" test_pass_templated_consume_whitespace: pass_str: "select * from {{ 'trim_whitespace_table' -}}\n" test_fail_templated_consume_whitespace_nothing: fail_str: "select * from {{ 'trim_whitespace_table' -}}" fix_str: "select * from {{ 'trim_whitespace_table' -}}\n" test_fail_templated_consume_whitespace_whitespace: fail_str: "select * from {{ 'trim_whitespace_table' -}} " fix_str: "select * from {{ 'trim_whitespace_table' -}}\n" test_fail_templated_consume_whitespace_extra: fail_str: "select * from {{ 'trim_whitespace_table' -}}\n\n\n" fix_str: "select * from {{ 'trim_whitespace_table' -}}\n" test_fail_templated_plus_raw_newlines: fail_str: "{{ '\n\n' }}" fix_str: "{{ '\n\n' }}\n" test_fail_templated_plus_raw_newlines_extra_newline: fail_str: "{{ '\n\n' }}\n\n" fix_str: "{{ '\n\n' }}\n" test_pass_templated_macro_newlines: # Tricky because the rendered code ends with two newlines: # - Literal newline inserted by the macro # - Literal newline at the end of the file # The slicing algorithm should treat the first newline as "templated" because # it was inserted when *expanding* the templated macro call. pass_str: | {% macro get_keyed_nulls(columns) %} {{ columns }} {% endmacro %} SELECT {{ get_keyed_nulls("other_id") }} test_fail_templated_no_newline: # Tricky because there's no newline at the end of the file (following the # templated code). 
fail_str: "{% if true %}\nSELECT 1 + 1\n{%- endif %}" fix_str: "{% if true %}\nSELECT 1 + 1\n{%- endif %}\n" sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT13.yml000066400000000000000000000033511503426445100235370ustar00rootroot00000000000000rule: LT13 test_pass_leading_whitespace_statement: pass_str: "SELECT foo FROM bar\n" test_pass_leading_whitespace_comment: pass_str: "/*I am a comment*/\nSELECT foo FROM bar\n" test_pass_leading_whitespace_inline_comment: pass_str: "--I am a comment\nSELECT foo FROM bar\n" test_pass_leading_whitespace_inline_comment_hash: pass_str: "# I am a comment\nSELECT foo FROM bar\n" configs: core: dialect: bigquery test_pass_leading_whitespace_jinja_comment: pass_str: "{# I am a comment #}\nSELECT foo FROM bar\n" test_pass_leading_whitespace_jinja_if: pass_str: "{% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n" test_pass_leading_whitespace_jinja_for: pass_str: "{% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n" test_fail_leading_whitespace_statement: fail_str: "\n SELECT foo FROM bar\n" fix_str: "SELECT foo FROM bar\n" test_fail_leading_whitespace_comment: fail_str: "\n /*I am a comment*/\nSELECT foo FROM bar\n" fix_str: "/*I am a comment*/\nSELECT foo FROM bar\n" test_fail_leading_whitespace_inline_comment: fail_str: "\n --I am a comment\nSELECT foo FROM bar\n" fix_str: "--I am a comment\nSELECT foo FROM bar\n" test_fail_leading_whitespace_jinja_comment: fail_str: "\n {# I am a comment #}\nSELECT foo FROM bar\n" fix_str: "{# I am a comment #}\nSELECT foo FROM bar\n" test_fail_leading_whitespace_jinja_if: fail_str: "\n {% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n" fix_str: "{% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n" test_fail_leading_whitespace_jinja_for: fail_str: "\n {% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n" fix_str: "{% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n" sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT14.yml000066400000000000000000000266571503426445100235560ustar00rootroot00000000000000rule: LT14 test_pass_leading_from_clause: pass_str: | SELECT foo FROM bar configs: layout: type: from_clause: keyword_line_position: leading test_pass_alone_from_clause: pass_str: | SELECT foo FROM bar configs: layout: type: from_clause: keyword_line_position: alone test_fail_leading_from_clause: fail_str: | SELECT foo FROM bar fix_str: | SELECT foo FROM bar configs: layout: type: from_clause: keyword_line_position: leading test_fail_alone_from_clause: fail_str: | SELECT foo FROM bar fix_str: | SELECT foo FROM bar configs: layout: type: from_clause: keyword_line_position: alone test_pass_leading_join_clause: pass_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id INNER JOIN qux c ON a.id = c.id LEFT OUTER JOIN quux d ON a.id = d.id configs: layout: type: join_clause: keyword_line_position: leading test_fail_leading_join_clause: fail_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id INNER JOIN qux c ON a.id = c.id LEFT OUTER JOIN quux d ON a.id = d.id fix_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id INNER JOIN qux c ON a.id = c.id LEFT OUTER JOIN quux d ON a.id = d.id configs: layout: type: join_clause: keyword_line_position: leading test_pass_alone_join_clause: pass_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: alone test_fail_alone_join_clause: fail_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id INNER JOIN qux c ON a.id = c.id LEFT OUTER JOIN quux d 
ON a.id = d.id fix_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id INNER JOIN qux c ON a.id = c.id LEFT OUTER JOIN quux d ON a.id = d.id configs: layout: type: join_clause: keyword_line_position: alone test_pass_leading_join_clause_on_condition: pass_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: leading join_on_condition: keyword_line_position: leading test_fail_leading_join_clause_on_condition: fail_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id fix_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: leading join_on_condition: keyword_line_position: leading test_pass_alone_join_clause_on_condition: pass_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: alone join_on_condition: keyword_line_position: alone test_pass_leading_join_clause_trailing_on_condition: pass_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: leading join_on_condition: keyword_line_position: trailing test_fail_leading_join_clause_trailing_on_condition: fail_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id fix_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: leading join_on_condition: keyword_line_position: trailing test_fail_alone_join_clause_on_condition: fail_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id fix_str: | SELECT foo FROM bar a JOIN baz b ON a.id = b.id configs: layout: type: join_clause: keyword_line_position: alone join_on_condition: keyword_line_position: alone test_pass_leading_window_function: pass_str: | SELECT a, b, ROW_NUMBER() OVER ( PARTITION BY c ORDER BY d ) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: partitionby_clause: keyword_line_position: leading orderby_clause: keyword_line_position: leading test_pass_leading_window_comment_function: pass_str: | SELECT a, b, ROW_NUMBER() OVER ( /* comment here */ PARTITION BY c ORDER BY d ) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: partitionby_clause: keyword_line_position: leading orderby_clause: keyword_line_position: leading test_fail_leading_window_function: fail_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h fix_str: | SELECT a, b, ROW_NUMBER() OVER ( PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: partitionby_clause: keyword_line_position: leading orderby_clause: keyword_line_position: leading keyword_line_position_exclusions: None test_pass_alone_window_function: pass_str: | SELECT a, b, ROW_NUMBER() OVER ( PARTITION BY c ORDER BY d ) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: partitionby_clause: keyword_line_position: alone orderby_clause: keyword_line_position: alone test_fail_alone_window_function: fail_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h fix_str: | SELECT a, b, ROW_NUMBER() OVER ( PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: partitionby_clause: keyword_line_position: alone orderby_clause: keyword_line_position: alone keyword_line_position_exclusions: None test_pass_leading_qualify: pass_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 configs: core: dialect: duckdb layout: type: qualify_clause: keyword_line_position: leading 
test_pass_alone_qualify: pass_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 configs: core: dialect: duckdb layout: type: qualify_clause: keyword_line_position: alone test_fail_leading_qualify: fail_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 fix_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 configs: core: dialect: duckdb layout: type: qualify_clause: keyword_line_position: leading orderby_clause: keyword_line_position_exclusions: None test_fail_alone_qualify: fail_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 fix_str: | SELECT a, b FROM f JOIN g ON g.h = f.h QUALIFY ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) = 1 configs: core: dialect: duckdb layout: type: qualify_clause: keyword_line_position: alone test_fail_cte_selects_3419: fail_str: | WITH some_cte AS (SELECT column1, column2 FROM some_table ) SELECT column1, column2 FROM some_cte fix_str: | WITH some_cte AS ( SELECT column1, column2 FROM some_table ) SELECT column1, column2 FROM some_cte configs: layout: type: select_clause: keyword_line_position: alone test_fail_all_keyword_segment: # This is a bit strange, but handles a case where the # segment is all keywords. fail_str: | CREATE TABLE t (c1 DOUBLE PRECISION NOT NULL) fix_str: | CREATE TABLE t (c1 DOUBLE PRECISION NOT NULL) configs: layout: type: data_type: keyword_line_position: alone test_pass_leading_non_indent_first_segment: # A window_specification doesn't have a leading keyword, so # this rule should have no effect on it. For now. pass_str: | select a, sum(b) over (named_window partition by c) as sum_b from t window named_window as (partition by e) configs: layout: type: window_specification: keyword_line_position: leading test_pass_templated_leading_from_clause: pass_str: | {% macro join_type(expression) %} {{ expression }} {% endmacro %} SELECT a.foo, b.bar FROM baz a {{ join_type("LEFT") }} JOIN tab b ON a.col = b.col configs: layout: type: join_clause: keyword_line_position: alone test_pass_none_where_clause: pass_str: | SELECT a, b FROM tabx WHERE b = 2; configs: layout: type: where_clause: keyword_line_position: none test_pass_single_line_set: # A single line set expression should be possible without forcing a newline.
pass_str: | SET increment_key = (SELECT MAX(COALESCE(date_key, '2021-01-01') - 1) AS increment_key FROM foo.bar); configs: core: dialect: snowflake test_pass_leading_orderby_except_window_function: pass_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h configs: layout: type: orderby_clause: keyword_line_position: leading keyword_line_position_exclusions: window_specification test_pass_leading_orderby_except_list_function: pass_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h configs: layout: type: orderby_clause: keyword_line_position: leading keyword_line_position_exclusions: window_specification, aggregate_order_by test_fail_leading_orderby_except_window_function_outer_orderby: fail_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h ORDER BY a fix_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e FROM f JOIN g ON g.h = f.h ORDER BY a configs: layout: type: orderby_clause: keyword_line_position: leading keyword_line_position_exclusions: window_specification test_fail_leading_orderby_except_list_outer_orderby: fail_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h ORDER BY a fix_str: | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY c ORDER BY d) AS e, STRING_AGG(a ORDER BY b, c) FROM f JOIN g ON g.h = f.h ORDER BY a configs: layout: type: orderby_clause: keyword_line_position: leading keyword_line_position_exclusions: window_specification, aggregate_order_by sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/LT15.yml000066400000000000000000000031141503426445100235360ustar00rootroot00000000000000rule: LT15 test_pass_no_empty_lines: pass_str: | SELECT foo FROM bar test_pass_one_empty_line: pass_str: | SELECT foo FROM bar configs: rules: layout.newlines: maximum_empty_lines_inside_statements: 1 test_fail_no_empty_lines: fail_str: | SELECT foo FROM bar fix_str: | SELECT foo FROM bar configs: rules: layout.newlines: maximum_empty_lines_inside_statements: 0 test_fail_one_empty_line_between_statements: fail_str: | SELECT foo FROM bar ; SELECT foo ; fix_str: | SELECT foo FROM bar ; SELECT foo ; configs: rules: layout.newlines: maximum_empty_lines_between_statements: 1 test_fail_bad_edge_case: # This test case is a little controversial. Currently the gap before and # after the semicolon is considered *between* statements. While this is # on the lenient side, and also allows people to operate with "leading semicolons" # if they want - it's very possible that some users will want this to be # further tightened. Recommendation for the future is to add an additional # config to control whether the gap a) before b) after or c) both # before and after should be considered "between" statements. fail_str: | SELECT foo FROM bar ; SELECT foo ; fix_str: | SELECT foo FROM bar ; SELECT foo ; configs: rules: layout.newlines: maximum_empty_lines_between_statements: 1 maximum_empty_lines_within_statements: 0 sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/README.md000066400000000000000000000026461503426445100236140ustar00rootroot00000000000000# Rule Tests All the individual rule unit tests are defined in yml files. Note that all the enumerated test names (test_1 etc.) were copied over from a previous format with an automated script. All new tests should be named descriptively with comments for more context if needed.
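Each yml file holds the cases for a single rule: a top-level `rule` key names the rule under test, and every other top-level key is one named test case. As a rough illustration of that shape, here is a minimal sketch that loads and inspects one of these files with PyYAML. The file path is illustrative, and this is not the project's actual test harness (which discovers and runs these files for you):

```
import yaml  # PyYAML, assumed to be installed

# Illustrative path to one of the case files in this directory.
with open("test/fixtures/rules/std_rule_cases/LT12.yml") as f:
    data = yaml.safe_load(f)

# The "rule" key names the rule under test; everything else is a case.
rule = data.pop("rule")

for name, case in data.items():
    if "pass_str" in case:
        # A query which should lint cleanly for this rule.
        print(f"{rule}::{name} expects no violations")
    if "fail_str" in case:
        # A query which should raise violations; if "fix_str" is also
        # present, the fixed output is checked against it too.
        suffix = ", with an asserted fix" if "fix_str" in case else ""
        print(f"{rule}::{name} expects violations{suffix}")
    if "configs" in case:
        # Optional per-case configuration overrides.
        print(f"  config overrides: {case['configs']}")
```

In the real suite, each `fail_str` is linted with the rule named at the top of the file and, where a `fix_str` is given, the auto-fixed output is compared against it.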
## Making a test case ### Writing a test for a query that should pass linting A test for a passing query is declared like this: ``` descriptive_test_name: pass_str: select * from x ``` ### Writing a test for a query that should fail linting A test for a failing query is declared like this: ``` descriptive_test_name: fail_str: select * FROM x ``` and can optionally include a test for the fixed query that the rule returns: ``` descriptive_test_name: fail_str: select * FROM x fix_str: select * from x ``` ### Rule Configuration If your test needs additional rule configuration, this can be supplied through a `configs` key, such as: ``` test_keyword_as_identifier: fail_str: SELECT parameter configs: rules: references.keywords: only_aliases: false ``` ## Yaml Syntax Using yaml makes it really easy to flexibly create test cases. You can create single line test cases with explicit newlines and tabs using `\n` and `\t` when it makes sense, or create multi-line test cases which are much easier to read for longer queries. A good reference on multiline yaml syntax can be found [here](https://yaml-multiline.info/). sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/RF01.yml000066400000000000000000000266571503426445100235360ustar00rootroot00000000000000rule: RF01 test_pass_object_referenced_1: # References in quotes in bigquery pass_str: SELECT bar.user_id FROM `foo.far.bar` configs: core: dialect: bigquery rules: references.from: force_enable: true test_fail_object_not_referenced_1: desc: Name foo is not referenced in FROM clause. It would need to be at the end of the identifier in ticks or an alias. fail_str: SELECT foo.user_id FROM `foo.far.bar` configs: core: dialect: bigquery rules: references.from: force_enable: true test_fail_object_not_referenced_2: # References in WHERE clause fail_str: SELECT * FROM my_tbl WHERE foo.bar > 0 test_pass_object_referenced_2: pass_str: | SELECT * FROM db.sc.tbl2 WHERE a NOT IN (SELECT a FROM db.sc.tbl1) test_pass_object_referenced_3: pass_str: | SELECT * FROM db.sc.tbl2 WHERE a NOT IN (SELECT tbl2.a FROM db.sc.tbl1) test_pass_object_referenced_4a: # Test ambiguous column reference caused by use of BigQuery structure fields. # Here, 'et2' could either be a schema name or a table name. # https://github.com/sqlfluff/sqlfluff/issues/1079 pass_str: | SELECT et2.txn.amount FROM `example_dataset2.example_table2` AS et2 configs: core: dialect: bigquery rules: references.from: force_enable: true test_pass_object_referenced_4b: # DuckDB allows dot-access to its MAP objects, which requires special handling # to ensure `ex.x` is not interpreted as `{table}.{field}` instead of # `{schema}.{table}`. The following returns `'there'` if executed. pass_str: | SELECT ex.x.hi FROM (SELECT { 'hi': 'there' } AS x) AS ex configs: core: dialect: duckdb test_pass_object_referenced_5a: # Test ambiguous column reference caused by use of BigQuery structure fields.
# Here, col1.field should not trigger the rule as, by default, this rule is # disabled for bigquery # https://github.com/sqlfluff/sqlfluff/issues/1503 pass_str: | SELECT col1.field, col FROM `example_dataset2.example_table2` configs: core: dialect: bigquery test_pass_object_referenced_5b: # Same test as above but the default (ANSI) dialect should trigger fail_str: | SELECT col1.field FROM table1 test_pass_object_referenced_5c: # Same test as above but for BigQuery with force_enable # on, so it should fail fail_str: | SELECT col1.field FROM `example_dataset2.example_table2` configs: core: dialect: bigquery rules: references.from: force_enable: true test_pass_object_referenced_5d: # Test for extra dialect (hive) compatibility pass_str: SELECT col1.field, col2 FROM example_table configs: core: dialect: hive test_pass_object_referenced_5e: # Test for extra dialect (redshift) compatibility pass_str: SELECT col1.field, col2 FROM example_table configs: core: dialect: redshift test_pass_object_referenced_6: # Test references in subqueries (see issue #1939) pass_str: | select cc.c1 from ( select table1.c1 from table1 inner join table2 on table1.x_id = table2.x_id inner join table3 on table2.y_id = table3.y_id ) as cc test_pass_object_referenced_7: pass_str: | UPDATE my_table SET row_sum = ( SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_table.id ) test_fail_object_referenced_7: fail_str: | UPDATE my_table SET row_sum = ( SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_tableeee.id ) test_pass_object_referenced_8: pass_str: | DELETE FROM agent1 WHERE EXISTS( SELECT customer.cust_id FROM customer WHERE agent1.agent_code <> customer.agent_code); test_pass_two_part_reference_8: pass_str: | delete from public.agent1 where exists( select customer.cust_id from customer where agent1.agent_code <> customer.agent_code) test_pass_two_part_reference_9: pass_str: | delete from public.agent1 where exists( select customer.cust_id from customer where public.agent1.agent_code <> customer.agent_code) test_fail_two_part_reference_10: fail_str: | select * from schema1.agent1 where schema2.agent1.agent_code <> 'abc' test_fail_two_part_reference_11: fail_str: | delete from schema1.agent1 where exists( select customer.cust_id from customer where schema2.agent1.agent_code <> customer.agent_code) test_pass_two_part_reference_11: pass_str: | select * from agent1 where public.agent1.agent_code <> '3' test_pass_bigquery_dash: # References in quotes in bigquery pass_str: SELECT bar.user_id FROM foo-far.bar configs: core: dialect: bigquery rules: references.from: force_enable: true test_pass_exasol_select_into: pass_str: | select * into table schm.tab from schm.tab2 configs: core: dialect: exasol test_pass_simple_delete: pass_str: | delete from table1 where 1 = 1 test_exasol_invalid_foreign_key_from: pass_str: | SELECT * WITH INVALID FOREIGN KEY (nr) FROM T1 REFERENCING T2 (id) configs: core: dialect: exasol test_tsql_select_system_as_identifier: pass_str: | SELECT @@IDENTITY AS 'Identity' configs: core: dialect: tsql test_mysql_select_no_from_should_not_except: pass_str: | SELECT DATE_SUB('1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND); configs: core: dialect: mysql test_nested_join_clause_does_not_flag: pass_str: | SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN I LEFT OUTER JOIN P ON I.Pcd = P.Iid ON BA.Iid = I.Bcd configs: core: dialect: tsql test_nested_join_clauses_do_not_flag: pass_str: | SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN I LEFT OUTER JOIN P ON I.Pcd =
P.Iid LEFT OUTER JOIN C ON I.Pcd = C.Iid ON BA.Iid = I.Bcd configs: core: dialect: tsql test_parenthesized_join_clauses_do_not_flag: pass_str: | SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN ( I LEFT OUTER JOIN P ON I.Pcd = P.Iid ) ON BA.Iid = I.Bcd configs: core: dialect: tsql test_soql_ignore_rule: pass_str: | SELECT Account.Name FROM Contact configs: core: dialect: soql test_postgres_value_table_alias: pass_str: | select sc.col1 as colx , pn.col1 as coly from sch1.tbl1 as sc cross join unnest(array[111, 222]) as pn(col1) configs: core: dialect: postgres test_pass_update_with_alias: pass_str: | UPDATE tbl AS dest SET t.title = 'TEST' WHERE t.id = 101 AND EXISTS ( SELECT 1 FROM foobar AS tmp WHERE tmp.idx = dest.idx) test_pass_postgres_merge_with_alias: pass_str: | merge dw.sch.tbl dest using land.sch.tbl src on src.idx = dest.idx and src.name = dest.name and src.idy = dest.idy when not matched by source and exists ( select 1 as tmp from land.sch.tag as ld where ld.idx = dest.idx and ld.name = dest.name ) then update set dest.ac = 'N' configs: core: dialect: tsql test_pass_tsql_nested_join_alias: pass_str: | SELECT tst1.Name, tst2.OtherName FROM dbo.Test1 AS tst1 LEFT OUTER JOIN (dbo.Test2 AS tst2 INNER JOIN dbo.FilterTable AS fltr1 ON tst2.Id = fltr1.Id) ON tst1.id = tst2.id configs: core: dialect: tsql test_pass_trino_lambda_expression: pass_str: | SELECT a_column, TRANSFORM(array_col, x -> x.example) AS array_col_example FROM example_table; configs: core: dialect: trino test_athena_ignore_rule_if_single_table: pass_str: | select a.complex.key, a.complex.structure.val from sch.t1 configs: core: dialect: athena test_athena_apply_rule_if_single_table_and_force_enable: fail_str: | select a.complex.key, a.complex.structure.val from sch.t1 configs: core: dialect: athena rules: references.from: force_enable: true test_athena_apply_rule_if_multiple_tables: fail_str: | select a.complex.key, a.complex.structure.val from sch.table1 as t1 left join sch.table2 as t2 on t1.id = t2.id configs: core: dialect: athena test_athena_apply_rule_if_one_aliased_table: fail_str: | select a.complex.key, a.complex.structure.val from sch.table1 as t1 configs: core: dialect: athena test_sqlite_create_trigger_after_insert_reference: # https://github.com/sqlfluff/sqlfluff/issues/6402 # https://www.sqlite.org/lang_createtrigger.html pass_str: | CREATE TRIGGER x AFTER INSERT ON y BEGIN SELECT 1 WHERE new.xyz = 3; END configs: core: dialect: sqlite test_sqlite_create_trigger_after_delete_reference: # https://github.com/sqlfluff/sqlfluff/issues/6402 # https://www.sqlite.org/lang_createtrigger.html pass_str: | CREATE TRIGGER x AFTER DELETE ON y BEGIN SELECT 1 WHERE old.xyz = 3; END configs: core: dialect: sqlite test_sqlite_create_trigger_after_update_reference: # https://github.com/sqlfluff/sqlfluff/issues/6402 # https://www.sqlite.org/lang_createtrigger.html pass_str: | CREATE TRIGGER x AFTER UPDATE ON y BEGIN SELECT 1 WHERE old.xyz != new.xyz; END; configs: core: dialect: sqlite test_sqlite_outside_of_trigger: # https://github.com/sqlfluff/sqlfluff/issues/6402 # https://www.sqlite.org/lang_createtrigger.html fail_str: | SELECT foo FROM bar WHERE old.xyz != new.xyz; configs: core: dialect: sqlite test_mysql_identifier_with_backticks_should_not_except: pass_str: | SELECT `f`.`bar` FROM `foo` AS `f`; configs: core: dialect: mysql test_snowflake_pass_dot_inside_quotes: pass_str: | WITH CTE_DUMMY AS ( SELECT 1 AS "DUMMY.ALIAS" , 2 AS "DUMMY_ALIAS" FROM DUAL ) SELECT C1."DUMMY.ALIAS" , C1."DUMMY_ALIAS" FROM CTE_DUMMY AS C1 
configs: core: dialect: snowflake test_postgres_pass_dot_inside_quotes: pass_str: | SELECT "a.b" FROM table configs: core: dialect: postgres test_tsql_bracketed_identifiers_pass: pass_str: | select [t1].[field1], [t2].[field2], t1.field3, "t1".field4, [t3].field5, "t3".field6, t3.field7 from [table1] as [t1] inner join [table2] as [t2] on [t2].[pk] = [t1].[pk] inner join [table3] as t3 on t3.pk = t1.pk configs: core: dialect: tsql test_bigquery_unnest_with_offset_pass: pass_str: | select ix, v from t left join unnest(t.value_list) as v with offset as ix configs: core: dialect: bigquery test_pass_tsql_pivot_table_alias: pass_str: | select pvt.cl1 , pvt.cl2 , pvt.cl3 , [1] as lvl_1 , [2] as lvl_2 , [3] as lvl_3 from levels as lvl pivot (max(value) for rn in([1], [2], [3]) ) as pvt configs: core: dialect: tsql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/RF02.yml000066400000000000000000000330721503426445100235300ustar00rootroot00000000000000rule: RF02 test_pass_qualified_references_multi_table_statements: pass_str: | SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a test_fail_unqualified_references_multi_table_statements: fail_str: | SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a test_pass_qualified_references_multi_table_statements_subquery: pass_str: | SELECT a FROM ( SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a ) test_fail_unqualified_references_multi_table_statements_subquery: fail_str: | SELECT a FROM ( SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a ) test_pass_qualified_references_multi_table_statements_subquery_mix: pass_str: | SELECT foo.a, vee.b FROM ( SELECT c FROM bar ) AS foo LEFT JOIN vee ON vee.a = foo.a test_allow_date_parts_as_function_parameter_bigquery: # Allow use of BigQuery date parts (which are not quoted and were previously # mistaken for column references and flagged by this rule). pass_str: | SELECT timestamp_trunc(a.ts, month) AS t FROM a JOIN b ON a.id = b.id configs: core: dialect: bigquery test_allow_date_parts_as_function_parameter_snowflake: # Allow use of Snowflake date parts (which are not quoted and were previously # mistaken for column references and flagged by this rule). pass_str: | SELECT datediff(year, a.column1, b.column2) FROM a JOIN b ON a.id = b.id configs: core: dialect: snowflake test_ignore_value_table_functions_when_counting_tables: # Allow use of unnested value tables from bigquery without counting as a # table reference. This test passes despite unqualified reference # because we "only select from one table" pass_str: | select unqualified_reference_from_table_a, _t_start from a left join unnest(generate_timestamp_array( '2020-01-01', '2020-01-30', interval 1 day)) as _t_start on true configs: core: dialect: bigquery test_ignore_value_table_functions_when_counting_unqualified_aliases: # Allow use of unnested value tables from bigquery without qualification. # The function `unnest` returns a table which is only one unnamed column. # This is impossible to qualify further, and as such the rule allows it. 
pass_str: | select a.*, b.*, _t_start from a left join b on true left join unnest(generate_timestamp_array( '2020-01-01', '2020-01-30', interval 1 day)) as _t_start on true configs: core: dialect: bigquery test_allow_unqualified_references_in_sparksql_lambdas: pass_str: | SELECT transform(array(1, 2, 3), x -> x + 1); configs: core: dialect: sparksql test_pass_databricks_lambdas: pass_str: | select i.*, aggregate(i.some_column, 0, (acc, x) -> acc + x) as y from some_table as o inner join some_other_table as i on o.id = i.id; configs: core: dialect: databricks test_allow_unqualified_references_in_snowflake_lambdas: pass_str: | select t.v, o.v, transform(t.arr1, x int -> x + 1) as f1, filter(t.arr2, y -> y:value > 0) as f2, reduce(o.arr, 0, (acc, val) -> acc + val) as f3 from some_table as t inner join some_other_table as o on t.id = o.id; configs: core: dialect: snowflake test_allow_unqualified_references_in_athena_lambdas: pass_str: | select t1.id, filter(array[t1.col1, t1.col2, t2.col3], x -> x is not null) as flt from t1 inner join t2 on t1.id = t2.id configs: core: dialect: athena test_allow_unqualified_references_in_athena_lambdas_with_several_arguments: pass_str: | select t1.id, filter(array[(t1.col1, t1.col2)], (x, y) -> x + y) as flt from t1 inner join t2 on t1.id = t2.id configs: core: dialect: athena test_disallow_unqualified_references_in_malformed_lambdas: fail_str: | select t1.id, filter(array[(t1.col1, t1.col2)], (x, y), z -> x + y) as flt from t1 inner join t2 on t1.id = t2.id configs: core: dialect: athena test_fail_column_and_alias_same_name: # See issue #2169 fail_str: | SELECT foo AS foo, bar AS bar FROM a LEFT JOIN b ON a.id = b.id test_pass_column_and_alias_same_name_1: pass_str: | SELECT a.foo AS foo, b.bar AS bar FROM a LEFT JOIN b ON a.id = b.id test_pass_column_and_alias_same_name_2: # Possible for unqualified columns if # it is actually an alias of another column. pass_str: | SELECT a.bar AS baz, baz FROM a LEFT JOIN b ON a.id = b.id test_pass_qualified_references_multi_table_statements_mysql: pass_str: | SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a configs: core: dialect: mysql test_fail_unqualified_references_multi_table_statements_mysql: fail_str: | SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a configs: core: dialect: mysql test_fail_column_and_alias_same_name_mysql: # See issue #2169 fail_str: | SELECT foo AS foo, bar AS bar FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: mysql test_pass_column_and_alias_same_name_1_mysql: pass_str: | SELECT a.foo AS foo, b.bar AS bar FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: mysql test_pass_column_and_alias_same_name_2_mysql: # Possible for unqualified columns if # it is actually an alias of another column. 
pass_str: | SELECT a.bar AS baz, baz FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: mysql test_pass_variable_reference_in_where_clause_mysql: pass_str: | SET @someVar = 1; SELECT Table1.Col1, Table2.Col2 FROM Table1 LEFT JOIN Table2 ON Table1.Join1 = Table2.Join1 WHERE Table1.FilterCol = @someVar; configs: core: dialect: mysql test_pass_qualified_references_multi_table_statements_tsql: pass_str: | SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a configs: core: dialect: tsql test_fail_unqualified_references_multi_table_statements_tsql: fail_str: | SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a configs: core: dialect: tsql test_fail_column_and_alias_same_name_tsql: # See issue #2169 fail_str: | SELECT foo AS foo, bar AS bar FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: tsql test_pass_column_and_alias_same_name_1_tsql: pass_str: | SELECT a.foo AS foo, b.bar AS bar FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: tsql test_pass_column_and_alias_same_name_2_tsql: # Possible for unqualified columns if # it is actually an alias of another column. pass_str: | SELECT a.bar AS baz, baz FROM a LEFT JOIN b ON a.id = b.id configs: core: dialect: tsql test_pass_rowtype_with_join: # Check we don't wrongly interpret rowtype attributes # as field aliases when more than one table is in the join pass_str: | select cast(row(t1.attr, t2.attr) as row(fld1 double, fld2 double)) as flds from sch.tab1 as t1 join sch.tab2 as t2 on t2.id = t1.id configs: core: dialect: hive test_fail_table_plus_flatten_snowflake_1: # FLATTEN() returns a table, thus there are two tables, thus lint failure. fail_str: | SELECT r.rec:foo::string AS foo, value:bar::string AS bar FROM foo.bar AS r, LATERAL FLATTEN(input => r.rec:result) AS x configs: core: dialect: snowflake test_fail_table_plus_flatten_snowflake_2: # FLATTEN() returns a table, thus there are two tables, thus lint failure, # even though there's no alias provided for FLATTEN(). fail_str: | SELECT r.rec:foo::string AS foo, value:bar::string AS bar FROM foo.bar AS r, LATERAL FLATTEN(input => r.rec:result) configs: core: dialect: snowflake test_pass_table_plus_flatten_snowflake_1: # FLATTEN() returns a table, thus there are two tables. This one passes, # unlike the above, because both aliases are used. pass_str: | SELECT r.rec:foo::string AS foo, x.value:bar::string AS bar FROM foo.bar AS r, LATERAL FLATTEN(input => r.rec:result) AS x configs: core: dialect: snowflake test_pass_ignore_words_column_name: pass_str: | SELECT test1, test2 FROM t_table1 LEFT JOIN t_table_2 ON TRUE configs: rules: references.qualification: ignore_words: test1,test2 test_pass_ignore_words_regex_column_name: pass_str: | SELECT _test1, _test2 FROM t_table1 LEFT JOIN t_table_2 ON TRUE configs: rules: references.qualification: ignore_words_regex: ^_ test_pass_ignore_words_regex_bigquery_declare_example: pass_str: DECLARE _test INT64 DEFAULT 42; SELECT _test FROM t_table1 LEFT JOIN t_table_2 ON TRUE configs: core: dialect: bigquery rules: references.qualification: ignore_words_regex: ^_ test_pass_redshift: # This was failing in issue 3380. pass_str: SELECT account.id FROM salesforce_sd.account INNER JOIN salesforce_sd."user" ON salesforce_sd."user".id = account.ownerid configs: core: dialect: redshift test_pass_tsql: # This was failing in issue 3342. pass_str: select psc.col1 from tbl1 as psc where exists ( select 1 as data from tbl2 as pr join tbl2 as c on c.cid = pr.cid where c.col1 = 'x' and pr.col2 <= convert(date, getdate()) and pr.pid = psc.pid ) configs: core: dialect: tsql
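# Editor's note: an illustrative sketch, not an upstream fixture. The two
# regression tests either side of this note (issues 3342 and 3055) exercise
# the same behaviour: a correlated reference to an outer alias inside
# EXISTS (...) resolves against the outer query, so it is not counted as an
# unqualified reference within the multi-table inner query. A stripped-down
# hypothetical case of the same shape (test name and tables assumed) might
# read:
#
# test_pass_correlated_exists_sketch:
#   pass_str: |
#     select outer_t.col1
#     from tbl1 as outer_t
#     where exists (
#         select 1
#         from tbl2 as inner_t
#         join tbl3 as other_t on other_t.idx = inner_t.idx
#         where inner_t.idx = outer_t.idx
#     )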
test_pass_ansi: # This was failing in issue 3055. pass_str: | SELECT my_col FROM my_table WHERE EXISTS ( SELECT 1 FROM other_table INNER JOIN mapping_table ON (mapping_table.other_fk = other_table.id_pk) WHERE mapping_table.kind = my_table.kind ) test_pass_redshift_convert: # This was failing in issue 3651. pass_str: | SELECT sellers.name, CONVERT(integer, sales.pricepaid) AS price FROM sales LEFT JOIN sellers ON sales.sellerid = sellers.sellerid WHERE sales.salesid = 100 configs: core: dialect: redshift test_fail_unreferenced_subquery_column: # Issue 6067 fail_str: | SELECT a FROM foo WHERE a IN (SELECT a FROM bar) test_pass_referenced_subquery_column: # Issue 6067 pass_str: | SELECT a FROM foo WHERE a IN (SELECT bar.a FROM bar) test_pass_referenced_subquery_is_self: pass_str: | SELECT * FROM (SELECT a FROM table) test_pass_declared_bigquery_variable: pass_str: | DECLARE run_time TIMESTAMP DEFAULT '2020-01-01 00:00:00'; SELECT table_a.age FROM table_a INNER JOIN table_b ON table_a.id = table_b.id WHERE table_a.start_date <= run_time; configs: core: dialect: bigquery test_pass_from_clause_subquery: pass_str: | SELECT a.id AS a_id, b.id AS b_id FROM ( SELECT id FROM foo ) AS a INNER JOIN bar AS b ON a.id = b.ib; test_pass_join_clause_subquery: pass_str: | SELECT a.id AS a_id, b.id AS b_id FROM bar AS b JOIN ( SELECT id FROM foo ) AS a ON a.id = b.id; test_fail_nested_correlated_subquery_inside_from_clause: fail_str: | SELECT a.id AS a_id, b.id AS b_id FROM ( SELECT id FROM foo WHERE id IN (SELECT id FROM baz) ) AS a INNER JOIN bar AS b ON a.id = b.id; test_fail_select_scalar_subquery: fail_str: | SELECT (SELECT max(id) FROM foo2) AS f1 FROM bar; test_fail_exists_subquery: fail_str: | SELECT id FROM bar WHERE EXISTS ( SELECT 1 FROM foo2 WHERE bar.id = id ); test_pass_ignore_deeper_alias_6389: pass_str: | SELECT (SELECT MAX(x.col) AS m FROM x) AS _stats FROM stats_today AS t LEFT JOIN stats_this_month AS tm ON tm.x = t.x WHERE _stats IS NOT NULL; test_fail_unqual_refs_multi_table_statements_ignore_external_references: fail_str: | SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a configs: rules: references.qualification: subqueries_ignore_external_references: True test_fail_unqual_refs_multi_table_statements_subq_ignore_external_references: fail_str: | SELECT a FROM ( SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a ) configs: rules: references.qualification: subqueries_ignore_external_references: True test_pass_unreferenced_subquery_column_subqueries_ignore_external_references: pass_str: | SELECT a FROM foo WHERE a IN (SELECT a FROM bar) configs: rules: references.qualification: subqueries_ignore_external_references: True test_pass_select_scalar_subquery_subqueries_ignore_external_references: pass_str: | SELECT (SELECT max(id) FROM foo2) AS f1 FROM bar; configs: rules: references.qualification: subqueries_ignore_external_references: True test_pass_exists_subquery_subqueries_ignore_external_references: pass_str: | SELECT id FROM bar WHERE EXISTS ( SELECT 1 FROM foo2 WHERE bar.id = id ); configs: rules: references.qualification: subqueries_ignore_external_references: True sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/RF03.yml000066400000000000000000000277331503426445100235340ustar00rootroot00000000000000rule: RF03 # Mixed qualification of references.
test_fail_single_table_mixed_qualification_of_references: fail_str: SELECT my_tbl.bar, baz FROM my_tbl fix_str: SELECT my_tbl.bar, my_tbl.baz FROM my_tbl test_pass_single_table_consistent_references_1: pass_str: SELECT bar FROM my_tbl test_pass_single_table_consistent_references_2: pass_str: SELECT my_tbl.bar FROM my_tbl test_pass_on_tableless_table: # tests particular code branch for coverage pass_str: SELECT (SELECT MAX(bar) FROM tbl) + 1 AS col test_fail_single_table_mixed_qualification_of_references_subquery: # NOTE: Even though there's a subquery here, we can still fix it # because there is no ambiguity about which table we're referencing. fail_str: SELECT * FROM (SELECT my_tbl.bar, baz FROM my_tbl) fix_str: SELECT * FROM (SELECT my_tbl.bar, my_tbl.baz FROM my_tbl) test_pass_lateral_table_ref: pass_str: | SELECT tbl.a, tbl.b, tbl.a + tbl.b AS col_created_right_here, col_created_right_here + 1 AS sub_self_ref FROM tbl test_pass_single_table_consistent_references_1_subquery: pass_str: SELECT * FROM (SELECT bar FROM my_tbl) test_pass_single_table_consistent_references_2_subquery: pass_str: SELECT * FROM (SELECT my_tbl.bar FROM my_tbl) test_fail_single_table_reference_when_unqualified_config: fail_str: SELECT my_tbl.bar FROM my_tbl fix_str: SELECT bar FROM my_tbl configs: rules: references.consistent: single_table_references: unqualified test_fail_single_table_reference_when_qualified_config: fail_str: SELECT bar FROM my_tbl WHERE foo fix_str: SELECT my_tbl.bar FROM my_tbl WHERE my_tbl.foo configs: rules: references.consistent: single_table_references: qualified test_pass_single_table_reference_in_subquery: # Catch issues with subqueries properly pass_str: | SELECT * FROM db.sc.tbl2 WHERE a NOT IN (SELECT a FROM db.sc.tbl1) test_value_table_functions_do_not_require_qualification: pass_str: | select a.*, _t_start from a left join unnest(generate_timestamp_array( '2020-01-01', '2020-01-30', interval 1 day)) as _t_start on true configs: core: dialect: bigquery rules: references.consistent: force_enable: true test_object_references_1a: # This should fail as "a" is an unreferenced object # We don't try to be smart. 
fail_str: SELECT a.bar, b FROM my_tbl fix_str: SELECT a.bar, my_tbl.b FROM my_tbl test_object_references_1b: # This should not-fail as "a" is potentially a STRUCT pass_str: SELECT a.bar, b FROM my_tbl configs: core: dialect: bigquery test_object_references_1c: # This should fail as, even though "a" is potentially a STRUCT, # the config has been set to enforce the rule fail_str: SELECT a.bar, b FROM my_tbl AS c fix_str: SELECT c.a.bar, c.b FROM my_tbl AS c configs: core: dialect: bigquery rules: references.consistent: force_enable: true single_table_references: qualified test_object_references_1d: # "a" is not a named table and therefore is a STRUCT pass_str: SELECT a.bar, b FROM my_tbl configs: core: dialect: bigquery rules: references.consistent: force_enable: true test_object_references_1e: pass_str: SELECT my_tbl.a.bar, my_tbl.b FROM my_tbl configs: core: dialect: bigquery rules: references.consistent: force_enable: true test_object_references_struct_inconsistent_fix_a: fail_str: SELECT a.bar, my_tbl.b FROM my_tbl fix_str: SELECT my_tbl.a.bar, my_tbl.b FROM my_tbl configs: core: dialect: bigquery rules: references.consistent: force_enable: true test_object_references_1f: # This should not-fail as "a" is potentially a STRUCT pass_str: SELECT a.bar, b FROM my_tbl configs: core: dialect: hive test_object_references_1g: # This should not-fail as "a" is potentially a STRUCT pass_str: SELECT a.bar, b FROM my_tbl configs: core: dialect: redshift test_tsql_pivot_are_excluded: # This should pass as tsql PIVOT columns do not need to be # qualified pass_str: | select t1._id , [1] as lvl_1 , [2] as lvl_2 , [3] as lvl_3 from table1 t1 pivot (max(value) for rn in([1], [2], [3]) ) as pvt configs: core: dialect: tsql test_date_functions_are_excluded: # This should pass as date keyword columns do not need to be # qualified pass_str: | SELECT a.[hello], DATEDIFF(day, a.[mydate], GETDATE()) AS [test] FROM mytable AS a configs: core: dialect: tsql test_select_alias_in_where_clause_1: # This should pass for certain dialects pass_str: | select t.col0, t.col1 + 1 as alias_col1 from table1 as t where alias_col1 > 5 configs: core: dialect: redshift test_select_alias_in_where_clause_2: # This should pass for certain dialects pass_str: | select t.col0, t.col1 + 1 as alias_col1 from table1 as t where alias_col1 > 5 configs: core: dialect: snowflake test_pass_group_by_alias: pass_str: | select t.col1 + 1 as alias_col1, count(1) from table1 as t group by alias_col1 test_pass_order_by_alias: pass_str: | select t.col0, t.col1 + 1 as alias_col1 from table1 as t order by alias_col1 test_pass_having: pass_str: | select t.col0, t.col1 + 1 as alias_col1 from table1 as t having alias_col1 > 5 test_fail_select_alias_in_where_clause_5: # This should fail for ansi (and be fixable) fail_str: | select t.col0, t.col1 + 1 as alias_col1 from table1 as t where alias_col1 > 5 fix_str: | select col0, col1 + 1 as alias_col1 from table1 as t where alias_col1 > 5 configs: rules: references.consistent: single_table_references: unqualified test_pass_tsql_parameter: # This should pass for certain dialects pass_str: | DECLARE @id_date int; SET @id_date = 20211108; SELECT sometable.some_column FROM sometable WHERE 1 = 1 AND sometable.column_with_date = @id_date configs: core: dialect: tsql test_pass_tsql_pivot: # This should pass for certain dialects pass_str: | SELECT 1 FROM ( SELECT DaysToManufacture, StandardCost FROM Production.Product ) AS SourceTable PIVOT ( AVG(StandardCost) FOR DaysToManufacture IN ([0], [1], [2], [3], [4]) ) AS PivotTable; configs: core: dialect: tsql
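# Editor's note: a minimal sketch of the fixture schema used throughout these
# files, not an upstream test case. Each entry supplies "pass_str" (SQL that
# must lint cleanly) or "fail_str" (SQL that must raise the rule), optionally
# a "fix_str" (the expected output of `sqlfluff fix`), and an optional
# "configs" block overriding the default dialect or rule settings. A
# hypothetical minimal case (name assumed) mirroring the opening test of this
# file would be:
#
# test_fail_minimal_sketch:
#   fail_str: SELECT my_tbl.bar, baz FROM my_tbl
#   fix_str: SELECT my_tbl.bar, my_tbl.baz FROM my_tbl
#   configs:
#     rules:
#       references.consistent:
#         single_table_references: consistent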
test_unfixable_ambiguous_reference_subquery: # `field_2` could be from the outer query or the inner # query (i.e. from `other_table` or `my_alias`) and because # it's ambiguous we shouldn't provide a fix. fail_str: | SELECT ( SELECT other_table.other_table_field_1 FROM other_table WHERE other_table.id = field_2 ) FROM (SELECT * FROM some_table) AS my_alias test_pass_snowflake_flatten_function: # Tests a fix for issue 3178. This query passes because the second column # could refer to either 'r' or the table returned by FLATTEN(). pass_str: | SELECT r.rec:foo::string, value:bar::string FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) configs: core: dialect: snowflake passes_tql_table_variable: # Issue 3243 pass_str: select a, b from @tablevar configs: core: dialect: tsql rules: references.consistent: single_table_references: qualified fail_but_dont_fix_templated_table_name_consistent: fail_str: | SELECT a, {{ "foo" }}.b FROM {{ "foo" }} fail_but_dont_fix_templated_table_name_qualified: fail_str: | SELECT a, {{ "foo" }}.b FROM {{ "foo" }} configs: rules: references.consistent: single_table_references: qualified pass_tsql_subselect_merge_statement: pass_str: | INSERT INTO [DIL].[md_GLAccount_HIST] ( [ChartOfAccounts_KEY] , [GLAccount_KEY] , [ValidFrom] , [ValidTo] , [GLAccount_DESC_short] ) SELECT MRG.[ChartOfAccounts_KEY] , MRG.[GLAccount_KEY] , MRG.[ValidFrom] , MRG.[ValidTo] , MRG.[GLAccount_DESC_short] FROM ( MERGE [DIL].[md_GLAccount_HIST] WITH (TABLOCK) t USING [INTERNAL_MERGE].[DIL_md_GLAccount_HIST] s ON ( t.[ChartOfAccounts_KEY] = s.[ChartOfAccounts_KEY] AND t.[GLAccount_KEY] = s.[GLAccount_KEY] ) WHEN NOT MATCHED BY TARGET THEN INSERT ( [ChartOfAccounts_KEY] , [GLAccount_KEY] , [ValidFrom] , [ValidTo] , [GLAccount_DESC_short] ) VALUES ( s.[ChartOfAccounts_KEY] , s.[GLAccount_KEY] , '19900101' , '99991231' , s.[GLAccount_DESC_short] ) OUTPUT $ACTION AS [Action_OUT] , s.[ChartOfAccounts_KEY] , s.[GLAccount_KEY] , '19900101' , '99991231' , s.[GLAccount_DESC_short] ) MRG WHERE MRG.Action_OUT = 'UPDATE' AND MRG.[ChartOfAccounts_KEY] IS NOT NULL AND MRG.[GLAccount_KEY] IS NOT NULL; configs: core: dialect: tsql test_pass_subselect_unqualified_one_reference_3987: pass_str: | SELECT 1 FROM mydataset.table1 AS a WHERE EXISTS ( SELECT 1 FROM mydataset.table2 WHERE a.id = id ); configs: rules: references.consistent: single_table_references: unqualified test_pass_subselect_unqualified_two_references_3987: pass_str: | SELECT 1 FROM mydataset.table1 AS a WHERE EXISTS ( SELECT 1 FROM mydataset.table2 AS b WHERE a.id = b.id ); configs: rules: references.consistent: single_table_references: unqualified test_pass_subselect_qualified_reference_5599: pass_str: | insert into dim.clients ( data_source ) select s.data_source from dim.clients_stg as s where not exists ( select 1 from dim.clients as t where s.id = t.id ) ; configs: rules: references.consistent: single_table_references: qualified test_fail_no_fix_unqualified_correlated_subquery_5983: fail_str: | SELECT *, ( SELECT C.JobId FROM customer AS C WHERE C.Id = CustomerId LIMIT 1 ) AS CustomerId FROM ( SELECT F.*, B.* FROM foo AS F LEFT JOIN bar AS B ON P.Id = B.ParentId ); test_fail_cte_qualified_to_unqualified_6014: fail_str: | with final as ( select my_table.first, second from my_table ) select * from final fix_str: | with final as ( select first, second from my_table ) select * from final configs: rules: references.consistent: single_table_references: unqualified
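# Editor's note: the two issue-6014 cases either side of this comment rewrite
# the same mixed-reference CTE towards whichever style
# "single_table_references" requests. A hedged sketch of the remaining
# default ("consistent") setting — the test name and expected fix are
# assumptions, inferred from the opening test of this file, where a mix whose
# first reference is qualified is fixed towards the qualified form:
#
# test_fail_cte_mixed_consistent_6014_sketch:
#   fail_str: |
#     with final as (
#         select my_table.first, second from my_table
#     )
#     select * from final
#   fix_str: |
#     with final as (
#         select my_table.first, my_table.second from my_table
#     )
#     select * from final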
test_fail_cte_unqualified_to_qualified_6014: fail_str: | with final as ( select my_table.first, second from my_table ) select * from final fix_str: | with final as ( select my_table.first, my_table.second from my_table ) select * from final configs: rules: references.consistent: single_table_references: qualified test_pass_postgres_named_arguments: pass_str: | select t1.b from __test__(a := 1) t1; configs: core: dialect: postgres test_pass_trino_lambda_expression: pass_str: | SELECT a_column, TRANSFORM(array_col, x -> x.example) AS array_col_example FROM example_table; configs: core: dialect: trino test_pass_snowflake_lambda_expression: pass_str: | select a_column, transform(array_col, x -> x + 1) as array_col_example from example_table; configs: core: dialect: snowflake sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/RF04.yml000066400000000000000000000072501503426445100235310ustar00rootroot00000000000000rule: RF04 test_pass_valid_identifier: pass_str: CREATE TABLE artist(artist_name TEXT) test_fail_keyword_as_identifier_column: fail_str: CREATE TABLE artist(create TEXT) test_fail_keyword_as_identifier_column_alias: fail_str: SELECT 1 as parameter test_fail_keyword_as_identifier_table_alias: fail_str: SELECT x FROM tbl AS parameter test_pass_valid_identifier_not_alias: # should pass on default config as not alias pass_str: SELECT parameter test_fail_keyword_as_identifier_not_alias_all: fail_str: SELECT parameter configs: rules: references.keywords: unquoted_identifiers_policy: all test_pass_valid_identifier_table_alias_column_alias_config: pass_str: SELECT x FROM tbl AS parameter configs: rules: references.keywords: unquoted_identifiers_policy: column_aliases test_fail_keyword_as_identifier_column_alias_config: fail_str: SELECT x AS date FROM tbl AS parameter configs: rules: references.keywords: unquoted_identifiers_policy: column_aliases test_pass_valid_quoted_identifier: pass_str: CREATE TABLE [artist]([artist_name] TEXT) configs: rules: references.keywords: quoted_identifiers_policy: aliases core: dialect: tsql test_fail_keyword_as_quoted_identifier_column: fail_str: CREATE TABLE "artist"("create" TEXT) configs: rules: references.keywords: quoted_identifiers_policy: aliases test_pass_keyword_as_quoted_identifier_column_none_policy: pass_str: CREATE TABLE "artist"("create" TEXT) configs: rules: references.keywords: quoted_identifiers_policy: none test_fail_keyword_as_quoted_identifier_column_alias: fail_str: SELECT 1 as [parameter] configs: rules: references.keywords: quoted_identifiers_policy: aliases core: dialect: tsql test_fail_keyword_as_quoted_identifier_table_alias: fail_str: SELECT [x] FROM [tbl] AS [parameter] configs: rules: references.keywords: quoted_identifiers_policy: aliases core: dialect: tsql test_pass_valid_quoted_identifier_not_alias: # should pass on default config as not alias pass_str: SELECT [parameter] configs: rules: references.keywords: quoted_identifiers_policy: aliases core: dialect: tsql test_fail_keyword_as_quoted_identifier_not_alias_all: fail_str: SELECT [parameter] configs: rules: references.keywords: quoted_identifiers_policy: all core: dialect: tsql test_pass_valid_quoted_identifier_table_alias_column_alias_config: pass_str: SELECT [x] FROM [tbl] AS [parameter] configs: rules: references.keywords: quoted_identifiers_policy: column_aliases core: dialect: tsql test_fail_keyword_as_quoted_identifier_column_alias_config: fail_str: SELECT [x] AS [date] FROM [tbl] AS [parameter] configs: rules: references.keywords: quoted_identifiers_policy: column_aliases 
core: dialect: tsql test_pass_ignore_word1: pass_str: CREATE TABLE artist(create TEXT) configs: rules: references.keywords: ignore_words: create test_pass_ignore_word2: pass_str: SELECT col1 AS date FROM table1 configs: rules: references.keywords: ignore_words: date test_pass_ignore_words_regex1: pass_str: CREATE TABLE artist(create TEXT) configs: rules: references.keywords: ignore_words_regex: ^cr test_pass_ignore_words_regex2: pass_str: SELECT col1 AS date FROM table1 configs: rules: references.keywords: ignore_words_regex: ^da test_pass_one_character_identifier: pass_str: SELECT d.col1 FROM table1 d configs: core: dialect: snowflake sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/RF05.yml000066400000000000000000000254631503426445100235360ustar00rootroot00000000000000rule: RF05 test_fail_special_chars_create_table_space: fail_str: | CREATE TABLE DBO.ColumnNames ( "Internal Space" INT ) test_fail_special_chars_create_table_gt: fail_str: | CREATE TABLE DBO.ColumnNames ( "Greater>Than" INT ) test_fail_special_chars_create_table_lt: fail_str: | CREATE TABLE DBO.ColumnNames ( "Less<Than" INT ) sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST02.yml rule: ST02 test_pass_case_cannot_be_reduced_2: pass_str: | select case when fab > 0 then true end as is_fab from fancy_table test_pass_case_cannot_be_reduced_3: pass_str: | select case when fab is not null then false end as is_fab from fancy_table test_pass_case_cannot_be_reduced_4: pass_str: | select case when fab > 0 then true else true end as is_fab from fancy_table test_pass_case_cannot_be_reduced_5: pass_str: | select case when fab <> 0 then 'just a string' end as fab_category from fancy_table test_pass_case_cannot_be_reduced_6: pass_str: | select case when fab <> 0 then true when fab < 0 then 'not a bool' end as fab_category from fancy_table test_pass_case_cannot_be_reduced_7: pass_str: | select foo, case when bar is null then bar else '123' end as test from baz; test_pass_case_cannot_be_reduced_8: pass_str: | select foo, case when bar is not null then '123' else bar end as test from baz; test_pass_case_cannot_be_reduced_9: pass_str: | select foo, case when bar is not null then '123' when foo is not null then '456' else bar end as test from baz; test_pass_case_cannot_be_reduced_10: pass_str: | select foo, case when bar is not null and abs(foo) > 0 then '123' else bar end as test from baz; test_pass_case_cannot_be_reduced_11: pass_str: | SELECT dv_runid, CASE WHEN LEAD(dv_startdateutc) OVER ( PARTITION BY rowid ORDER BY dv_startdateutc ) IS NULL THEN 1 ELSE 0 END AS loadstate FROM d; test_pass_case_cannot_be_reduced_12: pass_str: | select field_1, field_2, field_3, case when coalesce(field_2, field_3) is null then 1 else 0 end as field_4 from my_table; test_pass_case_cannot_be_reduced_13: pass_str: | SELECT CASE WHEN item.submitted_timestamp IS NOT NULL THEN item.sitting_id END configs: core: dialect: postgres test_pass_array_accessors: pass_str: | SELECT CASE WHEN genres[0] IS NULL THEN 'x' ELSE genres END AS g FROM table_t configs: core: dialect: snowflake test_fail_unnecessary_case_1: fail_str: | select case when fab > 0 then true else false end as is_fab from fancy_table fix_str: | select coalesce(fab > 0, false) as is_fab from fancy_table test_fail_unnecessary_case_2: fail_str: | select case when fab > 0 then false else true end as is_fab from fancy_table fix_str: | select not coalesce(fab > 0, false) as is_fab from fancy_table test_fail_unnecessary_case_3: fail_str: | select case when fab > 0 and tot > 0 then true else false end as is_fab from fancy_table fix_str: | select
coalesce(fab > 0 and tot > 0, false) as is_fab from fancy_table test_fail_unnecessary_case_4: fail_str: | select case when fab > 0 and tot > 0 then false else true end as is_fab from fancy_table fix_str: | select not coalesce(fab > 0 and tot > 0, false) as is_fab from fancy_table test_fail_unnecessary_case_5: fail_str: | select case when not fab > 0 or tot > 0 then false else true end as is_fab from fancy_table fix_str: | select not coalesce(not fab > 0 or tot > 0, false) as is_fab from fancy_table test_fail_unnecessary_case_6: fail_str: | select subscriptions_xf.metadata_migrated, case -- BEFORE ST02 FIX when perks.perk is null then false else true end as perk_redeemed, perks.received_at as perk_received_at from subscriptions_xf fix_str: | select subscriptions_xf.metadata_migrated, not coalesce(perks.perk is null, false) as perk_redeemed, perks.received_at as perk_received_at from subscriptions_xf test_fail_unnecessary_case_7: fail_str: | select foo, case when bar is null then '123' else bar end as test from baz; fix_str: | select foo, coalesce(bar, '123') as test from baz; test_fail_unnecessary_case_8: fail_str: | select foo, case when bar is not null then bar else '123' end as test from baz; fix_str: | select foo, coalesce(bar, '123') as test from baz; test_fail_unnecessary_case_9: fail_str: | select foo, case when bar is null then null else bar end as test from baz; fix_str: | select foo, bar as test from baz; test_fail_unnecessary_case_10: fail_str: | select foo, case when bar is not null then bar else null end as test from baz; fix_str: | select foo, bar as test from baz; test_fail_unnecessary_case_11: fail_str: | select foo, case when bar is not null then bar end as test from baz; fix_str: | select foo, bar as test from baz; test_fail_no_copy_code_out_of_template: # The rule wants to replace the case statement with coalesce(), but # LintFix.has_template_conflicts() correctly prevents it copying code out # of the templated region. Hence, the query is not modified. fail_str: | select foo, case when bar is null then {{ result }} else bar end as test from baz; configs: core: ignore_templated_areas: false templater: jinja: context: result: "'123'" sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST03.yml000066400000000000000000000173311503426445100235500ustar00rootroot00000000000000rule: ST03 test_pass_no_cte_defined_1: pass_str: select * from t test_pass_cte_defined_and_used_1: pass_str: | with cte as ( select a, b from t ) select * from cte test_pass_cte_defined_and_used_2: pass_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 JOIN cte2 test_pass_cte_defined_and_used_case_insensitive: pass_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 JOIN Cte2 test_fail_cte_defined_but_unused_1: desc: Two CTEs defined but only one used in final query. fail_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 test_fail_cte_defined_but_unused_2: desc: CTE defined but unused in final query even though table alias mimics CTE's name. fail_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT * FROM orders AS cte_orders test_pass_cte_defined_and_used_3: pass_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM cte1 ) SELECT * FROM cte2 test_fail_cte_defined_but_unused_3: desc: Two CTEs are defined. CTE2 references CTE1 but in final query only CTE1 is used. 
fail_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM cte1 ) SELECT * FROM cte1 test_pass_no_cte_defined_2: # Issue 915: Crash on statements that don't have a SELECT pass_str: CREATE TABLE my_table (id INTEGER) test_pass_cte_defined_and_used_4: # Issue 944: Detecting use of CTE in nested SELECT pass_str: | WITH max_date_cte AS ( SELECT MAX(row_updated_date) AS max_date FROM warehouse.loaded_monthly ) SELECT stuff FROM warehouse.updated_weekly WHERE row_updated_date <= (SELECT max_date FROM max_date_cte) test_pass_cte_defined_and_used_5: # Variant on test_9, the WHERE with a nested SELECT is in a CTE pass_str: | WITH max_date_cte AS ( SELECT MAX(row_updated_date) AS max_date FROM warehouse.loaded_monthly ), uses_max_date_cte AS ( SELECT stuff FROM warehouse.updated_weekly WHERE row_updated_date <= (SELECT max_date FROM max_date_cte) ) SELECT stuff FROM uses_max_date_cte test_pass_cte_defined_and_used_6: # Issue 963: Infinite recursion when a CTE references itself pass_str: | with pages_xf as ( select pages.received_at from pages where pages.received_at > (select max(received_at) from pages_xf ) ), final as ( select pages_xf.received_at from pages_xf ) select * from final test_fail_cte_defined_but_unused_4: # Variant on test_11 where there *is* an unused CTE fail_str: | with pages_xf as ( select pages.received_at from pages where pages.received_at > (select max(received_at) from pages_xf ) ), final as ( select pages_xf.received_at from pages_xf ), unused as ( select pages.received_at from pages ) select * from final test_pass_cte_defined_and_used_7: # Variant on test_11 where the CTE references itself indirectly pass_str: | with pages_xf as ( select pages.received_at from pages where pages.received_at > (select max(received_at) from final ) ), final as ( select pages_xf.received_at from pages_xf ) select * from final test_snowflake_delete_cte: fail_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 configs: core: dialect: snowflake test_pass_exasol_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_exasol_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_sparksql_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: sparksql test_pass_sparksql_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: sparksql test_fail_query_uses_templating: fail_str: | WITH random_gibberish AS ( SELECT 1 ) SELECT var_bar FROM {{ ref('issue_2235') }} test_pass_update_cte: pass_str: | WITH cte AS ( SELECT id, name, description FROM table1 ) UPDATE table2 SET name = cte.name, description = cte.description FROM cte WHERE table2.id = cte.id; configs: core: dialect: postgres test_fail_update_cte: fail_str: | WITH cte AS ( SELECT id, name, description FROM table1 ) UPDATE table2 SET name = 1, description = 2 configs: core: dialect: postgres test_fail_nested_cte: fail_str: | with a as ( with b as ( select 1 from foo ) select 1 ) select * from a test_pass_nested_query: pass_str: | WITH foo AS ( SELECT * FROM zipcode ), bar AS ( SELECT * FROM county ), stage AS ( (SELECT * FROM foo) UNION ALL (SELECT * FROM bar) ) SELECT * FROM stage test_fail_nested_query: fail_str: | WITH foo AS ( SELECT * FROM zipcode ), bar AS ( SELECT * 
FROM county ), stage AS ( (SELECT * FROM foo) UNION ALL (SELECT * FROM foo) ) SELECT * FROM stage test_pass_nested_query_in_from_clause: pass_str: | WITH foo AS ( SELECT * FROM zipcode ), stage AS ( SELECT * FROM ( SELECT * FROM foo ) ) SELECT * FROM stage test_fail_nested_query_in_from_clause: fail_str: | WITH foo AS ( SELECT * FROM zipcode ), stage AS ( SELECT * FROM ( SELECT * FROM foofoo ) ) SELECT * FROM stage test_pass_nested_with_cte: pass_str: | WITH example_cte AS ( SELECT 1 ), container_cte AS ( WITH nested_cte AS ( SELECT * FROM example_cte ) SELECT * FROM nested_cte ) SELECT * FROM container_cte test_pass_bigquery_quoted_cte_unquoted_ref: pass_str: | with `tabx` as ( select 1 ) select * from tabx; configs: core: dialect: bigquery test_pass_bigquery_quoted_cte_quoted_ref: pass_str: | with `tabx` as ( select 1 ) select * from `tabx`; configs: core: dialect: bigquery test_pass_bigquery_unquoted_cte_quoted_ref: pass_str: | with tabx as ( select 1 ) select * from `tabx`; configs: core: dialect: bigquery sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST04.yml000066400000000000000000000216731503426445100235550ustar00rootroot00000000000000rule: ST04 test_pass_1: # The nested CASE is under a "WHEN", not an "ELSE". pass_str: | SELECT c1, CASE WHEN species = 'Rat' THEN CASE WHEN colour = 'Black' THEN 'Growl' WHEN colour = 'Grey' THEN 'Squeak' END END AS sound FROM mytable test_pass_2: # Issue 3110. The nested CASE is part of a larger expression. Cannot flatten. pass_str: | SELECT CASE 'b' WHEN 'a' THEN TRUE ELSE '2022-01-01'::date > CURRENT_DATE + CASE 'b' WHEN 'b' THEN 8 WHEN 'c' THEN 9 END AND (c > 10) END AS test test_fail_1: # Simple case. fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE WHEN species = 'Dog' THEN 'Woof' END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' WHEN species = 'Dog' THEN 'Woof' END AS sound FROM mytable test_fail_2: # The nested "CASE" has two "WHEN" clauses. Getting # reasonable indentation is tricky. fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Mouse' THEN 'Squeak' END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Mouse' THEN 'Squeak' END AS sound FROM mytable test_fail_3: fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Mouse' THEN 'Squeak' ELSE "Whaa" END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Mouse' THEN 'Squeak' ELSE "Whaa" END AS sound FROM mytable test_fail_4: fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE ELSE "Whaa" END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE "Whaa" END AS sound FROM mytable test_fail_5: # The nested "CASE" is a one-liner. 
fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE WHEN species = 'Dog' THEN 'Woof' END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' WHEN species = 'Dog' THEN 'Woof' END AS sound FROM mytable test_double_nesting_1: fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN CASE WHEN species = 'Dog' THEN 'Woof' ELSE CASE WHEN species = 'Bird' THEN 'tweet' END END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN CASE WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Bird' THEN 'tweet' END END AS sound FROM mytable test_double_nesting_2: # NOTE: This could be simplified more, but the current version of the rule # only unnests "ELSE" statements. To do this safely, it'd have to verify # that the various "WHEN" clauses are mutually exclusive. fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN CASE WHEN species = 'Dog' THEN 'Woof' ELSE CASE WHEN species = 'Bird' THEN 'tweet' END END ELSE CASE WHEN species = 'Hyena' THEN 'Cackle' END END AS sound FROM mytable fix_str: | SELECT c1, CASE WHEN species = 'Rat' THEN CASE WHEN species = 'Dog' THEN 'Woof' WHEN species = 'Bird' THEN 'tweet' END WHEN species = 'Hyena' THEN 'Cackle' END AS sound FROM mytable test_fail_no_copy_code_out_of_template: # The rule wants to replace the case statement with coalesce(), but # LintFix.has_template_conflicts() correctly prevents it copying code out # of the templated region. Hence, the query is not modified. fail_str: | SELECT c1, CASE WHEN species = 'Rat' THEN 'Squeak' ELSE CASE {{ inner_when }} END END AS sound FROM mytable configs: core: ignore_templated_areas: false templater: jinja: context: inner_when: "WHEN species = 'Dog' THEN 'Woof'" test_pass_different_case_expressions1: pass_str: | SELECT CASE WHEN DayOfMonth IN (11, 12, 13) THEN 'TH' ELSE CASE MOD(DayOfMonth, 10) WHEN 1 THEN 'ST' WHEN 2 THEN 'ND' WHEN 3 THEN 'RD' ELSE 'TH' END END AS OrdinalSuffix FROM Calendar; test_pass_different_case_expressions2: pass_str: | SELECT CASE DayOfMonth WHEN 11 THEN 'TH' WHEN 12 THEN 'TH' WHEN 13 THEN 'TH' ELSE CASE MOD(DayOfMonth, 10) WHEN 1 THEN 'ST' WHEN 2 THEN 'ND' WHEN 3 THEN 'RD' ELSE 'TH' END END AS OrdinalSuffix FROM Calendar; test_fail_nested_same_case: fail_str: | SELECT CASE x WHEN 0 THEN 'zero' WHEN 5 THEN 'five' ELSE CASE x WHEN 10 THEN 'ten' WHEN 20 THEN 'twenty' ELSE 'other' END END FROM tab_a; fix_str: | SELECT CASE x WHEN 0 THEN 'zero' WHEN 5 THEN 'five' WHEN 10 THEN 'ten' WHEN 20 THEN 'twenty' ELSE 'other' END FROM tab_a; test_fail_retain_comments: fail_str: | SELECT CASE WHEN FALSE THEN "value1" -- a comment ELSE CASE -- another comment WHEN TRUE -- and here THEN "value2" -- but also here END END FROM table; fix_str: | SELECT CASE WHEN FALSE THEN "value1" -- a comment -- another comment WHEN TRUE -- and here THEN "value2" -- but also here END FROM table; test_fail_retain_comments_after_end: fail_str: | SELECT CASE -- no spaces here WHEN FALSE THEN "value1" -- a comment ELSE CASE -- after case -- another comment /* before the when */ WHEN TRUE -- and here THEN "value2" -- but also here END /* after the end */ /* but wait there's more! */ -- but here too END FROM table; fix_str: | SELECT CASE -- no spaces here WHEN FALSE THEN "value1" -- a comment -- after case -- another comment /* before the when */ WHEN TRUE -- and here THEN "value2" -- but also here /* after the end */ /* but wait there's more! 
*/ -- but here too END FROM table; test_fail_retain_comments_after_else: fail_str: | SELECT CASE WHEN FALSE THEN "value1" -- a comment /* before else*/ ELSE --after else /*before case*/ CASE -- else case -- another comment WHEN TRUE -- and here THEN "value2" -- but also here END END FROM table; fix_str: | SELECT CASE WHEN FALSE THEN "value1" -- a comment /* before else*/ --after else /*before case*/ -- else case -- another comment WHEN TRUE -- and here THEN "value2" -- but also here END FROM table; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST05.yml000066400000000000000000000565121503426445100235560ustar00rootroot00000000000000rule: ST05 select_fail: fail_str: | select a.x, a.y, b.z from a join ( select x, z from b ) as b on (a.x = b.x) fix_str: | with b as ( select x, z from b ) select a.x, a.y, b.z from a join b on (a.x = b.x) cte_select_fail: fail_str: | with prep as ( select 1 as x, 2 as z ) select a.x, a.y, b.z from a join ( select x, z from b ) as b on (a.x = b.x) fix_str: | with prep as ( select 1 as x, 2 as z ), b as ( select x, z from b ) select a.x, a.y, b.z from a join b on (a.x = b.x) cte_with_clashing_name: fail_str: | with prep_1 as ( select 1 as x, 2 as z ) select a.x, a.y, z from a join ( select x, z from b ) on a.x = z fix_str: | with prep_1 as ( select 1 as x, 2 as z ), prep_2 as ( select x, z from b ) select a.x, a.y, z from a join prep_2 on a.x = z double_nested_fail: fail_str: | with p_cte as ( select 1 as x, 2 as z UNION ALL select 1 as x, 2 as z ) select a.x, a.y, b.z from a join ( select x, z from ( select x, z from p_cte ) as c ) as b on (a.x = b.x) fix_str: | with p_cte as ( select 1 as x, 2 as z UNION ALL select 1 as x, 2 as z ), c as ( select x, z from p_cte ), b as ( select x, z from c ) select a.x, a.y, b.z from a join b on (a.x = b.x) configs: rules: structure.subquery: forbid_subquery_in: both double_nested_fail_2: fail_str: | select a.x, a.y, b.z from a join ( select x, z from ( select x, z from p_cte ) as b ) as b on (a.x = b.x) fix_str: | with b as ( select x, z from ( select x, z from p_cte ) as b ) select a.x, a.y, b.z from a join b on (a.x = b.x) violations_after_fix: - description: select_statement clauses should not contain subqueries. Use CTEs instead name: structure.subquery warning: false fixes: [] start_line_no: 2 start_line_pos: 20 start_file_pos: 31 end_line_no: 2 end_line_pos: 21 end_file_pos: 32 configs: rules: structure.subquery: forbid_subquery_in: both unfixable_cte_clash: fail_str: | with "b" as ( select x, z from p_cte ) select a.x, a.y, b.z from a join ( select x, z from ( select 1 ) as b ) as c on (a.x = b.x) fix_str: | with "b" as ( select x, z from p_cte ), c as ( select x, z from ( select 1 ) as b ) select a.x, a.y, b.z from a join c on (a.x = b.x) violations_after_fix: - description: select_statement clauses should not contain subqueries. 
Use CTEs instead name: structure.subquery warning: false fixes: [] start_line_no: 5 start_line_pos: 20 start_file_pos: 68 end_line_no: 5 end_line_pos: 21 end_file_pos: 69 configs: rules: structure.subquery: forbid_subquery_in: both with_recursive_fail_no_fix: fail_str: | with recursive p_cte as ( select 1 from tbl_foo ) select a.x, a.y, b.z from a join ( select x, z from p_cte ) as b on a.x = b.x select_multijoin_fail: fail_str: | select a.x, d.x as foo, a.y, b.z from (select a, x from foo) a join d using(x) join ( select x, z from b ) as b using (x) fix_str: | with a as (select a, x from foo), b as ( select x, z from b ) select a.x, d.x as foo, a.y, b.z from a join d using(x) join b using (x) configs: rules: structure.subquery: forbid_subquery_in: both with_fail: fail_str: | select a.x, a.y, b.z from a join ( with d as ( select x, z from b ) select * from d ) using (x) fix_str: | with prep_1 as ( with d as ( select x, z from b ) select * from d ) select a.x, a.y, b.z from a join prep_1 using (x) set_fail: fail_str: | SELECT a.x, a.y, b.z FROM a JOIN ( select x, z from b union select x, z from d ) USING (x) fix_str: | WITH prep_1 AS ( select x, z from b union select x, z from d ) SELECT a.x, a.y, b.z FROM a JOIN prep_1 USING (x) simple_pass: pass_str: | with c as ( select x, z from b ) select a.x, a.y, c.z from a join c on (a.x = c.x) from_clause_pass: pass_str: | select a.x, a.y from ( select * from b ) as a configs: rules: structure.subquery: forbid_subquery_in: join from_clause_fail: fail_str: | select a.x, a.y from ( select * from b ) as a fix_str: | with a as ( select * from b ) select a.x, a.y from a configs: rules: structure.subquery: forbid_subquery_in: from both_clause_fail: fail_str: | select a.x, a.y from ( select * from b ) as a fix_str: | with a as ( select * from b ) select a.x, a.y from a configs: rules: structure.subquery: forbid_subquery_in: both no_inner_from_pass: pass_str: | select a from (select 1 as a) uses_templating: fail_str: | SELECT a_table.id, b_table.id FROM a_table INNER JOIN ( SELECT id, {{"mrgn"}} AS margin FROM b_tbl ) AS b_table ON a_table.some_column = b_table.some_column issue_2898_redshift_attribute_error: fail_str: | INSERT INTO target_table (target_column) SELECT table1.column1 FROM table1 INNER JOIN ( SELECT table2.join_column FROM table2 ) AS temp3 ON table1.join_column = temp3.join_column fix_str: | INSERT INTO target_table (target_column) WITH temp3 AS ( SELECT table2.join_column FROM table2 ) SELECT table1.column1 FROM table1 INNER JOIN temp3 ON table1.join_column = temp3.join_column configs: core: dialect: postgres issue_3623_internal_error_multiple_templated_files: fail_str: | CREATE TABLE #procs WITH (DISTRIBUTION = HASH([eid])) AS WITH proc_icd AS ( SELECT * FROM fbp ) SELECT * FROM ( SELECT * FROM proc_icd ) sub ; CREATE TABLE #tem WITH (DISTRIBUTION = HASH([eid])) AS SELECT * FROM ( SELECT * FROM a ) b ; fix_str: | CREATE TABLE #procs WITH (DISTRIBUTION = HASH([eid])) AS WITH proc_icd AS ( SELECT * FROM fbp ), sub AS ( SELECT * FROM proc_icd ) SELECT * FROM sub ; CREATE TABLE #tem WITH (DISTRIBUTION = HASH([eid])) AS WITH b AS ( SELECT * FROM a ) SELECT * FROM b ; configs: core: dialect: tsql rules: structure.subquery: forbid_subquery_in: both issue_3622_no_space_after_from: fail_str: | CREATE TABLE t AS SELECT col1 FROM( SELECT 'x' AS col1 ) x fix_str: | CREATE TABLE t AS WITH x AS ( SELECT 'x' AS col1 ) SELECT col1 FROM x configs: rules: structure.subquery: forbid_subquery_in: both issue_3617_parentheses_around_ctas_select: fail_str: | 
CREATE TABLE t AS (SELECT Col1 FROM ( SELECT 'x' AS COl1 ) x ) configs: rules: structure.subquery: forbid_subquery_in: both issue_3572_correlated_subquery_1: pass_str: | select pd.* from person_dates join (select * from events where events.name = person_dates.name) issue_3572_correlated_subquery_2: pass_str: | select pd.* from person_dates as pd join (select * from events as ce where ce.name = pd.name) issue_3572_correlated_subquery_3: pass_str: | select pd.* from person_dates as pd join (select * from events as ce where ce.name = person_dates.name) issue_3598_avoid_looping_1: fail_str: | WITH cte1 AS ( SELECT a FROM (SELECT a) ) SELECT a FROM cte1 fix_str: | WITH prep_1 AS (SELECT a), cte1 AS ( SELECT a FROM prep_1 ) SELECT a FROM cte1 configs: rules: structure.subquery: forbid_subquery_in: both issue_3598_avoid_looping_2: fail_str: | WITH cte1 AS ( SELECT * FROM (SELECT * FROM mongo.temp) ) SELECT * FROM cte1 fix_str: | WITH prep_1 AS (SELECT * FROM mongo.temp), cte1 AS ( SELECT * FROM prep_1 ) SELECT * FROM cte1 configs: rules: structure.subquery: forbid_subquery_in: both test_fail_subquery_in_cte: fail_str: | with b as ( select x, z from ( select x, z from p_cte ) ) select b.z from b fix_str: | with prep_1 as ( select x, z from p_cte ), b as ( select x, z from prep_1 ) select b.z from b configs: rules: structure.subquery: forbid_subquery_in: both test_fail_subquery_in_cte_2: fail_str: | with b as ( select x, y from (select x, y, z from a) ) select x, y from a union all select x, y from b fix_str: | with prep_1 as (select x, y, z from a), b as ( select x, y from prep_1 ) select x, y from a union all select x, y from b configs: rules: structure.subquery: forbid_subquery_in: both test_fail_subquery_in_cte_3: fail_str: | with b as ( select x, y from(select x, y, z from a) ) select x, y from b fix_str: | with prep_1 as (select x, y, z from a), b as ( select x, y from prep_1 ) select x, y from b configs: rules: structure.subquery: forbid_subquery_in: both test_cte_with_multiple_subqueries: fail_str: | WITH q AS ( SELECT t1.a FROM table_1 AS t1 INNER JOIN table_2 AS t2 USING (a) LEFT JOIN ( SELECT DISTINCT a FROM table_3 WHERE c = 'v1' ) AS dns USING (a) LEFT JOIN ( SELECT DISTINCT a FROM table_4 WHERE b = 'v2' ) AS dcod USING (a) ) SELECT a FROM q ; fix_str: | WITH dns AS ( SELECT DISTINCT a FROM table_3 WHERE c = 'v1' ), dcod AS ( SELECT DISTINCT a FROM table_4 WHERE b = 'v2' ), q AS ( SELECT t1.a FROM table_1 AS t1 INNER JOIN table_2 AS t2 USING (a) LEFT JOIN dns USING (a) LEFT JOIN dcod USING (a) ) SELECT a FROM q ; test_cte_with_double_nested_subquery: fail_str: | WITH q AS ( SELECT t1.a FROM table_1 AS t1 INNER JOIN table_2 AS t2 USING (a) LEFT JOIN ( SELECT DISTINCT a FROM table_3 WHERE c = 'v1' ) AS dns USING (a) LEFT JOIN ( SELECT DISTINCT a FROM table_5 LEFT JOIN ( SELECT DISTINCT a, b FROM table_6 WHERE c < 5 ) AS t4 USING (a) WHERE table_5.b = 'v2' ) AS dcod USING (a) ) SELECT a FROM q; fix_str: | WITH dns AS ( SELECT DISTINCT a FROM table_3 WHERE c = 'v1' ), t4 AS ( SELECT DISTINCT a, b FROM table_6 WHERE c < 5 ), dcod AS ( SELECT DISTINCT a FROM table_5 LEFT JOIN t4 USING (a) WHERE table_5.b = 'v2' ), q AS ( SELECT t1.a FROM table_1 AS t1 INNER JOIN table_2 AS t2 USING (a) LEFT JOIN dns USING (a) LEFT JOIN dcod USING (a) ) SELECT a FROM q; test_two_ctes_with_same_named_nested_subquery: fail_str: | with purchases_in_the_last_year as ( select customer_id , arrayagg(distinct attr) within group (order by attr asc) as attrlist from ( select o.customer_id , p.attr from order_line_item 
as o inner join product as p on o.product_id = p.product_id and o.time_placed >= dateadd(year, -1, current_date()) ) group by customer_id ) , purchases_in_the_last_three_years as ( select customer_id , arrayagg(distinct attr) within group (order by attr asc) as attrlist from ( select o.customer_id , p.attr from order_line_item as o inner join product as p on o.product_id = p.product_id and o.time_placed >= dateadd(year, -3, current_date()) ) group by customer_id ) select distinct c.customer_id , ly.attrlist as attrlist_last_year , l3y.attrlist as attrlist_last_three_years from customers as c left outer join purchases_in_the_last_year as ly on c.customer_id = ly.customer_id left outer join purchases_in_the_last_three_years as l3y on c.customer_id = l3y.customer_id ; fix_str: | with prep_1 as ( select o.customer_id , p.attr from order_line_item as o inner join product as p on o.product_id = p.product_id and o.time_placed >= dateadd(year, -1, current_date()) ), purchases_in_the_last_year as ( select customer_id , arrayagg(distinct attr) within group (order by attr asc) as attrlist from prep_1 group by customer_id ), prep_2 as ( select o.customer_id , p.attr from order_line_item as o inner join product as p on o.product_id = p.product_id and o.time_placed >= dateadd(year, -3, current_date()) ), purchases_in_the_last_three_years as ( select customer_id , arrayagg(distinct attr) within group (order by attr asc) as attrlist from prep_2 group by customer_id ) select distinct c.customer_id , ly.attrlist as attrlist_last_year , l3y.attrlist as attrlist_last_three_years from customers as c left outer join purchases_in_the_last_year as ly on c.customer_id = ly.customer_id left outer join purchases_in_the_last_three_years as l3y on c.customer_id = l3y.customer_id ; configs: core: dialect: snowflake rules: structure.subquery: forbid_subquery_in: both test_fail_set_subquery_in_second_query: fail_str: | SELECT 1 AS value_name UNION SELECT value FROM (SELECT 2 AS value_name); fix_str: | WITH prep_1 AS (SELECT 2 AS value_name) SELECT 1 AS value_name UNION SELECT value FROM prep_1; configs: rules: structure.subquery: forbid_subquery_in: both test_fail_set_subquery_in_second_query_join: fail_str: | SELECT 1 AS value_name UNION SELECT value FROM (SELECT 2 AS value_name) CROSS JOIN (SELECT 1 as v2); fix_str: | WITH prep_1 AS (SELECT 2 AS value_name), prep_2 AS (SELECT 1 as v2) SELECT 1 AS value_name UNION SELECT value FROM prep_1 CROSS JOIN prep_2; configs: rules: structure.subquery: forbid_subquery_in: both test_fail_order_4782: fail_str: | WITH cte_1 AS ( SELECT subquery_a.field_a, subquery_a.field_b FROM ( SELECT subquery_b.field_a, alias_a.field_d, alias_a.field_b, alias_b.field_c FROM table_b AS alias_a INNER JOIN (SELECT * FROM table_a) AS subquery_b ON subquery_b.field_a >= alias_a.field_d LEFT OUTER JOIN table_b AS alias_b ON alias_a.field_b = alias_b.field_c ) AS subquery_a ), cte_2 AS ( SELECT * FROM table_c WHERE field_a > 0 ORDER BY field_b DESC ), join_ctes AS ( SELECT * FROM cte_1 LEFT OUTER JOIN cte_2 ON cte_1.field_a = cte_2.field_a ) SELECT * FROM join_ctes; fix_str: | WITH subquery_b AS (SELECT * FROM table_a), subquery_a AS ( SELECT subquery_b.field_a, alias_a.field_d, alias_a.field_b, alias_b.field_c FROM table_b AS alias_a INNER JOIN subquery_b ON subquery_b.field_a >= alias_a.field_d LEFT OUTER JOIN table_b AS alias_b ON alias_a.field_b = alias_b.field_c ), cte_1 AS ( SELECT subquery_a.field_a, subquery_a.field_b FROM subquery_a ), cte_2 AS ( SELECT * FROM table_c WHERE field_a > 0 ORDER 
BY field_b DESC ), join_ctes AS ( SELECT * FROM cte_1 LEFT OUTER JOIN cte_2 ON cte_1.field_a = cte_2.field_a ) SELECT * FROM join_ctes; configs: rules: structure.subquery: forbid_subquery_in: both test_fail_order_5265: violations_after_fix: - description: select_statement clauses should not contain subqueries. Use CTEs instead name: structure.subquery warning: false fixes: [] start_line_no: 18 start_line_pos: 15 start_file_pos: 343 end_line_no: 18 end_line_pos: 16 end_file_pos: 344 fail_str: | WITH cte1 AS ( SELECT COUNT(*) AS qty FROM some_table AS st LEFT JOIN ( SELECT 'first' AS id ) AS oops ON st.id = oops.id ), cte2 AS ( SELECT COUNT(*) AS other_qty FROM other_table AS sot LEFT JOIN ( SELECT 'middle' AS id ) AS another ON sot.id = another.id LEFT JOIN ( SELECT 'last' AS id ) AS oops ON sot.id = oops.id ) SELECT CURRENT_DATE(); fix_str: | WITH oops AS ( SELECT 'first' AS id ), cte1 AS ( SELECT COUNT(*) AS qty FROM some_table AS st LEFT JOIN oops ON st.id = oops.id ), another AS ( SELECT 'middle' AS id ), cte2 AS ( SELECT COUNT(*) AS other_qty FROM other_table AS sot LEFT JOIN another ON sot.id = another.id LEFT JOIN ( SELECT 'last' AS id ) AS oops ON sot.id = oops.id ) SELECT CURRENT_DATE(); configs: rules: structure.subquery: forbid_subquery_in: both test_fail_no_fix_nested_subquery_join: fail_str: | SELECT x.a, w2.b FROM x LEFT JOIN ( ( SELECT w.a, w.b, w.c FROM w ) AS w2 LEFT JOIN y ON w2.a = y.a ) ON x.a = w2.a; test_pass_nested_table_function: pass_str: | SELECT * FROM `func`(( SELECT 1 )); configs: core: dialect: bigquery rules: structure.subquery: forbid_subquery_in: both test_pass_nested_table_function_with_subquery: pass_str: | SELECT t.col1, res.result FROM t, TABLE( utils.udfs.udtf( t.col1, (SELECT dist.stats FROM dist) ) OVER ( PARTITION BY t.col1 ORDER BY t.col2 DESC ) ) AS res; configs: core: dialect: snowflake test_fail_mariadb_insert_cte_select: fail_str: | INSERT INTO test_hold ( uid, date_created, date_updated, earliest_activity, latest_activity, reason ) SELECT uid, sub.earliest_date_created AS date_created, sub.latest_date_updated AS date_updated, earliest_activity, latest_activity, sub.reason FROM test AS t INNER JOIN ( SELECT reason, MIN(date_created) AS earliest_date_created, MAX(date_updated) AS latest_date_updated FROM test GROUP BY reason ) AS sub ON t.reason = sub.reason GROUP BY t.reason; fix_str: | INSERT INTO test_hold ( uid, date_created, date_updated, earliest_activity, latest_activity, reason ) WITH sub AS ( SELECT reason, MIN(date_created) AS earliest_date_created, MAX(date_updated) AS latest_date_updated FROM test GROUP BY reason ) SELECT uid, sub.earliest_date_created AS date_created, sub.latest_date_updated AS date_updated, earliest_activity, latest_activity, sub.reason FROM test AS t INNER JOIN sub ON t.reason = sub.reason GROUP BY t.reason; configs: core: dialect: mariadb test_fail_tsql_insert: fail_str: | INSERT INTO Table1 (Id,Name,Attribute) SELECT Main.Id ,Main.Name ,Subq.Attribute FROM MainTable AS Main LEFT JOIN (SELECT Id ,Attribute FROM Table2) Subq ON Main.Id = Subq.Id fix_str: | WITH Subq AS (SELECT Id ,Attribute FROM Table2) INSERT INTO Table1 (Id,Name,Attribute) SELECT Main.Id ,Main.Name ,Subq.Attribute FROM MainTable AS Main LEFT JOIN Subq ON Main.Id = Subq.Id configs: core: dialect: tsql test_fail_tsql_existing_cte_with_insert: fail_str: | WITH MainTable as ( select * from sales ) INSERT INTO Table1 (Id,Name,Attribute) SELECT Main.Id ,Main.Name ,Subq.Attribute FROM MainTable AS Main LEFT JOIN (SELECT Id ,Attribute FROM Table2) Subq 
ON Main.Id = Subq.Id fix_str: | WITH MainTable as ( select * from sales ), Subq AS (SELECT Id ,Attribute FROM Table2) INSERT INTO Table1 (Id,Name,Attribute) SELECT Main.Id ,Main.Name ,Subq.Attribute FROM MainTable AS Main LEFT JOIN Subq ON Main.Id = Subq.Id configs: core: dialect: tsql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST06.yml000066400000000000000000000140101503426445100235420ustar00rootroot00000000000000rule: ST06 test_pass_select_statement_order: pass_str: | select a, cast(b as int) as b, c from x test_fail_select_statement_order_1: fail_str: | select a, row_number() over (partition by id order by date) as y, b from x line_numbers: [1] fix_str: | select a, b, row_number() over (partition by id order by date) as y from x test_fail_select_statement_order_2: fail_str: | select row_number() over (partition by id order by date) as y, *, cast(b as int) as b_int from x line_numbers: [1] fix_str: | select *, cast(b as int) as b_int, row_number() over (partition by id order by date) as y from x test_fail_select_statement_order_3: fail_str: | select row_number() over (partition by id order by date) as y, cast(b as int) as b_int, * from x line_numbers: [1] fix_str: | select *, cast(b as int) as b_int, row_number() over (partition by id order by date) as y from x test_fail_select_statement_order_4: fail_str: | select row_number() over (partition by id order by date) as y, b::int, * from x line_numbers: [1] fix_str: | select *, b::int, row_number() over (partition by id order by date) as y from x test_fail_select_statement_order_5: fail_str: | select row_number() over (partition by id order by date) as y, *, 2::int + 4 as sum, cast(b) as c from x line_numbers: [1] fix_str: | select *, cast(b) as c, row_number() over (partition by id order by date) as y, 2::int + 4 as sum from x test_union_statements_ignored: pass_str: | select a + b as c, d from table_a union all select c, d from table_b test_insert_statements_ignored: pass_str: | INSERT INTO example_schema.example_table (id, example_column, rank_asc, rank_desc) SELECT id, CASE WHEN col_a IN('a', 'b', 'c') THEN col_a END AS example_column, rank_asc, rank_desc FROM another_schema.another_table test_insert_statement_with_cte_ignored: pass_str: | INSERT INTO my_table WITH my_cte AS (SELECT * FROM t1) SELECT MAX(field1), field2 FROM t1 test_merge_statements_ignored: pass_str: | MERGE INTO t USING ( SELECT DATE_TRUNC('DAY', end_time) AS time_day, b FROM u ) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c) test_merge_statement_with_cte_ignored: pass_str: | MERGE INTO t USING ( WITH my_cte AS (SELECT * FROM t1) SELECT MAX(field1), field2 FROM t1 ) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c) test_create_table_as_select_statements_ignored: pass_str: | CREATE TABLE new_table AS ( SELECT id, CASE WHEN col_a IN('a', 'b', 'c') THEN col_a END AS example_column, rank_asc, rank_desc FROM another_schema.another_table ) test_create_table_as_select_with_cte_ignored: pass_str: | CREATE TABLE new_table AS ( WITH my_cte AS (SELECT * FROM t1) SELECT MAX(field1), field2 FROM t1 ) test_fail_fix_explicit_column_references_1: fail_str: | SELECT DATE_TRUNC('DAY', end_time) AS time_day, b_field FROM table_name GROUP BY time_day, b_field fix_str: | SELECT b_field, DATE_TRUNC('DAY', end_time) AS time_day FROM table_name GROUP BY time_day, b_field test_fail_fix_explicit_column_references_2: fail_str: | SELECT SUM(a_field) OVER (ORDER BY 1) AS a_field_window_sum, b_field 
FROM table_name GROUP BY a_field_window_sum, b_field fix_str: | SELECT b_field, SUM(a_field) OVER (ORDER BY 1) AS a_field_window_sum FROM table_name GROUP BY a_field_window_sum, b_field test_fail_no_fix_implicit_column_references: fail_str: | SELECT DATE_TRUNC('DAY', end_time) AS time_day, b_field FROM table_name GROUP BY 1, 2 test_pass_cte_used_in_set: pass_str: | WITH T1 AS ( SELECT 'a'::varchar AS A, 1::bigint AS B ), T2 AS ( SELECT CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A, COL AS B FROM T ) SELECT * FROM T1 UNION ALL SELECT * FROM T2; test_pass_subquery_used_in_set: pass_str: | SELECT * FROM (SELECT 'a'::varchar AS A, 1::bigint AS B) UNION ALL SELECT * FROM (SELECT CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A, COL AS B FROM T); test_fail_cte_used_in_set: fail_str: | WITH T1 AS ( SELECT 'a'::varchar AS A, 1::bigint AS B ), T2 AS ( SELECT CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A, COL AS B FROM T ) SELECT A, B FROM T1 UNION ALL SELECT A, B FROM T2; fix_str: | WITH T1 AS ( SELECT 'a'::varchar AS A, 1::bigint AS B ), T2 AS ( SELECT COL AS B, CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A FROM T ) SELECT A, B FROM T1 UNION ALL SELECT A, B FROM T2; test_fail_cte_used_in_select: fail_str: | WITH T1 AS ( SELECT 'a'::varchar AS A, 1::bigint AS B ), T2 AS ( SELECT CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A, COL AS B FROM T ) SELECT * FROM T1 INNER JOIN T2 ON T1.A = T2.A; fix_str: | WITH T1 AS ( SELECT 'a'::varchar AS A, 1::bigint AS B ), T2 AS ( SELECT COL AS B, CASE WHEN COL > 1 THEN 'x' ELSE 'y' END AS A FROM T ) SELECT * FROM T1 INNER JOIN T2 ON T1.A = T2.A; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST07.yml000066400000000000000000000045151503426445100235540ustar00rootroot00000000000000rule: ST07 test_pass_specify_join_keys: pass_str: select x.a from x inner join y on x.id = y.id test_fail_specify_join_keys_1: fail_str: select x.a from x inner join y using (id) fix_str: select x.a from x inner join y ON x.id = y.id test_fail_specify_join_keys_1_with_alias: fail_str: select x.a from foo_table AS x inner join y using (id) fix_str: select x.a from foo_table AS x inner join y ON x.id = y.id test_fail_specify_join_keys_1_with_subquery: fail_str: select x.a from (SELECT 1 AS a) AS x inner join y using (id) fix_str: select x.a from (SELECT 1 AS a) AS x inner join y ON x.id = y.id test_fail_specify_join_keys_1_with_multi_using: fail_str: select x.a from x inner join y using (id, a) fix_str: select x.a from x inner join y ON x.id = y.id AND x.a = y.a test_fail_specify_join_keys_2: desc: Keys were specified for first join but not the second one. fail_str: select x.a from x inner join y on x.id = y.id inner join z using (id) test_partial_fixed_up_to_2nd_join: fail_str: | select x.a from x inner join y using(id, foo) inner join z using(id) fix_str: | select x.a from x inner join y ON x.id = y.id AND x.foo = y.foo inner join z using(id) violations_after_fix: - description: Found USING statement. Expected only ON statements. 
name: structure.using warning: false fixes: [] start_line_no: 4 start_line_pos: 14 start_file_pos: 77 end_line_no: 4 end_line_pos: 19 end_file_pos: 82 select_using_fail: fail_str: | SELECT * FROM A_TABLE INNER JOIN ( SELECT margin FROM B_TABLE ) USING (SOME_COLUMN) test_fail_parent_child_positioning: # Check for issue from https://github.com/sqlfluff/sqlfluff/issues/3656 fail_str: | select * from c1 join c2 using (ID) join (select * from c3 join c4 using (ID)) as c5 on c1.ID = c5.ID fix_str: | select * from c1 join c2 ON c1.ID = c2.ID join (select * from c3 join c4 ON c3.ID = c4.ID) as c5 on c1.ID = c5.ID fail_but_dont_fix_templated_table_names: fail_str: | SELECT {{ "table_a" }}.field_1, table_b.field_2 FROM {{ "table_a" }} INNER JOIN table_b USING (id) test_pass_clickhouse: pass_str: SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2; configs: core: dialect: clickhouse sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST08.yml000066400000000000000000000030541503426445100235520ustar00rootroot00000000000000rule: ST08 test_fail_distinct_with_parenthesis_1: # Check we get fails for using DISTINCT apparently incorrectly fail_str: SELECT DISTINCT(a) fix_str: SELECT DISTINCT a test_fail_distinct_with_parenthesis_2: fail_str: SELECT DISTINCT(a + b) * c fix_str: SELECT DISTINCT (a + b) * c test_fail_distinct_with_parenthesis_3: fail_str: SELECT DISTINCT (a) fix_str: SELECT DISTINCT a test_fail_distinct_with_parenthesis_4: pass_str: SELECT DISTINCT (a + b) * c test_fail_distinct_with_parenthesis_5: fail_str: | SELECT DISTINCT(field_1) FROM my_table fix_str: | SELECT DISTINCT field_1 FROM my_table test_fail_distinct_with_parenthesis_6: fail_str: | SELECT DISTINCT(a), b fix_str: | SELECT DISTINCT a, b test_fail_distinct_with_parenthesis_7: pass_str: | SELECT DISTINCT ON(bcolor) bcolor, fcolor FROM distinct_demo configs: core: dialect: postgres test_pass_no_distinct: pass_str: | SELECT a, b test_fail_distinct_column_inside_count: fail_str: | SELECT COUNT(DISTINCT(unique_key)) fix_str: | SELECT COUNT(DISTINCT unique_key) test_fail_distinct_concat_inside_count: fail_str: | SELECT COUNT(DISTINCT(CONCAT(col1, '-', col2, '-', col3))) fix_str: | SELECT COUNT(DISTINCT CONCAT(col1, '-', col2, '-', col3)) test_pass_distinct_subquery_inside_count: pass_str: | SELECT COUNT( DISTINCT( SELECT ANY_VALUE(id) FROM UNNEST(tag) t ) ) FROM dataset_name.table_name; sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST09.yml000066400000000000000000000203411503426445100235510ustar00rootroot00000000000000rule: ST09 test_pass_no_join_clauses: pass_str: select * from foo test_pass_no_join_on_conditions: pass_str: select foo.a, bar.b from foo left join bar using (a) test_pass_ignored_subconditions: pass_str: select foo.a, bar.b from foo left join bar on bar.a between foo.a and foo.b test_pass_unqualified_column_reference: pass_str: select foo.a, bar.b from foo left join bar on bar.b = a test_pass_earlier_table_first: pass_str: | select foo.a, bar.b from foo left join bar on foo.a = bar.a test_pass_later_table_first: pass_str: | select foo.a, bar.b from foo left join bar on bar.a = foo.a configs: rules: structure.join_condition_order: preferred_first_table_in_join_clause: later test_fail_earlier_table_first: fail_str: | select foo.a, bar.b from foo left join bar on foo.a = bar.a fix_str: | select foo.a, bar.b from foo left join bar on bar.a = foo.a configs: rules: structure.join_condition_order: preferred_first_table_in_join_clause: later test_fail_later_table_first: fail_str: | select foo.a, bar.b from foo left join 
bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo left join bar on foo.a = bar.a test_fail_later_table_first_left_outer: fail_str: | select foo.a, bar.b from foo left outer join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo left outer join bar on foo.a = bar.a test_fail_later_table_first_inner: fail_str: | select foo.a, bar.b from foo inner join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo inner join bar on foo.a = bar.a test_fail_later_table_first_right: fail_str: | select foo.a, bar.b from foo right join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo right join bar on foo.a = bar.a test_fail_later_table_first_right_outer: fail_str: | select foo.a, bar.b from foo right outer join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo right outer join bar on foo.a = bar.a test_fail_later_table_first_full_outer: fail_str: | select foo.a, bar.b from foo full outer join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from foo full outer join bar on foo.a = bar.a test_pass_later_table_first_cross: pass_str: | select foo.a, bar.b from foo cross join bar where bar.a = foo.a test_fail_later_table_first_multiple_subconditions: fail_str: | select foo.a, foo.b, bar.c from foo left join bar on bar.a = foo.a and bar.b = foo.b fix_str: | select foo.a, foo.b, bar.c from foo left join bar on foo.a = bar.a and foo.b = bar.b test_fail_later_table_first_multiple_comparison_operators: fail_str: | select foo.a, bar.b, baz.c from foo left join bar on bar.a != foo.a and bar.b > foo.b and bar.c <= foo.c left join baz on baz.a <> foo.a and baz.b >= foo.b and baz.c < foo.c fix_str: | select foo.a, bar.b, baz.c from foo left join bar on foo.a != bar.a and foo.b < bar.b and foo.c >= bar.c left join baz on foo.a <> baz.a and foo.b <= baz.b and foo.c > baz.c test_fail_later_table_first_subquery: fail_str: | select foo.a, bar.b from ( select baz.a, qux.b from baz left join qux on qux.a = baz.a ) foo left join bar on bar.a = foo.a fix_str: | select foo.a, bar.b from ( select baz.a, qux.b from baz left join qux on baz.a = qux.a ) foo left join bar on foo.a = bar.a test_fail_later_table_first_cte: fail_str: | with foo as ( select baz.a, qux.b from baz left join qux on qux.a = baz.a ) select foo.a, bar.b from foo left join bar on bar.a = foo.a fix_str: | with foo as ( select baz.a, qux.b from baz left join qux on baz.a = qux.a ) select foo.a, bar.b from foo left join bar on foo.a = bar.a test_fail_later_table_no_join_clause_in_cte: fail_str: | with foo as ( select * from bar ) select foo.a, baz.b from foo left join baz on baz.a = foo.a fix_str: | with foo as ( select * from bar ) select foo.a, baz.b from foo left join baz on foo.a = baz.a test_fail_later_table_no_join_clause_in_main_query: fail_str: | with foo as ( select bar.b from bar left join baz on baz.a = bar.a ) select b from foo fix_str: | with foo as ( select bar.b from bar left join baz on bar.a = baz.a ) select b from foo test_fail_later_table_first_brackets_after_on: fail_str: | select foo.a, bar.b from foo left join bar on (bar.a = foo.a) fix_str: | select foo.a, bar.b from foo left join bar on (foo.a = bar.a) test_fail_later_table_first_brackets_after_from: fail_str: | select foo.a, bar.b from ( foo left join bar on bar.a = foo.a ) fix_str: | select foo.a, bar.b from ( foo left join bar on foo.a = bar.a ) test_fail_later_table_first_quoted_table_and_column: fail_str: | select "foo"."a", "bar"."b" from "foo" left join "bar" on "bar"."a" = "foo"."a" fix_str: | select "foo"."a", "bar"."b" from 
"foo" left join "bar" on "foo"."a" = "bar"."a" test_fail_sparksql_lt_eq_gt_operator: fail_str: | SELECT bt.test FROM base_table AS bt INNER JOIN second_table AS st ON st.test <=> bt.test fix_str: | SELECT bt.test FROM base_table AS bt INNER JOIN second_table AS st ON bt.test <=> st.test configs: core: dialect: sparksql test_pass_jinja_templated_tables_correct_order: pass_str: | select t1.a, t2.b from `{{ params.project_id }}.my_dataset.table_1` as t1 left join `{{ params.project_id }}.my_dataset.table_2` as t2 on t1.id = t2.id configs: core: dialect: bigquery templater: jinja: context: params: project_id: my_project test_fail_jinja_templated_issue_5506_reproduction: # This test reproduces the exact issue from GitHub #5506 fail_str: | SELECT t1.a , t1.b , t2.c FROM `{{ params.project_id }}.my_dataset.table_1` AS t1 LEFT JOIN `{{ params.project_id }}.my_dataset.table_2` AS t2 ON t1.id = t2.id LEFT JOIN `{{ params.project_id }}.my_dataset.table_3` AS t3 ON t3.id = t1.id fix_str: | SELECT t1.a , t1.b , t2.c FROM `{{ params.project_id }}.my_dataset.table_1` AS t1 LEFT JOIN `{{ params.project_id }}.my_dataset.table_2` AS t2 ON t1.id = t2.id LEFT JOIN `{{ params.project_id }}.my_dataset.table_3` AS t3 ON t1.id = t3.id configs: core: dialect: bigquery templater: jinja: context: params: project_id: my_project rules: structure.join_condition_order: preferred_first_table_in_join_clause: earlier sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST10.yml000066400000000000000000000055551503426445100235530ustar00rootroot00000000000000rule: ST10 test_pass_where: pass_str: | select * from foo where col = 3 test_pass_select: pass_str: | select bar = baz AS are_equal FROM foo test_pass_join: pass_str: | select foo.a, bar.b from foo left join bar on foo.a = bar.a test_fail_where: fail_str: | select * from foo where col = col test_fail_select: fail_str: | select bar = bar AS are_equal FROM foo test_fail_join: fail_str: | select foo.a, bar.b from foo left join bar on foo.a = foo.a test_pass_allowed_literal_true: # often used in code-generated SQL pass_str: | select col from foo WHERE 1=1 AND col = 'val' test_pass_allowed_literal_false: # often used in code-generated SQL pass_str: | select col from foo WHERE 1=0 OR col = 'val' test_fail_disallowed_literal_true: # 'a'!='b' should be true (or match allow-list) fail_str: | select col from foo WHERE 'a'!='b' AND col = 'val' test_fail_disallowed_literal_false: # 1 = 2 should be false (or match allow-list) fail_str: | select col from foo WHERE 1 = 2 OR col = 'val' test_fail_disallowed_literal_false2: # often used in code-generated SQL fail_str: | select col from foo WHERE 1 <> 1 OR col = 'val' test_fail_bracketed: # brackets produce nested subexpression which should be tested fail_str: | select col from foo WHERE cond=1 AND (score=score OR avg_score >= 3) test_pass_bracketed: # brackets produce nested subexpression which should be tested pass_str: | select col from foo WHERE ((c1 = 'x' OR c2 != 'y') AND (c3 <> 'z')) test_noop_true_literal: pass_str: | select * from foo where true and x > 3 test_noop_false_literal: pass_str: | select * from foo where false OR x < 1 OR y != z test_noop_between: # example of not-checked operator pass_str: | select col from foo WHERE score BETWEEN 1 and 10 test_other_operators_with_unknown_precedence: pass_str: | with cte as ( select 1 as num union all select 2 as num union all select 3 as num union all select 4 as num ) select *, case when num % 2 = 0 then 1 else 2 end as test from cte test_other_operators_with_bracketed_precedence: 
pass_str: | with cte as ( select 1 as num union all select 2 as num union all select 3 as num union all select 4 as num ) select *, case when (num % 2) = 0 then 1 else 2 end as test from cte test_pass_templated_queries: pass_str: | SELECT a, col FROM t WHERE a = :a configs: core: templater: placeholder templater: placeholder: param_style: colon a: a test_pass_array_access: pass_str: | SELECT my_array[1] = [1] FROM t sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/ST11.yml000066400000000000000000000102341503426445100235420ustar00rootroot00000000000000rule: ST11 test_fail_simple: fail_str: | select 1 from b left join c on b.x = c.x test_pass_single_table: # If there's only one table, even if not referenced, then # don't raise an error. pass_str: | select 1 from foo test_pass_values: # If there's only one table, even if not referenced, then # don't raise an error. pass_str: | select 1 from (VALUES (1, 'one'), (2, 'two'), (3, 'three')) # If there are any unqualified references, we shouldn't raise # an issue until they're resolved. test_pass_unqualified_select: pass_str: | select a from b left join c using(d) test_pass_unqualified_where: pass_str: | select 1 from b left join c using(d) where e test_pass_unqualified_group_by: pass_str: | select 1 from b left join c using(d) group by e test_fail_unused_table_in_join: fail_str: | select widget.id, widget.name, from widget left join inventor on widget.inventor_id = inventor.id test_pass_unused_table_in_join: pass_str: | select widget.id, widget.name, inventor.id from widget left join inventor on widget.inventor_id = inventor.id test_pass_inner_unreferenced: # In the following example, "b" is being used as a filter, and the rule # should only trigger if it's an explicit left, right or full join. pass_str: | select a.* from a inner join b using(x) test_pass_unqualified_unreferenced: # Same as above, but an implicit INNER (unqualified joins are usually # interpreted as inner joins). pass_str: | select a.* from a join b using(x) test_fail_left_unreferenced: fail_str: | select a.* from a left join c using(x) test_fail_right_unreferenced: fail_str: | select a.* from a right join d using(x) test_pass_subquery: # "b" is referenced in the subquery, so not unused. pass_str: | SELECT a.col1 FROM a LEFT JOIN b ON a.id = b.a_id WHERE a.some_column IN ( SELECT c.some_column FROM c WHERE c.other_column = b.col ) test_fail_subquery: # "b" is not referenced outside its join (despite subquery), so unused. 
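# Unlike the passing case above, the correlated subquery references "a", not "b".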
fail_str: | SELECT a.col1 FROM a LEFT JOIN b ON a.id = b.a_id WHERE a.some_column IN ( SELECT c.some_column FROM c WHERE c.other_column = a.col ) test_fail_inner_subquery: fail_str: | SELECT * FROM ( SELECT t1.col1 FROM db1.t1 LEFT JOIN t2 ON t1.id = t2.id ); test_pass_table_with_schema_6496: pass_str: | SELECT t1.col1, t2.col2 FROM db1.t1 LEFT JOIN t2 ON t1.id = t2.id test_pass_wildcard_6511: pass_str: | select simulation_source_data_reference.*, sourcings.* exclude sourcing_job_id from simulation_source_data_reference left join sourcings on simulation_source_data_reference.sourcing_job_id = sourcings.sourcing_job_id; configs: core: dialect: snowflake test_pass_cross_join_6511: pass_str: | select cast(fpids.value as integer) as party_id from kyc_dossiers as kds, lateral flatten(input => kds.party_ids) as fpids; configs: core: dialect: snowflake test_pass_table_expression_function_6558: pass_str: | SELECT ft.id, n.generic_field FROM fact_table AS ft LEFT JOIN UNNEST(ft.generic_array) AS g LEFT JOIN UNNEST(g.nested_array) AS n; configs: core: dialect: bigquery test_mysql_identifier_with_backticks_should_not_except: pass_str: | SELECT `f`.`bar`, `g`.`baz` FROM `foo` AS `f` LEFT JOIN `foobar` AS `g`; configs: core: dialect: mysql test_pass_quoted_table_name: pass_str: | SELECT test.one, "test-2".two FROM test LEFT JOIN "test-2" ON test.id = "test-2".id test_pass_quoted_brackets_table_name: pass_str: | SELECT test.one, [test-2].two FROM test LEFT JOIN [test-2] ON test.id = [test-2].id configs: core: dialect: tsql sqlfluff-3.4.2/test/fixtures/rules/std_rule_cases/TQ01.yml000066400000000000000000000026331503426445100235430ustar00rootroot00000000000000rule: TQ01 test_fail_sp_prefix_1: fail_str: | CREATE PROCEDURE dbo.sp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_fail_sp_prefix_2: fail_str: | CREATE PROCEDURE dbo.[sp_pull_data] AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_fail_sp_prefix_3: fail_str: | CREATE PROCEDURE dbo."sp_pull_data" AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_1: pass_str: | CREATE PROCEDURE dbo.pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_2: pass_str: | CREATE PROCEDURE dbo.usp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_3: pass_str: | CREATE PROCEDURE dbo.[usp_pull_data] AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_4: pass_str: | CREATE PROCEDURE dbo."usp_pull_data" AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql sqlfluff-3.4.2/test/fixtures/templater/000077500000000000000000000000001503426445100201735ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_a/000077500000000000000000000000001503426445100215665ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_a/.sqlfluff000066400000000000000000000001121503426445100234030ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] testing_schema=sch1 testing_table=tbl2 sqlfluff-3.4.2/test/fixtures/templater/jinja_a/jinja.sql000066400000000000000000000000701503426445100233770ustar00rootroot00000000000000SELECT 56 FROM {{ testing_schema }}.{{ testing_table }} sqlfluff-3.4.2/test/fixtures/templater/jinja_a/jinja.yml000066400000000000000000000006421503426445100234060ustar00rootroot00000000000000file: statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: numeric_literal: 56 from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: "." - naked_identifier: tbl2 sqlfluff-3.4.2/test/fixtures/templater/jinja_b/000077500000000000000000000000001503426445100215675ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_b/.sqlfluff000066400000000000000000000001771503426445100234170ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] some_macro = {% macro some_func(something) %}{{something}} + {{something * 2}}{% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_b/jinja.sql000066400000000000000000000002311503426445100233770ustar00rootroot00000000000000SELECT {% for key, value in (("a", 3), ("b", 7)) %}{{ some_func(value) }} as {{ key }}{% if not loop.last %},{% endif %}{% endfor %} FROM some_table sqlfluff-3.4.2/test/fixtures/templater/jinja_b/jinja.yml000066400000000000000000000026501503426445100234100ustar00rootroot00000000000000# Testing that templating works as expected with macros file: statement: select_statement: - select_clause: - keyword: SELECT - newline: "\n" - whitespace: " " - select_clause_element: - expression: - numeric_literal: 3 - whitespace: " " - binary_operator: + - whitespace: " " - numeric_literal: 6 - whitespace: " " - alias_expression: - alias_operator: - keyword: as - whitespace: " " - naked_identifier: a - comma: "," - select_clause_element: - expression: - numeric_literal: 7 - whitespace: " " - binary_operator: + - whitespace: " " - numeric_literal: 14 - whitespace: " " - alias_expression: - alias_operator: - keyword: as - whitespace: " " - naked_identifier: b - newline: "\n" - from_clause: - keyword: FROM - whitespace: " " - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_table newline: "\n" sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/000077500000000000000000000000001503426445100224215ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_config.sql000066400000000000000000000000431503426445100271460ustar00rootroot00000000000000{{ config(blah=60) }} SELECT TRUE sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_config.yml000066400000000000000000000002271503426445100271540ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'TRUE' sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_cross_ref.sql000066400000000000000000000000701503426445100276660ustar00rootroot00000000000000SELECT col1 FROM {{ ref('other_project', 'my_table') }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_cross_ref.yml000066400000000000000000000006011503426445100276700ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_is_incremental.sql000066400000000000000000000001351503426445100306770ustar00rootroot00000000000000SELECT {{ is_incremental() }} FROM t_table1 {% if is_incremental() %} WHERE TRUE {% endif %} 
sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_is_incremental.yml000066400000000000000000000007051503426445100307040ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'True' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table1 where_clause: - keyword: WHERE - expression: boolean_literal: 'TRUE' sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_ref.sql000066400000000000000000000000471503426445100264610ustar00rootroot00000000000000SELECT col1 FROM {{ ref('my_table') }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_ref.yml000066400000000000000000000006011503426445100264570ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_source.sql000066400000000000000000000000611503426445100272010ustar00rootroot00000000000000SELECT col1 FROM {{ source('source', 'table') }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_source.yml000066400000000000000000000006051503426445100272070ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.sql000066400000000000000000000004011503426445100266560ustar00rootroot00000000000000{% test my_cool_test(model, column_name, kwarg1=none, kwarg2=none) %} SELECT {{ column_name }} FROM {{ model }} WHERE thing = 1 {% if kwarg1 %} AND otherthing = 2 {% endif %} {% if kwarg2 %} AND anotherthing = 3 {% endif %} {% endtest %} -- no sql produced sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.yml000066400000000000000000000000111503426445100266550ustar00rootroot00000000000000file: [] sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this.sql000066400000000000000000000000341503426445100266500ustar00rootroot00000000000000SELECT col1 FROM {{ this }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this.yml000066400000000000000000000006031503426445100266540ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_model sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this_callable.sql000066400000000000000000000000451503426445100304710ustar00rootroot00000000000000SELECT col1 FROM {{ this.render() }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this_callable.yml000066400000000000000000000006031503426445100304730ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_model 
sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_var_default.sql000066400000000000000000000000341503426445100301750ustar00rootroot00000000000000SELECT {{ var('foo', 42) }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_var_default.yml000066400000000000000000000002641503426445100302040ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_zip.sql000066400000000000000000000002031503426445100265010ustar00rootroot00000000000000{% set not_iterable = 1 %} {% set iterable = ['a1', 'a2'] %} {% set result = zip(not_iterable, iterable) %} select {{ result }} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_zip.yml000066400000000000000000000003021503426445100265030ustar00rootroot00000000000000file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: None sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_zip_strict.sql000066400000000000000000000003051503426445100300740ustar00rootroot00000000000000{% set cols = ['c1', 'c2'] %} {% set aliases = ['a1', 'a2'] %} select {% for (col, alias) in zip_strict(cols, aliases) %} {{ col }} as {{ alias }}{% if not loop.last %},{% endif %} {% endfor %} sqlfluff-3.4.2/test/fixtures/templater/jinja_c_dbt/dbt_builtins_zip_strict.yml000066400000000000000000000010551503426445100301010ustar00rootroot00000000000000file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: c1 alias_expression: alias_operator: keyword: as naked_identifier: a1 - comma: ',' - select_clause_element: column_reference: naked_identifier: c2 alias_expression: alias_operator: keyword: as naked_identifier: a2 sqlfluff-3.4.2/test/fixtures/templater/jinja_d_roundtrip/000077500000000000000000000000001503426445100236775ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_d_roundtrip/.sqlfluff000066400000000000000000000001311503426445100255150ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] some_field=nothing_interesting my_table=another_table sqlfluff-3.4.2/test/fixtures/templater/jinja_d_roundtrip/test.sql000066400000000000000000000003441503426445100254000ustar00rootroot00000000000000select {{some_field}}, (1+2 ) AS kev, "wrongly indented field" as something_else, trailing_whitespace , 4678.9 from {{my_table}} where indentation = "wrong" AND NotSpacedProperly AND 4+6 > 9 sqlfluff-3.4.2/test/fixtures/templater/jinja_e/000077500000000000000000000000001503426445100215725ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_e/jinja.sql000066400000000000000000000004531503426445100234100ustar00rootroot00000000000000{%- set evens = [] -%} {%- for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -%} {%- if x % 2 == 0 -%} {%- do evens.append(x) -%} {%- endif -%} {%- endfor -%} select {% for x in evens -%} {{ x }} as {{ 'col' ~ x }} {%- if not loop.last -%}, {% endif %} {% endfor -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_e/jinja.yml000066400000000000000000000020431503426445100234070ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '2' alias_expression: alias_operator: keyword: as naked_identifier: col2 - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: alias_operator: keyword: as 
naked_identifier: col4 - comma: ',' - select_clause_element: numeric_literal: '6' alias_expression: alias_operator: keyword: as naked_identifier: col6 - comma: ',' - select_clause_element: numeric_literal: '8' alias_expression: alias_operator: keyword: as naked_identifier: col8 - comma: ',' - select_clause_element: numeric_literal: '10' alias_expression: alias_operator: keyword: as naked_identifier: col10 sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/000077500000000000000000000000001503426445100251745ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/.sqlfluff000066400000000000000000000002041503426445100270130ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path = macros exclude_macros_from_path = macros/macros_exclude, macros/macros_exclude_2 sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/jinja.sql000066400000000000000000000000701503426445100270050ustar00rootroot00000000000000select {{ foo1() }}, {{ foo2() }} from my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/jinja.yml000066400000000000000000000006661503426445100270220ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '102' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/000077500000000000000000000000001503426445100264605ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macro_include.sql000066400000000000000000000001201503426445100317760ustar00rootroot00000000000000{%- macro foo1() -%}101{%- endmacro -%} {%- macro foo2() -%}102{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude/000077500000000000000000000000001503426445100314555ustar00rootroot00000000000000macro_exclude.sql000066400000000000000000000001711503426445100347300ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude-- Testing that these macros are ignored {%- macro foo1() -%}105{%- endmacro -%} {%- macro foo2() -%}106{%- endmacro -%} macro_materialization.sql000066400000000000000000000002731503426445100364760ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude-- materialization is a custom jinja tag, testing that the templater does not error {% materialization my_materialization_name, default %} -- materialization... 
{% endmaterialization %} sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude_2/000077500000000000000000000000001503426445100316765ustar00rootroot00000000000000macro_exclude.sql000066400000000000000000000001711503426445100351510ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude_2-- Testing that these macros are ignored {%- macro foo1() -%}107{%- endmacro -%} {%- macro foo2() -%}108{%- endmacro -%} macro_materialization.sql000066400000000000000000000002731503426445100367170ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/macros/macros_exclude_2-- materialization is a custom jinja tag, testing that the templater does not error {% materialization my_materialization_name, default %} -- materialization... {% endmaterialization %} sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/model_directory/000077500000000000000000000000001503426445100303605ustar00rootroot00000000000000jinja_sub_directory.sql000066400000000000000000000000701503426445100350470ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/model_directoryselect {{ foo1() }}, {{ foo2() }} from my_table jinja_sub_directory.yml000066400000000000000000000006661503426445100350640ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_exclude_macro_path/model_directoryfile: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '102' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_f/000077500000000000000000000000001503426445100215735ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_f/.sqlfluff000066400000000000000000000002001503426445100234060ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] top_words=['shop', 'products', 'code'] NUM_EMBEDDING_COMPONENTS=2 num_embedding_components=4 sqlfluff-3.4.2/test/fixtures/templater/jinja_f/jinja.sql000066400000000000000000000004541503426445100234120ustar00rootroot00000000000000SELECT job_id {% for var in top_words %} , MAX(CASE WHEN word = '{{var}}' THEN 1 ELSE 0 END) AS {{var}}_word {% endfor %} {% for position in range(NUM_EMBEDDING_COMPONENTS) %} , safe_cast(vector_array[ORDINAL({{position}})] AS FLOAT64) AS v{{position}} {% endfor %} FROM tbl LIMIT 1 sqlfluff-3.4.2/test/fixtures/templater/jinja_f/jinja.yml000066400000000000000000000137371503426445100234240ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: job_id - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'shop'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: shop_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( 
expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'products'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: products_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX function_contents: bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'code'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: code_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: safe_cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: vector_array array_accessor: start_square_bracket: "[" expression: function: function_name: function_name_identifier: ORDINAL function_contents: bracketed: start_bracket: ( expression: numeric_literal: "0" end_bracket: ) end_square_bracket: "]" keyword: AS data_type: data_type_identifier: FLOAT64 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: v0 - comma: "," - select_clause_element: function: function_name: function_name_identifier: safe_cast function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: vector_array array_accessor: start_square_bracket: "[" expression: function: function_name: function_name_identifier: ORDINAL function_contents: bracketed: start_bracket: ( expression: numeric_literal: "1" end_bracket: ) end_square_bracket: "]" keyword: AS data_type: data_type_identifier: FLOAT64 end_bracket: ) alias_expression: alias_operator: keyword: AS naked_identifier: v1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl limit_clause: keyword: LIMIT numeric_literal: "1" sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/000077500000000000000000000000001503426445100231405ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/.sqlfluff000066400000000000000000000003261503426445100247640ustar00rootroot00000000000000[sqlfluff] dialect=postgres [sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:context] top_words=['shop', 'products', 'code'] NUM_EMBEDDING_COMPONENTS=2 num_embedding_components=4 sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/jinja.sql000066400000000000000000000004251503426445100247550ustar00rootroot00000000000000{{ config( materialized = "incremental", unique_key = 'id' ) }} -- Test macro loading from folder. 
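-- sb_incremental() (see macros/macro_1.sql) adds a NOT IN anti-join filter when the model runs incrementally.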
select distinct on (id) (json -> 'type' ->> 'id')::int as id, (json -> 'type' ->> 'name') as name from {{ sb_incremental(this, 'sb_route_events') }} as e sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/jinja.yml000066400000000000000000000075271503426445100247710ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: distinct - keyword: 'on' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - end_bracket: ) - select_clause_element: expression: cast_expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: json - binary_operator: -> - quoted_literal: "'type'" - binary_operator: ->> - quoted_literal: "'id'" end_bracket: ) casting_operator: '::' data_type: keyword: int alias_expression: alias_operator: keyword: as naked_identifier: id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: json - binary_operator: -> - quoted_literal: "'type'" - binary_operator: ->> - quoted_literal: "'name'" end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: name from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sb_route_events alias_expression: alias_operator: keyword: as naked_identifier: s where_clause: keyword: where bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: match_id - keyword: not - keyword: in - bracketed: - start_bracket: ( - select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: naked_identifier: match_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_model - end_bracket: ) end_bracket: ) end_bracket: ) alias_expression: alias_operator: keyword: as naked_identifier: e sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/macros/000077500000000000000000000000001503426445100244245ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_g_macros/macros/macro_1.sql000066400000000000000000000005261503426445100264710ustar00rootroot00000000000000{% macro sb_incremental(tbl, source, tbl_id='match_id', source_id='match_id') %} {% if is_incremental() %} ( select * from {{ source }} as s where ( s.{{ source_id }} not in (select distinct {{ tbl_id }} from {{ tbl.name }}) ) ) {% else %} {{ source }} {% endif %} {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/000077500000000000000000000000001503426445100231415ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/.sqlfluff000066400000000000000000000000701503426445100247610ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/jinja.sql000066400000000000000000000001761503426445100247610ustar00rootroot00000000000000-- Spacing errors inside and outside of the macro. 
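-- bad_macro() expands to 5+6+7, so the rendered SQL inherits its spacing issues.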
-- This test make select 1 + 2 + {{ bad_macro() }} + 999+101 from my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/jinja.yml000066400000000000000000000014511503426445100247600ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - numeric_literal: 1 - binary_operator: + - numeric_literal: 2 - binary_operator: + - numeric_literal: 5 - binary_operator: + - numeric_literal: 6 - binary_operator: + - numeric_literal: 7 - binary_operator: + - numeric_literal: 999 - binary_operator: + - numeric_literal: 101 from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/macros/000077500000000000000000000000001503426445100244255ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_h_macros/macros/bad_macro.sql000066400000000000000000000000531503426445100270530ustar00rootroot00000000000000{% macro bad_macro() %}5+6+7{% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_i_raw/000077500000000000000000000000001503426445100224475ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_i_raw/raw_tag.sql000066400000000000000000000002011503426445100246050ustar00rootroot00000000000000SELECT col1, {% raw %} col2, '{{ a_tag_which_should_be_treated_as_raw }}' as col3 {% endraw %} FROM my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_i_raw/raw_tag.yml000066400000000000000000000013421503426445100246160ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: quoted_literal: "'{{ a_tag_which_should_be_treated_as_raw }}'" alias_expression: alias_operator: keyword: as naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_i_raw/raw_tag_2.sql000066400000000000000000000004401503426445100250330ustar00rootroot00000000000000-- Example from https://github.com/sqlfluff/sqlfluff/pull/737 SELECT {% raw %} lower(note_text) NOT LIKE '%daycare: {%' AND lower(note_text) NOT LIKE '%grade/ school name: {%' AND lower(note_text) NOT LIKE '%social history: {%' {% endraw %} AS foo FROM my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_i_raw/raw_tag_2.yml000066400000000000000000000035151503426445100250430ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%daycare: {%'" - binary_operator: AND - function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%grade/ school name: {%'" - binary_operator: AND - function: function_name: function_name_identifier: lower function_contents: bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%social 
history: {%'" alias_expression: alias_operator: keyword: AS naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/000077500000000000000000000000001503426445100236335ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/.sqlfluff000066400000000000000000000000551503426445100254560ustar00rootroot00000000000000[sqlfluff:templater:jinja] library_path=libs sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/jinja.sql000066400000000000000000000001271503426445100254470ustar00rootroot00000000000000SELECT 56 FROM {{ foo.schema }}.{{ foo.table("xyz") }} WHERE {{ bar.equals("x", 23) }} sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/jinja.yml000066400000000000000000000012021503426445100254440ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: foo_xyz where_clause: keyword: WHERE expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '23' sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/libs/000077500000000000000000000000001503426445100245645ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/libs/bar.py000066400000000000000000000002331503426445100257000ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/libs/foo.py000066400000000000000000000002571503426445100257250ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-3.4.2/test/fixtures/templater/jinja_j_libraries/libs/not_python.txt000066400000000000000000000000261503426445100275240ustar00rootroot00000000000000I am just a text file sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/000077500000000000000000000000001503426445100274045ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/.sqlfluff000066400000000000000000000002751503426445100312330ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:macros] foo2_def={%- macro foo2() -%}202{%- endmacro -%} foo3_def={%- macro foo3() -%}203{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/jinja.sql000066400000000000000000000001121503426445100312120ustar00rootroot00000000000000select {{ foo1() }}, {{ foo2() }}, {{ foo3() }} from my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/jinja.yml000066400000000000000000000010111503426445100312130ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '202' - comma: ',' - select_clause_element: numeric_literal: '203' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table 
sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/macros/000077500000000000000000000000001503426445100306705ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_k_config_override_path_macros/macros/foo.sql000066400000000000000000000001211503426445100321660ustar00rootroot00000000000000{%- macro foo1() -%}101{%- endmacro -%} {%- macro foo2() -%}102{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/000077500000000000000000000000001503426445100227725ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/.sqlfluff000066400000000000000000000001331503426445100246120ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] actions=['a', 'b', 'c'] states="foo\n ,bar" metric=open sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/001.sql000066400000000000000000000003011503426445100240050ustar00rootroot00000000000000{% set some_condition %}TRUE{% endset %} WITH cust AS (SELECT SNAPSHOT_DATE FROM DATAHUB.SNAPSHOT_DAILY WHERE {{some_condition}} ) SELECT DISTINCT cust.SNAPSHOT_DATE FROM custsqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/001.yml000066400000000000000000000047171503426445100240260ustar00rootroot00000000000000file: - placeholder: '{% set some_condition %}' - indent: '' - placeholder: 'TRUE' - dedent: '' - placeholder: '{% endset %}' - newline: "\n" - newline: "\n" - statement: with_compound_statement: - keyword: WITH - whitespace: ' ' - common_table_expression: - naked_identifier: cust - whitespace: ' ' - keyword: AS - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - select_statement: - select_clause: - keyword: SELECT - indent: '' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: SNAPSHOT_DATE - dedent: '' - newline: "\n" - whitespace: ' ' - from_clause: - keyword: FROM - whitespace: ' ' - from_expression: indent: '' from_expression_element: table_expression: table_reference: - naked_identifier: DATAHUB - dot: . - naked_identifier: SNAPSHOT_DAILY dedent: '' - newline: "\n" - whitespace: ' ' - where_clause: keyword: WHERE indent: '' whitespace: ' ' expression: boolean_literal: 'TRUE' dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - end_bracket: ) - newline: "\n" - newline: "\n" - select_statement: select_clause: - keyword: SELECT - whitespace: ' ' - select_clause_modifier: keyword: DISTINCT - indent: '' - whitespace: ' ' - select_clause_element: column_reference: - naked_identifier: cust - dot: . 
- naked_identifier: SNAPSHOT_DATE - dedent: '' newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: '' from_expression_element: table_expression: table_reference: naked_identifier: cust dedent: '' - end_of_file: '' sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/002.sql000066400000000000000000000007111503426445100240130ustar00rootroot00000000000000SELECT {{ " c2\n" }} AS other_id, {{ states }} {% for action in actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{ states }}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/002.yml000066400000000000000000000151151503426445100240210ustar00rootroot00000000000000file: statement: select_statement: - select_clause: - keyword: SELECT - indent: '' - newline: "\n" # NB: We end up with double whitespace here # because one is literal and one is templated. - whitespace: ' ' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: c2 newline: "\n" whitespace: ' ' alias_expression: indent: '' alias_operator: keyword: AS whitespace: ' ' naked_identifier: other_id dedent: '' - comma: ',' - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - newline: "\n" - whitespace: ' ' - placeholder: '{% for action in actions %}' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_a - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_a - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_b - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_b - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_c - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_c - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endfor %}' - newline: "\n" - from_clause: - keyword: FROM - newline: "\n" - whitespace: ' ' - placeholder: '{% for action in actions %}' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - newline: "\n" - whitespace: ' ' - from_expression: - indent: '' - from_expression_element: table_expression: table_reference: naked_identifier: a_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% else %}' - indent: '' - placeholder: '... [103 unused template characters] ...' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - placeholder: '... [49 unused template characters] ...' 
- dedent: '' - placeholder: '{% else %}' - indent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - join_clause: - keyword: JOIN - indent: '' - newline: "\n" - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: b_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - indent: '' - keyword: USING - indent: '' - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - naked_identifier: bar - dedent: '' - end_bracket: ) - dedent: '' - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - placeholder: '... [49 unused template characters] ...' - dedent: '' - placeholder: '{% else %}' - indent: '' - newline: "\n" - whitespace: ' ' - join_clause: - keyword: JOIN - indent: '' - newline: "\n" - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: c_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - indent: '' - keyword: USING - indent: '' - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - naked_identifier: bar - dedent: '' - end_bracket: ) - dedent: '' - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endfor %}' - newline: "\n" - join_clause: - keyword: CROSS - whitespace: ' ' - keyword: JOIN - indent: '' - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: action_states - dedent: '' newline: "\n" end_of_file: '' sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/003.sql000066400000000000000000000000611503426445100240120ustar00rootroot00000000000000select 1 {% if false %} + 2 {% endif %}sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/003.yml000066400000000000000000000006701503426445100240220ustar00rootroot00000000000000file: - statement: select_statement: select_clause: keyword: select indent: "" newline: "\n" whitespace: ' ' select_clause_element: numeric_literal: '1' dedent: "" - newline: "\n" - whitespace: ' ' - placeholder: '{% if false %}' - indent: "" - placeholder: "\n + 2\n " - dedent: "" - placeholder: '{% endif %}' - end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/004.sql000066400000000000000000000000601503426445100240120ustar00rootroot00000000000000select 1 {% if true %} + 2 {% endif %}sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/004.yml000066400000000000000000000011541503426445100240210ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: select indent: "" newline: "\n" whitespace: ' ' select_clause_element: expression: - numeric_literal: '1' - newline: "\n" - whitespace: ' ' - placeholder: '{% if true %}' - indent: '' - newline: "\n" - whitespace: ' ' - binary_operator: + - whitespace: ' ' - numeric_literal: '2' dedent: "" newline: "\n" whitespace: ' ' dedent: '' placeholder: '{% endif %}' end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/005.sql000066400000000000000000000001001503426445100240060ustar00rootroot00000000000000select 0, {% for i in [1, 2, 3] %} i, {% endfor %} 
4sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/005.yml000066400000000000000000000024161503426445100240240ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '0' - comma: ',' - newline: "\n" - whitespace: ' ' - placeholder: '{% for i in [1, 2, 3] %}' - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - template_loop: "" - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - template_loop: "" - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - placeholder: '{% endfor %}' - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '4' - dedent: "" end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/006.sql000066400000000000000000000000501503426445100240130ustar00rootroot00000000000000{% if true %} SELECT 1 + 1 {%- endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/006.yml000066400000000000000000000010531503426445100240210ustar00rootroot00000000000000file: - placeholder: '{% if true %}' - indent: "" - newline: "\n" - statement: select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: expression: - numeric_literal: '1' - whitespace: ' ' - binary_operator: + - whitespace: ' ' - numeric_literal: '1' dedent: "" - placeholder: "\n" - dedent: "" - placeholder: "{%- endif %}" - newline: "\n" - end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/007.sql000066400000000000000000000001131503426445100240140ustar00rootroot00000000000000SELECT 1 {{ " + 2" if false }} FROM {%+if true-%} {{ref('foo')}} {%-endif%}sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/007.yml000066400000000000000000000013221503426445100240210ustar00rootroot00000000000000file: - statement: select_statement: - select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: numeric_literal: '1' dedent: "" - whitespace: ' ' - placeholder: '{{ " + 2" if false }}' - whitespace: ' ' - from_clause: - keyword: FROM - whitespace: ' ' - placeholder: '{%+if true-%}' - indent: "" - placeholder: ' ' - from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: foo dedent: "" - placeholder: ' ' - dedent: "" - placeholder: '{%-endif%}' - end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/008.sql000066400000000000000000000001441503426445100240210ustar00rootroot00000000000000{% for item in [1,2] -%} SELECT * FROM some_table {{ "UNION ALL\n" if not loop.last }} {%- endfor %}sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/008.yml000066400000000000000000000034021503426445100240230ustar00rootroot00000000000000file: - placeholder: "{% for item in [1,2] -%}" - indent: "" - placeholder: "\n" - statement: set_expression: - select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' dedent: "" newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: some_table dedent: "" - newline: "\n" - 
set_operator: - keyword: UNION - whitespace: ' ' - keyword: ALL - newline: "\n" - placeholder: "\n" - dedent: "" - template_loop: "" - indent: "" - placeholder: "\n" - select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' dedent: "" newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: some_table dedent: "" - newline: "\n" - placeholder: '{{ "UNION ALL\n" if not loop.last }}' - placeholder: "\n" - dedent: "" - placeholder: '{%- endfor %}' - end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/009.sql000066400000000000000000000000751503426445100240250ustar00rootroot00000000000000SELECT 1 {% if true %} ,2 FROM a {% endif %} LIMIT 1 sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/009.yml000066400000000000000000000017311503426445100240270ustar00rootroot00000000000000file: statement: select_statement: - select_clause: - keyword: SELECT - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '1' - newline: "\n" - placeholder: '{% if true %}' - indent: "" - newline: "\n" - whitespace: ' ' - comma: ',' - select_clause_element: numeric_literal: '2' - dedent: "" - newline: "\n" - from_clause: - keyword: FROM - whitespace: ' ' - from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: a dedent: "" - newline: "\n" - dedent: "" - placeholder: '{% endif %}' - newline: "\n" - limit_clause: keyword: LIMIT indent: "" whitespace: ' ' numeric_literal: '1' dedent: "" newline: "\n" end_of_file: "" sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/010.sql000066400000000000000000000001121503426445100240050ustar00rootroot00000000000000{% macro test_macro() %} SELECT 2; {% endmacro %} {{ test_macro() }} sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/010.yml000066400000000000000000000010261503426445100240140ustar00rootroot00000000000000file: - placeholder: '{% macro test_macro() %}' - indent: '' - placeholder: "\n SELECT 2;\n" - dedent: '' - placeholder: '{% endmacro %}' - newline: "\n" - newline: "\n" - newline: "\n" - whitespace: ' ' - statement: select_statement: select_clause: keyword: SELECT indent: '' whitespace: ' ' select_clause_element: numeric_literal: '2' dedent: '' - statement_terminator: ; - newline: "\n" - newline: "\n" - end_of_file: '' sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/011.sql000066400000000000000000000000651503426445100240150ustar00rootroot00000000000000{% macro test_macro() %} SELECT 2; {% endmacro %}sqlfluff-3.4.2/test/fixtures/templater/jinja_l_metas/011.yml000066400000000000000000000002531503426445100240160ustar00rootroot00000000000000file: - placeholder: '{% macro test_macro() %}' - indent: '' - placeholder: "\n SELECT 2;\n" - dedent: '' - placeholder: '{% endmacro %}' - end_of_file: '' sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/000077500000000000000000000000001503426445100253245ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/if_elif_else.sql000066400000000000000000000001201503426445100304430ustar00rootroot00000000000000{% if True %} SELECT 1 {% elif True %} SELECT 2 {% else %} SELECT 3 {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/if_elif_else_chain_scoring.sql000066400000000000000000000004121503426445100333350ustar00rootroot00000000000000{% if True %} SELECT 1 {% 
elif True %} SELECT 10 {% elif True %} SELECT 100 {% elif True %} SELECT 1000 {% elif True %} SELECT 10000 {% elif True %} SELECT 100000 {% elif True %} SELECT 1000000 {% elif True %} SELECT 10000000 {% else %} SELECT 100000000 {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/if_else_if_nested.sql000066400000000000000000000001451503426445100314730ustar00rootroot00000000000000{% if True %} SELECT 1 {% else %} {% if True %} SELECT 2 {% else %} SELECT 3 {% endif %} {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/if_true_elif_type_error_else.sql000066400000000000000000000001341503426445100337610ustar00rootroot00000000000000{% if True %} SELECT 1 {% elif True %} SELECT {{ 1 + "2" }} {% else %} SELECT 2 {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/inline_select.sql000066400000000000000000000000551503426445100306620ustar00rootroot00000000000000select {% if 1 > 2 %}1{% else %}2{% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/simple_if_false.sql000066400000000000000000000000701503426445100311630ustar00rootroot00000000000000{% if False %} SELECT 1 {% else %} SELECT 2 {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_lint_unreached_code/simple_if_true.sql000066400000000000000000000000671503426445100310560ustar00rootroot00000000000000{% if True %} SELECT 1 {% else %} SELECT 2 {% endif %} sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/000077500000000000000000000000001503426445100252035ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/.sqlfluff000066400000000000000000000001531503426445100270250ustar00rootroot00000000000000[sqlfluff] # Test setting the library_path via the global setting (not via jinja config) library_path=libs sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/jinja.sql000066400000000000000000000001761503426445100270230ustar00rootroot00000000000000SELECT 56 FROM {{ foo.schema }}.{{ foo.table("xyz") }} WHERE {{ foo.bar.baz.equals("x", 23) }} and {{ root_equals("y", 42) }} sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/jinja.yml000066400000000000000000000015331503426445100270230ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . 
- naked_identifier: foo_xyz where_clause: keyword: WHERE expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '23' - binary_operator: and - column_reference: naked_identifier: y - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '42' sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/000077500000000000000000000000001503426445100261345ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/__init__.py000066400000000000000000000002711503426445100302450ustar00rootroot00000000000000"""Module used to test __init__.py within the jinja template.""" def root_equals(col: str, val: str) -> str: """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/foo/000077500000000000000000000000001503426445100267175ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/foo/__init__.py000066400000000000000000000002571503426445100310340ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/000077500000000000000000000000001503426445100274635ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/__init__.py000066400000000000000000000000561503426445100315750ustar00rootroot00000000000000"""Module used to create module hierarchy.""" sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/baz.py000066400000000000000000000002331503426445100306070ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/libs/not_python.txt000066400000000000000000000000261503426445100310740ustar00rootroot00000000000000I am just a text file sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/other/000077500000000000000000000000001503426445100263245ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_m_libraries_module/other/__init__.py000066400000000000000000000001331503426445100304320ustar00rootroot00000000000000"""Module that should not be loaded.""" raise Exception("this file should not be loaded") sqlfluff-3.4.2/test/fixtures/templater/jinja_macro_path_does_not_exist/000077500000000000000000000000001503426445100265715ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_macro_path_does_not_exist/.sqlfluff000066400000000000000000000001151503426445100304110ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=nonexistent_macro_directory sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/000077500000000000000000000000001503426445100245115ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/.sqlfluff000066400000000000000000000002751503426445100263400ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:macros] foo2_def={%- macro foo2() -%}202{%- endmacro -%} foo3_def={%- macro foo3() -%}203{%- endmacro -%} 
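The `.sqlfluff` above combines both macro-loading mechanisms: `load_macros_from_path` pulls macro files from disk, while keys under `[sqlfluff:templater:jinja:macros]` define macros inline. As a minimal sketch of driving the same mechanism from the Python API (the `plus_one` macro and the config values are illustrative assumptions, not part of this fixture; the `FluffConfig`/`Linter` calls mirror the test modules later in this archive):

from sqlfluff.core import FluffConfig, Linter

# A minimal sketch, assuming an inline macro named `plus_one` (illustrative).
# The key name (`a_macro_def` here) is arbitrary; only the macro definition
# in the value is registered, as with foo2_def/foo3_def above.
cfg = FluffConfig.from_string(
    """[sqlfluff]
dialect = ansi
templater = jinja

[sqlfluff:templater:jinja:macros]
a_macro_def = {% macro plus_one(n) %}{{ n + 1 }}{% endmacro %}
"""
)
linter = Linter(config=cfg)
# The macro renders before parsing, so this lints as `select 2 as x`.
linted = linter.lint_string("select {{ plus_one(1) }} as x\n")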
sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/jinja.sql000066400000000000000000000000621503426445100263230ustar00rootroot00000000000000select {{ school_year_start_date('2021-05-01') }} sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/jinja.yml000066400000000000000000000267771503426445100263520ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_trunc function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: TO_DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'01 July'" binary_operator: - pipe: '|' - pipe: '|' case_expression: - keyword: case - when_clause: - keyword: when - expression: - function: function_name: function_name_identifier: TO_DATE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'01 July'" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: CONVERT_TIMEZONE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'DD Mon YYYY'" - end_bracket: ) - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - function: function_name: function_name_identifier: CONVERT_TIMEZONE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - keyword: then - expression: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: CONVERT_TIMEZONE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) - else_clause: keyword: else expression: function: function_name: function_name_identifier: extract function_contents: bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: 
CONVERT_TIMEZONE function_contents: bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) binary_operator: '-' numeric_literal: '1' - keyword: end - comma: ',' - expression: quoted_literal: "'DD Mon YYYY'" - end_bracket: ) - end_bracket: ) sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/macros/000077500000000000000000000000001503426445100257755ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/macros/school_year_start_date.sql000066400000000000000000000013751503426445100332450ustar00rootroot00000000000000{% from 'week_start_date.sql' import week_start_date %} {% macro school_year_start_date( date ) %} -- Each new school year starts at the beginning of the week July 1 falls in: date_trunc( 'week', TO_DATE( '01 July' || -- If date is on or after this calendar year's school year start, -- then date is in the school year that started this calendar year case when TO_DATE( '01 July' || extract(year from {{ week_start_date( date ) }}) , 'DD Mon YYYY' ) <= CONVERT_TIMEZONE( 'UTC', 'America/New_York', {{date}} ) then extract(year from {{ week_start_date( date ) }} ) -- Otherwise, school year started in previous calendar year else extract(year from {{ week_start_date( date ) }} ) - 1 end , 'DD Mon YYYY' ) ) {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_n_nested_macros/macros/week_start_date.sql000066400000000000000000000002531503426445100316630ustar00rootroot00000000000000-- Start of the week the date belongs to {% macro week_start_date(date) -%} date_trunc('week', CONVERT_TIMEZONE( 'UTC', 'America/New_York', {{date}} ) ) {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_o_config_override_dbt_builtins/000077500000000000000000000000001503426445100275725ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_o_config_override_dbt_builtins/.sqlfluff000066400000000000000000000001471503426445100314170ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] dbt_is_incremental = {% macro is_incremental() %}False{% endmacro %} override_dbt_builtins.sql000066400000000000000000000002061503426445100346130ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_o_config_override_dbt_builtinsSELECT col1 FROM t_table1 {% if is_incremental() is true %} --This is should not be part of the rendered SQL! 
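    -- (The .sqlfluff above overrides the dbt builtin so that is_incremental()
    -- renders the string 'False'; the `is true` test therefore fails and this
    -- branch is dropped, which is why the .yml fixture has no WHERE clause.)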
WHERE FALSE {% endif %} override_dbt_builtins.yml000066400000000000000000000006011503426445100346140ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_o_config_override_dbt_builtinsfile: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table1 sqlfluff-3.4.2/test/fixtures/templater/jinja_p_disable_dbt_builtins/000077500000000000000000000000001503426445100260325ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_p_disable_dbt_builtins/.sqlfluff000066400000000000000000000001431503426445100276530ustar00rootroot00000000000000[sqlfluff:templater:jinja] apply_dbt_builtins = False [sqlfluff:templater:jinja:context] var = 30 sqlfluff-3.4.2/test/fixtures/templater/jinja_p_disable_dbt_builtins/disable_dbt_builtins.sql000066400000000000000000000001761503426445100327240ustar00rootroot00000000000000-- To test if dbt builtins have been disabled we try to call -- `var` as a variable instead of as a function SELECT {{ var }} sqlfluff-3.4.2/test/fixtures/templater/jinja_p_disable_dbt_builtins/disable_dbt_builtins.yml000066400000000000000000000002231503426445100327170ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: 30 sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/000077500000000000000000000000001503426445100261015ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/.sqlfluff000066400000000000000000000001251503426445100277220ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros,more_macros,even_more_macros sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros/000077500000000000000000000000001503426445100314245ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros/subdir/000077500000000000000000000000001503426445100327145ustar00rootroot00000000000000subdir_foo.sql000066400000000000000000000000501503426445100355040ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros/subdir{%- macro foo4() -%}104{%- endmacro -%} ultimate_foo.sql000066400000000000000000000000501503426445100345500ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros{%- macro foo3() -%}103{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/jinja.sql000066400000000000000000000001341503426445100277130ustar00rootroot00000000000000select {{ foo1() }}, {{ foo2() }}, {{ foo3() }}, {{ foo4() }} from my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/jinja.yml000066400000000000000000000011341503426445100277160ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '102' - comma: ',' - select_clause_element: numeric_literal: '103' - comma: ',' - select_clause_element: numeric_literal: '104' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table 
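The `jinja_q_multiple_path_macros` fixture above exercises a comma-separated `load_macros_from_path`, with macros resolved from every listed directory (including subdirectories such as `even_more_macros/subdir`). A rough sketch of exercising the same option through the Python API — the snippet is illustrative and assumes it is run from the fixture directory so the relative paths resolve:

from sqlfluff.core import FluffConfig, Linter

# Illustrative sketch: the three paths mirror the fixture's directory names.
cfg = FluffConfig.from_string(
    """[sqlfluff]
dialect = ansi
templater = jinja

[sqlfluff:templater:jinja]
load_macros_from_path = macros,more_macros,even_more_macros
"""
)
linter = Linter(config=cfg)
linted = linter.lint_string(
    "select {{ foo1() }}, {{ foo2() }}, {{ foo3() }}, {{ foo4() }} from my_table\n"
)
# Each call should render to its numeric literal (101..104), as in jinja.yml.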
sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/macros/000077500000000000000000000000001503426445100273655ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/macros/foo.sql000066400000000000000000000000501503426445100306640ustar00rootroot00000000000000{%- macro foo1() -%}101{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/more_macros/000077500000000000000000000000001503426445100304075ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_q_multiple_path_macros/more_macros/other_foo.sql000066400000000000000000000000501503426445100331070ustar00rootroot00000000000000{%- macro foo2() -%}102{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/000077500000000000000000000000001503426445100252025ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/jinja.sql000066400000000000000000000000311503426445100270100ustar00rootroot00000000000000{{ query_proxy('xyz') }} sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/jinja.yml000066400000000000000000000012021503426445100270130ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: foo_xyz where_clause: keyword: WHERE expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '23' sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/libs/000077500000000000000000000000001503426445100261335ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/libs/bar.py000066400000000000000000000002331503426445100272470ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/libs/foo.py000066400000000000000000000002571503426445100272740ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/libs/not_python.txt000066400000000000000000000000261503426445100310730ustar00rootroot00000000000000I am just a text file sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/macros/000077500000000000000000000000001503426445100264665ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_r_library_in_macro/macros/query_proxy.sql000066400000000000000000000002001503426445100316050ustar00rootroot00000000000000{% macro query_proxy(tbl) %}SELECT 56 FROM {{ foo.schema }}.{{ foo.bar("xyz") }} WHERE {{ bar.equals("x", 23) }} {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/000077500000000000000000000000001503426445100255525ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/.sqlfluff000066400000000000000000000000571503426445100273770ustar00rootroot00000000000000[sqlfluff:templater:jinja] library_path = libs sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/jinja.sql000066400000000000000000000000311503426445100273600ustar00rootroot00000000000000SELECT "{{ now | ds 
}}"; sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/jinja.yml000066400000000000000000000004211503426445100273650ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: 'SELECT' select_clause_element: column_reference: quoted_identifier: '"2006-01-02"' statement_terminator: ';' sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/libs/000077500000000000000000000000001503426445100265035ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_s_filters_in_library/libs/__init__.py000066400000000000000000000010101503426445100306040ustar00rootroot00000000000000"""Module used to test filters within the jinja template.""" from __future__ import annotations import datetime # https://github.com/apache/airflow/blob/main/airflow/templates.py#L50 def ds_filter(value: datetime.date | datetime.time | None) -> str | None: """Date filter.""" if value is None: return None return value.strftime("%Y-%m-%d") SQLFLUFF_JINJA_FILTERS = {"ds": ds_filter} now = datetime.datetime( 2006, 1, 2, 3, 4, 5, 0, tzinfo=datetime.timezone(-datetime.timedelta(hours=7)) ) sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/000077500000000000000000000000001503426445100257045ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/.sqlfluff000066400000000000000000000001041503426445100275220ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros,more_macros sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/macros/000077500000000000000000000000001503426445100271705ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/macros/echo.sql000066400000000000000000000000571503426445100306310ustar00rootroot00000000000000{% macro echo(text) %} {{text}} {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/macros/subdir/000077500000000000000000000000001503426445100304605ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/macros/subdir/include_comment.sql000066400000000000000000000000221503426445100343400ustar00rootroot00000000000000-- Just a comment sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/more_macros/000077500000000000000000000000001503426445100302125ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_slice_template_macros/more_macros/echoecho.sql000066400000000000000000000000741503426445100325110ustar00rootroot00000000000000{% macro echoecho(text) %} {{text}} {{text}} {% endmacro %} sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/000077500000000000000000000000001503426445100255005ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/.sqlfluff000066400000000000000000000001411503426445100273170ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macro_load loader_search_path=search_a,search_b sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/jinja.sql000066400000000000000000000011371503426445100273160ustar00rootroot00000000000000{% import 'search_a.sql' as search_a_pkg %} {% import 'subdir/search_a_subdir.sql' as search_a_subdir_pkg %} {% import 'search_b.sql' as search_b_pkg %} select -- the second expression on each line should evaluate to nothing, -- since these macros are not loaded into the global namespace. 
{{ search_a_pkg.search_a() }} {{ search_a() }}, {{ search_a_subdir_pkg.search_a_subdir() }} {{ search_a_subdir() }}, {{ search_b_pkg.search_b() }} {{ search_b() }}, -- these are still being loaded from the global namespace {{ macro_load() }}, {{ macro_load_subdir() }} from my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/jinja.yml000066400000000000000000000013371503426445100273220ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'search_a'" - comma: ',' - select_clause_element: quoted_literal: "'search_a_subdir'" - comma: ',' - select_clause_element: quoted_literal: "'search_b'" - comma: ',' - select_clause_element: quoted_literal: "'macro_load'" - comma: ',' - select_clause_element: quoted_literal: "'macro_load_subdir'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/macro_load/000077500000000000000000000000001503426445100276005ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/macro_load/macro_load.sql000066400000000000000000000000671503426445100324240ustar00rootroot00000000000000{%- macro macro_load() -%}'macro_load'{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/macro_load/subdir/000077500000000000000000000000001503426445100310705ustar00rootroot00000000000000macro_load_subdir.sql000066400000000000000000000001051503426445100351760ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/macro_load/subdir{%- macro macro_load_subdir() -%}'macro_load_subdir'{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_a/000077500000000000000000000000001503426445100272455ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_a/search_a.sql000066400000000000000000000000631503426445100315320ustar00rootroot00000000000000{%- macro search_a() -%}'search_a'{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_a/subdir/000077500000000000000000000000001503426445100305355ustar00rootroot00000000000000search_a_subdir.sql000066400000000000000000000001011503426445100343040ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_a/subdir{%- macro search_a_subdir() -%}'search_a_subdir'{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_b/000077500000000000000000000000001503426445100272465ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/jinja_t_loader_search_path/search_b/search_b.sql000066400000000000000000000000631503426445100315340ustar00rootroot00000000000000{%- macro search_b() -%}'search_b'{%- endmacro -%} sqlfluff-3.4.2/test/fixtures/templater/placeholder_flyway_var/000077500000000000000000000000001503426445100247205ustar00rootroot00000000000000sqlfluff-3.4.2/test/fixtures/templater/placeholder_flyway_var/placeholder_flyway_var_a.sql000066400000000000000000000003061503426445100324650ustar00rootroot00000000000000USE ${flyway:database}.test_schema; CREATE OR REPLACE STAGE stg_data_export_${env_name} URL = 's3://${s3_data_lake_bucket}/${env_name}/exports/stg_data_export' STORAGE_INTEGRATION = s3_integ_main; 
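The `placeholder_flyway_var` fixture above uses Flyway-style `${...}` placeholders rather than Jinja. A hedged sketch of linting such a statement with the placeholder templater — the `param_style = flyway_var` setting and the substitution values are assumptions for illustration; the fixture's own `.sqlfluff` is not shown in this section:

from sqlfluff.core import FluffConfig, Linter

# Sketch only: `flyway_var` is assumed to be the placeholder style matching
# ${flyway:database}-style variables; the values below are invented.
cfg = FluffConfig.from_string(
    """[sqlfluff]
dialect = snowflake
templater = placeholder

[sqlfluff:templater:placeholder]
param_style = flyway_var
flyway:database = test_db
env_name = dev
s3_data_lake_bucket = my-bucket
"""
)
linter = Linter(config=cfg)
# The templater substitutes the variables before parsing, so this lints
# as `USE test_db.test_schema;`.
linted = linter.lint_string("USE ${flyway:database}.test_schema;\n")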
sqlfluff-3.4.2/test/generate_parse_fixture_yml.py000066400000000000000000000160731503426445100223170ustar00rootroot00000000000000"""Utility to generate yml files for all the parsing examples."""

import fnmatch
import multiprocessing
import os
import re
import sys
import time
from collections import defaultdict
from typing import Callable, Optional, TypeVar

import click
import yaml
from conftest import (
    ParseExample,
    compute_parse_tree_hash,
    get_parse_fixtures,
    parse_example_file,
)

from sqlfluff.core.errors import SQLParseError

S = TypeVar("S", bound="ParseExample")


def distribute_work(work_items: list[S], work_fn: Callable[[S], None]) -> None:
    """Distribute work and keep track of progress."""
    # Build up a dict of sets, where the key is the dialect and the set
    # contains all the expected cases. As cases return we'll check them
    # off.
    success_map = {}
    expected_cases = defaultdict(set)
    for case in work_items:
        expected_cases[case.dialect].add(case)
    errors = []
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        for example, result in pool.imap_unordered(work_fn, work_items):
            if result is not None:
                errors.append(result)
                success_map[example] = False
            else:
                success_map[example] = True

            expected_cases[example.dialect].remove(example)
            # Check to see whether a dialect is complete
            if not expected_cases[example.dialect]:
                # It's done. Report success rate.
                local_success_map = {
                    k: v for k, v in success_map.items() if k.dialect == example.dialect
                }
                if all(local_success_map.values()):
                    print(f"{example.dialect!r} complete.\t\tAll Success ✅")
                else:
                    fail_files = [
                        k.sqlfile for k, v in local_success_map.items() if not v
                    ]
                    print(
                        f"{example.dialect!r} complete.\t\t{len(fail_files)} fails. ⚠️"
                    )
                    for fname in fail_files:
                        print(f"  - {fname!r}")

    if errors:
        print(errors)
        print("FAILED TO GENERATE ALL CASES")
        sys.exit(1)


def _create_file_path(example: ParseExample, ext: str = ".yml") -> str:
    dialect, sqlfile = example
    root, _ = os.path.splitext(sqlfile)
    path = os.path.join("test", "fixtures", "dialects", dialect, root + ext)
    return path


def _is_matching_new_criteria(example: ParseExample):
    """Return True if the YAML doesn't exist or is older than the SQL file."""
    yaml_path = _create_file_path(example)
    if not os.path.exists(yaml_path):
        return True

    sql_path = os.path.join(
        "test",
        "fixtures",
        "dialects",
        example.dialect,
        example.sqlfile,
    )
    return os.path.getmtime(yaml_path) < os.path.getmtime(sql_path)


def generate_one_parse_fixture(
    example: ParseExample,
) -> tuple[ParseExample, Optional[SQLParseError]]:
    """Parse example SQL file, write parse tree to YAML file."""
    dialect, sqlfile = example
    sql_path = _create_file_path(example, ".sql")
    try:
        tree = parse_example_file(dialect, sqlfile)
    except Exception as err:
        # Catch parsing errors, and wrap the file path in the error.
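        # (Returning the error, rather than raising, keeps the worker pool
        # alive: distribute_work() above collects (example, error) tuples and
        # reports per-dialect success once every case for a dialect is in.)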
return example, SQLParseError(f"Fatal parsing error: {sql_path}: {err}") # Check we don't have any base types or unparsable sections types = tree.type_set() if "base" in types: return example, SQLParseError(f"Unnamed base section when parsing: {sql_path}") if "unparsable" in types: return example, SQLParseError(f"Could not parse: {sql_path}") _hash = compute_parse_tree_hash(tree) # Remove the .sql file extension path = _create_file_path(example) with open(path, "w", newline="\n", encoding="utf8") as f: r: Optional[dict[str, Optional[str]]] = None if not tree: f.write("") return example, None records = tree.as_record(code_only=True, show_raw=True) assert records, "TypeGuard" r = dict([("_hash", _hash), *list(records.items())]) print( "# YML test files are auto-generated from SQL files and should not be " "edited by", '# hand. To help enforce this, the "hash" field in the file must match ' "a hash", "# computed by SQLFluff when running the tests. Please run", "# `python test/generate_parse_fixture_yml.py` to generate them after " "adding or", "# altering SQL files.", file=f, sep="\n", ) yaml.dump( data=r, stream=f, default_flow_style=False, sort_keys=False, allow_unicode=True, ) return example, None def gather_file_list( dialect: Optional[str] = None, glob_match_pattern: Optional[str] = None, new_only: bool = False, ) -> list[ParseExample]: """Gather the list of files to generate fixtures for. Apply filters as required.""" parse_success_examples, _ = get_parse_fixtures() if new_only: parse_success_examples = [ example for example in parse_success_examples if _is_matching_new_criteria(example) ] if dialect: dialect = dialect.lower() parse_success_examples = [ example for example in parse_success_examples if example[0] == dialect ] if len(parse_success_examples) == 0: raise ValueError(f'Unknown Dialect "{dialect}"') if not glob_match_pattern: return parse_success_examples regex = re.compile(fnmatch.translate(glob_match_pattern)) return [ example for example in parse_success_examples if regex.match(example[1]) is not None ] @click.command() @click.option( "--filter", "-f", default=None, help="A glob filter to apply to file names." ) @click.option("--dialect", "-d", default=None, help="Filter to a given dialect.") @click.option( "--new-only", "new_only", is_flag=True, default=False, help="Only create missing fixtures.", ) def generate_parse_fixtures( filter: Optional[str], dialect: Optional[str], new_only: bool ): """Generate fixture or a subset based on dialect or filename glob match.""" filter_str = filter or "*" dialect_str = dialect or "all" print("Match Pattern Received:") print(f"\tfilter={filter_str} dialect={dialect_str} new-only={new_only}") parse_success_examples = gather_file_list(dialect, filter, new_only) print(f"Found {len(parse_success_examples)} file(s) to generate") t0 = time.monotonic() try: distribute_work(parse_success_examples, generate_one_parse_fixture) except SQLParseError as err: # If one fails, exit early and cleanly. print(f"PARSING FAILED: {err}") sys.exit(1) dt = time.monotonic() - t0 print(f"Built {len(parse_success_examples)} fixtures in {dt:.2f}s.") def main(): """Find all example SQL files, parse and create YAML files.""" generate_parse_fixtures() if __name__ == "__main__": main() sqlfluff-3.4.2/test/patch_lcov.py000066400000000000000000000023511503426445100170220ustar00rootroot00000000000000"""Replaces .tox/ paths in the lcov file with paths relative to repo root. 
Context: When the CI build runs tests, it uses tox, which installs SQLFluff
in a virtual environment. Thus, the coverage.lcov file generated by the tests
contains paths to the virtual environment. This script replaces those paths
with paths relative to the repo root. This allows the lcov file to be used by
Coveralls. Without this, Coveralls has valid coverage info, but it generates
URLs that point to source files that don't exist in the SQLFluff GitHub repo.

For example, we want to change this:
SF:.tox/py/lib/python3.10/site-packages/sqlfluff/__init__.py

to this:
SF:src/sqlfluff/__init__.py
"""

import re
from pathlib import Path

path = Path("coverage.lcov")
if path.exists():
    lines = path.read_text().splitlines()
    modified_lines = []
    for line in lines:
        if line.startswith("SF:.tox"):
            m = re.search(r"^(SF:).*(sqlfluff/.*)", line)
            if m:
                modified_lines.append(f"{m.group(1)}src/{m.group(2)}")
            else:
                print(f"Could not patch line: {line}")
                modified_lines.append(line)
        else:
            modified_lines.append(line)
    path.write_text("\n".join(modified_lines))
sqlfluff-3.4.2/test/rules/000077500000000000000000000000001503426445100154575ustar00rootroot00000000000000sqlfluff-3.4.2/test/rules/std_AL04_test.py000066400000000000000000000046261503426445100204100ustar00rootroot00000000000000"""Tests the python routines within AL04."""

import sqlfluff


def test__rules__std_AL04_one_aliases_one_duplicate():
    """Verify the correct error message when one duplicate table alias occurs once."""
    sql = """
        SELECT
            a.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
    """
    result = sqlfluff.lint(sql)
    assert "AL04" in [r["code"] for r in result]
    assert [r["code"] for r in result].count("AL04") == 1


def test__rules__std_AL04_one_aliases_two_duplicate():
    """Verify the correct error message when one duplicate table alias occurs twice."""
    sql = """
        SELECT
            a.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
        JOIN table_3 AS a ON a.pk = a.pk
    """
    result = sqlfluff.lint(sql)
    result_filter = [r for r in result if r["code"] == "AL04"]
    # Error message only shows two times, not three
    assert len(result_filter) == 2
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'a'" in r["description"]
            ]
        )
        == 2
    )
    # Test specific line number
    assert result_filter[0]["start_line_no"] == 5
    assert result_filter[1]["start_line_no"] == 6


def test__rules__std_AL04_complex():
    """Verify that AL04 returns the correct error message for a complex example."""
    sql = """
        SELECT
            a.pk,
            b.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
        JOIN table_3 AS b ON a.pk = b.pk
        JOIN table_4 AS b ON b.pk = b.pk
        JOIN table_5 AS a ON b.pk = a.pk
    """
    result = sqlfluff.lint(sql)
    result_filter = [r for r in result if r["code"] == "AL04"]
    # Error message only shows three times in total
    assert len(result_filter) == 3
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'a'" in r["description"]
            ]
        )
        == 2
    )
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'b'" in r["description"]
            ]
        )
        == 1
    )
    # Test specific line number
    assert result_filter[0]["start_line_no"] == 6
    assert result_filter[1]["start_line_no"] == 8
    assert result_filter[2]["start_line_no"] == 9
sqlfluff-3.4.2/test/rules/std_AL09_CP02_RF06_combo_test.py000066400000000000000000000112601503426445100230470ustar00rootroot00000000000000"""Test the interactions of AL09, CP02 & RF06.
AL09: Self aliasing
CP02: Identifier Capitalisation
RF06: Identifier Quoting
"""

import pytest

from sqlfluff.core import Linter

input_query = """
select
    a as A,
    B as b,
    "C" as C,
    "d" as d,
    "E" as e,
    "f" as F,
    g as "G",
    h as h,
    I as I
from foo
"""


@pytest.mark.parametrize(
    "rules,dialect,fixed_sql,post_fix_errors",
    [
        # NOTE: The first few examples here are with ANSI which is
        # configured as a natively UPPERCASE dialect.
        (
            ["AL09"],
            "ansi",
            """
select
    a as A,
    B as b,
    "C" as C,
    "d" as d,
    "E" as e,
    "f" as F,
    g as "G",
    h,
    I
from foo
""",
            [
                # These two (A & B) are detected as self aliases, but not
                # auto-fixed, because the intent is ambiguous.
                # Should the alias/reference be quoted or removed?
                ("AL09", 3, 5),
                ("AL09", 4, 5),
            ],
        ),
        (
            ["CP02"],
            "ansi",
            """
select
    a as a,
    b as b,
    "C" as c,
    "d" as d,
    "E" as e,
    "f" as f,
    g as "G",
    h as h,
    i as i
from foo
""",
            [],
        ),
        (
            ["RF06"],
            "ansi",
            """
select
    a as A,
    B as b,
    C as C,
    "d" as d,
    E as e,
    "f" as F,
    g as G,
    h as h,
    I as I
from foo
""",
            [],
        ),
        (
            ["AL09", "CP02"],
            "ansi",
            """
select
    a,
    b,
    "C" as c,
    "d" as d,
    "E" as e,
    "f" as f,
    g as "G",
    h,
    i
from foo
""",
            # NOTE: When CP02 is active, AL09 errors are no longer
            # present, because CP02 allowed them to be resolved.
            [],
        ),
        (
            ["AL09", "RF06"],
            "ansi",
            """
select
    a as A,
    B as b,
    C,
    "d" as d,
    E as e,
    "f" as F,
    g as G,
    h,
    I
from foo
""",
            [
                # Without CP02, the errors on lines 3 & 5 are present. They're
                # detected as self-aliases, but with ambiguous fixes (A & B).
                ("AL09", 3, 5),
                ("AL09", 4, 5),
                # Additionally, with RF06 removing quotes, it creates two
                # new issues, where the previously quoted aliases are now
                # unquoted, but still different cases (E & G).
                ("AL09", 7, 5),
                ("AL09", 9, 5),
            ],
        ),
        (
            ["CP02", "RF06"],
            "ansi",
            """
select
    a as a,
    b as b,
    c as c,
    "d" as d,
    e as e,
    "f" as f,
    g as g,
    h as h,
    i as i
from foo
""",
            [],
        ),
        (
            ["AL09", "CP02", "RF06"],
            "ansi",
            """
select
    a,
    b,
    c,
    "d" as d,
    e,
    "f" as f,
    g,
    h,
    i
from foo
""",
            [],
        ),
        # Postgres is natively lowercase, and so the results are
        # different.
        (
            ["AL09", "CP02", "RF06"],
            "postgres",
            """
select
    a,
    b,
    "C" as c,
    d,
    "E" as e,
    f,
    g as "G",
    h,
    i
from foo
""",
            [],
        ),
        # DuckDB is always case insensitive so likewise has a different result.
        (
            ["AL09", "CP02", "RF06"],
            "duckdb",
            """
select
    a,
    b,
    c,
    d,
    e,
    f,
    g,
    h,
    i
from foo
""",
            [],
        ),
        # Clickhouse is always case sensitive and has a more conservative result.
        # All the quotes are gone, but all the aliases with a case change remain.
        (
            # NOTE: Testing without CP02 as that rule is much less appropriate
            # for clickhouse.
            ["AL09", "RF06"],
            "clickhouse",
            """
select
    a as A,
    B as b,
    C,
    d,
    E as e,
    f as F,
    g as G,
    h,
    I
from foo
""",
            # None of those aliases should be flagged as an issue.
            [],
        ),
    ],
)
def test__rules__std_AL09_CP02_RF06(rules, dialect, fixed_sql, post_fix_errors):
    """Test interactions between AL09, CP02 & RF06."""
    print(f"Running with rules: {rules}")
    linter = Linter(dialect=dialect, rules=rules)
    result = linter.lint_string(input_query, fix=True)
    fixed, _ = result.fix_string()
    assert fixed == fixed_sql
    # Check violations after fix.
    # NOTE: We should really use the rules testing utilities here
    # but they don't yet support multiple rules.
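    # Re-lint the fixed output (without fixing) so the remaining violations
    # can be compared against the expected post-fix error tuples.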
    post_fix_result = linter.lint_string(fixed, fix=False)
    assert post_fix_result.check_tuples() == post_fix_errors
sqlfluff-3.4.2/test/rules/std_AM06_test.py000066400000000000000000000031651503426445100204120ustar00rootroot00000000000000"""Tests the python routines within AM06."""

import sqlfluff


def test__rules__std_AM06_raised() -> None:
    """Test case for multiple AM06 errors raised with 'consistent' setting."""
    sql = """
    SELECT
        foo,
        bar,
        sum(baz) AS sum_value
    FROM (
        SELECT
            foo,
            bar,
            sum(baz) AS baz
        FROM fake_table
        GROUP BY
            foo, bar
    )
    GROUP BY
        1, 2
    ORDER BY
        1, 2;
    """
    result = sqlfluff.lint(sql)

    results_AM06 = [r for r in result if r["code"] == "AM06"]
    assert len(results_AM06) == 2
    assert (
        results_AM06[0]["description"]
        == "Inconsistent column references in 'GROUP BY/ORDER BY' clauses."
    )


def test__rules__std_AM06_unparsable() -> None:
    """Test that an unparsable GROUP BY doesn't result in a bad AM06 error."""
    sql = """
    SELECT foo.set.barr
    FROM foo
    GROUP BY
      foo.set.barr
    """
    result = sqlfluff.lint(sql)

    results_AM06 = [r for r in result if r["code"] == "AM06"]
    results_prs = [r for r in result if r["code"] == "PRS"]
    assert len(results_AM06) == 0
    assert len(results_prs) > 0


def test__rules__std_AM06_noqa() -> None:
    """Test that an unparsable GROUP BY with noqa doesn't result in a bad AM06 error."""
    sql = """
    SELECT foo.set.barr  --noqa: PRS
    FROM foo
    GROUP BY
      f@oo.set.bar.r  --noqa: PRS
    """
    result = sqlfluff.lint(sql)

    results_AM06 = [r for r in result if r["code"] == "AM06"]
    results_prs = [r for r in result if r["code"] == "PRS"]
    assert len(results_AM06) == 0
    assert len(results_prs) == 0
sqlfluff-3.4.2/test/rules/std_CV02_test.py000066400000000000000000000007061503426445100204170ustar00rootroot00000000000000"""Tests the python routines within CV02."""

import sqlfluff


def test__rules__std_CV02_raised() -> None:
    """CV02 is raised for use of ``IFNULL`` or ``NVL``."""
    sql = "SELECT\n\tIFNULL(NULL, 100),\n\tNVL(NULL,100);"
    result = sqlfluff.lint(sql, rules=["CV02"])

    assert len(result) == 2
    assert result[0]["description"] == "Use 'COALESCE' instead of 'IFNULL'."
    assert result[1]["description"] == "Use 'COALESCE' instead of 'NVL'."
sqlfluff-3.4.2/test/rules/std_CV09_test.py000066400000000000000000000014531503426445100204260ustar00rootroot00000000000000"""Tests the python routines within CV09."""

from sqlfluff.core import FluffConfig, Linter


def test__rules__std_CV09_raised() -> None:
    """CV09 is raised for use of blocked words with correct error message."""
    sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
    cfg = FluffConfig(overrides={"dialect": "ansi"})
    cfg.set_value(
        config_path=["rules", "convention.blocked_words", "blocked_words"],
        val="myoldfunction,deprecated_table",
    )
    linter = Linter(config=cfg)
    result_records = linter.lint_string_wrapped(sql).as_records()
    result = result_records[0]["violations"]

    assert len(result) == 2
    assert result[0]["description"] == "Use of blocked word 'MYOLDFUNCTION'."
    assert result[1]["description"] == "Use of blocked word 'deprecated_table'."
sqlfluff-3.4.2/test/rules/std_JJ01_test.py000066400000000000000000000027651503426445100204160ustar00rootroot00000000000000"""Tests JJ01 in a parallel environment.

This rule has had issues in the past with raising exceptions
in parallel environments because it tries to access the templater.
"""

import pickle

from sqlfluff.core import FluffConfig, Linter


def test_lint_jj01_pickled_config():
    """Tests the error catching behavior of _lint_path_parallel_wrapper().

    Test on MultiThread runner because otherwise we have pickling issues.
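
    The config is round-tripped through pickle below to simulate it crossing
    a process boundary, which is where this rule previously raised errors.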
""" fname = "test/fixtures/linter/jinja_spacing.sql" fresh_cfg = FluffConfig(overrides={"dialect": "ansi", "rules": "JJ01"}) # Parse the file with the fresh config. linter = Linter(config=fresh_cfg) parsed = next(linter.parse_path(fname)) rule_pack = linter.get_rulepack(config=fresh_cfg) rule = rule_pack.rules[0] # Check we got the right rule. assert rule.code == "JJ01" # Pickle the config and rehydrate to simulate threaded operation pickled = pickle.dumps(fresh_cfg) unpickled_cfg = pickle.loads(pickled) # Crawl with the pickled config. Check we don't get an error. linting_errors, _, fixes, _ = rule.crawl( parsed.tree, dialect=unpickled_cfg.get("dialect_obj"), fix=True, templated_file=parsed.parsed_variants[0].templated_file, ignore_mask=None, fname=fname, config=unpickled_cfg, # <- NOTE: This is the important part. ) # Check we successfully got the right results. assert len(linting_errors) == 1 assert linting_errors[0].check_tuple() == ("JJ01", 3, 15) sqlfluff-3.4.2/test/rules/std_LT01_LT02_LT09_combo_test.py000066400000000000000000000012671503426445100231200ustar00rootroot00000000000000"""Tests issue #1373 doesn't reoccur. The combination of LT02 (incorrect indentation), LT09 (select targets), and LT01 (unnecessary white space) can result in incorrect indentation. """ import sqlfluff def test__rules__std_LT02_LT09_LT01(): """Verify that double indents don't flag LT01.""" sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" fixed_sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" result = sqlfluff.fix(sql, exclude_rules=["LT13"]) assert result == fixed_sql sqlfluff-3.4.2/test/rules/std_LT01_LT04_test.py000066400000000000000000000011551503426445100211670ustar00rootroot00000000000000"""Tests the python routines within LT01.""" import sqlfluff def test__rules__std_LT01_single_raise() -> None: """Test case for multiple LT01 errors raised when no post comma whitespace.""" # This query used to triple count LT01. Added memory to log previously fixed commas # (issue #2001). sql = """ SELECT col_a AS a ,col_b AS b FROM foo; """ result = sqlfluff.lint(sql, rules=["LT01", "LT04"]) results_LT01 = [r for r in result if r["code"] == "LT01"] results_LT04 = [r for r in result if r["code"] == "LT04"] assert len(results_LT01) == 1 assert len(results_LT04) == 1 sqlfluff-3.4.2/test/rules/std_LT01_ST02_test.py000066400000000000000000000021561503426445100211760ustar00rootroot00000000000000"""Tests the python routines within LT04 and ST06.""" import pytest from sqlfluff.core import FluffConfig, Linter @pytest.mark.parametrize( ["in_sql", "out_sql"], [ ( """ select case when ended_at is null or date(ended_at) > current_date() then true else false end as is_active from foo """, """ select coalesce(ended_at is null or date(ended_at) > current_date(), false) as is_active from foo """, ), ], ) def test_rules_std_LT01_and_ST02_interaction(in_sql, out_sql) -> None: """Test interaction between LT04 and ST06. Test sql with two newlines with leading commas expecting trailing. """ # Lint expected rules. cfg = FluffConfig.from_string( """[sqlfluff] dialect = ansi rules = LT01,ST02 """ ) linter = Linter(config=cfg) # Return linted/fixed file. linted_file = linter.lint_string(in_sql, fix=True) # Check expected lint errors are raised. assert set([v.rule.code for v in linted_file.violations]) == {"ST02"} # Check file is fixed. 
    assert linted_file.fix_string()[0] == out_sql
sqlfluff-3.4.2/test/rules/std_LT02_LT04_test.py000066400000000000000000000027701503426445100211740ustar00rootroot00000000000000"""Tests the python routines within LT02 and LT04."""

import pytest

from sqlfluff.core import FluffConfig, Linter


@pytest.mark.parametrize(
    ["in_sql", "out_sql"],
    [
        (
            """SELECT
    acct_id,
    date_x,
    't' AS test,
    CASE WHEN condition_1 = '1' THEN '' ELSE condition_1 END AS case_1,
    CASE WHEN condition_2 = '2' THEN '' ELSE condition_2 END AS case_2,
    dollar_amt,
FROM table_x""",
            """SELECT
    acct_id
    , date_x
    , 't' AS test
    , CASE WHEN condition_1 = '1' THEN '' ELSE condition_1 END AS case_1
    , CASE WHEN condition_2 = '2' THEN '' ELSE condition_2 END AS case_2
    , dollar_amt,
FROM table_x""",
        ),
    ],
)
def test_rules_std_LT02_LT04_interaction_indentation_leading(in_sql, out_sql) -> None:
    """Test interaction between LT02 and LT04.

    Test SQL with trailing commas, expecting leading commas after the fix.
    """
    # Lint expected rules.
    cfg = FluffConfig.from_string(
        """[sqlfluff]
dialect = snowflake
rules = LT02, LT04

[sqlfluff:layout:type:comma]
spacing_before = touch
line_position = leading
"""
    )
    linter = Linter(config=cfg)

    # Return linted/fixed file.
    linted_file = linter.lint_string(in_sql, fix=True)

    # Check expected lint errors are raised.
    assert set([v.rule.code for v in linted_file.violations]) == {"LT04"}

    # Check file is fixed.
    assert linted_file.fix_string()[0] == out_sql
sqlfluff-3.4.2/test/rules/std_LT02_LT11_combo_test.py000066400000000000000000000023701503426445100223450ustar00rootroot00000000000000"""Tests the combination of LT02 and LT11.

LT02: Indentation not consistent with previous lines
LT11: Set operators should be surrounded by newlines

The auto-fix for LT11 does not insert the correct indentation, just newlines.
It relies on LT02 to sort out the indentation later. That interaction is what
is tested here.
"""

import sqlfluff


def test__rules__std_LT02_LT11_union_all_in_subquery_lint():
    """Verify that LT11 reports lint errors in subqueries."""
    sql = (
        "SELECT * FROM (\n"
        "    SELECT 'g' UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL SELECT 'j'\n"
        ")\n"
    )
    result = sqlfluff.lint(sql)

    assert "LT11" in [r["code"] for r in result]


def test__rules__std_LT02_LT11_union_all_in_subquery_fix():
    """Verify that combining rules LT02 and LT11 produces correct indentation."""
    sql = (
        "SELECT c FROM (\n"
        "    SELECT 'g' UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL SELECT 'j'\n"
        ")\n"
    )
    fixed_sql = (
        "SELECT c FROM (\n"
        "    SELECT 'g'\n"
        "    UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL\n"
        "    SELECT 'j'\n"
        ")\n"
    )
    result = sqlfluff.fix(sql)

    assert result == fixed_sql
sqlfluff-3.4.2/test/rules/std_LT03_test.py000066400000000000000000000044041503426445100204260ustar00rootroot00000000000000"""Tests the python routines within LT03."""

import sqlfluff
from sqlfluff.core import Linter
from sqlfluff.core.config import FluffConfig

EXPECTED_LEADING_MESSAGE = (
    "Found trailing binary operator. Expected only leading near line breaks."
)
EXPECTED_TRAILING_MESSAGE = (
    "Found leading binary operator. Expected only trailing near line breaks."
) def test__rules__std_LT03_default(): """Verify that LT03 returns the correct error message for default (trailing).""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ result = sqlfluff.lint(sql) assert "LT03" in [r["code"] for r in result] assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result] def test__rules__std_LT03_leading(): """Verify correct error message when leading is used.""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ config = FluffConfig( configs={"layout": {"type": {"binary_operator": {"line_position": "leading"}}}}, overrides={"dialect": "ansi"}, ) # The sqlfluff.lint API doesn't allow us to pass config so need to do what it does linter = Linter(config=config) result_records = linter.lint_string_wrapped(sql).as_records() result = result_records[0]["violations"] assert "LT03" in [r["code"] for r in result] assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result] def test__rules__std_LT03_trailing(): """Verify correct error message when trailing is used.""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ config = FluffConfig( configs={ "layout": {"type": {"binary_operator": {"line_position": "trailing"}}} }, overrides={"dialect": "ansi"}, ) # The sqlfluff.lint API doesn't allow us to pass config so need to do what it does linter = Linter(config=config) result_records = linter.lint_string_wrapped(sql).as_records() result = result_records[0]["violations"] assert "LT03" in [r["code"] for r in result] assert EXPECTED_TRAILING_MESSAGE in [r["description"] for r in result] sqlfluff-3.4.2/test/rules/std_LT04_ST06_test.py000066400000000000000000000036741503426445100212130ustar00rootroot00000000000000"""Tests the python routines within LT04 and ST06.""" import pytest from sqlfluff.core import FluffConfig, Linter @pytest.mark.parametrize( ["in_sql", "out_sql"], [ ( """SELECT COALESCE(a, 0) AS b , COALESCE(c, 0) AS d , e FROM t""", """SELECT e, COALESCE(a, 0) AS b, COALESCE(c, 0) AS d FROM t""", ), ( """SELECT COALESCE(a, 0) AS b--comment , COALESCE(c, 0) AS d , e FROM t""", """SELECT e,--comment COALESCE(a, 0) AS b, COALESCE(c, 0) AS d FROM t""", ), ( """with cte1 as ( select "a" ,"b" ,coalesce("g1" ,"g2" ,"g3" ) as "g_combined" ,"i" ,"j" from test ), cte2 as ( select "col1" ,'start: ' + "col2" as "new_col2" ,'start2: ' + "col3" as "new_col3" ,"col4" ,"col5" from cte1 ), select * from cte2""", """with cte1 as ( select "a", "b", "i", "j", coalesce("g1", "g2", "g3" ) as "g_combined" from test ), cte2 as ( select "col1", "col4", "col5", 'start: ' + "col2" as "new_col2", 'start2: ' + "col3" as "new_col3" from cte1 ), select * from cte2""", ), ], ) def test_rules_std_LT04_and_ST06_interaction_trailing(in_sql, out_sql) -> None: """Test interaction between LT04 and ST06. Test sql with two newlines with leading commas expecting trailing. """ # Lint expected rules. cfg = FluffConfig.from_string( """[sqlfluff] dialect = ansi rules = LT04, ST06 """ ) linter = Linter(config=cfg) # Return linted/fixed file. linted_file = linter.lint_string(in_sql, fix=True) # Check expected lint errors are raised. assert set([v.rule.code for v in linted_file.violations]) == {"LT04", "ST06"} # Check file is fixed. assert linted_file.fix_string()[0] == out_sql sqlfluff-3.4.2/test/rules/std_LT04_test.py000066400000000000000000000025621503426445100204320ustar00rootroot00000000000000"""Tests the python routines within LT04.""" import sqlfluff def test__rules__std_LT04_unparseable(): """Verify that LT04 doesn't try to fix queries with parse errors. 
This has been observed to frequently cause syntax errors, especially in combination with Jinja templating, e.g. undefined template variables. """ # This example comes almost directly from a real-world example. The user # accidentally ran "sqlfluff fix" without defining # "readability_features_numeric" and "readability_features_count_list", and # doing so corrupted their query. sql = """ SELECT user_id, campaign_id, business_type, SPLIT(intents, ",") AS intent_list, {% for feature in readability_features_numeric %} CAST(JSON_EXTRACT(readability_scores, '$.data.{{feature}}') AS float64) AS {{feature}} {% if not loop.last %} , {% endif %} {% endfor %}, {% for feature in readability_features_count_list %} CAST(JSON_EXTRACT(asset_structure, '$.{{feature}}') AS float64) AS {{feature}}_count {% if not loop.last %} , {% endif %} {% endfor %}, track_clicks_text, track_clicks_html FROM t """ result = sqlfluff.lint(sql) assert "LT04" not in [r["code"] for r in result] sqlfluff-3.4.2/test/rules/std_LT05_LT09_combo_test.py000066400000000000000000000027521503426445100223630ustar00rootroot00000000000000"""Tests the combination of LT05 and LT09. LT05: no long lines LT09: single selects should be on SELECT line """ import sqlfluff def test__rules__std_LT05_LT09_long_line_lint(): """Verify a long line that causes a clash between LT05 and LT09 is not changed.""" sql = ( "SELECT\n1000000000000000000000000000000000000000000000000000000000000000000000" "000000000000000000000000000000\n" ) result = sqlfluff.lint(sql) assert "LT05" in [r["code"] for r in result] assert "LT09" in [r["code"] for r in result] def test__rules__std_LT05_LT09_long_line_fix(): """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).""" sql = ( "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000" "00000000000000000000000000000\n" ) result = sqlfluff.fix(sql) assert result == ( "SELECT\n 100000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000\n" ) def test__rules__std_LT05_LT09_long_line_fix2(): """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).""" sql = ( "SELECT\n 100000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000\n" ) result = sqlfluff.fix(sql) assert result == ( "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000" "00000000000000000000000000000\n" ) sqlfluff-3.4.2/test/rules/std_LT12_CV06_test.py000066400000000000000000000016211503426445100211620ustar00rootroot00000000000000"""Tests the python routines within LT12 and CV06.""" from sqlfluff.core import FluffConfig, Linter def test__rules__std_LT12_and_CV06_interaction() -> None: """Test interaction between LT12 and CV06 doesn't stop CV06 from being applied.""" # Test sql with no final newline and no final semicolon. sql = "SELECT foo FROM bar" # Ensure final semicolon requirement is active. cfg = FluffConfig(overrides={"dialect": "ansi"}) cfg.set_value( config_path=["rules", "convention.terminator", "require_final_semicolon"], val=True, ) linter = Linter(config=cfg) # Return linted/fixed file. linted_file = linter.lint_string(sql, fix=True) # Check expected lint errors are raised. assert set([v.rule.code for v in linted_file.violations]) == {"LT12", "CV06"} # Check file is fixed. 
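    # Both fixes should have applied: CV06 appends the semicolon and LT12 the trailing newline.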
assert linted_file.fix_string()[0] == "SELECT foo FROM bar;\n" sqlfluff-3.4.2/test/rules/std_RF01_LT09_test.py000066400000000000000000000013451503426445100211650ustar00rootroot00000000000000"""Tests observed conflict between RF01 & LT09. Root cause was BaseSegment.copy(). """ from sqlfluff.core import FluffConfig, Linter def test__rules__std_RF01_LT09_copy() -> None: """Tests observed conflict between RF01 & LT09. https://github.com/sqlfluff/sqlfluff/issues/5203 """ sql = """ SELECT DISTINCT `FIELD` FROM `TABLE`; """ cfg = FluffConfig.from_kwargs( dialect="mysql", rules=["RF01", "LT09"], ) result = Linter(config=cfg).lint_string(sql) for violation in result.violations: assert "Unexpected exception" not in violation.description assert len(result.violations) == 1 only_violation = result.violations[0] assert only_violation.rule_code() == "LT09" sqlfluff-3.4.2/test/rules/std_RF02_test.py000066400000000000000000000006351503426445100204170ustar00rootroot00000000000000"""Tests the python routines within RF02.""" import sqlfluff def test__rules__std_RF02_wildcard_single_count(): """Verify that RF02 is only raised once for wildcard (see issue #1973).""" sql = """ SELECT * FROM foo INNER JOIN bar; """ result = sqlfluff.lint(sql) assert "RF02" in [r["code"] for r in result] assert [r["code"] for r in result].count("RF02") == 1 sqlfluff-3.4.2/test/rules/std_ST03_test.py000066400000000000000000000033231503426445100204340ustar00rootroot00000000000000"""Tests the python routines within ST03.""" import sqlfluff def test__rules__std_ST03_multiple_unused_ctes(): """Verify that ST03 returns multiple lint issues, one per unused CTE.""" sql = """ WITH cte_1 AS ( SELECT 1 ), cte_2 AS ( SELECT 2 ), cte_3 AS ( SELECT 3 ), cte_4 AS ( SELECT 4 ) SELECT var_bar FROM cte_3 """ result = sqlfluff.lint(sql, rules=["ST03"]) assert result == [ { "code": "ST03", "description": 'Query defines CTE "cte_1" but does not use it.', "name": "structure.unused_cte", "warning": False, "fixes": [], "start_line_no": 3, "start_line_pos": 5, "start_file_pos": 14, "end_line_no": 3, "end_line_pos": 10, "end_file_pos": 19, }, { "code": "ST03", "description": 'Query defines CTE "cte_2" but does not use it.', "name": "structure.unused_cte", "warning": False, "fixes": [], "start_line_no": 6, "start_line_pos": 5, "start_file_pos": 53, "end_line_no": 6, "end_line_pos": 10, "end_file_pos": 58, }, { "code": "ST03", "description": 'Query defines CTE "cte_4" but does not use it.', "name": "structure.unused_cte", "warning": False, "fixes": [], "start_line_no": 12, "start_line_pos": 5, "start_file_pos": 131, "end_line_no": 12, "end_line_pos": 10, "end_file_pos": 136, }, ] sqlfluff-3.4.2/test/rules/std_ST05_LT08_test.py000066400000000000000000000027461503426445100212150ustar00rootroot00000000000000"""Tests observed conflict between ST05 & LT08.""" from sqlfluff.core import FluffConfig, Linter def test__rules__std_ST05_LT08_5265() -> None: """Tests observed conflict between ST05 & LT08. In this case, the moved `oops` and `another` table was created after the first usage. The `oops` from the `cte2` is no longer deleted. 
https://github.com/sqlfluff/sqlfluff/issues/5265 """ sql = """ WITH cte1 AS ( SELECT COUNT(*) AS qty FROM some_table AS st LEFT JOIN ( SELECT 'first' AS id ) AS oops ON st.id = oops.id ), cte2 AS ( SELECT COUNT(*) AS other_qty FROM other_table AS sot LEFT JOIN ( SELECT 'middle' AS id ) AS another ON sot.id = another.id LEFT JOIN ( SELECT 'last' AS id ) AS oops ON sot.id = oops.id ) SELECT CURRENT_DATE(); """ fixed_sql = """ WITH oops AS ( SELECT 'first' AS id ), cte1 AS ( SELECT COUNT(*) AS qty FROM some_table AS st LEFT JOIN oops ON st.id = oops.id ), another AS ( SELECT 'middle' AS id ), cte2 AS ( SELECT COUNT(*) AS other_qty FROM other_table AS sot LEFT JOIN another ON sot.id = another.id LEFT JOIN ( SELECT 'last' AS id ) AS oops ON sot.id = oops.id ) SELECT CURRENT_DATE(); """ cfg = FluffConfig.from_kwargs( dialect="ansi", rules=["ST05", "LT08"], ) result = Linter(config=cfg).lint_string(sql, fix=True) assert result.fix_string()[0] == fixed_sql sqlfluff-3.4.2/test/rules/std_ST05_LT09_test.py000066400000000000000000000036201503426445100212060ustar00rootroot00000000000000"""Tests observed conflict between ST05 & LT09.""" from sqlfluff.core import FluffConfig, Linter def test__rules__std_ST05_LT09_4137() -> None: """Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137 """ sql = """ with cte1 as ( select t1.x, t2.y from tbl1 t1 join (select x, y from tbl2) t2 on t1.x = t2.x ) , cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ fixed_sql = """ with t2 as (select x, y from tbl2), cte1 as ( select t1.x, t2.y from tbl1 t1 join t2 on t1.x = t2.x ), cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ cfg = FluffConfig.from_kwargs( dialect="ansi", rules=["ST05", "LT09"], ) result = Linter(config=cfg).lint_string(sql, fix=True) assert result.fix_string()[0] == fixed_sql def test__rules__std_ST05_LT09_5265() -> None: """Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137 """ sql = """ with cte1 as ( select t1.x, t2.y from tbl1 t1 join (select x, y from tbl2) t2 on t1.x = t2.x ) , cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ fixed_sql = """ with t2 as (select x, y from tbl2), cte1 as ( select t1.x, t2.y from tbl1 t1 join t2 on t1.x = t2.x ), cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ cfg = FluffConfig.from_kwargs( dialect="ansi", rules=["ST05", "LT09"], ) result = Linter(config=cfg).lint_string(sql, fix=True) assert result.fix_string()[0] == fixed_sql sqlfluff-3.4.2/test/rules/std_fix_auto_test.py000066400000000000000000000136101503426445100215610ustar00rootroot00000000000000"""Automated tests for fixing violations. Any files in the test/fixtures/linter/autofix directory will be picked up and automatically tested against the appropriate dialect. 
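Each test directory provides a "before.sql" input, an expected "after.sql" output and a "test-config.yml", plus an optional ".sqlfluff" config and "violations.json".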
""" import json import logging import os import shutil import tempfile from typing import Optional import pytest import yaml from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.config import clear_config_caches # Construct the tests from the filepath test_cases = [] base_auto_fix_path = ("test", "fixtures", "linter", "autofix") # Generate the filenames for each dialect from the parser test directory for dialect in os.listdir(os.path.join(*base_auto_fix_path)): # Ignore documentation if dialect.endswith(".md"): continue # assume that d is now the name of a dialect dirlist = os.listdir(os.path.join(*base_auto_fix_path, dialect)) for test_case in dirlist: test_cases.append( ( # The dialect dialect, # The directory name test_case, ) ) def make_dialect_path(dialect, fname): """Work out how to find paths given a dialect and a file name.""" return os.path.join("test", "fixtures", "dialects", dialect, fname) def auto_fix_test(dialect, folder, caplog): """A test for roundtrip testing, take a file buffer, lint, fix and lint. This is explicitly different from the linter version of this, in that it uses the command line rather than the direct api. """ # Log just the rules logger for this test. # NOTE: In debugging it may be instructive to enable some of # the other loggers listed here to debug particular issues. # Enabling all of them results in very long logs so use # wisely. # caplog.set_level(logging.DEBUG, logger="sqlfluff.templater") # caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") caplog.set_level(logging.DEBUG, logger="sqlfluff.linter") caplog.set_level(logging.DEBUG, logger="sqlfluff.rules") filename = "testing.sql" # Lets get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) cfgpath = os.path.join(tempdir_path, ".sqlfluff") src_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "before.sql") cmp_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "after.sql") vio_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "violations.json") cfg_filepath = os.path.join(*base_auto_fix_path, dialect, folder, ".sqlfluff") test_conf_filepath = os.path.join( *base_auto_fix_path, dialect, folder, "test-config.yml" ) # Load the config file for the test: with open(test_conf_filepath) as cfg_file: cfg = yaml.safe_load(cfg_file) print("## Config: ", cfg) rules: Optional[str] = ",".join(cfg["test-config"].get("rules")).upper() if "ALL" in rules: rules = None raise_on_non_linting_violations = cfg["test-config"].get( "raise_on_non_linting_violations", True ) # Open the example file and write the content to it print_buff = "" with open(filepath, mode="w") as dest_file: with open(src_filepath) as source_file: for line in source_file: dest_file.write(line) print_buff += line # Copy the config file too try: with open(cfgpath, mode="w") as dest_file: with open(cfg_filepath) as source_file: print("## Config File Found.") for line in source_file: dest_file.write(line) except FileNotFoundError: # No config file? No big deal print("## No Config File Found.") pass print(f"## Input file:\n{print_buff}") # Do we need to do a violations check? try: with open(vio_filepath) as vio_file: violations = json.load(vio_file) except FileNotFoundError: # No violations file. Let's not worry violations = None # Run the fix command overrides = {"dialect": dialect} if rules: overrides["rules"] = rules # Clear config caches before loading. 
The way we move files around # makes the filepath based caching inaccurate, which leads to unstable # test cases unless we regularly clear the cache. clear_config_caches() cfg = FluffConfig.from_root(overrides=overrides) lnt = Linter(config=cfg) res = lnt.lint_path(filepath, fix=True) if not res.files: raise ValueError("LintedDir empty: Parsing likely failed.") print(f"## Templated file:\n{res.tree.raw}") # We call check_tuples here to make sure any non-linting # violations are raised, and the test fails if there are any. vs = set( res.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) ) # If we have a violations structure, let's enforce it. if violations: # Format the violations file expected_vs = set() for rule_key in violations["violations"]["linting"]: for elem in violations["violations"]["linting"][rule_key]: expected_vs.add((rule_key, *elem)) assert expected_vs == vs # Actually do the fixes res = res.persist_changes() # Read the fixed file with open(filepath) as fixed_file: fixed_buff = fixed_file.read() # Clear up once read shutil.rmtree(tempdir_path) # Also clear the config cache again so it's not polluted for later tests. clear_config_caches() # Read the comparison file with open(cmp_filepath) as comp_file: comp_buff = comp_file.read() # Make sure we were successful assert res # Assert that we fixed as expected assert fixed_buff == comp_buff @pytest.mark.parametrize("dialect,folder", test_cases) def test__std_fix_auto(dialect, folder, caplog): """Automated Fixing Tests.""" auto_fix_test(dialect=dialect, folder=folder, caplog=caplog) sqlfluff-3.4.2/test/rules/std_roundtrip_test.py000066400000000000000000000110371503426445100217720ustar00rootroot00000000000000"""Round trip tests for rules with a fix method.""" import os import re import shutil import tempfile from io import StringIO import pytest from click.testing import CliRunner from sqlfluff.cli.commands import fix, lint def generic_roundtrip_test(source_file, rulestring): """Run a roundtrip test given a sql file and a rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing. """ if isinstance(source_file, str): # If it's a string, treat it as a path so let's load it. with open(source_file) as f: source_file = StringIO(f.read()) filename = "testing.sql" # Let's get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) # Open the example file and write the content to it with open(filepath, mode="w") as dest_file: for line in source_file: dest_file.write(line) runner = CliRunner() # Check that we first detect the issue result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath]) assert result.exit_code == 1 # Fix the file (in force mode) result = runner.invoke( fix, ["--rules", rulestring, "--dialect=ansi", "-f", filepath] ) assert result.exit_code == 0 # Now lint the file and check for exceptions result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath]) assert result.exit_code == 0 shutil.rmtree(tempdir_path) def jinja_roundtrip_test( source_path, rulestring, sqlfile="test.sql", cfgfile=".sqlfluff" ): """Run a roundtrip test given a fixture path and a rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing. Additionally we also check that we haven't messed up the templating tags in the process.
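The check captures all jinja tags with a regex before fixing and asserts that exactly the same tags survive the fix.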
""" tempdir_path = tempfile.mkdtemp() sql_filepath = os.path.join(tempdir_path, sqlfile) cfg_filepath = os.path.join(tempdir_path, cfgfile) # Copy the SQL file with open(sql_filepath, mode="w") as dest_file: with open(os.path.join(source_path, sqlfile)) as source_file: for line in source_file: dest_file.write(line) # Copy the Config file with open(cfg_filepath, mode="w") as dest_file: with open(os.path.join(source_path, cfgfile)) as source_file: for line in source_file: dest_file.write(line) with open(sql_filepath) as f: # Get a record of the pre-existing jinja tags tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0) runner = CliRunner() # Check that we first detect the issue result = runner.invoke( lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath] ) assert result.exit_code == 1 # Fix the file (in force mode) result = runner.invoke( fix, ["--rules", rulestring, "-f", "--dialect=ansi", sql_filepath] ) assert result.exit_code == 0 # Now lint the file and check for exceptions result = runner.invoke( lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath] ) if result.exit_code != 0: # Output the file content for debugging print("File content:") with open(sql_filepath) as f: print(repr(f.read())) print("Command output:") print(result.output) assert result.exit_code == 0 with open(sql_filepath) as f: # Check that the tags are all still there! new_tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0) # Clear up the temp dir shutil.rmtree(tempdir_path) # Assert that the tags are the same assert tags == new_tags @pytest.mark.parametrize( "rule,path", [ ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("LT01", "test/fixtures/linter/whitespace_errors.sql"), ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("CP01", "test/fixtures/linter/whitespace_errors.sql"), ("AL01", "test/fixtures/dialects/ansi/select_simple_i.sql"), ("AL02", "test/fixtures/dialects/ansi/select_simple_i.sql"), ], ) def test__cli__command__fix(rule, path): """Test the round trip of detecting, fixing and then not detecting given rule.""" generic_roundtrip_test(path, rule) @pytest.mark.parametrize("rule", ["CP01", "LT01"]) def test__cli__command__fix_templated(rule): """Roundtrip test, making sure that we don't drop tags while templating.""" jinja_roundtrip_test("test/fixtures/templater/jinja_d_roundtrip", rule) sqlfluff-3.4.2/test/rules/std_test.py000066400000000000000000000071261503426445100176700ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.core.rules import get_ruleset from sqlfluff.utils.testing.rules import assert_rule_raises_violations_in_file @pytest.mark.parametrize( "rule,path,violations", [ ("LT01", "indentation_errors.sql", [(4, 24)]), ( "LT02", "indentation_errors.sql", [(2, 1), (3, 1), (4, 1), (5, 1)], ), # Check we get comma whitespace errors ("LT01", "whitespace_errors.sql", [(2, 9), (3, 12)]), # Check we get operator whitespace errors and it works with brackets ( "LT01", "operator_errors.sql", [(3, 8), (4, 10), (7, 6), (7, 7), (7, 9), (7, 10), (7, 12), (7, 13)], ), ("LT03", "operator_errors.sql", [(5, 9)]), ( "LT01", "operator_errors_negative.sql", [(2, 6), (2, 9), (5, 6), (5, 7)], ), # Hard indentation errors ( "LT02", "indentation_error_hard.sql", [ (2, 1), (6, 1), (9, 1), (11, 15), (12, 1), (12, 33), (13, 15), (14, 1), (14, 36), (18, 1), (19, 1), (20, 1), ], ), # Check bracket handling with closing brackets and contained indents works. 
("LT02", "indentation_error_contained.sql", []), # Check we handle block comments as expect. Github #236 ( "LT05", "block_comment_errors.sql", # Errors should flag on the first element of the line. [(1, 1), (2, 5), (4, 5)], ), ("LT05", "block_comment_errors_2.sql", [(1, 1), (2, 1)]), # Column references ("RF02", "column_references.sql", [(1, 8)]), ("RF02", "column_references_bare_function.sql", []), ("RF01", "column_references.sql", [(1, 11)]), ("AL05", "column_references.sql", [(2, 11)]), # Distinct and Group by ("AM01", "select_distinct_group_by.sql", [(1, 8)]), # Make sure that ignoring works as expected ("LT01", "operator_errors_ignore.sql", [(10, 8), (10, 9)]), ( "JJ01", "heavy_templating.sql", [(12, 13), (12, 25)], ), ], ) def test__rules__std_file(rule, path, violations): """Test the linter finds the given errors in (and only in) the right places.""" assert_rule_raises_violations_in_file( rule=rule, fpath="test/fixtures/linter/" + path, violations=violations, fluff_config=FluffConfig(overrides=dict(rules=rule, dialect="ansi")), ) @pytest.mark.parametrize( "rule_config_dict", [ {"allow_scalar": "blah"}, {"single_table_references": "blah"}, {"unquoted_identifiers_policy": "blah"}, {"capitalisation.keywords": {"capitalisation_policy": "blah"}}, {"aliasing.table": {"aliasing": "blah"}}, {"aliasing.column": {"aliasing": "blah"}}, {"capitalisation.identifiers": {"extended_capitalisation_policy": "blah"}}, {"capitalisation.functions": {"capitalisation_policy": "blah"}}, ], ) def test_improper_configs_are_rejected(rule_config_dict): """Ensure that unsupported configs raise a ValueError.""" config = FluffConfig( configs={"rules": rule_config_dict}, overrides={"dialect": "ansi"} ) with pytest.raises(ValueError): get_ruleset().get_rulepack(config) sqlfluff-3.4.2/test/rules/yaml_test_cases_test.py000066400000000000000000000034031503426445100222470ustar00rootroot00000000000000"""Runs the rule test cases.""" import logging import os import pytest from sqlfluff.utils.testing.rules import ( RuleTestCase, load_test_cases, ) def pytest_generate_tests(metafunc): """Generate yaml test cases from file. This is a predefined pytest hook which allows us to parametrize all other test cases defined in this module. https://docs.pytest.org/en/stable/how-to/parametrize.html#pytest-generate-tests """ ids, test_cases = load_test_cases( test_cases_path="test/fixtures/rules/std_rule_cases/*.yml" ) # Only parametrize methods which include `test_case` in their # list of required fixtures. if "test_case" in metafunc.fixturenames: metafunc.parametrize("test_case", test_cases, ids=ids) @pytest.mark.integration @pytest.mark.rules_suite def test__rule_test_case(test_case: RuleTestCase, caplog): """Execute each of the rule test cases. The cases themselves are parametrized using the above `pytest_generate_tests` function, which both loads them from the yaml files do generate `RuleTestCase` objects, but also sets appropriate IDs for each test to improve the user feedback. 
""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"): with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): test_case.evaluate() def test__rule_test_global_config(): """Test global config in rule test cases.""" ids, test_cases = load_test_cases( os.path.join("test/fixtures/rules/R001_global_config_test.yml") ) assert len(test_cases) == 2 # tc1: overwrites global config assert test_cases[0].configs["core"]["dialect"] == "ansi" # tc2: global config is used assert test_cases[1].configs["core"]["dialect"] == "exasol" sqlfluff-3.4.2/test/testing_test.py000066400000000000000000000047571503426445100174300ustar00rootroot00000000000000"""Test the sqlfluff.utils.testing module.""" import pytest from _pytest.outcomes import Failed, Skipped from sqlfluff.utils.testing.rules import ( RuleTestCase, assert_rule_fail_in_sql, assert_rule_pass_in_sql, ) def test_assert_rule_fail_in_sql_handle_parse_error(): """Util assert_rule_fail_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="L000", sql="select from") failed_test.match("Found the following parse errors in test case:") def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass(): """Util assert_rule_fail_in_sql should fail if no failure.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="LT01", sql="select 1") failed_test.match("No LT01 failures found in query which should fail") def test_assert_rule_pass_in_sql_should_handle_parse_error(): """Util assert_rule_pass_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select from") failed_test.match("Found unparsable section:") def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations(): """Util assert_rule_pass_in_sql should fail when there are violations.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select a , b from t") failed_test.match("Found LT01 failures in query which should pass") def test_rules_test_case_skipped_when_test_case_skipped(): """Test functionality of the `RuleTestCase` skip attribute.""" rule_test_case = RuleTestCase(rule="CP01", skip="Skip this one for now") with pytest.raises(Skipped) as skipped_test: rule_test_case.evaluate() skipped_test.match("Skip this one for now") def test_rules_test_case_has_variable_introspection(test_verbosity_level): """Make sure the helper gives variable introspection information on failure.""" rule_test_case = RuleTestCase( rule="LT02", fail_str=""" select a, b from table """, # extra comma on purpose fix_str=""" select a, b, from table """, ) with pytest.raises(AssertionError) as skipped_test: rule_test_case.evaluate() if test_verbosity_level >= 2: # Enough to check that a query diff is displayed skipped_test.match("select") sqlfluff-3.4.2/test/utils/000077500000000000000000000000001503426445100154655ustar00rootroot00000000000000sqlfluff-3.4.2/test/utils/analysis/000077500000000000000000000000001503426445100173105ustar00rootroot00000000000000sqlfluff-3.4.2/test/utils/analysis/query_test.py000066400000000000000000000233571503426445100221000ustar00rootroot00000000000000"""Test the select_crawler module.""" import pytest from sqlfluff.core.linter.linter import Linter from sqlfluff.utils.analysis.query import Query def _parse_and_crawl_outer(sql): """Helper function for select crawlers. Given a SQL statement this crawls the SQL and instantiates a Query on the outer relevant segment. 
""" linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) # Make sure it's fully parsable. assert "unparsable" not in parsed.tree.descendant_type_set # Create a crawler from the root segment. query = Query.from_root(parsed.tree, linter.dialect) # Analyse the segment. return query, linter @pytest.mark.parametrize( "sql, expected_json", [ ( # Test trivial query. "select 1", {"selectables": ["select 1"]}, ), ( # Test set expression. "select 1 union select 2", {"selectables": ["select 1", "select 2"]}, ), ( # Test multiple CTEs. "with cte1 as (select 1 as x), cte2 as (select 2 as y) " "select * from cte1 join cte2 using (x)", { "ctes": { "CTE1": {"selectables": ["select 1 as x"]}, "CTE2": {"selectables": ["select 2 as y"]}, }, "query_type": "WithCompound", "selectables": ["select * from cte1 join cte2 using (x)"], }, ), ( # Nested CTEs (from AM04 test suite) """ with a as ( with b as (select 1 from c) select * from b ) select * from a """, { "ctes": { "A": { "ctes": {"B": {"selectables": ["select 1 from c"]}}, "query_type": "WithCompound", "selectables": ["select * from b"], } }, "query_type": "WithCompound", "selectables": ["select * from a"], }, ), ( # Nested CTEs (from AM04 test suite) """ with b as (select 1 from c) select * from ( with a as (select * from b) select * from a ) """, { "ctes": {"B": {"selectables": ["select 1 from c"]}}, "query_type": "WithCompound", "selectables": [ "select * from (\n" " with a as (select * from b)\n" " select * from a\n" " )" ], "subqueries": [ # NOTE: Subquery from the FROM clause. { "ctes": {"A": {"selectables": ["select * from b"]}}, "query_type": "WithCompound", "selectables": ["select * from a"], }, ], }, ), ( # Test that subquery in "from" not included. "select a.x from (select z from b)", { "selectables": ["select a.x from (select z from b)"], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # Test that subquery in "from" / "join" not included. "select a.x from a join (select z from b) as b on (a.x = b.x)", { "selectables": [ "select a.x from a join (select z from b) as b on (a.x = b.x)" ], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # In CTE main query, test that subquery in "from" not included. "with prep as (select 1) select a.x from (select z from b)", { "ctes": {"PREP": {"selectables": ["select 1"]}}, "query_type": "WithCompound", "selectables": ["select a.x from (select z from b)"], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # In CTE main query, test that subquery in "from" / "join" not included. 
"with prep as (select 1) " "select a.x from a join (select z from b) as b on (a.x = b.x)", { "ctes": {"PREP": {"selectables": ["select 1"]}}, "query_type": "WithCompound", "selectables": [ "select a.x from a join (select z from b) as b on (a.x = " "b.x)" ], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( """with prep_1 as ( with d as ( select x, z from b ) select * from d ) select a.x, a.y, b.z from a join prep_1 using (x) """, { "ctes": { "PREP_1": { "ctes": { "D": {"selectables": ["select x, z from b"]}, }, "query_type": "WithCompound", "selectables": ["select * from d"], } }, "query_type": "WithCompound", "selectables": [ "select\n a.x, a.y, b.z\nfrom a\njoin prep_1 using (x)" ], }, ), # Test with a UNION as the main selectable of a WITH ( "with a as (select 1), b as (select 2) " "select * from a union select * from b\n", { "ctes": { "A": {"selectables": ["select 1"]}, "B": {"selectables": ["select 2"]}, }, "query_type": "WithCompound", "selectables": [ "select * from a", "select * from b", ], }, ), # Test with a VALUES clause in a WITH ( "WITH txt AS ( VALUES (1, 'foo') ) SELECT * FROM txt\n", { "ctes": { "TXT": {"selectables": ["VALUES (1, 'foo')"]}, }, "query_type": "WithCompound", "selectables": [ "SELECT * FROM txt", ], }, ), # Test with Subqueries ( "SELECT (\n" " SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2\n" ") FROM\n" "(SELECT * FROM some_table) AS my_alias\n", { "selectables": [ "SELECT (\n" " SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2\n" ") FROM\n" "(SELECT * FROM some_table) AS my_alias", ], "subqueries": [ { "selectables": [ "SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2", ] }, {"selectables": ["SELECT * FROM some_table"]}, ], }, ), # Test a MERGE ( """MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c);""", { "selectables": [ """MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c)""" # NOTE: No trailing semicolon ], "subqueries": [{"selectables": ["SELECT * FROM u"]}], }, ), # Test a DELETE ( """DELETE FROM agent1 WHERE EXISTS( SELECT customer.cust_id FROM customer WHERE agent1.agent_code <> customer.agent_code);""", { "selectables": [ """SELECT customer.cust_id FROM customer WHERE agent1.agent_code <> customer.agent_code""" ] }, ), # Test an UPDATE ( """UPDATE my_table SET row_sum = ( SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_tableeee.id )""", { "selectables": [ """SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_tableeee.id""" ] }, ), ], ) def test_select_crawler_constructor(sql, expected_json): """Test Query when created using constructor.""" query, _ = _parse_and_crawl_outer(sql) assert all(cte.cte_definition_segment is not None for cte in query.ctes.values()) query_dict = query.as_dict() assert expected_json == query_dict def test_select_crawler_nested(): """Test invoking with an outer from_expression_segment.""" sql = """ select a.x, a.y, b.z from a join ( with d as ( select x, z from b ) select * from d ) using (x) """ query, linter = _parse_and_crawl_outer(sql) inner_from = ( query.selectables[0].select_info.table_aliases[1].from_expression_element ) inner_select = next(inner_from.recursive_crawl("with_compound_statement")) inner_query = Query.from_segment(inner_select, linter.dialect) assert 
inner_query.as_dict() == { "selectables": [ "select * from d", ], "ctes": {"D": {"selectables": ["select x, z from b"]}}, "query_type": "WithCompound", } sqlfluff-3.4.2/test/utils/reflow/000077500000000000000000000000001503426445100167635ustar00rootroot00000000000000sqlfluff-3.4.2/test/utils/reflow/conftest.py000066400000000000000000000003731503426445100211650ustar00rootroot00000000000000"""Common test fixtures for reflow modules.""" import pytest from sqlfluff.core import FluffConfig @pytest.fixture() def default_config(): """Return the default config for reflow tests.""" return FluffConfig(overrides={"dialect": "ansi"}) sqlfluff-3.4.2/test/utils/reflow/depthmap_test.py000066400000000000000000000076571503426445100222150ustar00rootroot00000000000000"""Tests for the depthmap object.""" from sqlfluff.core import Linter from sqlfluff.utils.reflow.depthmap import DepthMap, StackPosition def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree def test_reflow_depthmap_from_parent(default_config): """Test map construction from a root segment.""" sql = "SELECT 1" root = parse_ansi_string(sql, default_config) dm = DepthMap.from_parent(root) # We use UUIDS in the depth map so we can't assert their value. # What we can do is use them. # Check that we get the right depths. assert [dm.depth_info[seg.uuid].stack_depth for seg in root.raw_segments] == [ 4, 4, 4, 5, 4, 1, ] # Check they all share the same first three hash and # class type elements (except the end of file marker at the end). # These should be the file, statement and select statement. expected = ({"file", "base"}, {"statement", "base"}, {"select_statement", "base"}) assert all( dm.depth_info[seg.uuid].stack_class_types[:3] == expected for seg in root.raw_segments[:-1] ) first_hashes = dm.depth_info[root.raw_segments[0].uuid].stack_hashes[:3] assert all( dm.depth_info[seg.uuid].stack_hashes[:3] == first_hashes for seg in root.raw_segments[:-1] ) # While we're here, test the DepthInfo.common_with method select_keyword_di = dm.depth_info[root.raw_segments[0].uuid] numeric_one_di = dm.depth_info[root.raw_segments[3].uuid] assert len(select_keyword_di.common_with(numeric_one_di)) == 4 def test_reflow_depthmap_from_raws_and_root(default_config): """Test that the indirect route is equivalent to the direct route.""" sql = "SELECT 1" root = parse_ansi_string(sql, default_config) # Direct route dm_direct = DepthMap.from_parent(root) # Indirect route. dm_indirect = DepthMap.from_raws_and_root(root.raw_segments, root) # The depth info dict depends on the sequence so we only need # to check those are equal. assert dm_direct.depth_info == dm_indirect.depth_info def test_reflow_depthmap_order_by(default_config): """Test depth mapping of an order by clause.""" sql = "SELECT * FROM foo ORDER BY bar DESC\n" root = parse_ansi_string(sql, default_config) # Get the `ORDER` and `DESC` segments. order_seg = None desc_seg = None for raw in root.raw_segments: if raw.raw_upper == "ORDER": order_seg = raw elif raw.raw_upper == "DESC": desc_seg = raw # Make sure we find them assert order_seg assert desc_seg # Generate a depth map depth_map = DepthMap.from_parent(root) # Check their depth info order_seg_di = depth_map.get_depth_info(order_seg) desc_seg_di = depth_map.get_depth_info(desc_seg) # Make sure they both contain an order by clause. 
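    # stack_class_types holds one frozenset of segment class types per level of the ancestor stack.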
assert frozenset({"base", "orderby_clause"}) in order_seg_di.stack_class_types assert frozenset({"base", "orderby_clause"}) in desc_seg_di.stack_class_types # Get the ID of one and make sure it's in the other order_by_hash = order_seg_di.stack_hashes[ order_seg_di.stack_class_types.index(frozenset({"base", "orderby_clause"})) ] assert order_by_hash in order_seg_di.stack_hashes assert order_by_hash in desc_seg_di.stack_hashes # Get the position information order_stack_pos = order_seg_di.stack_positions[order_by_hash] desc_stack_pos = desc_seg_di.stack_positions[order_by_hash] # Make sure the position information is correct print(order_stack_pos) print(desc_stack_pos) assert order_stack_pos == StackPosition(idx=0, len=9, type="start") # NOTE: Even though idx 7 is not the end, the _type_ of this location # is still an "end" because the following elements are non-code. assert desc_stack_pos == StackPosition(idx=7, len=9, type="end") sqlfluff-3.4.2/test/utils/reflow/rebreak_test.py000066400000000000000000000060651503426445100220160ustar00rootroot00000000000000"""Tests for rebreak methods. Specifically: - ReflowSequence.rebreak() """ import logging import pytest from sqlfluff.core import Linter from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree @pytest.mark.parametrize( "raw_sql_in,raw_sql_out", [ # Trivial Case ("select 1", "select 1"), # These rely on the default config being for leading operators ("select 1\n+2", "select 1\n+2"), ("select 1+\n2", "select 1\n+ 2"), # NOTE: Implicit respace. ("select\n 1 +\n 2", "select\n 1\n + 2"), ("select\n 1 +\n -- comment\n 2", "select\n 1\n -- comment\n + 2"), # These rely on the default config being for trailing commas ("select a,b", "select a,b"), ("select a\n,b", "select a,\nb"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n -- comment\n , b", "select\n a,\n -- comment\n b"), ], ) def test_reflow__sequence_rebreak_root(raw_sql_in, raw_sql_out, default_config, caplog): """Test the ReflowSequence.rebreak() method directly. Focused around a whole segment. """ root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) for idx, elem in enumerate(seq.elements): print(idx, elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.rebreak() print(new_seq.get_fixes()) assert new_seq.get_raw() == raw_sql_out @pytest.mark.parametrize( "raw_sql_in,target_idx,seq_sql_in,seq_sql_out", [ ("select 1+\n(2+3)", 4, "1+\n(", "1\n+ ("), ("select a,\n(b+c)", 4, "a,\n(", "a,\n("), ("select a\n , (b+c)", 6, "a\n , (", "a,\n ("), # Here we don't have enough context to rebreak it so # it should be left unaltered. ("select a,\n(b+c)", 6, ",\n(b", ",\n(b"), # This intentionally targets an incomplete span. ("select a<=b", 4, "a<=", "a<="), ], ) def test_reflow__sequence_rebreak_target( raw_sql_in, target_idx, seq_sql_in, seq_sql_out, default_config, caplog ): """Test the ReflowSequence.rebreak() method directly. Focused around a target segment. This intentionally stretches some of the span logic. 
""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) target = root.raw_segments[target_idx] print("Target: ", target) seq = ReflowSequence.from_around_target(target, root, config=default_config) for idx, elem in enumerate(seq.elements): print(idx, elem) assert seq.get_raw() == seq_sql_in with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.rebreak() print(new_seq.get_fixes()) assert new_seq.get_raw() == seq_sql_out sqlfluff-3.4.2/test/utils/reflow/reindent_test.py000066400000000000000000000726231503426445100222160ustar00rootroot00000000000000"""Tests for reindenting methods. Specifically: - ReflowPoint.indent_to() - ReflowPoint.get_indent() - deduce_line_indent() """ import logging import sys from typing import Callable import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.linter.fix import apply_fixes, compute_anchor_edit_info from sqlfluff.core.plugin import hookimpl from sqlfluff.core.plugin.host import get_plugin_manager, purge_plugin_manager from sqlfluff.core.templaters import RawTemplater from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFileSlice from sqlfluff.core.templaters.jinja import JinjaTemplater from sqlfluff.utils.reflow.helpers import deduce_line_indent, fixes_from_results from sqlfluff.utils.reflow.reindent import ( _crawl_indent_points, _IndentLine, _IndentPoint, lint_indent_points, ) from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree class SpecialMarkerInserter(JinjaTemplater): """Inserts special marker slices in a sliced file. Some templater plugins might insert custom marker slices that are of zero source string length, including an empty source string. This mock templater simulates this behavior by adding a marker slice like this after every block_start slice. """ name = "special_marker_inserter" def slice_file( self, raw_str: str, render_func: Callable[[str], str], config=None ) -> tuple[list[RawFileSlice], list[TemplatedFileSlice], str]: """Patch a sliced file returned by the superclass.""" raw_sliced, sliced_file, templated_str = super().slice_file( raw_str, render_func, config ) patched_sliced_file = [] for templated_slice in sliced_file: patched_sliced_file.append(templated_slice) # Add an EMPTY special_marker slice after every block_start. if templated_slice.slice_type == "block_start": # Note that both the source_slice AND the templated_slice are empty. 
source_pos = templated_slice.source_slice.stop templated_pos = templated_slice.templated_slice.stop patched_sliced_file.append( TemplatedFileSlice( "special_marker", slice(source_pos, source_pos), slice(templated_pos, templated_pos), ) ) return raw_sliced, patched_sliced_file, templated_str @hookimpl def get_templaters() -> list[type[RawTemplater]]: """Return templaters provided by this test module.""" return [SpecialMarkerInserter] @pytest.mark.parametrize( "raw_sql_in,elem_idx,indent_to,point_sql_out", [ # Trivial Case ("select\n 1", 1, " ", "\n "), # Change existing indents ("select\n 1", 1, " ", "\n "), ("select\n 1", 1, " ", "\n "), ("select\n1", 1, " ", "\n "), ("select\n 1", 1, "", "\n"), # Create new indents ("select 1", 1, " ", "\n "), ("select 1", 1, " ", "\n "), ("select 1", 1, "", "\n"), ("select 1", 1, " ", "\n "), ], ) def test_reflow__point_indent_to( raw_sql_in, elem_idx, indent_to, point_sql_out, default_config, caplog ): """Test the ReflowPoint.indent_to() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) elem = seq.elements[elem_idx] print("Element: ", elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_fixes, new_point = elem.indent_to( indent_to, before=seq.elements[elem_idx - 1].segments[-1], after=seq.elements[elem_idx + 1].segments[0], ) print(new_fixes) assert new_point.raw == point_sql_out @pytest.mark.parametrize( "raw_sql_in,elem_idx,indent_out", [ # Null case ("select 1", 1, None), # Trivial Case ("select\n 1", 1, " "), # Harder Case (i.e. take the last indent) ("select\n \n \n 1", 1, " "), ], ) def test_reflow__point_get_indent( raw_sql_in, elem_idx, indent_out, default_config, caplog ): """Test the ReflowPoint.get_indent() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) elem = seq.elements[elem_idx] print("Element: ", elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = elem.get_indent() assert result == indent_out @pytest.mark.parametrize( "raw_sql_in,target_raw,indent_out", [ # Trivial case ("select 1", "select", ""), ("select 1", "1", ""), # Easy Case ("select\n 1", "1", " "), # Harder Cases (i.e. take the last indent) ("select\n \n \n 1", "1", " "), ("select\n \n \n 1+2+3+4", "4", " "), ("select\n 1 + 2", "2", " "), ], ) def test_reflow__deduce_line_indent( raw_sql_in, target_raw, indent_out, default_config, caplog ): """Test the deduce_line_indent() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) for target_seg in root.raw_segments: if target_seg.raw == target_raw: break else: raise ValueError("Target Raw Not Found") print("Target: ", target_seg) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = deduce_line_indent(target_seg, root) assert result == indent_out @pytest.mark.parametrize( "raw_sql_in,templater,points_out", [ # Trivial ( "select 1", "raw", [ # No point at the start. 
# Point after select (not newline) _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=False, untaken_indents=(), ), # Point after 1 (not newline either) _IndentPoint( idx=3, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=None, is_line_break=False, untaken_indents=(1,), ), ], ), ( "\nselect 1\n", "raw", [ # Start point _IndentPoint( idx=0, indent_impulse=0, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), # Point after select (not newline) _IndentPoint( idx=2, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=0, is_line_break=False, untaken_indents=(), ), # Point after 1 (is newline) _IndentPoint( idx=4, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=0, is_line_break=True, untaken_indents=(1,), ), ], ), ( "select\n1", "raw", [ # No point at the start. # Point after select (not newline) _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), # Point after 1 (is not newline) _IndentPoint( idx=3, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=1, is_line_break=False, untaken_indents=(), ), ], ), # More stretching cases. ( "SELECT\n r.a,\n s.b\nFROM r\nJOIN s\n " "ON\n r.a = s.a\n AND true", "raw", [ # No point at the start. # After SELECT _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), _IndentPoint( idx=9, indent_impulse=0, indent_trough=0, initial_indent_balance=1, last_line_break_idx=1, is_line_break=True, untaken_indents=(), ), # Before FROM _IndentPoint( idx=15, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=9, is_line_break=True, untaken_indents=(), ), # Untaken indent before "r" _IndentPoint( idx=17, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=15, is_line_break=False, untaken_indents=(), ), # Before JOIN (-1 balance to take us back to # baseline (in line with FROM)) # NOTE: It keeps the untaken indent from the # previous point, but shouldn't use it. _IndentPoint( idx=19, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=15, is_line_break=True, untaken_indents=(1,), ), # Untaken indent before "s" _IndentPoint( idx=21, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=19, is_line_break=False, untaken_indents=(), ), # NOTE: this is an interesting one. It's a Dedent-Indent pair. # There's a zero balance, and a trough of -1. We carry in the previous # untaken indent. But should pass if forward after this. _IndentPoint( idx=23, indent_impulse=0, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=19, is_line_break=True, untaken_indents=(1,), ), # After ON. Default is indented_on_contents = True, so there is # an indent here. We *SHOULDN'T* have an untaken indent here, # because while there was one at the last point, the trough # of the last point should have cleared it. 
_IndentPoint( idx=25, indent_impulse=1, indent_trough=0, initial_indent_balance=1, last_line_break_idx=23, is_line_break=True, untaken_indents=(), ), # Before AND _IndentPoint( idx=39, indent_impulse=0, indent_trough=0, initial_indent_balance=2, last_line_break_idx=25, is_line_break=True, untaken_indents=(), ), # after "true" _IndentPoint( idx=43, indent_impulse=-2, indent_trough=-2, initial_indent_balance=2, last_line_break_idx=39, is_line_break=False, untaken_indents=(), ), ], ), ( "SELECT *\nFROM t1\nJOIN t2 ON true\nAND true", "raw", [ # No point at the start. # NOTE: Abbreviated notation given much is the same as above. # After SELECT _IndentPoint(1, 1, 0, 0, None, False, ()), _IndentPoint(3, -1, -1, 1, None, True, (1,)), _IndentPoint(5, 1, 0, 0, 3, False, ()), _IndentPoint(7, -1, -1, 1, 3, True, (1,)), # JOIN _IndentPoint(9, 1, 0, 0, 7, False, ()), # TRICKY POINT (we're between "t2" and "ON"). # The indent between Join and t2 wasn't taken, but we're # also climbing down from that here. It should be in the # untaken indents _here_ but not passed forward. There is # however another indent opportunity here which ALSO isn't # taken, so that one *should* be passed forward. _IndentPoint(11, 0, -1, 1, 7, False, (1,)), # TRICKY POINT (we're between "ON" and "true"). # Default is indented_on_contents = True. # This means that there is an additional indent here. # It's not taken though. The incoming balance of 1 # isn't taken yet either (hence a 1 in the untaken indent). _IndentPoint(13, 1, 0, 1, 7, False, (1,)), # Between "true" and "AND". # Balance is 2, but both untaken. _IndentPoint(15, 0, 0, 2, 7, True, (1, 2)), # End point _IndentPoint(19, -2, -2, 2, 15, False, (1, 2)), ], ), # Trailing comment case: delays indent until after the comment ( "SELECT -- comment\n 1;", "raw", [ # No point at the start. # After SELECT _IndentPoint(1, 0, 0, 0, None, False, ()), # After comment _IndentPoint(3, 1, 0, 0, None, True, ()), # After 1 _IndentPoint(5, -1, -1, 1, 3, False, ()), # After ; _IndentPoint(7, 0, 0, 0, 3, False, ()), ], ), # Two trailing comments ( "SELECT /* first comment */ /* second comment */\n 1;", "raw", [ # No point at the start. # After SELECT _IndentPoint(1, 0, 0, 0, None, False, ()), # After first comment _IndentPoint(3, 0, 0, 0, None, False, ()), # After second comment _IndentPoint(5, 1, 0, 0, None, True, ()), # After 1 _IndentPoint(7, -1, -1, 1, 5, False, ()), # After ; _IndentPoint(9, 0, 0, 0, 5, False, ()), ], ), # Templated case ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n", "jinja", [ # No initial indent (this is the first newline). _IndentPoint(1, 1, 0, 0, None, True, ()), # point after a _IndentPoint(3, 0, 0, 1, 1, True, ()), # point after for _IndentPoint(5, 1, 0, 1, 3, True, ()), # point after d_val _IndentPoint(9, -1, -1, 2, 5, True, ()), # point after loop _IndentPoint(11, 1, 0, 1, 9, True, ()), # point after e_val _IndentPoint(15, -2, -2, 2, 11, True, ()), # point after endfor _IndentPoint(17, 0, 0, 0, 15, True, ()), ], ), # Templated case (with consuming whitespace) ( "{% for item in [1, 2] -%}\n" "SELECT *\n" "FROM some_table\n" "{{ 'UNION ALL\n' if not loop.last }}\n" "{%- endfor %}", "jinja", [ # No initial indent (this is the first newline). # Importantly this first point - IS a newline # even though that newline segment is consumed # it should still be True here. 
_IndentPoint(1, 1, 0, 0, None, True, ()), # point between SELECT & * _IndentPoint(3, 1, 0, 1, 1, False, ()), # point after * _IndentPoint(5, -1, -1, 2, 1, True, (2,)), # point after FROM _IndentPoint(7, 1, 0, 1, 5, False, ()), # point after some_table _IndentPoint(9, -1, -1, 2, 5, True, (2,)), # point after ALL (we dedent down to the loop marker). _IndentPoint(13, -1, -1, 1, 9, True, ()), # There should be a loop marker here. # point after loop marker and before SELECT # (we indent back up after the loop). _IndentPoint(15, 1, 0, 0, 13, True, ()), # point between SELECT & * _IndentPoint(17, 1, 0, 1, 15, False, ()), # point after * _IndentPoint(19, -1, -1, 2, 15, True, (2,)), # point after FROM _IndentPoint(21, 1, 0, 1, 19, False, ()), # point after some_table (and before unused placeholder) _IndentPoint(23, -1, -1, 2, 19, True, (2,)), # Point after placeholder and dedenting down to endfor _IndentPoint(25, -1, -1, 1, 23, True, ()), # Point between endfor and end-of-file _IndentPoint(27, 0, 0, 0, 25, False, ()), ], ), # Templated case (with templated newline and indent) ( "SELECT\n {{'1 \n, 2'}}\nFROM foo", "jinja", [ # After SELECT _IndentPoint(1, 1, 0, 0, None, True, ()), # NOTE: The newline inside the tag isn't reported. # After the templated section (hence why 7) _IndentPoint(7, -1, -1, 1, 1, True, ()), # After FROM _IndentPoint(9, 1, 0, 0, 7, False, ()), # After foo _IndentPoint(11, -1, -1, 1, 7, False, (1,)), ], ), # Templated case (with special marker slice that has no source string) ( # The invisible special marker slice will be inserted immediately after # the first normal template section. "{% if True %}\n SELECT 1;\n{% endif %}\n", "special_marker_inserter", [ # No point at the start. # After the {% if True %} block: this should not yet indent because # there's still the upcoming zero-length special_marker # TemplateSegment. This is handled similar to the trailing comment # test case. _IndentPoint(1, 0, 0, 0, None, False, ()), # After the zero-length special_marker TemplateSegment inserted by # the special templater: only after this do we want to indent. _IndentPoint(3, 1, 0, 0, None, True, ()), # After SELECT _IndentPoint(5, 1, 0, 1, 3, False, ()), # After 1 _IndentPoint(7, -1, -1, 2, 3, False, (2,)), # After ; _IndentPoint(9, -1, -1, 1, 3, True, ()), # After {% endif %} _IndentPoint(11, 0, 0, 0, 9, True, ()), ], ), ], ) def test_reflow__crawl_indent_points(raw_sql_in, templater, points_out, caplog): """Test _crawl_indent_points directly.""" # Register the mock templater in this module. purge_plugin_manager() get_plugin_manager().register(sys.modules[__name__], name="reindent_test") config = FluffConfig(overrides={"dialect": "ansi", "templater": templater}) root = parse_ansi_string(raw_sql_in, config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): points = list(_crawl_indent_points(seq.elements)) assert points == points_out @pytest.mark.parametrize( "raw_sql_in,raw_sql_out", [ # Trivial ( "select 1", "select 1", ), # Initial Indent ( " select 1", "select 1", ), # Trailing Newline ( " select 1\n", "select 1\n", ), # Basic Multiline ( "select\n1", "select\n 1", ), # Advanced Multiline ( "select\n1+(\n2+3\n),\n4\nfrom foo", "select\n 1+(\n 2+3\n ),\n 4\nfrom foo", ), ( "select\n 1+(\n 2+3\n ),\n 4\n from foo", "select\n 1+(\n 2+3\n ),\n 4\nfrom foo", ), # Multiple untaken indents. We should only indent as many # times as required. 
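        # e.g. "((((" opens four indent opportunities, but a single level is enough for the "1".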
( " select ((((\n1\n))))", "select ((((\n 1\n))))", ), ( "select (((\n((\n3\n))\n)))", "select (((\n ((\n 3\n ))\n)))", ), # ### Templated Multiline Cases ### # NOTE: the templated tags won't show here, but they # should still be indented. # Trailing tag. NOTE: Last tag indented ( "select\n1\n{% if true %}\n+ 2\n{% endif %}", "select\n 1\n \n + 2\n ", ), # Cutting across the parse tree ( "select\n1\n{% if true %}\n,2\nFROM a\n{% endif %}", # This set of template tags cuts across the parse # tree. We should indent them appropriately. In this case # that should mean "case 3", picking the lowest of the # existing indents which should mean no indent for either. # We also shouldn't indent the contents between them either # when taking this option. "select\n 1\n\n ,2\nFROM a\n", ), # Template tags at file ends ( "{% if true %}\nSELECT 1\n{% endif %}", "\n SELECT 1\n", ), # Template loops: ( "select\n 0,\n {% for i in [1, 2, 3] %}\n {{i}},\n {% endfor %}\n 4", "select\n 0,\n \n 1,\n \n 2,\n \n 3,\n \n 4", ), # Correction and handling of hanging indents ( "select 1, 2", "select 1, 2", ), ( "select 1,\n2", "select\n 1,\n 2", ), ( "select 1,\n 2", "select\n 1,\n 2", ), # A hanging example where we're modifying a currently empty point. ( "select greatest(1,\n2)", "select greatest(\n 1,\n 2\n)", ), # Test handling of many blank lines. # NOTE: # 1. Initial whitespace should remain, because it's not an indent. # 2. Blank lines should also remain, because they're also not an indent. ( "\n\n \n\nselect\n\n\n\n \n\n 1\n\n \n\n", "\n\n \n\nselect\n\n\n\n \n\n 1\n\n \n\n", ), # Templated cases. # NOTE: We're just rendering the fixed file in the templated space # so that for these tests we don't touch the fix routines. That's # why the template tags aren't visible - BUT THEIR INDENTS SHOULD BE. # This one is useful for ensuring the tags have the same indent. # ... first with a FROM ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n" "FROM foo", "SELECT\n a\n \n ,d_val\n \n ,e_val\n \nFROM foo", ), # ... then without a FROM ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n", "SELECT\n a\n \n ,d_val\n \n ,e_val\n \n", ), # This one is useful for if statements get handled right. # NOTE: There's a template loop in the middle. ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " {% if c == 'd' %}\n" " ,{{ c }}_val_a\n" " {% else %}\n" " ,{{ c }}_val_b\n" "{% endif %}\n" " {% endfor %}\n", "SELECT\n" " a\n" " \n" " \n" " ,d_val_a\n" " \n" " \n" " \n" " ,e_val_b\n" " \n" " \n", ), # Test leading templated newlines. # https://github.com/sqlfluff/sqlfluff/issues/4485 ( "{{ '\\n \\n ' }}\nSELECT 1", # NOTE: This looks a little strange, but what's important # here is that it doesn't raise an exception. "\n \n \nSELECT 1", ), ], ) def test_reflow__lint_indent_points(raw_sql_in, raw_sql_out, default_config, caplog): """Test the lint_indent_points() method directly. Rather than testing directly, for brevity we check the raw output it produces. This results in a more compact test. """ root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): elements, results = lint_indent_points(seq.elements, single_indent=" ") result_raw = "".join(elem.raw for elem in elements) assert result_raw == raw_sql_out, "Raw Element Check Failed!" 
# Now we've checked the elements - check that applying the fixes gets us to # the same place. print("Results:", results) anchor_info = compute_anchor_edit_info(fixes_from_results(results)) fixed_tree, _, _, valid = apply_fixes( root, default_config.get("dialect_obj"), "TEST", anchor_info ) assert valid, f"Reparse check failed: {fixed_tree.raw!r}" assert fixed_tree.raw == raw_sql_out, "Element check passed - but fix check failed!" @pytest.mark.parametrize( "indent_line, forced_indents, expected_units", [ # Trivial case of a first line. ( _IndentLine(0, [_IndentPoint(0, 0, 0, 0, None, False, ())]), [], 0, ), # Simple cases of a normal lines. ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, ())]), [], 3, ), ( # NOTE: Initial indent for *line* is different to *point*. # The *line* takes precedence. _IndentLine(1, [_IndentPoint(6, 0, 0, 3, 1, True, ())]), [], 1, ), # Indents and dedents on the line break. # NOTE: The line indent still takes precedence here. ( _IndentLine(3, [_IndentPoint(6, 1, 0, 3, 1, True, ())]), [], 3, ), ( _IndentLine(3, [_IndentPoint(6, -1, -1, 3, 1, True, ())]), [], 3, ), # Handle untaken indents. ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1,))]), [], 2, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1, 2))]), [], 1, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (2,))]), # Forced indent takes us back up. [2], 3, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (3,))]), [], 2, ), ( _IndentLine(3, [_IndentPoint(6, 0, -1, 3, 1, True, (3,))]), # Untaken indent is pruned by trough. [], 3, ), ], ) def test_reflow__desired_indent_units(indent_line, forced_indents, expected_units): """Test _IndentLine.desired_indent_units() directly.""" assert indent_line.desired_indent_units(forced_indents) == expected_units sqlfluff-3.4.2/test/utils/reflow/respace_test.py000066400000000000000000000070731503426445100220250ustar00rootroot00000000000000"""Tests for respacing methods. These are mostly on the ReflowPoint class. 
""" import logging import pytest from sqlfluff.core import Linter from sqlfluff.utils.reflow.elements import ReflowPoint from sqlfluff.utils.reflow.helpers import fixes_from_results from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree @pytest.mark.parametrize( "raw_sql_in,kwargs,raw_sql_out", [ # Basic cases ("select 1+2", {}, "select 1 + 2"), ("select 1 + 2 ", {}, "select 1 + 2"), # Check newline handling ("select\n 1 + 2", {}, "select\n 1 + 2"), ("select\n 1 + 2", {}, "select\n 1 + 2"), ("select\n 1 + 2", {"strip_newlines": True}, "select 1 + 2"), # Check filtering ("select \n 1 + 2 \n ", {}, "select\n 1 + 2\n"), ("select \n 1 + 2 \n ", {"filter": "all"}, "select\n 1 + 2\n"), ("select \n 1 + 2 \n ", {"filter": "inline"}, "select \n 1 + 2 \n "), ("select \n 1 + 2 \n ", {"filter": "newline"}, "select\n 1 + 2\n"), ], ) def test_reflow__sequence_respace( raw_sql_in, kwargs, raw_sql_out, default_config, caplog ): """Test the ReflowSequence.respace() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) seq = ReflowSequence.from_root(root, config=default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.respace(**kwargs) assert new_seq.get_raw() == raw_sql_out @pytest.mark.parametrize( "raw_sql_in,point_idx,kwargs,raw_point_sql_out,fixes_out", [ # Basic cases ("select 1", 1, {}, " ", {("replace", " ")}), ("select 1+2", 3, {}, " ", {("create_after", "1")}), ("select (1+2)", 3, {}, "", set()), ("select ( 1+2)", 3, {}, "", {("delete", " ")}), # Newline handling ("select\n1", 1, {}, "\n", set()), ("select\n 1", 1, {}, "\n ", set()), ("select \n 1", 1, {}, "\n ", {("delete", " ")}), ( "select \n 1", 1, {"strip_newlines": True}, " ", {("delete", "\n"), ("delete", " "), ("replace", " ")}, ), ( "select ( \n 1)", 3, {"strip_newlines": True}, "", {("delete", "\n"), ("delete", " "), ("delete", " ")}, ), ], ) def test_reflow__point_respace_point( raw_sql_in, point_idx, kwargs, raw_point_sql_out, fixes_out, default_config, caplog ): """Test the ReflowPoint.respace_point() method directly. NOTE: This doesn't check any pre-existing fixes. That should be a separate more specific test. """ root = parse_ansi_string(raw_sql_in, default_config) seq = ReflowSequence.from_root(root, config=default_config) pnt = seq.elements[point_idx] assert isinstance(pnt, ReflowPoint) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): results, new_pnt = pnt.respace_point( prev_block=seq.elements[point_idx - 1], next_block=seq.elements[point_idx + 1], root_segment=root, lint_results=[], **kwargs, ) assert new_pnt.raw == raw_point_sql_out # NOTE: We use set comparison, because ordering isn't important for fixes. 
assert { (fix.edit_type, fix.anchor.raw) for fix in fixes_from_results(results) } == fixes_out sqlfluff-3.4.2/test/utils/reflow/sequence_test.py000066400000000000000000000163251503426445100222130ustar00rootroot00000000000000"""Tests for the reflow module.""" import logging import pytest from sqlfluff.core import Linter from sqlfluff.core.rules.base import LintFix from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree def assert_reflow_structure(sequence, StartClass, raw_elems): """Assert a ReflowSequence has the defined structure.""" assert [ [seg.raw for seg in elem.segments] for elem in sequence.elements ] == raw_elems # We can assert all the classes just by knowing which we should start with assert all(type(elem) is StartClass for elem in sequence.elements[::2]) OtherClass = ReflowBlock if StartClass is ReflowPoint else ReflowPoint assert all(type(elem) is OtherClass for elem in sequence.elements[1::2]) @pytest.mark.parametrize( "raw_sql,StartClass,raw_elems", [ ( "select 1 +2", ReflowBlock, [ ["select"], # NOTE: The empty strings are indents and dedents ["", " "], ["1"], [" "], ["+"], [], ["2"], # indent (as point) [""], # end_of_file (as block) [""], ], ) ], ) def test_reflow_sequence_from_segments( raw_sql, StartClass, raw_elems, default_config, caplog ): """Test direct sequence construction from segments.""" root = parse_ansi_string(raw_sql, default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_raw_segments( root.raw_segments, root, config=default_config ) assert_reflow_structure(result, StartClass, raw_elems) @pytest.mark.parametrize( "raw_sql,sides,target_idx,target_raw,StartClass,raw_elems", [ ( "select 1 +2", "both", 5, "+", ReflowBlock, [ # We should have expanded as far as the blocks either side ["1"], [" "], ["+"], [], ["2"], ], ), ( "select 1 +2", "before", 5, "+", ReflowBlock, [ ["1"], [" "], ["+"], ], ), ( "select 1 +2", "after", 5, "+", ReflowBlock, [ ["+"], [], ["2"], ], ), ( "select 1 +2", "before", 6, "2", ReflowBlock, [ ["+"], [], ["2"], ], ), ( "select 1 +2", "both", 4, " ", ReflowBlock, [ # Even targeting whitespace, we should get points either side. ["1"], [" "], ["+"], ], ), ( "select (1+2)", "both", 5, "1", ReflowBlock, [ # NOTE: We don't just stop at the indent, we go as far as code. ["("], # The indent sits in the point. [""], ["1"], [], ["+"], ], ), ( " SELECT 1 ", "both", 1, "SELECT", ReflowPoint, [ # We'll hit the edge of the file so start with a point. [" "], ["SELECT"], ["", " "], ["1"], ], ), ], ) def test_reflow_sequence_from_around_target( raw_sql, sides, target_idx, target_raw, StartClass, raw_elems, default_config, caplog, ): """Test direct sequence construction from a target.""" root = parse_ansi_string(raw_sql, default_config) print("Raw Segments:", root.raw_segments) target = root.raw_segments[target_idx] # Check we're aiming at the right place assert target.raw == target_raw with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_around_target( target, root, config=default_config, sides=sides ) assert_reflow_structure(result, StartClass, raw_elems) def test_reflow_sequence_from_around_target_non_raw(default_config, caplog): """Test direct sequence construction from a target. This time we use a target which isn't a RawSegment. 
""" sql = " SELECT 1 " root = parse_ansi_string(sql, default_config) # We should have a statement as a first level child. statement = root.segments[1] assert statement.is_type("statement") assert statement.raw == "SELECT 1" with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_around_target( statement, root, config=default_config ) # We should start with a point, because we hit the start of the file. # It should also hit the end of the file and effectively cover all # the raw segments of the file. assert_reflow_structure( result, ReflowPoint, [ [" "], ["SELECT"], ["", " "], ["1"], # dedent - ws ["", " "], # end of file [""], ], ) @pytest.mark.parametrize( "raw_sql,filter,delete_indices,edit_indices", [ # NOTE: These tests rely on the position of code *and non code* elements # in the parsed sequence, so may need to be altered if the parse structure # changes. ("SELECT \n 4", "all", [2], []), ("SELECT \n 4, \n 6", "all", [2, 7], []), ("SELECT \n 4, \n 6 ", "all", [2, 7, 12], []), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "newline", [2, 17, 21], []), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "inline", [12], [10, 14]), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "all", [2, 12, 17, 21], [10, 14]), ], ) def test_reflow_sequence_respace_filter( raw_sql, filter, delete_indices, edit_indices, default_config, caplog ): """Test iteration of trailing whitespace fixes.""" root = parse_ansi_string(raw_sql, default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): sequence = ReflowSequence.from_root(root, config=default_config) fixes = sequence.respace(filter=filter).get_fixes() # assert deletes assert [fix for fix in fixes if fix.edit_type == "delete"] == [ LintFix("delete", root.raw_segments[idx]) for idx in delete_indices ] # assert edits (with slightly less detail) assert [ root.raw_segments.index(fix.anchor) for fix in fixes if fix.edit_type == "replace" ] == edit_indices sqlfluff-3.4.2/tox.ini000066400000000000000000000165571503426445100146770ustar00rootroot00000000000000[tox] envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{39,310,311,312,313}, dbt{140,150,160,170,180,190}, cov-report, mypy, mypyc, winpy, dbt{150,180,190}-winpy, yamllint min_version = 4.0 # Require 4.0+ for proper pyproject.toml support [testenv] passenv = CI, CIRCLECI, CIRCLE_*, HOME, SQLFLUFF_BENCHMARK_API_KEY # Have option to explicitly set TMPDIR for python as on GitHub Action Windows # machines it doesn't read this from env and resets to system default, which # is often on different drive (C) from working dir (D), which causes problems. setenv = SQLFLUFF_TESTENV = 1 COVERAGE_FILE = .coverage.{envname} winpy: TMPDIR = temp_pytest allowlist_externals = make pip_pre = false deps = -r requirements_dev.txt # Apply the constraints files _as requirements_ files here so that # we force the right installation version up front in each environment. # NOTE: This is a bit of a hack around tox, but it does achieve reasonably # consistent results. dbt{140,150,160,170,180,190}: -r constraints/{envname}.txt # Include any other steps necessary for testing below. # {posargs} is there to allow us to specify specific tests, which # can then be invoked from tox by calling e.g. # tox -e py35 -- project/tests/test_file.py::TestClassName::test_method commands = # Install the plugins as required. # NOTE: We do them here, so that when version numbers update, we don't # get install errors for version conflicts. 
The dbt templater has a version # number pinned to the same version number of the main sqlfluff library # so it _must_ be installed second in the context of a version which isn't # yet released (and so not available on pypi). dbt{140,150,160,170,180,190}: python -m pip install "{toxinidir}/plugins/sqlfluff-templater-dbt" # Add the example plugin. # NOTE: The trailing comma is important because in the github test suite # the python version is not specified and instead the "py" or "winpy" # environment is invoked. Leaving the trailing comma ensures that this # environment still installs the relevant plugins. {py,winpy}{39,310,311,312,313,}: python -m pip install "{toxinidir}/plugins/sqlfluff-plugin-example" # Clean up from previous tests python "{toxinidir}/util.py" clean-tests # Run tests pytest -vv -rsfE --cov-report=lcov {posargs: "{toxinidir}/test"} python test/patch_lcov.py [testenv:cov-init] setenv = COVERAGE_FILE = .coverage commands = coverage erase [testenv:cov-report] setenv = COVERAGE_FILE = .coverage commands = coverage combine # Exclude dbt templater by default as those tests do not run without dbt coverage report --fail-under=100 --show-missing --omit "*templaters/dbt.py,*/dbt_templater/*" # Have copy of above for full coverage - including dbt - for those that want it [testenv:cov-report-dbt] setenv = COVERAGE_FILE = .coverage commands = coverage combine coverage report --fail-under=100 --show-missing [testenv:generate-fixture-yml] commands = python "{toxinidir}/test/generate_parse_fixture_yml.py" {posargs} [testenv:linting] # NOTE: We do install sqlfluff to run linting. This is # because lint-imports requires the module to be installed. commands = flake8 --version black --version ruff --version # ruff is configured to check linting and imports. # see: .ruff.toml and https://docs.astral.sh/ruff/rules/ ruff check . # Ruff *should* catch everything that flake8 does. Until # that has been confirmed, flake8 remains, but ruff runs first. black --diff . flake8 # Check import references. lint-imports [testenv:doctests] commands = pytest -vv -rsfE --doctest-modules {posargs: "{toxinidir}/src"} [testenv:yamllint] skip_install = true deps = yamllint commands = yamllint -c .yamllint . [testenv:doclinting] deps = -rdocs/requirements.txt commands = # Before linting, generate the rule & dialect docs. # If we don't we get import errors. python "{toxinidir}/docs/generate-auto-docs.py" doc8 "{toxinidir}/docs/source" --file-encoding utf8 [testenv:docbuild] deps = -rdocs/requirements.txt commands = make -C "{toxinidir}/docs" html [testenv:mypy] # NOTE: We do install sqlfluff to run mypy, this # is so we can refer to the package as a package. # It also appears to make --strict checking more # stable. 
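# To iterate on type checking locally, running just this environment
# (e.g. `tox -e mypy`) should be sufficient.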
commands = # Standard MyPy on the main package mypy -p sqlfluff # Strict MyPy on the core package mypy -p sqlfluff.core --strict [testenv:mypyc] skip_install = true changedir = src commands = mypyc --config-file ../pyproject.toml -p sqlfluff.api mypyc --config-file ../pyproject.toml -p sqlfluff.cli mypyc --config-file ../pyproject.toml -p sqlfluff.core.config mypyc --config-file ../pyproject.toml -p sqlfluff.core.dialects mypyc --config-file ../pyproject.toml -p sqlfluff.core.helpers mypyc --config-file ../pyproject.toml -p sqlfluff.core.linter mypyc --config-file ../pyproject.toml -p sqlfluff.core.parser.grammar mypyc --config-file ../pyproject.toml -p sqlfluff.core.plugin mypyc --config-file ../pyproject.toml -p sqlfluff.utils.reflow [testenv:build-dist] skip_install = true deps = build commands = python -m build --sdist --wheel {posargs: "{toxinidir}"} [testenv:check-dist] skip_install = true deps = twine commands = twine check "{toxinidir}/dist/*" [testenv:publish-dist] skip_install = true deps = {[testenv:build-dist]deps} twine commands = {[testenv:build-dist]commands} twine upload --skip-existing "{toxinidir}/dist/*" [testenv:pre-commit] skip_install = true deps = pre-commit commands = pre-commit {posargs:run --all-files} [flake8] # Ignore: # W503: Line break before binary operator # D105: Missing docstring in magic method # D107: Missing docstring in __init__ # D418: Function/ Method decorated with @overload shouldn’t contain a docstring # C812: Missing trailing comma ignore = W503, D107, D105, D418, C812 exclude = .git,__pycache__,env,.tox,build,.venv,venv,.coverage.py,plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py max-line-length = 88 inline-quotes = double extend-ignore = # See https://github.com/PyCQA/pycodestyle/issues/373 E203, # sqlfluff uses flake8-docstrings https://pypi.org/project/flake8-docstrings/ # this is to assist with the sphinx based autodoc docstring-convention = google [coverage:run] source = src/sqlfluff omit = src/sqlfluff/__main__.py plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py [coverage:report] exclude_lines = sys.version_info pragma: no cover # See: https://github.com/nedbat/coveragepy/issues/970 # NOTE: Prefer not including the ... instead @overload # See: https://github.com/pytest-dev/pytest-cov/issues/428 @abstractmethod # NOTE: Prefer not including the ... instead [coverage:paths] source = # Local path src/ # These are the Github likely source paths D:\a\sqlfluff\sqlfluff\src\ D:\a\sqlfluff\sqlfluff\.tox\winpy\Lib\site-packages\ # Github Actions are now using C: C:\a\sqlfluff\sqlfluff\src\ C:\a\sqlfluff\sqlfluff\.tox\winpy\Lib\site-packages\ /home/runner/work/sqlfluff/sqlfluff/src/ /home/runner/work/sqlfluff/sqlfluff/.tox/*/lib/*/site-packages/ dbt_templater = plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/ .tox/*/lib/*/site-packages/sqlfluff_templater_dbt/ sqlfluff-3.4.2/util.py000066400000000000000000000234411503426445100147010ustar00rootroot00000000000000#!/usr/bin/env python """Utility strings for use during deployment. NB: This is not part of the core sqlfluff code. """ # This contains various utility scripts import os import re import shutil import time import click from fastcore.net import HTTPError from ghapi.all import GhApi @click.group() def cli(): """Launch the utility cli.""" pass @cli.command() @click.option("--path", default=".test-reports") def clean_tests(path): """Clear up the tests directory. 
    NB: Using scripts allows platform independence
    Makes a new one afterwards
    """
    try:
        shutil.rmtree(path)
        click.echo(f"Removed {path!r}...")
    # OSError is for python 27
    # in py36 it's FileNotFoundError (but that inherits from IOError, which exists in
    # py27)
    except OSError:
        click.echo(f"Directory {path!r} does not exist. Skipping...")

    os.mkdir(path)
    click.echo(f"Created {path!r}")


@cli.command()
@click.argument("new_version_num")
def release(new_version_num):
    """Change version number in the cfg files.

    NOTE: For fine grained personal access tokens, this requires
    _write_ access to the "contents" scope. For some reason, if you
    only grant the _read_ access, you can't see any *draft* PRs
    which are necessary for this script to run.
    """
    api = GhApi(
        owner=os.environ["GITHUB_REPOSITORY_OWNER"],
        repo="sqlfluff",
        token=os.environ["GITHUB_TOKEN"],
    )
    try:
        releases = api.repos.list_releases(per_page=100)
    except HTTPError as err:
        raise click.UsageError(
            "HTTP Error from GitHub API. Check your credentials.\n"
            "(i.e. GITHUB_REPOSITORY_OWNER & GITHUB_TOKEN)\n"
            f"{err}"
        )
    latest_draft_release = None
    for rel in releases:
        if rel["draft"]:
            latest_draft_release = rel
            break

    if not latest_draft_release:
        raise click.UsageError(
            "No draft release found on GitHub.\n"
            "This could be because the GitHub action which generates it is broken, "
            "but is more likely due to using an API token which only has read-only "
            "access to the `sqlfluff/sqlfluff` repository. This script requires an "
            "API token with `read and write` access to the `contents` scope in "
            "order to be able to view draft releases."
        )

    # Pre-releases are identifiable because they contain letters.
    # https://peps.python.org/pep-0440/
    is_pre_release = any(char.isalpha() for char in new_version_num)
    click.echo(
        f"Preparing for release {new_version_num}. (Pre-release: {is_pre_release})"
    )

    # Linkify the PRs and authors
    draft_body_parts = latest_draft_release["body"].split("\n")
    potential_new_contributors = []
    for i, p in enumerate(draft_body_parts):
        draft_body_parts[i] = re.sub(
            r"\(#([0-9]*)\) @([^ ]*)$",
            r"[#\1](https://github.com/sqlfluff/sqlfluff/pull/\1) [@\2](https://github.com/\2)",  # noqa E501
            p,
        )
        new_contrib_string = re.sub(
            r".*\(#([0-9]*)\) @([^ ]*)$",
            r"* [@\2](https://github.com/\2) made their first contribution in [#\1](https://github.com/sqlfluff/sqlfluff/pull/\1)",  # noqa E501
            p,
        )
        if new_contrib_string.startswith("* "):
            new_contrib_name = re.sub(r"\* \[(.*?)\].*", r"\1", new_contrib_string)
            potential_new_contributors.append(
                {"name": new_contrib_name, "line": new_contrib_string}
            )
    whats_changed_text = "\n".join(draft_body_parts)

    # Find the first commit for each contributor in this release
    potential_new_contributors.reverse()
    seen_contributors = set()
    deduped_potential_new_contributors = []
    for c in potential_new_contributors:
        if c["name"] not in seen_contributors:
            seen_contributors.add(c["name"])
            deduped_potential_new_contributors.append(c)

    click.echo("Updating CHANGELOG.md...")
    input_changelog = open("CHANGELOG.md", encoding="utf8").readlines()
    write_changelog = open("CHANGELOG.md", "w", encoding="utf8")
    for i, line in enumerate(input_changelog):
        write_changelog.write(line)
        if "DO NOT DELETE THIS LINE" in line:
            existing_entry_start = i + 2
            new_heading = f"## [{new_version_num}] - {time.strftime('%Y-%m-%d')}\n"
            # If the release is already in the changelog, update it
            if f"## [{new_version_num}]" in input_changelog[existing_entry_start]:
                click.echo(f"...found existing entry for {new_version_num}")
                # Update the existing heading with the new date.
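                # (e.g. an existing "## [1.2.3] - 2024-01-01" heading is
                # rewritten wholesale, so the date is refreshed too; the
                # version and date shown here are illustrative.)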
input_changelog[existing_entry_start] = new_heading # Delete the existing What’s Changed and New Contributors sections remaining_changelog = input_changelog[existing_entry_start:] existing_whats_changed_start = ( next( j for j, line in enumerate(remaining_changelog) if line.startswith("## What’s Changed") ) + existing_entry_start ) existing_new_contributors_start = ( next( j for j, line in enumerate(remaining_changelog) if line.startswith("## New Contributors") ) + existing_entry_start ) existing_new_contributors_length = ( next( j for j, line in enumerate( input_changelog[existing_new_contributors_start:] ) if line.startswith("## [") ) - 1 ) del input_changelog[ existing_whats_changed_start : existing_new_contributors_start + existing_new_contributors_length ] # Now that we've cleared the previous sections, we will accurately # find if contributors have been previously mentioned in the changelog new_contributor_lines = [] input_changelog_str = "".join( input_changelog[existing_whats_changed_start:] ) for c in deduped_potential_new_contributors: if c["name"] not in input_changelog_str: new_contributor_lines.append(c["line"]) input_changelog[existing_whats_changed_start] = ( whats_changed_text + "\n\n## New Contributors\n" + "\n".join(new_contributor_lines) + "\n\n" ) else: click.echo(f"...creating new entry for {new_version_num}") write_changelog.write(f"\n{new_heading}\n## Highlights\n\n") write_changelog.write(whats_changed_text) write_changelog.write("\n## New Contributors\n\n") # Ensure contributor names don't appear in input_changelog list new_contributor_lines = [] input_changelog_str = "".join(input_changelog) for c in deduped_potential_new_contributors: if c["name"] not in input_changelog_str: new_contributor_lines.append(c["line"]) write_changelog.write("\n".join(new_contributor_lines)) write_changelog.write("\n") write_changelog.close() click.echo("Updating plugins/sqlfluff-templater-dbt/pyproject.toml") for filename in ["plugins/sqlfluff-templater-dbt/pyproject.toml"]: # NOTE: Toml files are always encoded in UTF-8. input_file = open(filename, "r", encoding="utf-8").readlines() # Regardless of platform, write newlines as \n write_file = open(filename, "w", encoding="utf-8", newline="\n") for line in input_file: if line.startswith("version"): line = f'version = "{new_version_num}"\n' elif line.startswith(' "sqlfluff=='): line = f' "sqlfluff=={new_version_num}",\n' write_file.write(line) write_file.close() keys = ["version"] if not is_pre_release: # Only update stable_version if it's not a pre-release. keys.append("stable_version") click.echo("Updating pyproject.toml") for filename in ["pyproject.toml"]: input_file = open(filename, "r", encoding="utf-8").readlines() # Regardless of platform, write newlines as \n write_file = open(filename, "w", encoding="utf-8", newline="\n") for line in input_file: for key in keys: if line.startswith(key): # For pyproject.toml we quote the version identifier. 
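                    # (e.g. producing version = "1.2.3" rather than a bare
                    # value; the version shown is illustrative.)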
line = f'{key} = "{new_version_num}"\n' break write_file.write(line) write_file.close() if not is_pre_release: click.echo("Updating gettingstarted.rst") for filename in ["docs/source/gettingstarted.rst"]: input_file = open(filename, "r").readlines() # Regardless of platform, write newlines as \n write_file = open(filename, "w", newline="\n") change_next_line = False for line in input_file: if change_next_line: line = f" {new_version_num}\n" change_next_line = False elif line.startswith(" $ sqlfluff version"): change_next_line = True write_file.write(line) write_file.close() click.echo("DONE") if __name__ == "__main__": cli()
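

# Example invocations from a local checkout (the token and version number
# below are illustrative placeholders, not real values):
#
#   python util.py clean-tests --path .test-reports
#   GITHUB_REPOSITORY_OWNER=sqlfluff GITHUB_TOKEN=ghp_xxx \
#       python util.py release 1.2.3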